Modify return pseudo-instructions to carry pairs of registers, one virtual and one real. This lets us constrain each virtual register to the real register specified by the ABI, instead of directly emitting moves into those real registers.
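To illustrate the mechanism, here is a minimal, self-contained Rust sketch. The types and names (Reg, RetPair, OperandCollector, reg_fixed_use) are illustrative stand-ins, not the exact Cranelift API:

    // Stand-in for a virtual or real register identifier.
    #[derive(Clone, Copy, Debug)]
    struct Reg(u32);

    /// One returned value: the virtual register holding it, paired
    /// with the real register the ABI assigns to it.
    struct RetPair {
        vreg: Reg, // virtual register produced by lowering
        preg: Reg, // real register required by the calling convention
    }

    /// Return pseudo-instruction carrying all returned values.
    struct Rets {
        rets: Vec<RetPair>,
    }

    /// Stand-in for the register allocator's operand collector.
    struct OperandCollector {
        constraints: Vec<(Reg, Reg)>,
    }

    impl OperandCollector {
        /// Record that `vreg` must live in `preg` at this point.
        fn reg_fixed_use(&mut self, vreg: Reg, preg: Reg) {
            self.constraints.push((vreg, preg));
        }
    }

    /// Instead of emitting a move into, say, %r2, operand collection
    /// records a fixed-register constraint for each pair; the
    /// allocator then inserts a move only when the value is not
    /// already in the required register.
    fn collect_operands(ret: &Rets, collector: &mut OperandCollector) {
        for pair in &ret.rets {
            collector.reg_fixed_use(pair.vreg, pair.preg);
        }
    }

The filetest that follows records the s390x code generated under this scheme.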
test compile precise-output
target s390x

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; CALL
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

function %call(i64) -> i64 {
    fn0 = %g(i64) -> i64

block0(v0: i64):
    v1 = call fn0(v0)
    return v1
}

; stmg %r14, %r15, 112(%r15)
; aghi %r15, -160
; virtual_sp_offset_adjust 160
; block0:
; bras %r1, 12 ; data %g + 0 ; lg %r5, 0(%r1)
; basr %r14, %r5
; lmg %r14, %r15, 272(%r15)
; br %r14
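
;; An i32 argument carrying the `uext` attribute is zero-extended to
;; 64 bits by the caller (the `llgfr` below) before the call.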
function %call_uext(i32) -> i64 {
    fn0 = %g(i32 uext) -> i64

block0(v0: i32):
    v1 = call fn0(v0)
    return v1
}

; stmg %r14, %r15, 112(%r15)
; aghi %r15, -160
; virtual_sp_offset_adjust 160
; block0:
; llgfr %r2, %r2
; bras %r1, 12 ; data %g + 0 ; lg %r3, 0(%r1)
; basr %r14, %r3
; lmg %r14, %r15, 272(%r15)
; br %r14
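
;; A `uext` return value is zero-extended by the callee before the
;; return; the extended value is constrained to the ABI return
;; register %r2 rather than moved there explicitly.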
function %ret_uext(i32) -> i32 uext {
block0(v0: i32):
    return v0
}

; block0:
; llgfr %r2, %r2
; br %r14
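
;; Same as %call_uext, but with sign extension (`lgfr`).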
function %call_sext(i32) -> i64 {
    fn0 = %g(i32 sext) -> i64

block0(v0: i32):
    v1 = call fn0(v0)
    return v1
}

; stmg %r14, %r15, 112(%r15)
; aghi %r15, -160
; virtual_sp_offset_adjust 160
; block0:
; lgfr %r2, %r2
; bras %r1, 12 ; data %g + 0 ; lg %r3, 0(%r1)
; basr %r14, %r3
; lmg %r14, %r15, 272(%r15)
; br %r14
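
;; A `sext` return value is sign-extended by the callee before the
;; return.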
function %ret_sext(i32) -> i32 sext {
block0(v0: i32):
    return v0
}

; block0:
; lgfr %r2, %r2
; br %r14
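
;; A colocated callee is reached with a direct `brasl` instead of
;; loading its address from a literal pool.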
function %call_colocated(i64) -> i64 {
    fn0 = colocated %g(i64) -> i64

block0(v0: i64):
    v1 = call fn0(v0)
    return v1
}

; stmg %r14, %r15, 112(%r15)
; aghi %r15, -160
; virtual_sp_offset_adjust 160
; block0:
; brasl %r14, %g
; lmg %r14, %r15, 272(%r15)
; br %r14
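
;; Identical body to %call_uext above; exercises the same
;; literal-pool call path with a zero-extended argument.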
function %f2(i32) -> i64 {
    fn0 = %g(i32 uext) -> i64

block0(v0: i32):
    v1 = call fn0(v0)
    return v1
}

; stmg %r14, %r15, 112(%r15)
; aghi %r15, -160
; virtual_sp_offset_adjust 160
; block0:
; llgfr %r2, %r2
; bras %r1, 12 ; data %g + 0 ; lg %r3, 0(%r1)
; basr %r14, %r3
; lmg %r14, %r15, 272(%r15)
; br %r14
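
;; An indirect call branches through the register holding the
;; function pointer (`basr %r14, %r3`).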
function %call_indirect(i64, i64) -> i64 {
    sig0 = (i64) -> i64
block0(v0: i64, v1: i64):
    v2 = call_indirect.i64 sig0, v1(v0)
    return v2
}

; stmg %r14, %r15, 112(%r15)
; aghi %r15, -160
; virtual_sp_offset_adjust 160
; block0:
; basr %r14, %r3
; lmg %r14, %r15, 272(%r15)
; br %r14
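
;; Arguments beyond the register set arrive on the stack; the narrow
;; values are widened to i64 in the body and summed.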
function %incoming_args(i64, i32, i32 uext, i32 sext, i16, i16 uext, i16 sext, i8, i8 uext, i8 sext) -> i64 {
block0(v0: i64, v1: i32, v2: i32, v3: i32, v4: i16, v5: i16, v6: i16, v7: i8, v8: i8, v9: i8):
    v10 = uextend.i64 v1
    v11 = uextend.i64 v2
    v12 = uextend.i64 v3
    v13 = uextend.i64 v4
    v14 = uextend.i64 v5
    v15 = uextend.i64 v6
    v16 = uextend.i64 v7
    v17 = uextend.i64 v8
    v18 = uextend.i64 v9
    v19 = iadd v0, v10
    v20 = iadd v11, v12
    v21 = iadd v13, v14
    v22 = iadd v15, v16
    v23 = iadd v17, v18
    v24 = iadd v19, v20
    v25 = iadd v21, v22
    v26 = iadd v23, v24
    v27 = iadd v25, v26
    return v27
}

; stmg %r6, %r15, 48(%r15)
; block0:
; lg %r11, 160(%r15)
; lg %r12, 168(%r15)
; llgc %r13, 183(%r15)
; lg %r14, 184(%r15)
; lg %r8, 192(%r15)
; llgfr %r3, %r3
; llgfr %r4, %r4
; llgfr %r7, %r5
; llghr %r6, %r6
; llghr %r5, %r11
; llghr %r12, %r12
; llgcr %r13, %r13
; llgcr %r14, %r14
; llgcr %r8, %r8
; agrk %r3, %r2, %r3
; agr %r4, %r7
; agrk %r5, %r6, %r5
; agrk %r2, %r12, %r13
; agrk %r12, %r14, %r8
; agr %r3, %r4
; agrk %r4, %r5, %r2
; agrk %r3, %r12, %r3
; agrk %r2, %r4, %r3
; lmg %r6, %r15, 48(%r15)
; br %r14
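
;; i128 values are passed by reference: the pointers arrive in
;; registers and on the stack, and the result is stored through the
;; hidden return-value pointer in %r2.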
function %incoming_args_i128(i128, i128, i128, i128, i128, i128, i128, i128) -> i128 {
block0(v0: i128, v1: i128, v2: i128, v3: i128, v4: i128, v5: i128, v6: i128, v7: i128):
    v8 = iadd v0, v1
    v9 = iadd v2, v3
    v10 = iadd v4, v5
    v11 = iadd v6, v7
    v12 = iadd v8, v9
    v13 = iadd v10, v11
    v14 = iadd v12, v13
    return v14
}

; block0:
; vl %v0, 0(%r3)
; vl %v1, 0(%r4)
; vl %v2, 0(%r5)
; vl %v3, 0(%r6)
; lg %r3, 160(%r15)
; vl %v4, 0(%r3)
; lg %r3, 168(%r15)
; vl %v5, 0(%r3)
; lg %r5, 176(%r15)
; vl %v6, 0(%r5)
; lg %r4, 184(%r15)
; vl %v7, 0(%r4)
; vaq %v16, %v0, %v1
; vaq %v17, %v2, %v3
; vaq %v18, %v4, %v5
; vaq %v19, %v6, %v7
; vaq %v16, %v16, %v17
; vaq %v17, %v18, %v19
; vaq %v16, %v16, %v17
; vst %v16, 0(%r2)
; br %r14
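
;; A struct-return (`sret`) pointer is passed like an ordinary
;; pointer argument in %r2.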
function %call_sret() -> i64 {
    fn0 = colocated %g(i64 sret)

block0:
    v1 = iconst.i64 0
    call fn0(v1)
    trap user0
}

; stmg %r14, %r15, 112(%r15)
; aghi %r15, -160
; virtual_sp_offset_adjust 160
; block0:
; lghi %r2, 0
; brasl %r14, %g
; trap