Remove some unnecessary moves in the x64 gen_memcpy implementation -- the generated call instruction already constrains the arguments to those registers.
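For context: gen_memcpy previously emitted explicit moves of the destination, source, and size into %rdi, %rsi, and %rdx before calling out to memcpy. Since the call instruction it generates already carries fixed-register constraints on its argument operands, the register allocator places those values in the right registers itself, making the explicit moves redundant. Below is a minimal sketch of the idea in Rust, using simplified hypothetical types (RealReg, VReg, Inst) and function names invented for illustration, not Cranelift's actual instruction or ABI definitions:

// Hypothetical, simplified model of the change -- not Cranelift's real types.
// A call instruction carries a list of (vreg, fixed real reg) "uses"; the
// register allocator must materialize each vreg in its paired real register
// at the call site, so no explicit moves need to be emitted beforehand.

#[derive(Clone, Copy, Debug)]
enum RealReg { Rdi, Rsi, Rdx }

#[derive(Clone, Copy, Debug)]
struct VReg(u32);

#[derive(Debug)]
enum Inst {
    /// Explicit register-to-register move (what the old lowering emitted).
    Move { dst: RealReg, src: VReg },
    /// Indirect call whose `uses` pin each argument vreg to a real register.
    CallUnknown { target: VReg, uses: Vec<(VReg, RealReg)> },
}

/// Old shape: move each argument into its ABI register, then emit a call
/// that carries no operand constraints of its own.
fn memcpy_call_with_moves(dst: VReg, src: VReg, size: VReg, callee: VReg) -> Vec<Inst> {
    vec![
        Inst::Move { dst: RealReg::Rdi, src: dst },
        Inst::Move { dst: RealReg::Rsi, src: src },
        Inst::Move { dst: RealReg::Rdx, src: size },
        Inst::CallUnknown { target: callee, uses: vec![] },
    ]
}

/// New shape: no moves; the call's operand constraints tell the register
/// allocator to place the arguments in %rdi/%rsi/%rdx itself.
fn memcpy_call_constrained(dst: VReg, src: VReg, size: VReg, callee: VReg) -> Vec<Inst> {
    vec![Inst::CallUnknown {
        target: callee,
        uses: vec![
            (dst, RealReg::Rdi),
            (src, RealReg::Rsi),
            (size, RealReg::Rdx),
        ],
    }]
}

fn main() {
    let (dst, src, size, callee) = (VReg(0), VReg(1), VReg(2), VReg(3));
    println!("before: {:?}", memcpy_call_with_moves(dst, src, size, callee));
    println!("after:  {:?}", memcpy_call_constrained(dst, src, size, callee));
}

The expected output below reflects this: the lea/movl instructions that remain before each memcpy call are the allocator materializing values in the constrained registers, while copies of values already living in the right register no longer appear.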
test compile precise-output
target x86_64

function u0:0(i64 sarg(64)) -> i8 system_v {
block0(v0: i64):
    v1 = load.i8 v0
    return v1
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; lea 16(%rbp), %rsi
; movzbq 0(%rsi), %rax
; movq %rbp, %rsp
; popq %rbp
; ret

function u0:1(i64 sarg(64), i64) -> i8 system_v {
block0(v0: i64, v1: i64):
    v2 = load.i8 v1
    v3 = load.i8 v0
    v4 = iadd.i8 v2, v3
    return v4
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; lea 16(%rbp), %rsi
; movzbq 0(%rdi), %rax
; movzbq 0(%rsi), %r9
; addl %eax, %r9d, %eax
; movq %rbp, %rsp
; popq %rbp
; ret

function u0:2(i64) -> i8 system_v {
    fn1 = colocated u0:0(i64 sarg(64)) -> i8 system_v

block0(v0: i64):
    v1 = call fn1(v0)
    return v1
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; movq %rdi, %rsi
; subq %rsp, $64, %rsp
; virtual_sp_offset_adjust 64
; lea 0(%rsp), %rdi
; movl $64, %edx
; load_ext_name %Memcpy+0, %r11
; call *%r11
; call User(userextname0)
; addq %rsp, $64, %rsp
; virtual_sp_offset_adjust -64
; movq %rbp, %rsp
; popq %rbp
; ret

function u0:3(i64, i64) -> i8 system_v {
    fn1 = colocated u0:0(i64, i64 sarg(64)) -> i8 system_v

block0(v0: i64, v1: i64):
    v2 = call fn1(v0, v1)
    return v2
}

; pushq %rbp
; movq %rsp, %rbp
; subq %rsp, $16, %rsp
; movq %r13, 0(%rsp)
; block0:
; movq %rdi, %r13
; subq %rsp, $64, %rsp
; virtual_sp_offset_adjust 64
; lea 0(%rsp), %rdi
; movl $64, %edx
; load_ext_name %Memcpy+0, %rax
; call *%rax
; movq %r13, %rdi
; call User(userextname0)
; addq %rsp, $64, %rsp
; virtual_sp_offset_adjust -64
; movq 0(%rsp), %r13
; addq %rsp, $16, %rsp
; movq %rbp, %rsp
; popq %rbp
; ret

function u0:4(i64 sarg(128), i64 sarg(64)) -> i8 system_v {
block0(v0: i64, v1: i64):
    v2 = load.i8 v0
    v3 = load.i8 v1
    v4 = iadd.i8 v2, v3
    return v4
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; lea 16(%rbp), %rsi
; lea 144(%rbp), %rdi
; movzbq 0(%rsi), %rax
; movzbq 0(%rdi), %r9
; addl %eax, %r9d, %eax
; movq %rbp, %rsp
; popq %rbp
; ret

function u0:5(i64, i64, i64) -> i8 system_v {
    fn1 = colocated u0:0(i64, i64 sarg(128), i64 sarg(64)) -> i8 system_v

block0(v0: i64, v1: i64, v2: i64):
    v3 = call fn1(v0, v1, v2)
    return v3
}

; pushq %rbp
; movq %rsp, %rbp
; subq %rsp, $16, %rsp
; movq %r12, 0(%rsp)
; movq %r14, 8(%rsp)
; block0:
; movq %rdx, %r14
; movq %rdi, %r12
; subq %rsp, $192, %rsp
; virtual_sp_offset_adjust 192
; lea 0(%rsp), %rdi
; movl $128, %edx
; load_ext_name %Memcpy+0, %rax
; call *%rax
; lea 128(%rsp), %rdi
; movl $64, %edx
; load_ext_name %Memcpy+0, %r11
; movq %r14, %rsi
; call *%r11
; movq %r12, %rdi
; call User(userextname0)
; addq %rsp, $192, %rsp
; virtual_sp_offset_adjust -192
; movq 0(%rsp), %r12
; movq 8(%rsp), %r14
; addq %rsp, $16, %rsp
; movq %rbp, %rsp
; popq %rbp
; ret