wasmtime/cranelift/filetests/filetests/isa/x64/struct-arg.clif
Chris Fallin (cb48ea406e): Switch default to new x86_64 backend.
This PR switches the default backend on x86, for both the
`cranelift-codegen` crate and for Wasmtime, to the new
(`MachInst`-style, `VCode`-based) backend that has been under
development and testing for some time now.

The old backend remains the default in builds that enable the
`old-x86-backend` feature, and it can also be selected explicitly by
requesting `BackendVariant::Legacy` from the appropriate APIs.
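
For illustration, selecting a variant programmatically looks roughly like
this. This is a minimal sketch: `isa::lookup_variant`, the
`Builder::finish(flags)` shape, and `TargetIsa::name()` are assumed from this
era's `cranelift-codegen` and should be checked against the crate.

    use std::str::FromStr;
    use cranelift_codegen::isa::{self, BackendVariant};
    use cranelift_codegen::settings;
    use target_lexicon::Triple;

    fn select_backend() {
        let triple = Triple::from_str("x86_64-unknown-linux-gnu").unwrap();
        // Request the old backend explicitly; BackendVariant::MachInst (or a
        // plain isa::lookup) selects the new backend. `lookup_variant` is
        // assumed from this era's cranelift-codegen API.
        let builder = isa::lookup_variant(triple, BackendVariant::Legacy)
            .expect("legacy variant requires the `old-x86-backend` feature");
        let flags = settings::Flags::new(settings::builder());
        let isa = builder.finish(flags);
        println!("selected backend: {}", isa.name());
    }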

As part of that switch, this PR adds runtime-configurable plumbing to the
testing infrastructure so that each test runs on the backend it targets:
`clif-util test` now parses a backend selector from a filetest's `target`
line and instantiates the matching backend, as the example below shows.
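
For example, the test file below pins itself to the new backend via the
selector on its `target` line (a test for the old backend would presumably
use a `legacy` selector in the same position; verify the exact spelling
against the filetest parser):

    test compile
    target x86_64 machinst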

CI has been updated so that the old x86 backend's tests continue to run
separately, just as the new x64 backend's tests did before the switch.

At some point, we will remove the old x86 backend entirely, once we are
satisfied that the new backend has not caused any unforeseen issues and
we do not need to revert.
Committed: 2021-04-02 11:35:53 -07:00

test compile
target x86_64 machinst
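;; Callee with a single by-value struct: sarg(64) is passed as an on-stack
;; copy, so the callee takes its address at 16(%rbp), just above the saved
;; %rbp and the return address.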
function u0:0(i64 sarg(64)) -> i8 system_v {
block0(v0: i64):
v1 = load.i8 v0
return v1
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: lea 16(%rbp), %rsi
; nextln: movzbq 0(%rsi), %rsi
; nextln: movq %rsi, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
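;; Struct arg plus an ordinary i64: the struct consumes no register, so the
;; plain i64 still arrives in %rdi.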
function u0:1(i64 sarg(64), i64) -> i8 system_v {
block0(v0: i64, v1: i64):
v2 = load.i8 v1
v3 = load.i8 v0
v4 = iadd.i8 v2, v3
return v4
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: lea 16(%rbp), %rsi
; nextln: movzbq 0(%rdi), %rdi
; nextln: movzbq 0(%rsi), %rsi
; nextln: addl %esi, %edi
; nextln: movq %rdi, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
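;; Caller side: the 64-byte struct is copied into freshly allocated outgoing
;; stack space via a memcpy libcall before the call is made.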
function u0:2(i64) -> i8 system_v {
fn1 = colocated u0:0(i64 sarg(64)) -> i8 system_v
block0(v0: i64):
v1 = call fn1(v0)
return v1
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movq %rdi, %rsi
; nextln: subq $$64, %rsp
; nextln: virtual_sp_offset_adjust 64
; nextln: lea 0(%rsp), %rdi
; nextln: movl $$64, %edx
; nextln: load_ext_name %Memcpy+0, %rcx
; nextln: call *%rcx
; nextln: call User { namespace: 0, index: 0 }
; nextln: addq $$64, %rsp
; nextln: virtual_sp_offset_adjust -64
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
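;; Caller with a register argument as well: %rdi must survive the memcpy, so
;; it is stashed in callee-saved %r12 and moved back into %rdi for the call.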
function u0:3(i64, i64) -> i8 system_v {
fn1 = colocated u0:0(i64, i64 sarg(64)) -> i8 system_v
block0(v0: i64, v1: i64):
v2 = call fn1(v0, v1)
return v2
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdi, %r12
; nextln: subq $$64, %rsp
; nextln: virtual_sp_offset_adjust 64
; nextln: lea 0(%rsp), %rdi
; nextln: movl $$64, %edx
; nextln: load_ext_name %Memcpy+0, %rcx
; nextln: call *%rcx
; nextln: movq %r12, %rdi
; nextln: call User { namespace: 0, index: 0 }
; nextln: addq $$64, %rsp
; nextln: virtual_sp_offset_adjust -64
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
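;; Callee with two struct args: the second copy sits above the first, at
;; 144(%rbp) = 16 + 128.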
function u0:4(i64 sarg(128), i64 sarg(64)) -> i8 system_v {
block0(v0: i64, v1: i64):
v2 = load.i8 v0
v3 = load.i8 v1
v4 = iadd.i8 v2, v3
return v4
}
; check: movq %rsp, %rbp
; nextln: lea 16(%rbp), %rsi
; nextln: lea 144(%rbp), %rdi
; nextln: movzbq 0(%rsi), %rsi
; nextln: movzbq 0(%rdi), %rdi
; nextln: addl %edi, %esi
; nextln: movq %rsi, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
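;; Caller passing two structs: 192 bytes of outgoing space, two memcpy
;; libcalls, and live values preserved in callee-saved %r12/%r13 across them.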
function u0:5(i64, i64, i64) -> i8 system_v {
fn1 = colocated u0:0(i64, i64 sarg(128), i64 sarg(64)) -> i8 system_v
block0(v0: i64, v1: i64, v2: i64):
v3 = call fn1(v0, v1, v2)
return v3
}
; check: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %r13, 8(%rsp)
; nextln: movq %rdi, %r12
; nextln: movq %rdx, %r13
; nextln: subq $$192, %rsp
; nextln: virtual_sp_offset_adjust 192
; nextln: lea 0(%rsp), %rdi
; nextln: movl $$128, %edx
; nextln: load_ext_name %Memcpy+0, %rcx
; nextln: call *%rcx
; nextln: lea 128(%rsp), %rdi
; nextln: movq %r13, %rsi
; nextln: movl $$64, %edx
; nextln: load_ext_name %Memcpy+0, %rcx
; nextln: call *%rcx
; nextln: movq %r12, %rdi
; nextln: call User { namespace: 0, index: 0 }
; nextln: addq $$192, %rsp
; nextln: virtual_sp_offset_adjust -192
; nextln: movq 0(%rsp), %r12
; nextln: movq 8(%rsp), %r13
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret