This PR switches the default backend on x86, for both the `cranelift-codegen` crate and for Wasmtime, to the new (`MachInst`-style, `VCode`-based) backend that has been under development and testing for some time now. The old backend is still available by default in builds with the `old-x86-backend` feature, or by requesting `BackendVariant::Legacy` from the appropriate APIs. As part of that switch, it adds some more runtime-configurable plumbing to the testing infrastructure so that tests can be run using the appropriate backend. `clif-util test` is now capable of parsing a backend selector option from filetests and instantiating the correct backend. CI has been updated so that the old x86 backend continues to run its tests, just as we used to run the new x64 backend separately. At some point, we will remove the old x86 backend entirely, once we are satisfied that the new backend has not caused any unforeseen issues and we do not need to revert.
84 lines
3.0 KiB
Plaintext
; binary emission of 64-bit code.
test binemit
set opt_level=speed_and_size
set is_pic                    ; PIC mode: non-colocated symbols below go through PLT/GOT relocations.
target x86_64 legacy haswell  ; `legacy` selects the old x86 backend (new MachInst backend is now the default).

; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/x86/binary64-pic.clif | llvm-mc -show-encoding -triple=x86_64
;

; Tests for i64 instructions.
; Binemit golden test: each `; bin:` comment below is the exact byte sequence the
; harness expects the encoder to emit, and each `; asm:` comment is the equivalent
; assembly (extractable for llvm-mc verification; see the command in the file header).
; Relocation placeholders such as PCRel4(%bar-4) denote a 4-byte PC-relative
; relocation (addend -4) whose field bytes are emitted as 00000000.
; NOTE(review): `stk_ovf` appears to stand for the bytes of the stack-overflow
; check emitted at call sites — confirm against the binemit harness.
function %I64() {
    sig0 = ()                       ; empty signature, reused by the indirect calls below
    fn0 = %foo()                    ; non-colocated function: called via PLT, address via GOT
    fn1 = colocated %bar()          ; colocated function: direct PC-relative call / lea

    gv0 = symbol %some_gv           ; non-colocated symbol: address loaded through the GOT
    gv1 = symbol colocated %some_gv ; colocated symbol: direct RIP-relative lea

    ; Use incoming_arg stack slots because they won't be relocated by the frame
    ; layout.
    ss0 = incoming_arg 8, offset 0
    ss1 = incoming_arg 1024, offset -1024
    ss2 = incoming_arg 1024, offset -2048
    ss3 = incoming_arg 8, offset -2056

block0:
    ; Colocated functions.

    ; asm: call foo
    call fn1() ; bin: stk_ovf e8 CallPCRel4(%bar-4) 00000000

    ; asm: lea 0x0(%rip), %rax
    [-,%rax] v0 = func_addr.i64 fn1 ; bin: 48 8d 05 PCRel4(%bar-4) 00000000
    ; asm: lea 0x0(%rip), %rsi
    [-,%rsi] v1 = func_addr.i64 fn1 ; bin: 48 8d 35 PCRel4(%bar-4) 00000000
    ; asm: lea 0x0(%rip), %r10
    [-,%r10] v2 = func_addr.i64 fn1 ; bin: 4c 8d 15 PCRel4(%bar-4) 00000000

    ; Same call through three registers, exercising REX-prefix variation (r10 needs 41).
    ; asm: call *%rax
    call_indirect sig0, v0() ; bin: stk_ovf ff d0
    ; asm: call *%rsi
    call_indirect sig0, v1() ; bin: stk_ovf ff d6
    ; asm: call *%r10
    call_indirect sig0, v2() ; bin: stk_ovf 41 ff d2

    ; Non-colocated functions.

    ; asm: call foo@PLT
    call fn0() ; bin: stk_ovf e8 CallPLTRel4(%foo-4) 00000000

    ; Non-colocated func_addr is a GOT load (mov), not a lea, since the symbol
    ; may live in another module.
    ; asm: mov 0x0(%rip), %rax
    [-,%rax] v100 = func_addr.i64 fn0 ; bin: 48 8b 05 GOTPCRel4(%foo-4) 00000000
    ; asm: mov 0x0(%rip), %rsi
    [-,%rsi] v101 = func_addr.i64 fn0 ; bin: 48 8b 35 GOTPCRel4(%foo-4) 00000000
    ; asm: mov 0x0(%rip), %r10
    [-,%r10] v102 = func_addr.i64 fn0 ; bin: 4c 8b 15 GOTPCRel4(%foo-4) 00000000

    ; asm: call *%rax
    call_indirect sig0, v100() ; bin: stk_ovf ff d0
    ; asm: call *%rsi
    call_indirect sig0, v101() ; bin: stk_ovf ff d6
    ; asm: call *%r10
    call_indirect sig0, v102() ; bin: stk_ovf 41 ff d2

    ; Non-colocated global value: GOT load (mov).
    ; asm: mov 0x0(%rip), %rcx
    [-,%rcx] v3 = symbol_value.i64 gv0 ; bin: 48 8b 0d GOTPCRel4(%some_gv-4) 00000000
    ; asm: mov 0x0(%rip), %rsi
    [-,%rsi] v4 = symbol_value.i64 gv0 ; bin: 48 8b 35 GOTPCRel4(%some_gv-4) 00000000
    ; asm: mov 0x0(%rip), %r10
    [-,%r10] v5 = symbol_value.i64 gv0 ; bin: 4c 8b 15 GOTPCRel4(%some_gv-4) 00000000

    ; Colocated global value: direct RIP-relative lea.
    ; asm: lea 0x0(%rip), %rcx
    [-,%rcx] v6 = symbol_value.i64 gv1 ; bin: 48 8d 0d PCRel4(%some_gv-4) 00000000
    ; asm: lea 0x0(%rip), %rsi
    [-,%rsi] v7 = symbol_value.i64 gv1 ; bin: 48 8d 35 PCRel4(%some_gv-4) 00000000
    ; asm: lea 0x0(%rip), %r10
    [-,%r10] v8 = symbol_value.i64 gv1 ; bin: 4c 8d 15 PCRel4(%some_gv-4) 00000000

    return
}