Remove the old x86 backend

bjorn3
2021-06-18 17:28:55 +02:00
parent e989caf337
commit 9e34df33b9
246 changed files with 76 additions and 28804 deletions

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function %amode_add(i64, i64) -> i64 {
block0(v0: i64, v1: i64):

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function %f0(b1, i32, i32) -> i32 {
; check: pushq %rbp

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function %f(i32, i32) -> i32 {
block0(v0: i32, v1: i32):

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function %f0(i32, i32) -> i32 {
block0(v0: i32, v1: i32):

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
;; system_v has first param in %rdi, fastcall in %rcx
function %one_arg(i32) system_v {
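A minimal Rust sketch of the first-GPR rule this test exercises (the helper and its string-based interface are purely illustrative, not Cranelift API):

    /// First integer argument register, per the conventions noted above:
    /// System V uses %rdi, Windows fastcall uses %rcx.
    fn first_int_arg_reg(call_conv: &str) -> Option<&'static str> {
        match call_conv {
            "system_v" => Some("%rdi"),
            "windows_fastcall" => Some("%rcx"),
            _ => None,
        }
    }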

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst has_lzcnt
target x86_64 has_lzcnt
function %clz(i64) -> i64 {
block0(v0: i64):

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function %f0(i64, i64) -> i64, i64 {
block0(v0: i64, v1: i64):

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst has_bmi1
target x86_64 has_bmi1
function %ctz(i64) -> i64 {
block0(v0: i64):

View File

@@ -1,6 +1,6 @@
test compile
set avoid_div_traps=false
target x86_64 machinst
target x86_64
;; We should get the checked-div/rem sequence (`srem` pseudoinst below) even
;; when `avoid_div_traps` above is false (i.e. even when the host is normally
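For context: x86 `idiv` faults (#DE) both on a zero divisor and on INT_MIN / -1, which is why a checked sequence is wanted regardless of the flag. A minimal Rust sketch of the `srem` semantics the pseudoinst provides (illustrative, not the backend's actual code):

    // srem: a zero divisor traps; i64::MIN srem -1 is defined as 0 and
    // must be special-cased so it never reaches idiv, which would fault.
    fn srem64(n: i64, d: i64) -> i64 {
        if d == 0 {
            panic!("int_divz"); // lowered as a trap in Cranelift
        }
        if n == i64::MIN && d == -1 {
            return 0;
        }
        n % d
    }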

View File

@@ -1,7 +1,7 @@
test compile
set enable_llvm_abi_extensions=true
set unwind_info=true
target x86_64 machinst
target x86_64
function %f0(i64, i64, i64, i64) -> i64 windows_fastcall {
block0(v0: i64, v1: i64, v2: i64, v3: i64):
@@ -206,7 +206,7 @@ block0(v0: i64):
v18 = load.f64 v0+136
v19 = load.f64 v0+144
v20 = load.f64 v0+152
v21 = fadd.f64 v1, v2
v22 = fadd.f64 v3, v4
v23 = fadd.f64 v5, v6

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function %f(f64) -> f64 {
block0(v0: f64):

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function %f(i32, i64 vmctx) -> i64 {
gv0 = vmctx

View File

@@ -1,6 +1,6 @@
test compile
set enable_llvm_abi_extensions=true
target x86_64 machinst
target x86_64
function %f0(i128, i128) -> i128 {
; check: pushq %rbp
@@ -190,7 +190,7 @@ block0(v0: i128, v1: i128):
; nextln: orq %rax, %r8
; nextln: andq $$1, %r8
; nextln: setnz %r8b
v4 = icmp slt v0, v1
; check: cmpq %rcx, %rsi
; nextln: setl %r9b
@@ -201,7 +201,7 @@ block0(v0: i128, v1: i128):
; nextln: orq %r9, %r10
; nextln: andq $$1, %r10
; nextln: setnz %r9b
v5 = icmp sle v0, v1
; check: cmpq %rcx, %rsi
; nextln: setl %r10b
@@ -212,7 +212,7 @@ block0(v0: i128, v1: i128):
; nextln: orq %r10, %r11
; nextln: andq $$1, %r11
; nextln: setnz %r10b
v6 = icmp sgt v0, v1
; check: cmpq %rcx, %rsi
; nextln: setnle %r11b
@@ -307,7 +307,7 @@ block0(v0: i128):
; nextln: setz %sil
; nextln: andb %dil, %sil
; nextln: jnz label1; j label2
jump block2
block1:
@@ -725,7 +725,7 @@ block2(v6: i128):
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
}
function %f24(i128, i128, i64, i128, i128, i128) -> i128 {
@@ -1106,4 +1106,4 @@ block0(v0: i128, v1: i128):
; nextln: movq %rcx, %rdx
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; nextln: ret

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function %add_from_mem_u32_1(i64, i32) -> i32 {
block0(v0: i64, v1: i32):

View File

@@ -1,6 +1,6 @@
test compile
set enable_simd
target x86_64 machinst skylake
target x86_64 skylake
function %move_registers(i32x4) -> b8x16 {
block0(v0: i32x4):

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst has_popcnt has_sse42
target x86_64 has_popcnt has_sse42
function %popcnt(i64) -> i64 {
block0(v0: i64):

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function %popcnt64(i64) -> i64 {
block0(v0: i64):

View File

@@ -1,6 +1,6 @@
test compile
set enable_probestack=true
target x86_64 machinst
target x86_64
function %f1() -> i64 {
ss0 = explicit_slot 100000

View File

@@ -1,6 +1,6 @@
test compile
set enable_llvm_abi_extensions=true
target x86_64 machinst
target x86_64
function %f0(i32, i128, i128) -> i128 {
; check: pushq %rbp
@@ -24,6 +24,6 @@ block0(v0: i32, v1: i128, v2: i128):
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
}

View File

@@ -1,6 +1,6 @@
test compile
set enable_simd
target x86_64 machinst skylake
target x86_64 skylake
function %bitselect_i16x8() -> i16x8 {
block0:

View File

@@ -1,6 +1,6 @@
test compile
set enable_simd
target x86_64 machinst skylake
target x86_64 skylake
function %icmp_ne_32x4(i32x4, i32x4) -> b32x4 {
block0(v0: i32x4, v1: i32x4):

View File

@@ -1,6 +1,6 @@
test compile
set enable_simd
target x86_64 machinst has_ssse3 has_sse41
target x86_64 has_ssse3 has_sse41
;; shuffle

View File

@@ -1,6 +1,6 @@
test compile
set enable_simd
target x86_64 machinst skylake
target x86_64 skylake
function %bnot_b32x4(b32x4) -> b32x4 {
block0(v0: b32x4):

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
;; The goal of this test is to ensure that stack spills of an integer value,
;; whose width is less than the machine word's size, cause the full word to be
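A hedged Rust sketch of the property under test, assuming the spill is widened by zero-extension (the comment is cut off above, so the extension kind is an assumption):

    // Spilling an i8 writes the full 8-byte slot, so a later full-width
    // reload of the slot never sees stale upper bytes.
    fn spill_i8(slot: &mut u64, v: u8) {
        *slot = v as u64; // store the whole machine word (assumed zero-extended)
    }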

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function u0:0(i64 sarg(64)) -> i8 system_v {
block0(v0: i64):

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function %f0(i64 sret) {
block0(v0: i64):

View File

@@ -1,6 +1,6 @@
test compile
set tls_model=elf_gd
target x86_64 machinst
target x86_64
function u0:0(i32) -> i64 {
gv0 = symbol colocated tls u1:0

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
function %elide_uextend_add(i32, i32) -> i64 {
block0(v0: i32, v1: i32):

View File

@@ -1,5 +1,5 @@
test compile
target x86_64 machinst
target x86_64
;; From: https://github.com/bytecodealliance/wasmtime/issues/2670

View File

@@ -1,13 +0,0 @@
test regalloc
target i686 legacy
; %rdi can't be used in a movsbl instruction, so test that the register
; allocator can move it to a register that can be used.
function %test(i32 [%rdi]) -> i32 system_v {
block0(v0: i32 [%rdi]):
v1 = ireduce.i8 v0
v2 = sextend.i32 v1
return v2
}
; check: regmove v1, %rdi -> %rax
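The constraint being tested, as a Rust sketch (hypothetical helper, for illustration): in 32-bit mode only %eax/%ecx/%edx/%ebx have addressable low-byte forms, so the movsbl source must be moved into one of them.

    /// Whether a GPR has an 8-bit alias usable without a REX prefix,
    /// i.e. usable in 32-bit mode.
    fn has_low_byte_in_32bit_mode(gpr: &str) -> bool {
        matches!(gpr, "rax" | "rcx" | "rdx" | "rbx")
    }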

View File

@@ -1,19 +0,0 @@
test compile
target x86_64 legacy haswell
function %foo(i64, i64, i64, i32) -> b1 system_v {
block3(v0: i64, v1: i64, v2: i64, v3: i32):
v5 = icmp ne v2, v2
v8 = iconst.i64 0
jump block2(v8, v3, v5)
block2(v10: i64, v30: i32, v37: b1):
v18 = load.i32 notrap aligned v2
v27 = iadd.i64 v10, v10
v31 = icmp eq v30, v30
brz v31, block2(v27, v30, v37)
jump block0(v37)
block0(v35: b1):
return v35
}

View File

@@ -1,20 +0,0 @@
; Test the legalization of function signatures.
test legalizer
target i686 legacy
; regex: V=v\d+
function %f() {
sig0 = (i32) -> i32 system_v
; check: sig0 = (i32 [0]) -> i32 [%rax] system_v
sig1 = (i64) -> b1 system_v
; check: sig1 = (i32 [0], i32 [4]) -> b1 [%rax] system_v
sig2 = (f32, i64) -> f64 system_v
; check: sig2 = (f32 [0], i32 [4], i32 [8]) -> f64 [%xmm0] system_v
block0:
return
}
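The splitting rule the checks above encode, as a Rust sketch (illustrative): on a 32-bit target an i64 argument becomes two i32 halves, presumably low word at the lower stack offset since x86 is little-endian.

    fn split_i64_arg(v: i64) -> (u32, u32) {
        (v as u32, (v >> 32) as u32) // (offset 0, offset 4)
    }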

View File

@@ -1,37 +0,0 @@
; Test the legalization of function signatures.
test legalizer
target x86_64 legacy
; regex: V=v\d+
function %f() {
sig0 = (i32) -> i32 system_v
; check: sig0 = (i32 [%rdi]) -> i32 [%rax] system_v
sig1 = (i64) -> b1 system_v
; check: sig1 = (i64 [%rdi]) -> b1 [%rax] system_v
sig2 = (f32, i64) -> f64 system_v
; check: sig2 = (f32 [%xmm0], i64 [%rdi]) -> f64 [%xmm0] system_v
sig3 = () -> i128 system_v
; check: sig3 = () -> i64 [%rax], i64 [%rdx] system_v
sig4 = (i128) -> i128 system_v
; check: sig4 = (i64 [%rdi], i64 [%rsi]) -> i64 [%rax], i64 [%rdx] system_v
block0:
return
}
function %pass_stack_int64(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 vmctx) baldrdash_system_v {
sig0 = (i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 vmctx) baldrdash_system_v
fn0 = u0:0 sig0
block0(v0: i64, v1: i64, v2: i64, v3: i64, v4: i64, v5: i64, v6: i64, v7: i64, v8: i64, v9: i64, v10: i64, v11: i64, v12: i64, v13: i64, v14: i64, v15: i64, v16: i64, v17: i64, v18: i64, v19: i64, v20: i64):
call fn0(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20)
jump block1
block1:
return
}

View File

@@ -1,25 +0,0 @@
; binary emission of 32-bit code.
test binemit
set opt_level=speed_and_size
set emit_all_ones_funcaddrs
target i686 legacy haswell
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/x86/allones_funcaddrs32.clif | llvm-mc -show-encoding -triple=i386
;
; Tests from binary32.clif affected by emit_all_ones_funcaddrs.
function %I32() {
sig0 = ()
fn0 = %foo()
block0:
; asm: movl $-1, %ecx
[-,%rcx] v400 = func_addr.i32 fn0 ; bin: b9 Abs4(%foo) ffffffff
; asm: movl $-1, %esi
[-,%rsi] v401 = func_addr.i32 fn0 ; bin: be Abs4(%foo) ffffffff
return ; bin: c3
}

View File

@@ -1,27 +0,0 @@
; binary emission of 64-bit code.
test binemit
set opt_level=speed_and_size
set emit_all_ones_funcaddrs
target x86_64 legacy haswell
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/x86/allones_funcaddrs64.clif | llvm-mc -show-encoding -triple=x86_64
;
; Tests from binary64.clif affected by emit_all_ones_funcaddrs.
function %I64() {
sig0 = ()
fn0 = %foo()
block0:
; asm: movabsq $-1, %rcx
[-,%rcx] v400 = func_addr.i64 fn0 ; bin: 48 b9 Abs8(%foo) ffffffffffffffff
; asm: movabsq $-1, %rsi
[-,%rsi] v401 = func_addr.i64 fn0 ; bin: 48 be Abs8(%foo) ffffffffffffffff
; asm: movabsq $-1, %r10
[-,%r10] v402 = func_addr.i64 fn0 ; bin: 49 ba Abs8(%foo) ffffffffffffffff
return ; bin: c3
}
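A sketch of what `emit_all_ones_funcaddrs` changes (the helper is hypothetical): the absolute-address field of a `func_addr` is pre-filled with all ones instead of zeros, presumably so an unrelocated slot is an obviously invalid pointer.

    fn emit_abs_addr_placeholder(out: &mut Vec<u8>, size: usize, all_ones: bool) {
        let fill = if all_ones { 0xff } else { 0x00 };
        out.resize(out.len() + size, fill); // Abs4(...) ffffffff / Abs8(...) ff..ff
    }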

View File

@@ -1,14 +0,0 @@
test compile
set enable_probestack=false
target i686 legacy
function u0:0(i32 vmctx) baldrdash_system_v {
sig0 = (i32 vmctx, i32 sigid) baldrdash_system_v
block0(v0: i32):
v2 = iconst.i32 0
v8 = iconst.i32 0
v9 = iconst.i32 0
call_indirect sig0, v8(v9, v2)
trap user0
}

View File

@@ -1,92 +0,0 @@
test compile
target x86_64 legacy baseline
; clz/ctz on 64 bit operands
function %i64_clz(i64) -> i64 {
block0(v10: i64):
v11 = clz v10
; check: x86_bsr
; check: selectif.i64
return v11
}
function %i64_ctz(i64) -> i64 {
block1(v20: i64):
v21 = ctz v20
; check: x86_bsf
; check: selectif.i64
return v21
}
; clz/ctz on 32 bit operands
function %i32_clz(i32) -> i32 {
block0(v10: i32):
v11 = clz v10
; check: x86_bsr
; check: selectif.i32
return v11
}
function %i32_ctz(i32) -> i32 {
block1(v20: i32):
v21 = ctz v20
; check: x86_bsf
; check: selectif.i32
return v21
}
; popcount on 64 bit operands
function %i64_popcount(i64) -> i64 {
block0(v30: i64):
v31 = popcnt v30;
; check: ushr_imm
; check: iconst.i64
; check: band
; check: isub
; check: ushr_imm
; check: band
; check: isub
; check: ushr_imm
; check: band
; check: isub
; check: ushr_imm
; check: iadd
; check: iconst.i64
; check: band
; check: iconst.i64
; check: imul
; check: ushr_imm
return v31;
}
; popcount on 32 bit operands
function %i32_popcount(i32) -> i32 {
block0(v40: i32):
v41 = popcnt v40;
; check: ushr_imm
; check: iconst.i32
; check: band
; check: isub
; check: ushr_imm
; check: band
; check: isub
; check: ushr_imm
; check: band
; check: isub
; check: ushr_imm
; check: iadd
; check: iconst.i32
; check: band
; check: iconst.i32
; check: imul
; check: ushr_imm
return v41;
}
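The `check` lines above spell out the classic mask-and-subtract popcount the legalizer expands to: three `x - ((x >> k) & 0x7777...)` steps reduce each nibble to its own bit count, then the `0x0101...` multiply sums the nibbles into the top byte. A Rust rendering of the same algorithm for i64:

    fn popcnt64(x: u64) -> u64 {
        const M: u64 = 0x7777_7777_7777_7777;
        let a = (x >> 1) & M;
        let b = (a >> 1) & M;
        let c = (b >> 1) & M;
        let n = x - a - b - c; // each nibble now holds its own popcount
        let n = (n + (n >> 4)) & 0x0f0f_0f0f_0f0f_0f0f; // per-byte sums
        n.wrapping_mul(0x0101_0101_0101_0101) >> 56 // total lands in the top byte
    }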

View File

@@ -1,87 +0,0 @@
test binemit
set opt_level=speed_and_size
target x86_64 legacy baseline
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/x86/baseline_clz_ctz_popcount_encoding.clif | llvm-mc -show-encoding -triple=x86_64
;
function %Foo() {
block0:
; 64-bit wide bsf
[-,%r11] v10 = iconst.i64 0x1234
; asm: bsfq %r11, %rcx
[-,%rcx,%rflags] v11, v12 = x86_bsf v10 ; bin: 49 0f bc cb
[-,%rdx] v14 = iconst.i64 0x5678
; asm: bsfq %rdx, %r12
[-,%r12,%rflags] v15, v16 = x86_bsf v14 ; bin: 4c 0f bc e2
; asm: bsfq %rdx, %rdi
[-,%rdi,%rflags] v17, v18 = x86_bsf v14 ; bin: 48 0f bc fa
; 32-bit wide bsf
[-,%r11] v20 = iconst.i32 0x1234
; asm: bsfl %r11d, %ecx
[-,%rcx,%rflags] v21, v22 = x86_bsf v20 ; bin: 41 0f bc cb
[-,%rdx] v24 = iconst.i32 0x5678
; asm: bsfl %edx, %r12d
[-,%r12,%rflags] v25, v26 = x86_bsf v24 ; bin: 44 0f bc e2
; asm: bsfl %edx, %esi
[-,%rsi,%rflags] v27, v28 = x86_bsf v24 ; bin: 0f bc f2
; 64-bit wide bsr
[-,%r11] v30 = iconst.i64 0x1234
; asm: bsrq %r11, %rcx
[-,%rcx,%rflags] v31, v32 = x86_bsr v30 ; bin: 49 0f bd cb
[-,%rdx] v34 = iconst.i64 0x5678
; asm: bsrq %rdx, %r12
[-,%r12,%rflags] v35, v36 = x86_bsr v34 ; bin: 4c 0f bd e2
; asm: bsrq %rdx, %rdi
[-,%rdi,%rflags] v37, v38 = x86_bsr v34 ; bin: 48 0f bd fa
; 32-bit wide bsr
[-,%r11] v40 = iconst.i32 0x1234
; asm: bsrl %r11d, %ecx
[-,%rcx,%rflags] v41, v42 = x86_bsr v40 ; bin: 41 0f bd cb
[-,%rdx] v44 = iconst.i32 0x5678
; asm: bsrl %edx, %r12d
[-,%r12,%rflags] v45, v46 = x86_bsr v44 ; bin: 44 0f bd e2
; asm: bsrl %edx, %esi
[-,%rsi,%rflags] v47, v48 = x86_bsr v44 ; bin: 0f bd f2
; 64-bit wide cmov
; asm: cmoveq %r11, %rdx
[-,%rdx] v51 = selectif.i64 eq v48, v30, v34 ; bin: 49 0f 44 d3
; asm: cmoveq %rdi, %rdx
[-,%rdx] v52 = selectif.i64 eq v48, v37, v34 ; bin: 48 0f 44 d7
; 32-bit wide cmov
; asm: cmovnel %r11d, %edx
[-,%rdx] v60 = selectif.i32 ne v48, v40, v44 ; bin: 41 0f 45 d3
; asm: cmovlel %esi, %edx
[-,%rdx] v61 = selectif.i32 sle v48, v27, v44 ; bin: 0f 4e d6
trap user0
}

View File

@@ -1,557 +0,0 @@
; Binary emission of 32-bit floating point code.
test binemit
target i686 legacy haswell
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/x86/binary32-float.clif | llvm-mc -show-encoding -triple=i386
;
function %F32() {
ss0 = incoming_arg 8, offset 0
ss1 = incoming_arg 1024, offset -1024
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
block0:
[-,%rcx] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
; asm: cvtsi2ss %ecx, %xmm5
[-,%xmm5] v10 = fcvt_from_sint.f32 v0 ; bin: f3 0f 2a e9
; asm: cvtsi2ss %esi, %xmm2
[-,%xmm2] v11 = fcvt_from_sint.f32 v1 ; bin: f3 0f 2a d6
; asm: cvtss2sd %xmm2, %xmm5
[-,%xmm5] v12 = fpromote.f64 v11 ; bin: f3 0f 5a ea
; asm: cvtss2sd %xmm5, %xmm2
[-,%xmm2] v13 = fpromote.f64 v10 ; bin: f3 0f 5a d5
; asm: movd %ecx, %xmm5
[-,%xmm5] v14 = bitcast.f32 v0 ; bin: 66 0f 6e e9
; asm: movd %esi, %xmm2
[-,%xmm2] v15 = bitcast.f32 v1 ; bin: 66 0f 6e d6
; asm: movd %xmm5, %ecx
[-,%rcx] v16 = bitcast.i32 v10 ; bin: 66 0f 7e e9
; asm: movd %xmm2, %esi
[-,%rsi] v17 = bitcast.i32 v11 ; bin: 66 0f 7e d6
; asm: movaps %xmm2, %xmm5
[-,%xmm5] v18 = copy v11 ; bin: 0f 28 ea
; asm: movaps %xmm5, %xmm2
[-,%xmm2] v19 = copy v10 ; bin: 0f 28 d5
; asm: movaps %xmm2, %xmm5
regmove v19, %xmm2 -> %xmm5 ; bin: 0f 28 ea
; asm: movaps %xmm5, %xmm2
regmove v19, %xmm5 -> %xmm2 ; bin: 0f 28 d5
; Binary arithmetic.
; asm: addss %xmm2, %xmm5
[-,%xmm5] v20 = fadd v10, v11 ; bin: f3 0f 58 ea
; asm: addss %xmm5, %xmm2
[-,%xmm2] v21 = fadd v11, v10 ; bin: f3 0f 58 d5
; asm: subss %xmm2, %xmm5
[-,%xmm5] v22 = fsub v10, v11 ; bin: f3 0f 5c ea
; asm: subss %xmm5, %xmm2
[-,%xmm2] v23 = fsub v11, v10 ; bin: f3 0f 5c d5
; asm: mulss %xmm2, %xmm5
[-,%xmm5] v24 = fmul v10, v11 ; bin: f3 0f 59 ea
; asm: mulss %xmm5, %xmm2
[-,%xmm2] v25 = fmul v11, v10 ; bin: f3 0f 59 d5
; asm: divss %xmm2, %xmm5
[-,%xmm5] v26 = fdiv v10, v11 ; bin: f3 0f 5e ea
; asm: divss %xmm5, %xmm2
[-,%xmm2] v27 = fdiv v11, v10 ; bin: f3 0f 5e d5
; Bitwise ops.
; We use the *ps SSE instructions for everything because they are smaller.
; asm: andps %xmm2, %xmm5
[-,%xmm5] v30 = band v10, v11 ; bin: 0f 54 ea
; asm: andps %xmm5, %xmm2
[-,%xmm2] v31 = band v11, v10 ; bin: 0f 54 d5
; asm: andnps %xmm2, %xmm5
[-,%xmm5] v32 = band_not v11, v10 ; bin: 0f 55 ea
; asm: andnps %xmm5, %xmm2
[-,%xmm2] v33 = band_not v10, v11 ; bin: 0f 55 d5
; asm: orps %xmm2, %xmm5
[-,%xmm5] v34 = bor v10, v11 ; bin: 0f 56 ea
; asm: orps %xmm5, %xmm2
[-,%xmm2] v35 = bor v11, v10 ; bin: 0f 56 d5
; asm: xorps %xmm2, %xmm5
[-,%xmm5] v36 = bxor v10, v11 ; bin: 0f 57 ea
; asm: xorps %xmm5, %xmm2
[-,%xmm2] v37 = bxor v11, v10 ; bin: 0f 57 d5
; Convert float to int. (No i64 dest on i386).
; asm: cvttss2si %xmm5, %ecx
[-,%rcx] v40 = x86_cvtt2si.i32 v10 ; bin: f3 0f 2c cd
; asm: cvttss2si %xmm2, %esi
[-,%rsi] v41 = x86_cvtt2si.i32 v11 ; bin: f3 0f 2c f2
; Min/max.
; asm: minss %xmm2, %xmm5
[-,%xmm5] v42 = x86_fmin v10, v11 ; bin: f3 0f 5d ea
; asm: minss %xmm5, %xmm2
[-,%xmm2] v43 = x86_fmin v11, v10 ; bin: f3 0f 5d d5
; asm: maxss %xmm2, %xmm5
[-,%xmm5] v44 = x86_fmax v10, v11 ; bin: f3 0f 5f ea
; asm: maxss %xmm5, %xmm2
[-,%xmm2] v45 = x86_fmax v11, v10 ; bin: f3 0f 5f d5
; Unary arithmetic.
; asm: sqrtss %xmm5, %xmm2
[-,%xmm2] v50 = sqrt v10 ; bin: f3 0f 51 d5
; asm: sqrtss %xmm2, %xmm5
[-,%xmm5] v51 = sqrt v11 ; bin: f3 0f 51 ea
; asm: roundss $0, %xmm5, %xmm4
[-,%xmm4] v52 = nearest v10 ; bin: 66 0f 3a 0a e5 00
; asm: roundss $0, %xmm2, %xmm5
[-,%xmm5] v53 = nearest v11 ; bin: 66 0f 3a 0a ea 00
; asm: roundss $0, %xmm5, %xmm2
[-,%xmm2] v54 = nearest v10 ; bin: 66 0f 3a 0a d5 00
; asm: roundss $1, %xmm5, %xmm4
[-,%xmm4] v55 = floor v10 ; bin: 66 0f 3a 0a e5 01
; asm: roundss $1, %xmm2, %xmm5
[-,%xmm5] v56 = floor v11 ; bin: 66 0f 3a 0a ea 01
; asm: roundss $1, %xmm5, %xmm2
[-,%xmm2] v57 = floor v10 ; bin: 66 0f 3a 0a d5 01
; asm: roundss $2, %xmm5, %xmm4
[-,%xmm4] v58 = ceil v10 ; bin: 66 0f 3a 0a e5 02
; asm: roundss $2, %xmm2, %xmm5
[-,%xmm5] v59 = ceil v11 ; bin: 66 0f 3a 0a ea 02
; asm: roundss $2, %xmm5, %xmm2
[-,%xmm2] v60 = ceil v10 ; bin: 66 0f 3a 0a d5 02
; asm: roundss $3, %xmm5, %xmm4
[-,%xmm4] v61 = trunc v10 ; bin: 66 0f 3a 0a e5 03
; asm: roundss $3, %xmm2, %xmm5
[-,%xmm5] v62 = trunc v11 ; bin: 66 0f 3a 0a ea 03
; asm: roundss $3, %xmm5, %xmm2
[-,%xmm2] v63 = trunc v10 ; bin: 66 0f 3a 0a d5 03
; Load/Store
; asm: movss (%ecx), %xmm5
[-,%xmm5] v100 = load.f32 v0 ; bin: heap_oob f3 0f 10 29
; asm: movss (%esi), %xmm2
[-,%xmm2] v101 = load.f32 v1 ; bin: heap_oob f3 0f 10 16
; asm: movss 50(%ecx), %xmm5
[-,%xmm5] v110 = load.f32 v0+50 ; bin: heap_oob f3 0f 10 69 32
; asm: movss -50(%esi), %xmm2
[-,%xmm2] v111 = load.f32 v1-50 ; bin: heap_oob f3 0f 10 56 ce
; asm: movss 10000(%ecx), %xmm5
[-,%xmm5] v120 = load.f32 v0+10000 ; bin: heap_oob f3 0f 10 a9 00002710
; asm: movss -10000(%esi), %xmm2
[-,%xmm2] v121 = load.f32 v1-10000 ; bin: heap_oob f3 0f 10 96 ffffd8f0
; asm: movss %xmm5, (%ecx)
[-] store.f32 v100, v0 ; bin: heap_oob f3 0f 11 29
; asm: movss %xmm2, (%esi)
[-] store.f32 v101, v1 ; bin: heap_oob f3 0f 11 16
; asm: movss %xmm5, 50(%ecx)
[-] store.f32 v100, v0+50 ; bin: heap_oob f3 0f 11 69 32
; asm: movss %xmm2, -50(%esi)
[-] store.f32 v101, v1-50 ; bin: heap_oob f3 0f 11 56 ce
; asm: movss %xmm5, 10000(%ecx)
[-] store.f32 v100, v0+10000 ; bin: heap_oob f3 0f 11 a9 00002710
; asm: movss %xmm2, -10000(%esi)
[-] store.f32 v101, v1-10000 ; bin: heap_oob f3 0f 11 96 ffffd8f0
; Spill / Fill.
; asm: movss %xmm5, 1032(%esp)
[-,ss1] v200 = spill v100 ; bin: stk_ovf f3 0f 11 ac 24 00000408
; asm: movss %xmm2, 1032(%esp)
[-,ss1] v201 = spill v101 ; bin: stk_ovf f3 0f 11 94 24 00000408
; asm: movss 1032(%esp), %xmm5
[-,%xmm5] v210 = fill v200 ; bin: f3 0f 10 ac 24 00000408
; asm: movss 1032(%esp), %xmm2
[-,%xmm2] v211 = fill v201 ; bin: f3 0f 10 94 24 00000408
; asm: movss %xmm5, 1032(%esp)
regspill v100, %xmm5 -> ss1 ; bin: stk_ovf f3 0f 11 ac 24 00000408
; asm: movss 1032(%esp), %xmm5
regfill v100, ss1 -> %xmm5 ; bin: f3 0f 10 ac 24 00000408
; Comparisons.
;
; Only `supported_floatccs` are tested here. Others are handled by
; legalization patterns.
; asm: ucomiss %xmm2, %xmm5
; asm: setnp %bl
[-,%rbx] v300 = fcmp ord v10, v11 ; bin: 0f 2e ea 0f 9b c3
; asm: ucomiss %xmm5, %xmm2
; asm: setp %bl
[-,%rbx] v301 = fcmp uno v11, v10 ; bin: 0f 2e d5 0f 9a c3
; asm: ucomiss %xmm2, %xmm5
; asm: setne %dl
[-,%rdx] v302 = fcmp one v10, v11 ; bin: 0f 2e ea 0f 95 c2
; asm: ucomiss %xmm5, %xmm2
; asm: sete %dl
[-,%rdx] v303 = fcmp ueq v11, v10 ; bin: 0f 2e d5 0f 94 c2
; asm: ucomiss %xmm2, %xmm5
; asm: seta %bl
[-,%rbx] v304 = fcmp gt v10, v11 ; bin: 0f 2e ea 0f 97 c3
; asm: ucomiss %xmm5, %xmm2
; asm: setae %bl
[-,%rbx] v305 = fcmp ge v11, v10 ; bin: 0f 2e d5 0f 93 c3
; asm: ucomiss %xmm2, %xmm5
; asm: setb %dl
[-,%rdx] v306 = fcmp ult v10, v11 ; bin: 0f 2e ea 0f 92 c2
; asm: ucomiss %xmm5, %xmm2
; asm: setbe %dl
[-,%rdx] v307 = fcmp ule v11, v10 ; bin: 0f 2e d5 0f 96 c2
; asm: ucomiss %xmm2, %xmm5
[-,%rflags] v310 = ffcmp v10, v11 ; bin: 0f 2e ea
; asm: ucomiss %xmm2, %xmm5
[-,%rflags] v311 = ffcmp v11, v10 ; bin: 0f 2e d5
; asm: ucomiss %xmm5, %xmm5
[-,%rflags] v312 = ffcmp v10, v10 ; bin: 0f 2e ed
; Load/Store Complex
[-,%rax] v350 = iconst.i32 1
[-,%rbx] v351 = iconst.i32 2
; asm: movss (%eax,%ebx,1),%xmm5
[-,%xmm5] v352 = load_complex.f32 v350+v351 ; bin: heap_oob f3 0f 10 2c 18
; asm: movss 0x32(%eax,%ebx,1),%xmm5
[-,%xmm5] v353 = load_complex.f32 v350+v351+50 ; bin: heap_oob f3 0f 10 6c 18 32
; asm: movss -0x32(%eax,%ebx,1),%xmm5
[-,%xmm5] v354 = load_complex.f32 v350+v351-50 ; bin: heap_oob f3 0f 10 6c 18 ce
; asm: movss 0x2710(%eax,%ebx,1),%xmm5
[-,%xmm5] v355 = load_complex.f32 v350+v351+10000 ; bin: heap_oob f3 0f 10 ac 18 00002710
; asm: movss -0x2710(%eax,%ebx,1),%xmm5
[-,%xmm5] v356 = load_complex.f32 v350+v351-10000 ; bin: heap_oob f3 0f 10 ac 18 ffffd8f0
; asm: movss %xmm5,(%eax,%ebx,1)
[-] store_complex.f32 v100, v350+v351 ; bin: heap_oob f3 0f 11 2c 18
; asm: movss %xmm5,0x32(%eax,%ebx,1)
[-] store_complex.f32 v100, v350+v351+50 ; bin: heap_oob f3 0f 11 6c 18 32
; asm: movss %xmm2,-0x32(%eax,%ebx,1)
[-] store_complex.f32 v101, v350+v351-50 ; bin: heap_oob f3 0f 11 54 18 ce
; asm: movss %xmm5,0x2710(%eax,%ebx,1)
[-] store_complex.f32 v100, v350+v351+10000 ; bin: heap_oob f3 0f 11 ac 18 00002710
; asm: movss %xmm2,-0x2710(%eax,%ebx,1)
[-] store_complex.f32 v101, v350+v351-10000 ; bin: heap_oob f3 0f 11 94 18 ffffd8f0
return
}
function %F64() {
ss0 = incoming_arg 8, offset 0
ss1 = incoming_arg 1024, offset -1024
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
block0:
[-,%rcx] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
; asm: cvtsi2sd %ecx, %xmm5
[-,%xmm5] v10 = fcvt_from_sint.f64 v0 ; bin: f2 0f 2a e9
; asm: cvtsi2sd %esi, %xmm2
[-,%xmm2] v11 = fcvt_from_sint.f64 v1 ; bin: f2 0f 2a d6
; asm: cvtsd2ss %xmm2, %xmm5
[-,%xmm5] v12 = fdemote.f32 v11 ; bin: f2 0f 5a ea
; asm: cvtsd2ss %xmm5, %xmm2
[-,%xmm2] v13 = fdemote.f32 v10 ; bin: f2 0f 5a d5
; No i64 <-> f64 bitcasts in 32-bit mode.
; asm: movaps %xmm2, %xmm5
[-,%xmm5] v18 = copy v11 ; bin: 0f 28 ea
; asm: movaps %xmm5, %xmm2
[-,%xmm2] v19 = copy v10 ; bin: 0f 28 d5
; asm: movaps %xmm2, %xmm5
regmove v19, %xmm2 -> %xmm5 ; bin: 0f 28 ea
; asm: movaps %xmm5, %xmm2
regmove v19, %xmm5 -> %xmm2 ; bin: 0f 28 d5
; Binary arithmetic.
; asm: addsd %xmm2, %xmm5
[-,%xmm5] v20 = fadd v10, v11 ; bin: f2 0f 58 ea
; asm: addsd %xmm5, %xmm2
[-,%xmm2] v21 = fadd v11, v10 ; bin: f2 0f 58 d5
; asm: subsd %xmm2, %xmm5
[-,%xmm5] v22 = fsub v10, v11 ; bin: f2 0f 5c ea
; asm: subsd %xmm5, %xmm2
[-,%xmm2] v23 = fsub v11, v10 ; bin: f2 0f 5c d5
; asm: mulsd %xmm2, %xmm5
[-,%xmm5] v24 = fmul v10, v11 ; bin: f2 0f 59 ea
; asm: mulsd %xmm5, %xmm2
[-,%xmm2] v25 = fmul v11, v10 ; bin: f2 0f 59 d5
; asm: divsd %xmm2, %xmm5
[-,%xmm5] v26 = fdiv v10, v11 ; bin: f2 0f 5e ea
; asm: divsd %xmm5, %xmm2
[-,%xmm2] v27 = fdiv v11, v10 ; bin: f2 0f 5e d5
; Bitwise ops.
; We use the *ps SSE instructions for everything because they are smaller.
; asm: andps %xmm2, %xmm5
[-,%xmm5] v30 = band v10, v11 ; bin: 0f 54 ea
; asm: andps %xmm5, %xmm2
[-,%xmm2] v31 = band v11, v10 ; bin: 0f 54 d5
; asm: andnps %xmm2, %xmm5
[-,%xmm5] v32 = band_not v11, v10 ; bin: 0f 55 ea
; asm: andnps %xmm5, %xmm2
[-,%xmm2] v33 = band_not v10, v11 ; bin: 0f 55 d5
; asm: orps %xmm2, %xmm5
[-,%xmm5] v34 = bor v10, v11 ; bin: 0f 56 ea
; asm: orps %xmm5, %xmm2
[-,%xmm2] v35 = bor v11, v10 ; bin: 0f 56 d5
; asm: xorps %xmm2, %xmm5
[-,%xmm5] v36 = bxor v10, v11 ; bin: 0f 57 ea
; asm: xorps %xmm5, %xmm2
[-,%xmm2] v37 = bxor v11, v10 ; bin: 0f 57 d5
; Convert float to int. (No i64 dest on i386).
; asm: cvttsd2si %xmm5, %ecx
[-,%rcx] v40 = x86_cvtt2si.i32 v10 ; bin: f2 0f 2c cd
; asm: cvttsd2si %xmm2, %esi
[-,%rsi] v41 = x86_cvtt2si.i32 v11 ; bin: f2 0f 2c f2
; Min/max.
; asm: minsd %xmm2, %xmm5
[-,%xmm5] v42 = x86_fmin v10, v11 ; bin: f2 0f 5d ea
; asm: minsd %xmm5, %xmm2
[-,%xmm2] v43 = x86_fmin v11, v10 ; bin: f2 0f 5d d5
; asm: maxsd %xmm2, %xmm5
[-,%xmm5] v44 = x86_fmax v10, v11 ; bin: f2 0f 5f ea
; asm: maxsd %xmm5, %xmm2
[-,%xmm2] v45 = x86_fmax v11, v10 ; bin: f2 0f 5f d5
; Unary arithmetic.
; asm: sqrtsd %xmm5, %xmm2
[-,%xmm2] v50 = sqrt v10 ; bin: f2 0f 51 d5
; asm: sqrtsd %xmm2, %xmm5
[-,%xmm5] v51 = sqrt v11 ; bin: f2 0f 51 ea
; asm: roundsd $0, %xmm5, %xmm4
[-,%xmm4] v52 = nearest v10 ; bin: 66 0f 3a 0b e5 00
; asm: roundsd $0, %xmm2, %xmm5
[-,%xmm5] v53 = nearest v11 ; bin: 66 0f 3a 0b ea 00
; asm: roundsd $0, %xmm5, %xmm2
[-,%xmm2] v54 = nearest v10 ; bin: 66 0f 3a 0b d5 00
; asm: roundsd $1, %xmm5, %xmm4
[-,%xmm4] v55 = floor v10 ; bin: 66 0f 3a 0b e5 01
; asm: roundsd $1, %xmm2, %xmm5
[-,%xmm5] v56 = floor v11 ; bin: 66 0f 3a 0b ea 01
; asm: roundsd $1, %xmm5, %xmm2
[-,%xmm2] v57 = floor v10 ; bin: 66 0f 3a 0b d5 01
; asm: roundsd $2, %xmm5, %xmm4
[-,%xmm4] v58 = ceil v10 ; bin: 66 0f 3a 0b e5 02
; asm: roundsd $2, %xmm2, %xmm5
[-,%xmm5] v59 = ceil v11 ; bin: 66 0f 3a 0b ea 02
; asm: roundsd $2, %xmm5, %xmm2
[-,%xmm2] v60 = ceil v10 ; bin: 66 0f 3a 0b d5 02
; asm: roundsd $3, %xmm5, %xmm4
[-,%xmm4] v61 = trunc v10 ; bin: 66 0f 3a 0b e5 03
; asm: roundsd $3, %xmm2, %xmm5
[-,%xmm5] v62 = trunc v11 ; bin: 66 0f 3a 0b ea 03
; asm: roundsd $3, %xmm5, %xmm2
[-,%xmm2] v63 = trunc v10 ; bin: 66 0f 3a 0b d5 03
; Load/Store
; asm: movsd (%ecx), %xmm5
[-,%xmm5] v100 = load.f64 v0 ; bin: heap_oob f2 0f 10 29
; asm: movsd (%esi), %xmm2
[-,%xmm2] v101 = load.f64 v1 ; bin: heap_oob f2 0f 10 16
; asm: movsd 50(%ecx), %xmm5
[-,%xmm5] v110 = load.f64 v0+50 ; bin: heap_oob f2 0f 10 69 32
; asm: movsd -50(%esi), %xmm2
[-,%xmm2] v111 = load.f64 v1-50 ; bin: heap_oob f2 0f 10 56 ce
; asm: movsd 10000(%ecx), %xmm5
[-,%xmm5] v120 = load.f64 v0+10000 ; bin: heap_oob f2 0f 10 a9 00002710
; asm: movsd -10000(%esi), %xmm2
[-,%xmm2] v121 = load.f64 v1-10000 ; bin: heap_oob f2 0f 10 96 ffffd8f0
; asm: movsd %xmm5, (%ecx)
[-] store.f64 v100, v0 ; bin: heap_oob f2 0f 11 29
; asm: movsd %xmm2, (%esi)
[-] store.f64 v101, v1 ; bin: heap_oob f2 0f 11 16
; asm: movsd %xmm5, 50(%ecx)
[-] store.f64 v100, v0+50 ; bin: heap_oob f2 0f 11 69 32
; asm: movsd %xmm2, -50(%esi)
[-] store.f64 v101, v1-50 ; bin: heap_oob f2 0f 11 56 ce
; asm: movsd %xmm5, 10000(%ecx)
[-] store.f64 v100, v0+10000 ; bin: heap_oob f2 0f 11 a9 00002710
; asm: movsd %xmm2, -10000(%esi)
[-] store.f64 v101, v1-10000 ; bin: heap_oob f2 0f 11 96 ffffd8f0
; Spill / Fill.
; asm: movsd %xmm5, 1032(%esp)
[-,ss1] v200 = spill v100 ; bin: stk_ovf f2 0f 11 ac 24 00000408
; asm: movsd %xmm2, 1032(%esp)
[-,ss1] v201 = spill v101 ; bin: stk_ovf f2 0f 11 94 24 00000408
; asm: movsd 1032(%esp), %xmm5
[-,%xmm5] v210 = fill v200 ; bin: f2 0f 10 ac 24 00000408
; asm: movsd 1032(%esp), %xmm2
[-,%xmm2] v211 = fill v201 ; bin: f2 0f 10 94 24 00000408
; asm: movsd %xmm5, 1032(%esp)
regspill v100, %xmm5 -> ss1 ; bin: stk_ovf f2 0f 11 ac 24 00000408
; asm: movsd 1032(%esp), %xmm5
regfill v100, ss1 -> %xmm5 ; bin: f2 0f 10 ac 24 00000408
; Comparisons.
;
; Only `supported_floatccs` are tested here. Others are handled by
; legalization patterns.
; asm: ucomisd %xmm2, %xmm5
; asm: setnp %bl
[-,%rbx] v300 = fcmp ord v10, v11 ; bin: 66 0f 2e ea 0f 9b c3
; asm: ucomisd %xmm5, %xmm2
; asm: setp %bl
[-,%rbx] v301 = fcmp uno v11, v10 ; bin: 66 0f 2e d5 0f 9a c3
; asm: ucomisd %xmm2, %xmm5
; asm: setne %dl
[-,%rdx] v302 = fcmp one v10, v11 ; bin: 66 0f 2e ea 0f 95 c2
; asm: ucomisd %xmm5, %xmm2
; asm: sete %dl
[-,%rdx] v303 = fcmp ueq v11, v10 ; bin: 66 0f 2e d5 0f 94 c2
; asm: ucomisd %xmm2, %xmm5
; asm: seta %bl
[-,%rbx] v304 = fcmp gt v10, v11 ; bin: 66 0f 2e ea 0f 97 c3
; asm: ucomisd %xmm5, %xmm2
; asm: setae %bl
[-,%rbx] v305 = fcmp ge v11, v10 ; bin: 66 0f 2e d5 0f 93 c3
; asm: ucomisd %xmm2, %xmm5
; asm: setb %dl
[-,%rdx] v306 = fcmp ult v10, v11 ; bin: 66 0f 2e ea 0f 92 c2
; asm: ucomisd %xmm5, %xmm2
; asm: setbe %dl
[-,%rdx] v307 = fcmp ule v11, v10 ; bin: 66 0f 2e d5 0f 96 c2
; asm: ucomisd %xmm2, %xmm5
[-,%rflags] v310 = ffcmp v10, v11 ; bin: 66 0f 2e ea
; asm: ucomisd %xmm2, %xmm5
[-,%rflags] v311 = ffcmp v11, v10 ; bin: 66 0f 2e d5
; asm: ucomisd %xmm5, %xmm5
[-,%rflags] v312 = ffcmp v10, v10 ; bin: 66 0f 2e ed
return
}
function %cpuflags_float(f32 [%xmm0]) {
block0(v0: f32 [%xmm0]):
; asm: ucomiss %xmm0, %xmm0
[-,%rflags] v1 = ffcmp v0, v0 ; bin: 0f 2e c0
jump block1
block1:
; asm: jnp block1
brff ord v1, block1 ; bin: 7b fe
jump block2
block2:
; asm: jp block1
brff uno v1, block1 ; bin: 7a fc
jump block3
block3:
; asm: jne block1
brff one v1, block1 ; bin: 75 fa
jump block4
block4:
; asm: je block1
brff ueq v1, block1 ; bin: 74 f8
jump block5
block5:
; asm: ja block1
brff gt v1, block1 ; bin: 77 f6
jump block6
block6:
; asm: jae block1
brff ge v1, block1 ; bin: 73 f4
jump block7
block7:
; asm: jb block1
brff ult v1, block1 ; bin: 72 f2
jump block8
block8:
; asm: jbe block1
brff ule v1, block1 ; bin: 76 f0
jump block9
block9:
; asm: jp .+4; ud2
trapff ord v1, user0 ; bin: 7a 02 user0 0f 0b
; asm: jnp .+4; ud2
trapff uno v1, user0 ; bin: 7b 02 user0 0f 0b
; asm: je .+4; ud2
trapff one v1, user0 ; bin: 74 02 user0 0f 0b
; asm: jne .+4; ud2
trapff ueq v1, user0 ; bin: 75 02 user0 0f 0b
; asm: jna .+4; ud2
trapff gt v1, user0 ; bin: 76 02 user0 0f 0b
; asm: jnae .+4; ud2
trapff ge v1, user0 ; bin: 72 02 user0 0f 0b
; asm: jnb .+4; ud2
trapff ult v1, user0 ; bin: 73 02 user0 0f 0b
; asm: jnbe .+4; ud2
trapff ule v1, user0 ; bin: 77 02 user0 0f 0b
; asm: setnp %bl
[-,%rbx] v10 = trueff ord v1 ; bin: 0f 9b c3
; asm: setp %bl
[-,%rbx] v11 = trueff uno v1 ; bin: 0f 9a c3
; asm: setne %dl
[-,%rdx] v12 = trueff one v1 ; bin: 0f 95 c2
; asm: sete %dl
[-,%rdx] v13 = trueff ueq v1 ; bin: 0f 94 c2
; asm: seta %al
[-,%rax] v14 = trueff gt v1 ; bin: 0f 97 c0
; asm: setae %al
[-,%rax] v15 = trueff ge v1 ; bin: 0f 93 c0
; asm: setb %cl
[-,%rcx] v16 = trueff ult v1 ; bin: 0f 92 c1
; asm: setbe %cl
[-,%rcx] v17 = trueff ule v1 ; bin: 0f 96 c1
return
}
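A compact summary of the `supported_floatccs` mapping exercised above (a sketch; the table is read directly off the asm comments): after `ucomiss`/`ucomisd`, an unordered result sets PF, so each supported condition needs only a single setcc.

    fn floatcc_setcc(cc: &str) -> Option<&'static str> {
        Some(match cc {
            "ord" => "setnp",
            "uno" => "setp",
            "one" => "setne",
            "ueq" => "sete",
            "gt" => "seta",
            "ge" => "setae",
            "ult" => "setb",
            "ule" => "setbe",
            _ => return None, // everything else goes through legalization
        })
    }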

View File

@@ -1,721 +0,0 @@
; binary emission of x86-32 code.
test binemit
set opt_level=speed_and_size
target i686 legacy haswell
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/x86/binary32.clif | llvm-mc -show-encoding -triple=i386
;
function %I32() {
sig0 = ()
fn0 = %foo()
gv0 = symbol %some_gv
ss0 = incoming_arg 8, offset 0
ss1 = incoming_arg 1024, offset -1024
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
block0:
; asm: movl $1, %ecx
[-,%rcx] v1 = iconst.i32 1 ; bin: b9 00000001
; asm: movl $2, %esi
[-,%rsi] v2 = iconst.i32 2 ; bin: be 00000002
; asm: movl $1, %ecx
[-,%rcx] v9007 = bconst.b1 true ; bin: b9 00000001
; Integer Register-Register Operations.
; asm: addl %esi, %ecx
[-,%rcx] v10 = iadd v1, v2 ; bin: 01 f1
; asm: addl %ecx, %esi
[-,%rsi] v11 = iadd v2, v1 ; bin: 01 ce
; asm: subl %esi, %ecx
[-,%rcx] v12 = isub v1, v2 ; bin: 29 f1
; asm: subl %ecx, %esi
[-,%rsi] v13 = isub v2, v1 ; bin: 29 ce
; asm: andl %esi, %ecx
[-,%rcx] v14 = band v1, v2 ; bin: 21 f1
; asm: andl %ecx, %esi
[-,%rsi] v15 = band v2, v1 ; bin: 21 ce
; asm: orl %esi, %ecx
[-,%rcx] v16 = bor v1, v2 ; bin: 09 f1
; asm: orl %ecx, %esi
[-,%rsi] v17 = bor v2, v1 ; bin: 09 ce
; asm: xorl %esi, %ecx
[-,%rcx] v18 = bxor v1, v2 ; bin: 31 f1
; asm: xorl %ecx, %esi
[-,%rsi] v19 = bxor v2, v1 ; bin: 31 ce
; Dynamic shifts take the shift amount in %rcx.
; asm: shll %cl, %esi
[-,%rsi] v20 = ishl v2, v1 ; bin: d3 e6
; asm: shll %cl, %ecx
[-,%rcx] v21 = ishl v1, v1 ; bin: d3 e1
; asm: shrl %cl, %esi
[-,%rsi] v22 = ushr v2, v1 ; bin: d3 ee
; asm: shrl %cl, %ecx
[-,%rcx] v23 = ushr v1, v1 ; bin: d3 e9
; asm: sarl %cl, %esi
[-,%rsi] v24 = sshr v2, v1 ; bin: d3 fe
; asm: sarl %cl, %ecx
[-,%rcx] v25 = sshr v1, v1 ; bin: d3 f9
; asm: roll %cl, %esi
[-,%rsi] v26 = rotl v2, v1 ; bin: d3 c6
; asm: roll %cl, %ecx
[-,%rcx] v27 = rotl v1, v1 ; bin: d3 c1
; asm: rorl %cl, %esi
[-,%rsi] v28 = rotr v2, v1 ; bin: d3 ce
; asm: rorl %cl, %ecx
[-,%rcx] v29 = rotr v1, v1 ; bin: d3 c9
; Integer Register - Immediate 8-bit operations.
; The 8-bit immediate is sign-extended.
; asm: addl $-128, %ecx
[-,%rcx] v30 = iadd_imm v1, -128 ; bin: 83 c1 80
; asm: addl $10, %esi
[-,%rsi] v31 = iadd_imm v2, 10 ; bin: 83 c6 0a
; asm: andl $-128, %ecx
[-,%rcx] v32 = band_imm v1, -128 ; bin: 83 e1 80
; asm: andl $10, %esi
[-,%rsi] v33 = band_imm v2, 10 ; bin: 83 e6 0a
; asm: orl $-128, %ecx
[-,%rcx] v34 = bor_imm v1, -128 ; bin: 83 c9 80
; asm: orl $10, %esi
[-,%rsi] v35 = bor_imm v2, 10 ; bin: 83 ce 0a
; asm: xorl $-128, %ecx
[-,%rcx] v36 = bxor_imm v1, -128 ; bin: 83 f1 80
; asm: xorl $10, %esi
[-,%rsi] v37 = bxor_imm v2, 10 ; bin: 83 f6 0a
; Integer Register - Immediate 32-bit operations.
; asm: addl $-128000, %ecx
[-,%rcx] v40 = iadd_imm v1, -128000 ; bin: 81 c1 fffe0c00
; asm: addl $1000000, %esi
[-,%rsi] v41 = iadd_imm v2, 1000000 ; bin: 81 c6 000f4240
; asm: andl $-128000, %ecx
[-,%rcx] v42 = band_imm v1, -128000 ; bin: 81 e1 fffe0c00
; asm: andl $1000000, %esi
[-,%rsi] v43 = band_imm v2, 1000000 ; bin: 81 e6 000f4240
; asm: orl $-128000, %ecx
[-,%rcx] v44 = bor_imm v1, -128000 ; bin: 81 c9 fffe0c00
; asm: orl $1000000, %esi
[-,%rsi] v45 = bor_imm v2, 1000000 ; bin: 81 ce 000f4240
; asm: xorl $-128000, %ecx
[-,%rcx] v46 = bxor_imm v1, -128000 ; bin: 81 f1 fffe0c00
; asm: xorl $1000000, %esi
[-,%rsi] v47 = bxor_imm v2, 1000000 ; bin: 81 f6 000f4240
; More arithmetic.
; asm: imull %esi, %ecx
[-,%rcx] v50 = imul v1, v2 ; bin: 0f af ce
; asm: imull %ecx, %esi
[-,%rsi] v51 = imul v2, v1 ; bin: 0f af f1
; asm: movl $1, %eax
[-,%rax] v52 = iconst.i32 1 ; bin: b8 00000001
; asm: movl $2, %edx
[-,%rdx] v53 = iconst.i32 2 ; bin: ba 00000002
; asm: idivl %ecx
[-,%rax,%rdx] v54, v55 = x86_sdivmodx v52, v53, v1 ; bin: int_divz f7 f9
; asm: idivl %esi
[-,%rax,%rdx] v56, v57 = x86_sdivmodx v52, v53, v2 ; bin: int_divz f7 fe
; asm: divl %ecx
[-,%rax,%rdx] v58, v59 = x86_udivmodx v52, v53, v1 ; bin: int_divz f7 f1
; asm: divl %esi
[-,%rax,%rdx] v60, v61 = x86_udivmodx v52, v53, v2 ; bin: int_divz f7 f6
; Register copies.
; asm: movl %esi, %ecx
[-,%rcx] v80 = copy v2 ; bin: 89 f1
; asm: movl %ecx, %esi
[-,%rsi] v81 = copy v1 ; bin: 89 ce
; Copy Special
; asm: movl %esp, %ebp
copy_special %rsp -> %rbp ; bin: 89 e5
; asm: movl %ebp, %esp
copy_special %rbp -> %rsp ; bin: 89 ec
; Load/Store instructions.
; Register indirect addressing with no displacement.
; asm: movl %ecx, (%esi)
store v1, v2 ; bin: heap_oob 89 0e
; asm: movl %esi, (%ecx)
store v2, v1 ; bin: heap_oob 89 31
; asm: movw %cx, (%esi)
istore16 v1, v2 ; bin: heap_oob 66 89 0e
; asm: movw %si, (%ecx)
istore16 v2, v1 ; bin: heap_oob 66 89 31
; asm: movb %cl, (%esi)
istore8 v1, v2 ; bin: heap_oob 88 0e
; Can't store %sil in 32-bit mode (needs REX prefix).
; asm: movl (%ecx), %edi
[-,%rdi] v100 = load.i32 v1 ; bin: heap_oob 8b 39
; asm: movl (%esi), %edx
[-,%rdx] v101 = load.i32 v2 ; bin: heap_oob 8b 16
; asm: movzwl (%ecx), %edi
[-,%rdi] v102 = uload16.i32 v1 ; bin: heap_oob 0f b7 39
; asm: movzwl (%esi), %edx
[-,%rdx] v103 = uload16.i32 v2 ; bin: heap_oob 0f b7 16
; asm: movswl (%ecx), %edi
[-,%rdi] v104 = sload16.i32 v1 ; bin: heap_oob 0f bf 39
; asm: movswl (%esi), %edx
[-,%rdx] v105 = sload16.i32 v2 ; bin: heap_oob 0f bf 16
; asm: movzbl (%ecx), %edi
[-,%rdi] v106 = uload8.i32 v1 ; bin: heap_oob 0f b6 39
; asm: movzbl (%esi), %edx
[-,%rdx] v107 = uload8.i32 v2 ; bin: heap_oob 0f b6 16
; asm: movsbl (%ecx), %edi
[-,%rdi] v108 = sload8.i32 v1 ; bin: heap_oob 0f be 39
; asm: movsbl (%esi), %edx
[-,%rdx] v109 = sload8.i32 v2 ; bin: heap_oob 0f be 16
; Register-indirect with 8-bit signed displacement.
; asm: movl %ecx, 100(%esi)
store v1, v2+100 ; bin: heap_oob 89 4e 64
; asm: movl %esi, -100(%ecx)
store v2, v1-100 ; bin: heap_oob 89 71 9c
; asm: movw %cx, 100(%esi)
istore16 v1, v2+100 ; bin: heap_oob 66 89 4e 64
; asm: movw %si, -100(%ecx)
istore16 v2, v1-100 ; bin: heap_oob 66 89 71 9c
; asm: movb %cl, 100(%esi)
istore8 v1, v2+100 ; bin: heap_oob 88 4e 64
; asm: movl 50(%ecx), %edi
[-,%rdi] v110 = load.i32 v1+50 ; bin: heap_oob 8b 79 32
; asm: movl -50(%esi), %edx
[-,%rdx] v111 = load.i32 v2-50 ; bin: heap_oob 8b 56 ce
; asm: movzwl 50(%ecx), %edi
[-,%rdi] v112 = uload16.i32 v1+50 ; bin: heap_oob 0f b7 79 32
; asm: movzwl -50(%esi), %edx
[-,%rdx] v113 = uload16.i32 v2-50 ; bin: heap_oob 0f b7 56 ce
; asm: movswl 50(%ecx), %edi
[-,%rdi] v114 = sload16.i32 v1+50 ; bin: heap_oob 0f bf 79 32
; asm: movswl -50(%esi), %edx
[-,%rdx] v115 = sload16.i32 v2-50 ; bin: heap_oob 0f bf 56 ce
; asm: movzbl 50(%ecx), %edi
[-,%rdi] v116 = uload8.i32 v1+50 ; bin: heap_oob 0f b6 79 32
; asm: movzbl -50(%esi), %edx
[-,%rdx] v117 = uload8.i32 v2-50 ; bin: heap_oob 0f b6 56 ce
; asm: movsbl 50(%ecx), %edi
[-,%rdi] v118 = sload8.i32 v1+50 ; bin: heap_oob 0f be 79 32
; asm: movsbl -50(%esi), %edx
[-,%rdx] v119 = sload8.i32 v2-50 ; bin: heap_oob 0f be 56 ce
; Register-indirect with 32-bit signed displacement.
; asm: movl %ecx, 10000(%esi)
store v1, v2+10000 ; bin: heap_oob 89 8e 00002710
; asm: movl %esi, -10000(%ecx)
store v2, v1-10000 ; bin: heap_oob 89 b1 ffffd8f0
; asm: movw %cx, 10000(%esi)
istore16 v1, v2+10000 ; bin: heap_oob 66 89 8e 00002710
; asm: movw %si, -10000(%ecx)
istore16 v2, v1-10000 ; bin: heap_oob 66 89 b1 ffffd8f0
; asm: movb %cl, 10000(%esi)
istore8 v1, v2+10000 ; bin: heap_oob 88 8e 00002710
; asm: movl 50000(%ecx), %edi
[-,%rdi] v120 = load.i32 v1+50000 ; bin: heap_oob 8b b9 0000c350
; asm: movl -50000(%esi), %edx
[-,%rdx] v121 = load.i32 v2-50000 ; bin: heap_oob 8b 96 ffff3cb0
; asm: movzwl 50000(%ecx), %edi
[-,%rdi] v122 = uload16.i32 v1+50000 ; bin: heap_oob 0f b7 b9 0000c350
; asm: movzwl -50000(%esi), %edx
[-,%rdx] v123 = uload16.i32 v2-50000 ; bin: heap_oob 0f b7 96 ffff3cb0
; asm: movswl 50000(%ecx), %edi
[-,%rdi] v124 = sload16.i32 v1+50000 ; bin: heap_oob 0f bf b9 0000c350
; asm: movswl -50000(%esi), %edx
[-,%rdx] v125 = sload16.i32 v2-50000 ; bin: heap_oob 0f bf 96 ffff3cb0
; asm: movzbl 50000(%ecx), %edi
[-,%rdi] v126 = uload8.i32 v1+50000 ; bin: heap_oob 0f b6 b9 0000c350
; asm: movzbl -50000(%esi), %edx
[-,%rdx] v127 = uload8.i32 v2-50000 ; bin: heap_oob 0f b6 96 ffff3cb0
; asm: movsbl 50000(%ecx), %edi
[-,%rdi] v128 = sload8.i32 v1+50000 ; bin: heap_oob 0f be b9 0000c350
; asm: movsbl -50000(%esi), %edx
[-,%rdx] v129 = sload8.i32 v2-50000 ; bin: heap_oob 0f be 96 ffff3cb0
; Bit-counting instructions.
; asm: popcntl %esi, %ecx
[-,%rcx] v200 = popcnt v2 ; bin: f3 0f b8 ce
; asm: popcntl %ecx, %esi
[-,%rsi] v201 = popcnt v1 ; bin: f3 0f b8 f1
; asm: lzcntl %esi, %ecx
[-,%rcx] v202 = clz v2 ; bin: f3 0f bd ce
; asm: lzcntl %ecx, %esi
[-,%rsi] v203 = clz v1 ; bin: f3 0f bd f1
; asm: tzcntl %esi, %ecx
[-,%rcx] v204 = ctz v2 ; bin: f3 0f bc ce
; asm: tzcntl %ecx, %esi
[-,%rsi] v205 = ctz v1 ; bin: f3 0f bc f1
; Integer comparisons.
; asm: cmpl %esi, %ecx
; asm: sete %bl
[-,%rbx] v300 = icmp eq v1, v2 ; bin: 39 f1 0f 94 c3
; asm: cmpl %ecx, %esi
; asm: sete %dl
[-,%rdx] v301 = icmp eq v2, v1 ; bin: 39 ce 0f 94 c2
; asm: cmpl %esi, %ecx
; asm: setne %bl
[-,%rbx] v302 = icmp ne v1, v2 ; bin: 39 f1 0f 95 c3
; asm: cmpl %ecx, %esi
; asm: setne %dl
[-,%rdx] v303 = icmp ne v2, v1 ; bin: 39 ce 0f 95 c2
; asm: cmpl %esi, %ecx
; asm: setl %bl
[-,%rbx] v304 = icmp slt v1, v2 ; bin: 39 f1 0f 9c c3
; asm: cmpl %ecx, %esi
; asm: setl %dl
[-,%rdx] v305 = icmp slt v2, v1 ; bin: 39 ce 0f 9c c2
; asm: cmpl %esi, %ecx
; asm: setge %bl
[-,%rbx] v306 = icmp sge v1, v2 ; bin: 39 f1 0f 9d c3
; asm: cmpl %ecx, %esi
; asm: setge %dl
[-,%rdx] v307 = icmp sge v2, v1 ; bin: 39 ce 0f 9d c2
; asm: cmpl %esi, %ecx
; asm: setg %bl
[-,%rbx] v308 = icmp sgt v1, v2 ; bin: 39 f1 0f 9f c3
; asm: cmpl %ecx, %esi
; asm: setg %dl
[-,%rdx] v309 = icmp sgt v2, v1 ; bin: 39 ce 0f 9f c2
; asm: cmpl %esi, %ecx
; asm: setle %bl
[-,%rbx] v310 = icmp sle v1, v2 ; bin: 39 f1 0f 9e c3
; asm: cmpl %ecx, %esi
; asm: setle %dl
[-,%rdx] v311 = icmp sle v2, v1 ; bin: 39 ce 0f 9e c2
; asm: cmpl %esi, %ecx
; asm: setb %bl
[-,%rbx] v312 = icmp ult v1, v2 ; bin: 39 f1 0f 92 c3
; asm: cmpl %ecx, %esi
; asm: setb %dl
[-,%rdx] v313 = icmp ult v2, v1 ; bin: 39 ce 0f 92 c2
; asm: cmpl %esi, %ecx
; asm: setae %bl
[-,%rbx] v314 = icmp uge v1, v2 ; bin: 39 f1 0f 93 c3
; asm: cmpl %ecx, %esi
; asm: setae %dl
[-,%rdx] v315 = icmp uge v2, v1 ; bin: 39 ce 0f 93 c2
; asm: cmpl %esi, %ecx
; asm: seta %bl
[-,%rbx] v316 = icmp ugt v1, v2 ; bin: 39 f1 0f 97 c3
; asm: cmpl %ecx, %esi
; asm: seta %dl
[-,%rdx] v317 = icmp ugt v2, v1 ; bin: 39 ce 0f 97 c2
; asm: cmpl %esi, %ecx
; asm: setbe %bl
[-,%rbx] v318 = icmp ule v1, v2 ; bin: 39 f1 0f 96 c3
; asm: cmpl %ecx, %esi
; asm: setbe %dl
[-,%rdx] v319 = icmp ule v2, v1 ; bin: 39 ce 0f 96 c2
; Bool-to-int conversions.
; asm: movzbl %bl, %ecx
[-,%rcx] v350 = bint.i32 v300 ; bin: 0f b6 cb
; asm: movzbl %dl, %esi
[-,%rsi] v351 = bint.i32 v301 ; bin: 0f b6 f2
; asm: call foo
call fn0() ; bin: stk_ovf e8 CallPCRel4(%foo-4) 00000000
; asm: movl $0, %ecx
[-,%rcx] v400 = func_addr.i32 fn0 ; bin: b9 Abs4(%foo) 00000000
; asm: movl $0, %esi
[-,%rsi] v401 = func_addr.i32 fn0 ; bin: be Abs4(%foo) 00000000
; asm: call *%ecx
call_indirect sig0, v400() ; bin: stk_ovf ff d1
; asm: call *%esi
call_indirect sig0, v401() ; bin: stk_ovf ff d6
; asm: movl $0, %ecx
[-,%rcx] v450 = symbol_value.i32 gv0 ; bin: b9 Abs4(%some_gv) 00000000
; asm: movl $0, %esi
[-,%rsi] v451 = symbol_value.i32 gv0 ; bin: be Abs4(%some_gv) 00000000
; Spill / Fill.
; asm: movl %ecx, 1032(%esp)
[-,ss1] v500 = spill v1 ; bin: stk_ovf 89 8c 24 00000408
; asm: movl %esi, 1032(%esp)
[-,ss1] v501 = spill v2 ; bin: stk_ovf 89 b4 24 00000408
; asm: movl 1032(%esp), %ecx
[-,%rcx] v510 = fill v500 ; bin: 8b 8c 24 00000408
; asm: movl 1032(%esp), %esi
[-,%rsi] v511 = fill v501 ; bin: 8b b4 24 00000408
; asm: movl %ecx, 1032(%esp)
regspill v1, %rcx -> ss1 ; bin: stk_ovf 89 8c 24 00000408
; asm: movl 1032(%esp), %ecx
regfill v1, ss1 -> %rcx ; bin: 8b 8c 24 00000408
; Push and Pop
; asm: pushl %ecx
x86_push v1 ; bin: stk_ovf 51
; asm: popl %ecx
[-,%rcx] v512 = x86_pop.i32 ; bin: 59
; Adjust Stack Pointer Up
; asm: addl $64, %esp
adjust_sp_up_imm 64 ; bin: 83 c4 40
; asm: addl $-64, %esp
adjust_sp_up_imm -64 ; bin: 83 c4 c0
; asm: addl $1024, %esp
adjust_sp_up_imm 1024 ; bin: 81 c4 00000400
; asm: addl $-1024, %esp
adjust_sp_up_imm -1024 ; bin: 81 c4 fffffc00
; asm: addl $2147483647, %esp
adjust_sp_up_imm 2147483647 ; bin: 81 c4 7fffffff
; asm: addl $-2147483648, %esp
adjust_sp_up_imm -2147483648 ; bin: 81 c4 80000000
; Adjust Stack Pointer Down
; asm: subl %ecx, %esp
adjust_sp_down v1 ; bin: 29 cc
; asm: subl %esi, %esp
adjust_sp_down v2 ; bin: 29 f4
; asm: subl $64, %esp
adjust_sp_down_imm 64 ; bin: 83 ec 40
; asm: subl $-64, %esp
adjust_sp_down_imm -64 ; bin: 83 ec c0
; asm: subl $1024, %esp
adjust_sp_down_imm 1024 ; bin: 81 ec 00000400
; asm: subl $-1024, %esp
adjust_sp_down_imm -1024 ; bin: 81 ec fffffc00
; asm: subl $2147483647, %esp
adjust_sp_down_imm 2147483647 ; bin: 81 ec 7fffffff
; asm: subl $-2147483648, %esp
adjust_sp_down_imm -2147483648 ; bin: 81 ec 80000000
; Shift immediates
; asm: shll $2, %esi
[-,%rsi] v513 = ishl_imm v2, 2 ; bin: c1 e6 02
; asm: sarl $5, %esi
[-,%rsi] v514 = sshr_imm v2, 5 ; bin: c1 fe 05
; asm: shrl $8, %esi
[-,%rsi] v515 = ushr_imm v2, 8 ; bin: c1 ee 08
; Rotate immediates
; asm: roll $12, %esi
[-,%rsi] v5101 = rotl_imm v2, 12 ; bin: c1 c6 0c
; asm: rorl $5, %esi
[-,%rsi] v5103 = rotr_imm v2, 5 ; bin: c1 ce 05
; Load Complex
[-,%rax] v521 = iconst.i32 1
[-,%rbx] v522 = iconst.i32 1
; asm: movl (%eax,%ebx,1), %ecx
[-,%rcx] v526 = load_complex.i32 v521+v522 ; bin: heap_oob 8b 0c 18
; asm: movl 1(%eax,%ebx,1), %ecx
[-,%rcx] v528 = load_complex.i32 v521+v522+1 ; bin: heap_oob 8b 4c 18 01
; asm: mov 0x1000(%eax,%ebx,1),%ecx
[-,%rcx] v530 = load_complex.i32 v521+v522+0x1000 ; bin: heap_oob 8b 8c 18 00001000
; asm: movzbl (%eax,%ebx,1),%ecx
[-,%rcx] v532 = uload8_complex.i32 v521+v522 ; bin: heap_oob 0f b6 0c 18
; asm: movsbl (%eax,%ebx,1),%ecx
[-,%rcx] v534 = sload8_complex.i32 v521+v522 ; bin: heap_oob 0f be 0c 18
; asm: movzwl (%eax,%ebx,1),%ecx
[-,%rcx] v536 = uload16_complex.i32 v521+v522 ; bin: heap_oob 0f b7 0c 18
; asm: movswl (%eax,%ebx,1),%ecx
[-,%rcx] v538 = sload16_complex.i32 v521+v522 ; bin: heap_oob 0f bf 0c 18
; Store Complex
[-,%rcx] v601 = iconst.i32 1
; asm: mov %ecx,(%eax,%ebx,1)
store_complex v601, v521+v522 ; bin: heap_oob 89 0c 18
; asm: mov %ecx,0x1(%eax,%ebx,1)
store_complex v601, v521+v522+1 ; bin: heap_oob 89 4c 18 01
; asm: mov %ecx,0x1000(%eax,%ebx,1)
store_complex v601, v521+v522+0x1000 ; bin: heap_oob 89 8c 18 00001000
; asm: mov %cx,(%eax,%ebx,1)
istore16_complex v601, v521+v522 ; bin: heap_oob 66 89 0c 18
; asm: mov %cl,(%eax,%ebx,1)
istore8_complex v601, v521+v522 ; bin: heap_oob 88 0c 18
; Carry Addition
; asm: addl %esi, %ecx
[-,%rcx,%rflags] v701, v702 = iadd_ifcout v1, v2 ; bin: 01 f1
; asm: adcl %esi, %ecx
[-,%rcx] v703 = iadd_ifcin v1, v2, v702 ; bin: 11 f1
; asm: adcl %esi, %ecx
[-,%rcx,%rflags] v704, v705 = iadd_ifcarry v1, v2, v702 ; bin: 11 f1
; Borrow Subtraction
; asm: subl %esi, %ecx
[-,%rcx,%rflags] v706, v707 = isub_ifbout v1, v2 ; bin: 29 f1
; asm: sbbl %esi, %ecx
[-,%rcx] v708 = isub_ifbin v1, v2, v707 ; bin: 19 f1
; asm: sbbl %esi, %ecx
[-,%rcx,%rflags] v709, v710 = isub_ifborrow v1, v2, v707 ; bin: 19 f1
; asm: testl %ecx, %ecx
; asm: je block1
brz v1, block1 ; bin: 85 c9 74 0e
fallthrough block3
block3:
; asm: testl %esi, %esi
; asm: je block1
brz v2, block1 ; bin: 85 f6 74 0a
fallthrough block4
block4:
; asm: testl %ecx, %ecx
; asm: jne block1
brnz v1, block1 ; bin: 85 c9 75 06
fallthrough block5
block5:
; asm: testl %esi, %esi
; asm: jne block1
brnz v2, block1 ; bin: 85 f6 75 02
; asm: jmp block2
jump block2 ; bin: eb 01
; asm: block1:
block1:
; asm: ret
return ; bin: c3
; asm: block2:
block2:
trap user0 ; bin: user0 0f 0b
}
; Special branch encodings only for I32 mode.
function %special_branches() {
block0:
[-,%rcx] v1 = iconst.i32 1
[-,%rsi] v2 = iconst.i32 2
[-,%rdi] v3 = icmp eq v1, v2
[-,%rbx] v4 = icmp ugt v1, v2
; asm: testl $0xff, %edi
; asm: je block1
brz v3, block1 ; bin: f7 c7 000000ff 0f 84 00000015
fallthrough block2
block2:
; asm: testb %bl, %bl
; asm: je block1
brz v4, block1 ; bin: 84 db 74 11
fallthrough block3
block3:
; asm: testl $0xff, %edi
; asm: jne block1
brnz v3, block1 ; bin: f7 c7 000000ff 0f 85 00000005
fallthrough block4
block4:
; asm: testb %bl, %bl
; asm: jne block1
brnz v4, block1 ; bin: 84 db 75 01
fallthrough block5
block5:
return
block1:
return
}
; CPU flag instructions.
function %cpu_flags() {
block0:
[-,%rcx] v1 = iconst.i32 1
[-,%rsi] v2 = iconst.i32 2
jump block1
block1:
; asm: cmpl %esi, %ecx
[-,%rflags] v10 = ifcmp v1, v2 ; bin: 39 f1
; asm: cmpl %ecx, %esi
[-,%rflags] v11 = ifcmp v2, v1 ; bin: 39 ce
; asm: je block1
brif eq v11, block1 ; bin: 74 fa
jump block2
block2:
; asm: jne block1
brif ne v11, block1 ; bin: 75 f8
jump block3
block3:
; asm: jl block1
brif slt v11, block1 ; bin: 7c f6
jump block4
block4:
; asm: jge block1
brif sge v11, block1 ; bin: 7d f4
jump block5
block5:
; asm: jg block1
brif sgt v11, block1 ; bin: 7f f2
jump block6
block6:
; asm: jle block1
brif sle v11, block1 ; bin: 7e f0
jump block7
block7:
; asm: jb block1
brif ult v11, block1 ; bin: 72 ee
jump block8
block8:
; asm: jae block1
brif uge v11, block1 ; bin: 73 ec
jump block9
block9:
; asm: ja block1
brif ugt v11, block1 ; bin: 77 ea
jump block10
block10:
; asm: jbe block1
brif ule v11, block1 ; bin: 76 e8
jump block11
block11:
; asm: sete %bl
[-,%rbx] v20 = trueif eq v11 ; bin: 0f 94 c3
; asm: setne %bl
[-,%rbx] v21 = trueif ne v11 ; bin: 0f 95 c3
; asm: setl %dl
[-,%rdx] v22 = trueif slt v11 ; bin: 0f 9c c2
; asm: setge %dl
[-,%rdx] v23 = trueif sge v11 ; bin: 0f 9d c2
; asm: setg %bl
[-,%rbx] v24 = trueif sgt v11 ; bin: 0f 9f c3
; asm: setle %bl
[-,%rbx] v25 = trueif sle v11 ; bin: 0f 9e c3
; asm: setb %dl
[-,%rdx] v26 = trueif ult v11 ; bin: 0f 92 c2
; asm: setae %dl
[-,%rdx] v27 = trueif uge v11 ; bin: 0f 93 c2
; asm: seta %bl
[-,%rbx] v28 = trueif ugt v11 ; bin: 0f 97 c3
; asm: setbe %bl
[-,%rbx] v29 = trueif ule v11 ; bin: 0f 96 c3
; The trapif instructions are encoded as macros: a conditional jump over a ud2.
; asm: jne .+4; ud2
trapif eq v11, user0 ; bin: 75 02 user0 0f 0b
; asm: je .+4; ud2
trapif ne v11, user0 ; bin: 74 02 user0 0f 0b
; asm: jnl .+4; ud2
trapif slt v11, user0 ; bin: 7d 02 user0 0f 0b
; asm: jnge .+4; ud2
trapif sge v11, user0 ; bin: 7c 02 user0 0f 0b
; asm: jng .+4; ud2
trapif sgt v11, user0 ; bin: 7e 02 user0 0f 0b
; asm: jnle .+4; ud2
trapif sle v11, user0 ; bin: 7f 02 user0 0f 0b
; asm: jnb .+4; ud2
trapif ult v11, user0 ; bin: 73 02 user0 0f 0b
; asm: jnae .+4; ud2
trapif uge v11, user0 ; bin: 72 02 user0 0f 0b
; asm: jna .+4; ud2
trapif ugt v11, user0 ; bin: 76 02 user0 0f 0b
; asm: jnbe .+4; ud2
trapif ule v11, user0 ; bin: 77 02 user0 0f 0b
; asm: jo .+4; ud2
trapif of v11, user0 ; bin: 71 02 user0 0f 0b
; asm: jno .+4; ud2
trapif nof v11, user0 ; bin: 70 02 user0 0f 0b
; Stack check.
; asm: cmpl %esp, %ecx
[-,%rflags] v40 = ifcmp_sp v1 ; bin: 39 e1
; asm: cmpl %esp, %esi
[-,%rflags] v41 = ifcmp_sp v2 ; bin: 39 e6
; asm: cmpl $-100, %ecx
[-,%rflags] v42 = ifcmp_imm v1, -100 ; bin: 83 f9 9c
; asm: cmpl $100, %esi
[-,%rflags] v43 = ifcmp_imm v2, 100 ; bin: 83 fe 64
; asm: cmpl $-10000, %ecx
[-,%rflags] v44 = ifcmp_imm v1, -10000 ; bin: 81 f9 ffffd8f0
; asm: cmpl $10000, %esi
[-,%rflags] v45 = ifcmp_imm v2, 10000 ; bin: 81 fe 00002710
return
}
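The trapif/trapff macro encoding used throughout this function, as a Rust sketch (hypothetical emitter): a 2-byte conditional jump with the inverted condition skips over a 2-byte ud2, e.g. `trapif eq` becomes `jne .+4; ud2` = 75 02 0f 0b.

    fn emit_trap_macro(inverted_jcc_opcode: u8, out: &mut Vec<u8>) {
        out.extend_from_slice(&[inverted_jcc_opcode, 0x02, 0x0f, 0x0b]);
    }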
; Tests for i32/i8 conversion instructions.
function %I32_I8() {
block0:
[-,%rcx] v1 = iconst.i32 1
[-,%rcx] v11 = ireduce.i8 v1 ; bin:
; asm: movsbl %cl, %esi
[-,%rsi] v20 = sextend.i32 v11 ; bin: 0f be f1
; asm: movzbl %cl, %esi
[-,%rsi] v30 = uextend.i32 v11 ; bin: 0f b6 f1
trap user0 ; bin: user0 0f 0b
}
; Tests for i32/i16 conversion instructions.
function %I32_I16() {
block0:
[-,%rcx] v1 = iconst.i32 1
[-,%rcx] v11 = ireduce.i16 v1 ; bin:
; asm: movswl %cx, %esi
[-,%rsi] v20 = sextend.i32 v11 ; bin: 0f bf f1
; asm: movzwl %cx, %esi
[-,%rsi] v30 = uextend.i32 v11 ; bin: 0f b7 f1
trap user0 ; bin: user0 0f 0b
}
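One detail worth pulling out of the file above: the imm8 forms (opcode 0x83) sign-extend their immediate, while 0x81 carries a full 32-bit immediate, so the encoder picks the short form exactly when the value survives an i8 round trip (`addl $-128` is 83 c1 80, `addl $-128000` is 81 c1 fffe0c00). A Rust sketch of that choice for the register-direct `addl` case (illustrative, not the real encoder):

    fn encode_addl_imm(rm: u8, imm: i32, out: &mut Vec<u8>) {
        if imm as i8 as i32 == imm {
            out.extend_from_slice(&[0x83, 0xc0 | rm, imm as u8]); // sign-extended imm8
        } else {
            out.push(0x81);
            out.push(0xc0 | rm);
            out.extend_from_slice(&imm.to_le_bytes()); // full imm32
        }
    }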

View File

@@ -1,638 +0,0 @@
; Binary emission of 64-bit floating point code.
test binemit
set opt_level=speed_and_size
target x86_64 legacy haswell
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/x86/binary64-float.clif | llvm-mc -show-encoding -triple=x86_64
;
function %F32() {
ss0 = incoming_arg 8, offset 0
ss1 = incoming_arg 1024, offset -1024
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
block0:
[-,%r11] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
[-,%rax] v2 = iconst.i64 11
[-,%r14] v3 = iconst.i64 12
[-,%r13] v4 = iconst.i64 13
; asm: cvtsi2ssl %r11d, %xmm5
[-,%xmm5] v10 = fcvt_from_sint.f32 v0 ; bin: f3 41 0f 2a eb
; asm: cvtsi2ssl %esi, %xmm10
[-,%xmm10] v11 = fcvt_from_sint.f32 v1 ; bin: f3 44 0f 2a d6
; asm: cvtsi2ssq %rax, %xmm5
[-,%xmm5] v12 = fcvt_from_sint.f32 v2 ; bin: f3 48 0f 2a e8
; asm: cvtsi2ssq %r14, %xmm10
[-,%xmm10] v13 = fcvt_from_sint.f32 v3 ; bin: f3 4d 0f 2a d6
; asm: cvtss2sd %xmm10, %xmm5
[-,%xmm5] v14 = fpromote.f64 v11 ; bin: f3 41 0f 5a ea
; asm: cvtss2sd %xmm5, %xmm10
[-,%xmm10] v15 = fpromote.f64 v10 ; bin: f3 44 0f 5a d5
; asm: movd %r11d, %xmm5
[-,%xmm5] v16 = bitcast.f32 v0 ; bin: 66 41 0f 6e eb
; asm: movd %esi, %xmm10
[-,%xmm10] v17 = bitcast.f32 v1 ; bin: 66 44 0f 6e d6
; asm: movd %xmm5, %ecx
[-,%rcx] v18 = bitcast.i32 v10 ; bin: 66 0f 7e e9
; asm: movd %xmm10, %esi
[-,%rsi] v19 = bitcast.i32 v11 ; bin: 66 44 0f 7e d6
; Binary arithmetic.
; asm: addss %xmm10, %xmm5
[-,%xmm5] v20 = fadd v10, v11 ; bin: f3 41 0f 58 ea
; asm: addss %xmm5, %xmm10
[-,%xmm10] v21 = fadd v11, v10 ; bin: f3 44 0f 58 d5
; asm: subss %xmm10, %xmm5
[-,%xmm5] v22 = fsub v10, v11 ; bin: f3 41 0f 5c ea
; asm: subss %xmm5, %xmm10
[-,%xmm10] v23 = fsub v11, v10 ; bin: f3 44 0f 5c d5
; asm: mulss %xmm10, %xmm5
[-,%xmm5] v24 = fmul v10, v11 ; bin: f3 41 0f 59 ea
; asm: mulss %xmm5, %xmm10
[-,%xmm10] v25 = fmul v11, v10 ; bin: f3 44 0f 59 d5
; asm: divss %xmm10, %xmm5
[-,%xmm5] v26 = fdiv v10, v11 ; bin: f3 41 0f 5e ea
; asm: divss %xmm5, %xmm10
[-,%xmm10] v27 = fdiv v11, v10 ; bin: f3 44 0f 5e d5
; Bitwise ops.
; We use the *ps SSE instructions for everything because they are smaller.
; asm: andps %xmm10, %xmm5
[-,%xmm5] v30 = band v10, v11 ; bin: 41 0f 54 ea
; asm: andps %xmm5, %xmm10
[-,%xmm10] v31 = band v11, v10 ; bin: 44 0f 54 d5
; asm: andnps %xmm10, %xmm5
[-,%xmm5] v32 = band_not v11, v10 ; bin: 41 0f 55 ea
; asm: andnps %xmm5, %xmm10
[-,%xmm10] v33 = band_not v10, v11 ; bin: 44 0f 55 d5
; asm: orps %xmm10, %xmm5
[-,%xmm5] v34 = bor v10, v11 ; bin: 41 0f 56 ea
; asm: orps %xmm5, %xmm10
[-,%xmm10] v35 = bor v11, v10 ; bin: 44 0f 56 d5
; asm: xorps %xmm10, %xmm5
[-,%xmm5] v36 = bxor v10, v11 ; bin: 41 0f 57 ea
; asm: xorps %xmm5, %xmm10
[-,%xmm10] v37 = bxor v11, v10 ; bin: 44 0f 57 d5
; asm: movaps %xmm10, %xmm5
[-,%xmm5] v38 = copy v11 ; bin: 41 0f 28 ea
; asm: movaps %xmm5, %xmm10
[-,%xmm10] v39 = copy v10 ; bin: 44 0f 28 d5
; Copy to SSA
; asm: movsd %xmm0, %xmm15
[-,%xmm15] v400 = copy_to_ssa.f64 %xmm0 ; bin: f2 44 0f 10 f8
; asm: movsd %xmm15, %xmm0
[-,%xmm0] v401 = copy_to_ssa.f64 %xmm15 ; bin: f2 41 0f 10 c7
; asm: movsd %xmm7, %xmm6. Unfortunately we get a redundant REX prefix.
[-,%xmm6] v402 = copy_to_ssa.f64 %xmm7 ; bin: f2 40 0f 10 f7
; asm: movsd %xmm11, %xmm14
[-,%xmm14] v403 = copy_to_ssa.f64 %xmm11 ; bin: f2 45 0f 10 f3
; asm: movss %xmm0, %xmm15
[-,%xmm15] v404 = copy_to_ssa.f32 %xmm0 ; bin: f3 44 0f 10 f8
; asm: movss %xmm15, %xmm0
[-,%xmm0] v405 = copy_to_ssa.f32 %xmm15 ; bin: f3 41 0f 10 c7
; asm: movss %xmm7, %xmm6. Unfortunately we get a redundant REX prefix.
[-,%xmm6] v406 = copy_to_ssa.f32 %xmm7 ; bin: f3 40 0f 10 f7
; asm: movss %xmm11, %xmm14
[-,%xmm14] v407 = copy_to_ssa.f32 %xmm11 ; bin: f3 45 0f 10 f3
; Convert float to int.
; asm: cvttss2si %xmm5, %ecx
[-,%rcx] v40 = x86_cvtt2si.i32 v10 ; bin: f3 0f 2c cd
; asm: cvttss2si %xmm10, %esi
[-,%rsi] v41 = x86_cvtt2si.i32 v11 ; bin: f3 41 0f 2c f2
; asm: cvttss2si %xmm5, %rcx
[-,%rcx] v42 = x86_cvtt2si.i64 v10 ; bin: f3 48 0f 2c cd
; asm: cvttss2si %xmm10, %rsi
[-,%rsi] v43 = x86_cvtt2si.i64 v11 ; bin: f3 49 0f 2c f2
; Min/max.
; asm: minss %xmm10, %xmm5
[-,%xmm5] v44 = x86_fmin v10, v11 ; bin: f3 41 0f 5d ea
; asm: minss %xmm5, %xmm10
[-,%xmm10] v45 = x86_fmin v11, v10 ; bin: f3 44 0f 5d d5
; asm: maxss %xmm10, %xmm5
[-,%xmm5] v46 = x86_fmax v10, v11 ; bin: f3 41 0f 5f ea
; asm: maxss %xmm5, %xmm10
[-,%xmm10] v47 = x86_fmax v11, v10 ; bin: f3 44 0f 5f d5
; Unary arithmetic.
; asm: sqrtss %xmm5, %xmm10
[-,%xmm10] v50 = sqrt v10 ; bin: f3 44 0f 51 d5
; asm: sqrtss %xmm10, %xmm5
[-,%xmm5] v51 = sqrt v11 ; bin: f3 41 0f 51 ea
; asm: roundss $0, %xmm5, %xmm10
[-,%xmm10] v52 = nearest v10 ; bin: 66 44 0f 3a 0a d5 00
; asm: roundss $0, %xmm10, %xmm5
[-,%xmm5] v53 = nearest v11 ; bin: 66 41 0f 3a 0a ea 00
; asm: roundss $0, %xmm5, %xmm2
[-,%xmm2] v54 = nearest v10 ; bin: 66 0f 3a 0a d5 00
; asm: roundss $1, %xmm5, %xmm10
[-,%xmm10] v55 = floor v10 ; bin: 66 44 0f 3a 0a d5 01
; asm: roundss $1, %xmm10, %xmm5
[-,%xmm5] v56 = floor v11 ; bin: 66 41 0f 3a 0a ea 01
; asm: roundss $1, %xmm5, %xmm2
[-,%xmm2] v57 = floor v10 ; bin: 66 0f 3a 0a d5 01
; asm: roundss $2, %xmm5, %xmm10
[-,%xmm10] v58 = ceil v10 ; bin: 66 44 0f 3a 0a d5 02
; asm: roundss $2, %xmm10, %xmm5
[-,%xmm5] v59 = ceil v11 ; bin: 66 41 0f 3a 0a ea 02
; asm: roundss $2, %xmm5, %xmm2
[-,%xmm2] v60 = ceil v10 ; bin: 66 0f 3a 0a d5 02
; asm: roundss $3, %xmm5, %xmm10
[-,%xmm10] v61 = trunc v10 ; bin: 66 44 0f 3a 0a d5 03
; asm: roundss $3, %xmm10, %xmm5
[-,%xmm5] v62 = trunc v11 ; bin: 66 41 0f 3a 0a ea 03
; asm: roundss $3, %xmm5, %xmm2
[-,%xmm2] v63 = trunc v10 ; bin: 66 0f 3a 0a d5 03
; Load/Store
; asm: movss (%r14), %xmm5
[-,%xmm5] v100 = load.f32 v3 ; bin: heap_oob f3 41 0f 10 2e
; asm: movss (%rax), %xmm10
[-,%xmm10] v101 = load.f32 v2 ; bin: heap_oob f3 44 0f 10 10
; asm: movss 50(%r14), %xmm5
[-,%xmm5] v110 = load.f32 v3+50 ; bin: heap_oob f3 41 0f 10 6e 32
; asm: movss -50(%rax), %xmm10
[-,%xmm10] v111 = load.f32 v2-50 ; bin: heap_oob f3 44 0f 10 50 ce
; asm: movss 10000(%r14), %xmm5
[-,%xmm5] v120 = load.f32 v3+10000 ; bin: heap_oob f3 41 0f 10 ae 00002710
; asm: movss -10000(%rax), %xmm10
[-,%xmm10] v121 = load.f32 v2-10000 ; bin: heap_oob f3 44 0f 10 90 ffffd8f0
; asm: movss %xmm5, (%r14)
[-] store.f32 v100, v3 ; bin: heap_oob f3 41 0f 11 2e
; asm: movss %xmm10, (%rax)
[-] store.f32 v101, v2 ; bin: heap_oob f3 44 0f 11 10
; asm: movss %xmm5, (%r13)
[-] store.f32 v100, v4 ; bin: heap_oob f3 41 0f 11 6d 00
; asm: movss %xmm10, (%r13)
[-] store.f32 v101, v4 ; bin: heap_oob f3 45 0f 11 55 00
; asm: movss %xmm5, 50(%r14)
[-] store.f32 v100, v3+50 ; bin: heap_oob f3 41 0f 11 6e 32
; asm: movss %xmm10, -50(%rax)
[-] store.f32 v101, v2-50 ; bin: heap_oob f3 44 0f 11 50 ce
; asm: movss %xmm5, 10000(%r14)
[-] store.f32 v100, v3+10000 ; bin: heap_oob f3 41 0f 11 ae 00002710
; asm: movss %xmm10, -10000(%rax)
[-] store.f32 v101, v2-10000 ; bin: heap_oob f3 44 0f 11 90 ffffd8f0
; Spill / Fill.
; asm: movss %xmm5, 1032(%rsp)
[-,ss1] v200 = spill v100 ; bin: stk_ovf f3 0f 11 ac 24 00000408
; asm: movss %xmm10, 1032(%rsp)
[-,ss1] v201 = spill v101 ; bin: stk_ovf f3 44 0f 11 94 24 00000408
; asm: movss 1032(%rsp), %xmm5
[-,%xmm5] v210 = fill v200 ; bin: f3 0f 10 ac 24 00000408
; asm: movss 1032(%rsp), %xmm10
[-,%xmm10] v211 = fill v201 ; bin: f3 44 0f 10 94 24 00000408
; asm: movss %xmm5, 1032(%rsp)
regspill v100, %xmm5 -> ss1 ; bin: stk_ovf f3 0f 11 ac 24 00000408
; asm: movss 1032(%rsp), %xmm5
regfill v100, ss1 -> %xmm5 ; bin: f3 0f 10 ac 24 00000408
; Comparisons.
;
; Only `supported_floatccs` are tested here. Others are handled by
; legalization patterns.
; asm: ucomiss %xmm10, %xmm5
; asm: setnp %bl
[-,%rbx] v300 = fcmp ord v10, v11 ; bin: 41 0f 2e ea 0f 9b c3
; asm: ucomiss %xmm5, %xmm10
; asm: setp %bl
[-,%rbx] v301 = fcmp uno v11, v10 ; bin: 44 0f 2e d5 0f 9a c3
; asm: ucomiss %xmm10, %xmm5
; asm: setne %dl
[-,%rdx] v302 = fcmp one v10, v11 ; bin: 41 0f 2e ea 0f 95 c2
; asm: ucomiss %xmm5, %xmm10
; asm: sete %dl
[-,%rdx] v303 = fcmp ueq v11, v10 ; bin: 44 0f 2e d5 0f 94 c2
; asm: ucomiss %xmm10, %xmm5
; asm: seta %bl
[-,%rbx] v304 = fcmp gt v10, v11 ; bin: 41 0f 2e ea 0f 97 c3
; asm: ucomiss %xmm5, %xmm10
; asm: setae %bl
[-,%rbx] v305 = fcmp ge v11, v10 ; bin: 44 0f 2e d5 0f 93 c3
; asm: ucomiss %xmm10, %xmm5
; asm: setb %dl
[-,%rdx] v306 = fcmp ult v10, v11 ; bin: 41 0f 2e ea 0f 92 c2
; asm: ucomiss %xmm5, %xmm10
; asm: setbe %dl
[-,%rdx] v307 = fcmp ule v11, v10 ; bin: 44 0f 2e d5 0f 96 c2
; asm: ucomiss %xmm10, %xmm5
[-,%rflags] v310 = ffcmp v10, v11 ; bin: 41 0f 2e ea
; asm: ucomiss %xmm5, %xmm10
[-,%rflags] v311 = ffcmp v11, v10 ; bin: 44 0f 2e d5
; asm: ucomiss %xmm5, %xmm5
[-,%rflags] v312 = ffcmp v10, v10 ; bin: 0f 2e ed
; Load/Store Complex
[-,%rax] v350 = iconst.i64 1
[-,%rbx] v351 = iconst.i64 2
; asm: movss (%rax,%rbx,1),%xmm5
[-,%xmm5] v352 = load_complex.f32 v350+v351 ; bin: heap_oob f3 0f 10 2c 18
; asm: movss 0x32(%rax,%rbx,1),%xmm5
[-,%xmm5] v353 = load_complex.f32 v350+v351+50 ; bin: heap_oob f3 0f 10 6c 18 32
; asm: movss -0x32(%rax,%rbx,1),%xmm10
[-,%xmm10] v354 = load_complex.f32 v350+v351-50 ; bin: heap_oob f3 44 0f 10 54 18 ce
; asm: movss 0x2710(%rax,%rbx,1),%xmm5
[-,%xmm5] v355 = load_complex.f32 v350+v351+10000 ; bin: heap_oob f3 0f 10 ac 18 00002710
; asm: movss -0x2710(%rax,%rbx,1),%xmm10
[-,%xmm10] v356 = load_complex.f32 v350+v351-10000 ; bin: heap_oob f3 44 0f 10 94 18 ffffd8f0
; asm: movss %xmm5, (%rax,%rbx,1)
[-] store_complex.f32 v100, v350+v351 ; bin: heap_oob f3 0f 11 2c 18
; asm: movss %xmm5, 50(%rax,%rbx,1)
[-] store_complex.f32 v100, v350+v351+50 ; bin: heap_oob f3 0f 11 6c 18 32
; asm: movss %xmm10, -50(%rax,%rbx,1)
[-] store_complex.f32 v101, v350+v351-50 ; bin: heap_oob f3 44 0f 11 54 18 ce
; asm: movss %xmm5, 10000(%rax,%rbx,1)
[-] store_complex.f32 v100, v350+v351+10000 ; bin: heap_oob f3 0f 11 ac 18 00002710
; asm: movss %xmm10, -10000(%rax,%rbx,1)
[-] store_complex.f32 v101, v350+v351-10000 ; bin: heap_oob f3 44 0f 11 94 18 ffffd8f0
return
}
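; On the redundant REX prefix noted twice above: 0x40 is a REX byte with none
; of the W/R/X/B bits set, so it changes nothing about the instruction. A
; hypothetical byte-for-byte comparison in Rust (the REX-free encoding is an
; assumption from the SSE encoding rules, not something this file asserts):
;
;   const MOVSD_AS_EMITTED: [u8; 5] = [0xf2, 0x40, 0x0f, 0x10, 0xf7]; // movsd %xmm7, %xmm6, as above
;   const MOVSD_MINIMAL: [u8; 4] = [0xf2, 0x0f, 0x10, 0xf7];          // same copy, one byte shorter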
function %F64() {
ss0 = incoming_arg 8, offset 0
ss1 = incoming_arg 1024, offset -1024
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
block0:
[-,%r11] v0 = iconst.i32 1
[-,%rsi] v1 = iconst.i32 2
[-,%rax] v2 = iconst.i64 11
[-,%r14] v3 = iconst.i64 12
[-,%r13] v4 = iconst.i64 13
; asm: cvtsi2sdl %r11d, %xmm5
[-,%xmm5] v10 = fcvt_from_sint.f64 v0 ; bin: f2 41 0f 2a eb
; asm: cvtsi2sdl %esi, %xmm10
[-,%xmm10] v11 = fcvt_from_sint.f64 v1 ; bin: f2 44 0f 2a d6
; asm: cvtsi2sdq %rax, %xmm5
[-,%xmm5] v12 = fcvt_from_sint.f64 v2 ; bin: f2 48 0f 2a e8
; asm: cvtsi2sdq %r14, %xmm10
[-,%xmm10] v13 = fcvt_from_sint.f64 v3 ; bin: f2 4d 0f 2a d6
; asm: cvtsd2ss %xmm10, %xmm5
[-,%xmm5] v14 = fdemote.f32 v11 ; bin: f2 41 0f 5a ea
; asm: cvtsd2ss %xmm5, %xmm10
[-,%xmm10] v15 = fdemote.f32 v10 ; bin: f2 44 0f 5a d5
; asm: movq %rax, %xmm5
[-,%xmm5] v16 = bitcast.f64 v2 ; bin: 66 48 0f 6e e8
; asm: movq %r14, %xmm10
[-,%xmm10] v17 = bitcast.f64 v3 ; bin: 66 4d 0f 6e d6
; asm: movq %xmm5, %rcx
[-,%rcx] v18 = bitcast.i64 v10 ; bin: 66 48 0f 7e e9
; asm: movq %xmm10, %rsi
[-,%rsi] v19 = bitcast.i64 v11 ; bin: 66 4c 0f 7e d6
; Binary arithmetic.
; asm: addsd %xmm10, %xmm5
[-,%xmm5] v20 = fadd v10, v11 ; bin: f2 41 0f 58 ea
; asm: addsd %xmm5, %xmm10
[-,%xmm10] v21 = fadd v11, v10 ; bin: f2 44 0f 58 d5
; asm: subsd %xmm10, %xmm5
[-,%xmm5] v22 = fsub v10, v11 ; bin: f2 41 0f 5c ea
; asm: subsd %xmm5, %xmm10
[-,%xmm10] v23 = fsub v11, v10 ; bin: f2 44 0f 5c d5
; asm: mulsd %xmm10, %xmm5
[-,%xmm5] v24 = fmul v10, v11 ; bin: f2 41 0f 59 ea
; asm: mulsd %xmm5, %xmm10
[-,%xmm10] v25 = fmul v11, v10 ; bin: f2 44 0f 59 d5
; asm: divsd %xmm10, %xmm5
[-,%xmm5] v26 = fdiv v10, v11 ; bin: f2 41 0f 5e ea
; asm: divsd %xmm5, %xmm10
[-,%xmm10] v27 = fdiv v11, v10 ; bin: f2 44 0f 5e d5
; Bitwise ops.
; We use the *ps SSE instructions for everything because they are smaller;
; see the byte comparison after this function.
; asm: andps %xmm10, %xmm5
[-,%xmm5] v30 = band v10, v11 ; bin: 41 0f 54 ea
; asm: andps %xmm5, %xmm10
[-,%xmm10] v31 = band v11, v10 ; bin: 44 0f 54 d5
; asm: andnps %xmm10, %xmm5
[-,%xmm5] v32 = band_not v11, v10 ; bin: 41 0f 55 ea
; asm: andnps %xmm5, %xmm10
[-,%xmm10] v33 = band_not v10, v11 ; bin: 44 0f 55 d5
; asm: orps %xmm10, %xmm5
[-,%xmm5] v34 = bor v10, v11 ; bin: 41 0f 56 ea
; asm: orps %xmm5, %xmm10
[-,%xmm10] v35 = bor v11, v10 ; bin: 44 0f 56 d5
; asm: xorps %xmm10, %xmm5
[-,%xmm5] v36 = bxor v10, v11 ; bin: 41 0f 57 ea
; asm: xorps %xmm5, %xmm10
[-,%xmm10] v37 = bxor v11, v10 ; bin: 44 0f 57 d5
; asm: movaps %xmm10, %xmm5
[-,%xmm5] v38 = copy v11 ; bin: 41 0f 28 ea
; asm: movaps %xmm5, %xmm10
[-,%xmm10] v39 = copy v10 ; bin: 44 0f 28 d5
; Convert float to int.
; asm: cvttsd2si %xmm5, %ecx
[-,%rcx] v40 = x86_cvtt2si.i32 v10 ; bin: f2 0f 2c cd
; asm: cvttsd2si %xmm10, %esi
[-,%rsi] v41 = x86_cvtt2si.i32 v11 ; bin: f2 41 0f 2c f2
; asm: cvttsd2si %xmm5, %rcx
[-,%rcx] v42 = x86_cvtt2si.i64 v10 ; bin: f2 48 0f 2c cd
; asm: cvttsd2si %xmm10, %rsi
[-,%rsi] v43 = x86_cvtt2si.i64 v11 ; bin: f2 49 0f 2c f2
; Min/max.
; asm: minsd %xmm10, %xmm5
[-,%xmm5] v44 = x86_fmin v10, v11 ; bin: f2 41 0f 5d ea
; asm: minsd %xmm5, %xmm10
[-,%xmm10] v45 = x86_fmin v11, v10 ; bin: f2 44 0f 5d d5
; asm: maxsd %xmm10, %xmm5
[-,%xmm5] v46 = x86_fmax v10, v11 ; bin: f2 41 0f 5f ea
; asm: maxsd %xmm5, %xmm10
[-,%xmm10] v47 = x86_fmax v11, v10 ; bin: f2 44 0f 5f d5
; Unary arithmetic.
; asm: sqrtsd %xmm5, %xmm10
[-,%xmm10] v50 = sqrt v10 ; bin: f2 44 0f 51 d5
; asm: sqrtsd %xmm10, %xmm5
[-,%xmm5] v51 = sqrt v11 ; bin: f2 41 0f 51 ea
; asm: roundsd $0, %xmm5, %xmm10
[-,%xmm10] v52 = nearest v10 ; bin: 66 44 0f 3a 0b d5 00
; asm: roundsd $0, %xmm10, %xmm5
[-,%xmm5] v53 = nearest v11 ; bin: 66 41 0f 3a 0b ea 00
; asm: roundsd $0, %xmm5, %xmm2
[-,%xmm2] v54 = nearest v10 ; bin: 66 0f 3a 0b d5 00
; asm: roundsd $1, %xmm5, %xmm10
[-,%xmm10] v55 = floor v10 ; bin: 66 44 0f 3a 0b d5 01
; asm: roundsd $1, %xmm10, %xmm5
[-,%xmm5] v56 = floor v11 ; bin: 66 41 0f 3a 0b ea 01
; asm: roundsd $1, %xmm5, %xmm2
[-,%xmm2] v57 = floor v10 ; bin: 66 0f 3a 0b d5 01
; asm: roundsd $2, %xmm5, %xmm10
[-,%xmm10] v58 = ceil v10 ; bin: 66 44 0f 3a 0b d5 02
; asm: roundsd $2, %xmm10, %xmm5
[-,%xmm5] v59 = ceil v11 ; bin: 66 41 0f 3a 0b ea 02
; asm: roundsd $2, %xmm5, %xmm2
[-,%xmm2] v60 = ceil v10 ; bin: 66 0f 3a 0b d5 02
; asm: roundsd $3, %xmm5, %xmm10
[-,%xmm10] v61 = trunc v10 ; bin: 66 44 0f 3a 0b d5 03
; asm: roundsd $3, %xmm10, %xmm5
[-,%xmm5] v62 = trunc v11 ; bin: 66 41 0f 3a 0b ea 03
; asm: roundsd $3, %xmm5, %xmm2
[-,%xmm2] v63 = trunc v10 ; bin: 66 0f 3a 0b d5 03
; Load/Store
; asm: movsd (%r14), %xmm5
[-,%xmm5] v100 = load.f64 v3 ; bin: heap_oob f2 41 0f 10 2e
; asm: movsd (%rax), %xmm10
[-,%xmm10] v101 = load.f64 v2 ; bin: heap_oob f2 44 0f 10 10
; asm: movsd 50(%r14), %xmm5
[-,%xmm5] v110 = load.f64 v3+50 ; bin: heap_oob f2 41 0f 10 6e 32
; asm: movsd -50(%rax), %xmm10
[-,%xmm10] v111 = load.f64 v2-50 ; bin: heap_oob f2 44 0f 10 50 ce
; asm: movsd 10000(%r14), %xmm5
[-,%xmm5] v120 = load.f64 v3+10000 ; bin: heap_oob f2 41 0f 10 ae 00002710
; asm: movsd -10000(%rax), %xmm10
[-,%xmm10] v121 = load.f64 v2-10000 ; bin: heap_oob f2 44 0f 10 90 ffffd8f0
; asm: movsd %xmm5, (%r14)
[-] store.f64 v100, v3 ; bin: heap_oob f2 41 0f 11 2e
; asm: movsd %xmm10, (%rax)
[-] store.f64 v101, v2 ; bin: heap_oob f2 44 0f 11 10
; asm: movsd %xmm5, (%r13)
[-] store.f64 v100, v4 ; bin: heap_oob f2 41 0f 11 6d 00
; asm: movsd %xmm10, (%r13)
[-] store.f64 v101, v4 ; bin: heap_oob f2 45 0f 11 55 00
; asm: movsd %xmm5, 50(%r14)
[-] store.f64 v100, v3+50 ; bin: heap_oob f2 41 0f 11 6e 32
; asm: movsd %xmm10, -50(%rax)
[-] store.f64 v101, v2-50 ; bin: heap_oob f2 44 0f 11 50 ce
; asm: movsd %xmm5, 10000(%r14)
[-] store.f64 v100, v3+10000 ; bin: heap_oob f2 41 0f 11 ae 00002710
; asm: movsd %xmm10, -10000(%rax)
[-] store.f64 v101, v2-10000 ; bin: heap_oob f2 44 0f 11 90 ffffd8f0
; Spill / Fill.
; asm: movsd %xmm5, 1032(%rsp)
[-,ss1] v200 = spill v100 ; bin: stk_ovf f2 0f 11 ac 24 00000408
; asm: movsd %xmm10, 1032(%rsp)
[-,ss1] v201 = spill v101 ; bin: stk_ovf f2 44 0f 11 94 24 00000408
; asm: movsd 1032(%rsp), %xmm5
[-,%xmm5] v210 = fill v200 ; bin: f2 0f 10 ac 24 00000408
; asm: movsd 1032(%rsp), %xmm10
[-,%xmm10] v211 = fill v201 ; bin: f2 44 0f 10 94 24 00000408
; asm: movsd %xmm5, 1032(%rsp)
regspill v100, %xmm5 -> ss1 ; bin: stk_ovf f2 0f 11 ac 24 00000408
; asm: movsd 1032(%rsp), %xmm5
regfill v100, ss1 -> %xmm5 ; bin: f2 0f 10 ac 24 00000408
; Comparisons.
;
; Only `supported_floatccs` are tested here. Others are handled by
; legalization patterns.
; asm: ucomisd %xmm10, %xmm5
; asm: setnp %bl
[-,%rbx] v300 = fcmp ord v10, v11 ; bin: 66 41 0f 2e ea 0f 9b c3
; asm: ucomisd %xmm5, %xmm10
; asm: setp %bl
[-,%rbx] v301 = fcmp uno v11, v10 ; bin: 66 44 0f 2e d5 0f 9a c3
; asm: ucomisd %xmm10, %xmm5
; asm: setne %dl
[-,%rdx] v302 = fcmp one v10, v11 ; bin: 66 41 0f 2e ea 0f 95 c2
; asm: ucomisd %xmm5, %xmm10
; asm: sete %dl
[-,%rdx] v303 = fcmp ueq v11, v10 ; bin: 66 44 0f 2e d5 0f 94 c2
; asm: ucomisd %xmm10, %xmm5
; asm: seta %bl
[-,%rbx] v304 = fcmp gt v10, v11 ; bin: 66 41 0f 2e ea 0f 97 c3
; asm: ucomisd %xmm5, %xmm10
; asm: setae %bl
[-,%rbx] v305 = fcmp ge v11, v10 ; bin: 66 44 0f 2e d5 0f 93 c3
; asm: ucomisd %xmm10, %xmm5
; asm: setb %dl
[-,%rdx] v306 = fcmp ult v10, v11 ; bin: 66 41 0f 2e ea 0f 92 c2
; asm: ucomisd %xmm5, %xmm10
; asm: setbe %dl
[-,%rdx] v307 = fcmp ule v11, v10 ; bin: 66 44 0f 2e d5 0f 96 c2
; asm: ucomisd %xmm10, %xmm5
[-,%rflags] v310 = ffcmp v10, v11 ; bin: 66 41 0f 2e ea
; asm: ucomisd %xmm5, %xmm10
[-,%rflags] v311 = ffcmp v11, v10 ; bin: 66 44 0f 2e d5
; asm: ucomisd %xmm5, %xmm5
[-,%rflags] v312 = ffcmp v10, v10 ; bin: 66 0f 2e ed
; Load/Store Complex
[-,%rax] v350 = iconst.i64 1
[-,%rbx] v351 = iconst.i64 2
; asm: movsd (%rax,%rbx,1),%xmm5
[-,%xmm5] v352 = load_complex.f64 v350+v351 ; bin: heap_oob f2 0f 10 2c 18
; asm: movsd 0x32(%rax,%rbx,1),%xmm5
[-,%xmm5] v353 = load_complex.f64 v350+v351+50 ; bin: heap_oob f2 0f 10 6c 18 32
; asm: movsd -0x32(%rax,%rbx,1),%xmm10
[-,%xmm10] v354 = load_complex.f64 v350+v351-50 ; bin: heap_oob f2 44 0f 10 54 18 ce
; asm: movsd 0x2710(%rax,%rbx,1),%xmm5
[-,%xmm5] v355 = load_complex.f64 v350+v351+10000 ; bin: heap_oob f2 0f 10 ac 18 00002710
; asm: movsd -0x2710(%rax,%rbx,1),%xmm10
[-,%xmm10] v356 = load_complex.f64 v350+v351-10000 ; bin: heap_oob f2 44 0f 10 94 18 ffffd8f0
; asm: movsd %xmm5, (%rax,%rbx,1)
[-] store_complex.f64 v100, v350+v351 ; bin: heap_oob f2 0f 11 2c 18
; asm: movsd %xmm5, 50(%rax,%rbx,1)
[-] store_complex.f64 v100, v350+v351+50 ; bin: heap_oob f2 0f 11 6c 18 32
; asm: movsd %xmm10, -50(%rax,%rbx,1)
[-] store_complex.f64 v101, v350+v351-50 ; bin: heap_oob f2 44 0f 11 54 18 ce
; asm: movsd %xmm5, 10000(%rax,%rbx,1)
[-] store_complex.f64 v100, v350+v351+10000 ; bin: heap_oob f2 0f 11 ac 18 00002710
; asm: movsd %xmm10, -10000(%rax,%rbx,1)
[-] store_complex.f64 v101, v350+v351-10000 ; bin: heap_oob f2 44 0f 11 94 18 ffffd8f0
return
}
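; On the "*ps because they are smaller" note inside %F64: the *pd forms of
; these bitwise ops carry an extra 0x66 operand-size prefix, so every *ps form
; saves a byte. A hypothetical comparison in Rust (the andpd encoding is an
; assumption, not covered by this file):
;
;   const ANDPS: [u8; 4] = [0x41, 0x0f, 0x54, 0xea];       // andps %xmm10, %xmm5, as tested above
;   const ANDPD: [u8; 5] = [0x66, 0x41, 0x0f, 0x54, 0xea]; // andpd %xmm10, %xmm5, one byte longer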
function %cpuflags_float(f32 [%xmm0]) {
block0(v0: f32 [%xmm0]):
; asm: ucomiss %xmm0, %xmm0
[-,%rflags] v1 = ffcmp v0, v0 ; bin: 0f 2e c0
jump block1
block1:
; asm: jnp block1
brff ord v1, block1 ; bin: 7b fe
jump block2
block2:
; asm: jp block1
brff uno v1, block1 ; bin: 7a fc
jump block3
block3:
; asm: jne block1
brff one v1, block1 ; bin: 75 fa
jump block4
block4:
; asm: je block1
brff ueq v1, block1 ; bin: 74 f8
jump block5
block5:
; asm: ja block1
brff gt v1, block1 ; bin: 77 f6
jump block6
block6:
; asm: jae block1
brff ge v1, block1 ; bin: 73 f4
jump block7
block7:
; asm: jb block1
brff ult v1, block1 ; bin: 72 f2
jump block8
block8:
; asm: jbe block1
brff ule v1, block1 ; bin: 76 f0
jump block9
block9:
; asm: jp .+4; ud2
trapff ord v1, user0 ; bin: 7a 02 user0 0f 0b
; asm: jnp .+4; ud2
trapff uno v1, user0 ; bin: 7b 02 user0 0f 0b
; asm: je .+4; ud2
trapff one v1, user0 ; bin: 74 02 user0 0f 0b
; asm: jne .+4; ud2
trapff ueq v1, user0 ; bin: 75 02 user0 0f 0b
; asm: jna .+4; ud2
trapff gt v1, user0 ; bin: 76 02 user0 0f 0b
; asm: jnae .+4; ud2
trapff ge v1, user0 ; bin: 72 02 user0 0f 0b
; asm: jnb .+4; ud2
trapff ult v1, user0 ; bin: 73 02 user0 0f 0b
; asm: jnbe .+4; ud2
trapff ule v1, user0 ; bin: 77 02 user0 0f 0b
; asm: setnp %bl
[-,%rbx] v10 = trueff ord v1 ; bin: 0f 9b c3
; asm: setp %bl
[-,%rbx] v11 = trueff uno v1 ; bin: 0f 9a c3
; asm: setne %dl
[-,%rdx] v12 = trueff one v1 ; bin: 0f 95 c2
; asm: sete %dl
[-,%rdx] v13 = trueff ueq v1 ; bin: 0f 94 c2
; asm: seta %r10b
[-,%r10] v14 = trueff gt v1 ; bin: 41 0f 97 c2
; asm: setae %r10b
[-,%r10] v15 = trueff ge v1 ; bin: 41 0f 93 c2
; asm: setb %r14b
[-,%r14] v16 = trueff ult v1 ; bin: 41 0f 92 c6
; asm: setbe %r14b
[-,%r14] v17 = trueff ule v1 ; bin: 41 0f 96 c6
return
}

View File

@@ -1,83 +0,0 @@
; binary emission of 64-bit code.
test binemit
set opt_level=speed_and_size
set is_pic
target x86_64 legacy haswell
; The binary encodings can be verified with the command:
;
; sed -ne 's/^ *; asm: *//p' filetests/isa/x86/binary64-pic.clif | llvm-mc -show-encoding -triple=x86_64
;
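; As a rough illustration (exact formatting varies by LLVM version), llvm-mc
; echoes each instruction with its encoding and leaves relocated bytes as
; fixup placeholders, e.g. for the first call below:
;
;   callq foo    # encoding: [0xe8,A,A,A,A]
;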
; Tests for i64 instructions.
function %I64() {
sig0 = ()
fn0 = %foo()
fn1 = colocated %bar()
gv0 = symbol %some_gv
gv1 = symbol colocated %some_gv
; Use incoming_arg stack slots because they won't be relocated by the frame
; layout.
ss0 = incoming_arg 8, offset 0
ss1 = incoming_arg 1024, offset -1024
ss2 = incoming_arg 1024, offset -2048
ss3 = incoming_arg 8, offset -2056
block0:
; Colocated functions.
; asm: call foo
call fn1() ; bin: stk_ovf e8 CallPCRel4(%bar-4) 00000000
; asm: lea 0x0(%rip), %rax
[-,%rax] v0 = func_addr.i64 fn1 ; bin: 48 8d 05 PCRel4(%bar-4) 00000000
; asm: lea 0x0(%rip), %rsi
[-,%rsi] v1 = func_addr.i64 fn1 ; bin: 48 8d 35 PCRel4(%bar-4) 00000000
; asm: lea 0x0(%rip), %r10
[-,%r10] v2 = func_addr.i64 fn1 ; bin: 4c 8d 15 PCRel4(%bar-4) 00000000
; asm: call *%rax
call_indirect sig0, v0() ; bin: stk_ovf ff d0
; asm: call *%rsi
call_indirect sig0, v1() ; bin: stk_ovf ff d6
; asm: call *%r10
call_indirect sig0, v2() ; bin: stk_ovf 41 ff d2
; Non-colocated functions.
; asm: call foo@PLT
call fn0() ; bin: stk_ovf e8 CallPLTRel4(%foo-4) 00000000
; asm: mov 0x0(%rip), %rax
[-,%rax] v100 = func_addr.i64 fn0 ; bin: 48 8b 05 GOTPCRel4(%foo-4) 00000000
; asm: mov 0x0(%rip), %rsi
[-,%rsi] v101 = func_addr.i64 fn0 ; bin: 48 8b 35 GOTPCRel4(%foo-4) 00000000
; asm: mov 0x0(%rip), %r10
[-,%r10] v102 = func_addr.i64 fn0 ; bin: 4c 8b 15 GOTPCRel4(%foo-4) 00000000
; asm: call *%rax
call_indirect sig0, v100() ; bin: stk_ovf ff d0
; asm: call *%rsi
call_indirect sig0, v101() ; bin: stk_ovf ff d6
; asm: call *%r10
call_indirect sig0, v102() ; bin: stk_ovf 41 ff d2
; asm: mov 0x0(%rip), %rcx
[-,%rcx] v3 = symbol_value.i64 gv0 ; bin: 48 8b 0d GOTPCRel4(%some_gv-4) 00000000
; asm: mov 0x0(%rip), %rsi
[-,%rsi] v4 = symbol_value.i64 gv0 ; bin: 48 8b 35 GOTPCRel4(%some_gv-4) 00000000
; asm: mov 0x0(%rip), %r10
[-,%r10] v5 = symbol_value.i64 gv0 ; bin: 4c 8b 15 GOTPCRel4(%some_gv-4) 00000000
; asm: lea 0x0(%rip), %rcx
[-,%rcx] v6 = symbol_value.i64 gv1 ; bin: 48 8d 0d PCRel4(%some_gv-4) 00000000
; asm: lea 0x0(%rip), %rsi
[-,%rsi] v7 = symbol_value.i64 gv1 ; bin: 48 8d 35 PCRel4(%some_gv-4) 00000000
; asm: lea 0x0(%rip), %r10
[-,%r10] v8 = symbol_value.i64 gv1 ; bin: 4c 8d 15 PCRel4(%some_gv-4) 00000000
return
}

File diff suppressed because it is too large

View File

@@ -1,42 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i128) -> i8 fast {
block0(v0: i128):
brz v0, block2
; check: v0 = iconcat v3, v4
; nextln: v5 = icmp_imm eq v3, 0
; nextln: v6 = icmp_imm eq v4, 0
; nextln: v7 = band v5, v6
; nextln: brnz v7, block2
jump block1
block1:
v1 = iconst.i8 0
return v1
block2:
v2 = iconst.i8 1
return v2
}
function u0:1(i128) -> i8 fast {
block0(v0: i128):
brnz v0, block2
; check: v0 = iconcat v3, v4
; nextln: brnz v3, block2
; nextln: fallthrough block3
; check: block3:
; nextln: brnz.i64 v4, block2
jump block1
; nextln: fallthrough block1
block1:
v1 = iconst.i8 0
return v1
block2:
v2 = iconst.i8 1
return v2
}

View File

@@ -1,38 +0,0 @@
test compile
target x86_64 legacy
function u0:0() -> b1 {
block0:
v0 = iconst.i8 0
; check: v0 = iconst.i8 0
brz v0, block1
; nextln: v3 = uextend.i32 v0
; nextln: brz v3, block1
jump block2
block1:
v1 = bconst.b1 true
return v1
block2:
v2 = bconst.b1 false
return v2
}
function u0:1() -> b1 {
block0:
v0 = iconst.i8 0
; check: v0 = iconst.i8 0
brnz v0, block1
; nextln: v3 = uextend.i32 v0
; nextln: brnz v3, block1
jump block2
block1:
v1 = bconst.b1 false
return v1
block2:
v2 = bconst.b1 true
return v2
}

View File

@@ -1,36 +0,0 @@
test compile
target i686 legacy
function u0:0(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = iconcat v0, v1
; check: v6 = fill v0
; nextln: v3 = icmp_imm eq v6, 0
; nextln: v7 = fill v1
; nextln: v4 = icmp_imm eq v7, 0
; nextln: v5 = band v3, v4
; nextln: brnz v5, block1
brz v2, block1
jump block2
block1:
trap unreachable
block2:
trap unreachable
}
function u0:1(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = iconcat v0, v1
; check: v3 = fill v0
; nextln: brnz v3, block1
; nextln: fallthrough block3
; check: block3:
; nextln: v4 = fill.i32 v1
; nextln: brnz v4, block1
brnz v2, block1
jump block2
block1:
trap unreachable
block2:
trap unreachable
}

View File

@@ -1,37 +0,0 @@
test compile
target x86_64 legacy
function u0:0() -> b1 {
block0:
v0 = iconst.i64 0xffff_ffff_eeee_0000
; check: v0 = iconst.i64 0xffff_ffff_eeee_0000
; nextln: v2 -> v0
v1 = uextend.i128 v0
; nextln: v7 = iconst.i64 0
; nextln: v3 -> v7
; nextln: v1 = iconcat v0, v7
v2, v3 = isplit v1
v4 = icmp_imm eq v2, 0xffff_ffff_eeee_0000
v5 = icmp_imm eq v3, 0
v6 = band v4, v5
return v6
}
function u0:1() -> b1 {
block0:
v0 = iconst.i64 0xffff_ffff_eeee_0000
; check: v0 = iconst.i64 0xffff_ffff_eeee_0000
; nextln: v2 -> v0
v1 = sextend.i128 v0
; nextln: v8 = copy v0
; nextln: v7 = sshr_imm v8, 63
; nextln: v3 -> v7
v2, v3 = isplit v1
v4 = icmp_imm eq v2, 0xffff_ffff_eeee_0000
v5 = icmp_imm eq v3, 0xffff_ffff_ffff_ffff
v6 = band v4, v5
return v6
}
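; A minimal Rust sketch (hypothetical helpers, halves returned as a pair) of
; the two extension patterns checked above; the i686 file below does the same
; at i32/i64 width with a shift by 31:
;
;   fn uextend_i64_to_i128(x: u64) -> (u64, u64) {
;       (x, 0)                       // high half: iconst.i64 0
;   }
;   fn sextend_i64_to_i128(x: i64) -> (u64, u64) {
;       (x as u64, (x >> 63) as u64) // high half: sshr_imm 63 replicates the sign bit
;   }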

View File

@@ -1,37 +0,0 @@
test compile
target i686 legacy
function u0:0() -> b1 {
block0:
v0 = iconst.i32 0xffff_ee00
; check: v0 = iconst.i32 0xffff_ee00
; nextln: v2 -> v0
v1 = uextend.i64 v0
; nextln: v7 = iconst.i32 0
; nextln: v3 -> v7
; nextln: v1 = iconcat v0, v7
v2, v3 = isplit v1
v4 = icmp_imm eq v2, 0xffff_ee00
v5 = icmp_imm eq v3, 0
v6 = band v4, v5
return v6
}
function u0:1() -> b1 {
block0:
v0 = iconst.i32 0xffff_ee00
; check: v0 = iconst.i32 0xffff_ee00
; nextln: v2 -> v0
v1 = sextend.i64 v0
; nextln: v10 = copy v0
; nextln: v7 = sshr_imm v10, 31
; nextln: v3 -> v7
v2, v3 = isplit v1
v4 = icmp_imm eq v2, 0xffff_ee00
v5 = icmp_imm eq v3, 0xffff_ffff
v6 = band v4, v5
return v6
}

View File

@@ -1,17 +0,0 @@
; Check that floating-point and integer constants equal to zero are optimized correctly.
test binemit
target i686 legacy
function %foo() -> f32 fast {
block0:
; asm: xorps %xmm0, %xmm0
[-,%xmm0] v0 = f32const 0.0 ; bin: 0f 57 c0
return v0
}
function %bar() -> f64 fast {
block0:
; asm: xorpd %xmm0, %xmm0
[-,%xmm0] v1 = f64const 0.0 ; bin: 66 0f 57 c0
return v1
}

View File

@@ -1,31 +0,0 @@
; Check that floating-point constants equal to zero are optimized correctly.
test binemit
target x86_64 legacy
function %zero_const_32bit_no_rex() -> f32 fast {
block0:
; asm: xorps %xmm0, %xmm0
[-,%xmm0] v0 = f32const 0.0 ; bin: 40 0f 57 c0
return v0
}
function %zero_const_32bit_rex() -> f32 fast {
block0:
; asm: xorps %xmm8, %xmm8
[-,%xmm8] v1 = f32const 0.0 ; bin: 45 0f 57 c0
return v1
}
function %zero_const_64bit_no_rex() -> f64 fast {
block0:
; asm: xorpd %xmm0, %xmm0
[-,%xmm0] v0 = f64const 0.0 ; bin: 66 40 0f 57 c0
return v0
}
function %zero_const_64bit_rex() -> f64 fast {
block0:
; asm: xorpd %xmm8, %xmm8
[-,%xmm8] v1 = f64const 0.0 ; bin: 66 45 0f 57 c0
return v1
}

View File

@@ -1,25 +0,0 @@
test compile
target x86_64 legacy
function u0:0() -> i128 system_v {
block0:
v0 = iconst.i64 0
v1 = iconst.i64 0
v2 = iconcat v0, v1
jump block5
block2:
jump block4(v27)
block4(v23: i128):
return v23
block5:
v27 = bxor.i128 v2, v2
v32 = iconst.i32 0
brz v32, block2
jump block6
block6:
trap user0
}

View File

@@ -1,46 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i64, i64) -> i128 fast {
block0(v0: i64, v1: i64):
; check: block0(v0: i64 [%rdi], v1: i64 [%rsi], v3: i64 [%rbp]):
v2 = iconcat.i64 v0, v1
; check: regmove v0, %rdi -> %rax
; check: regmove v1, %rsi -> %rdx
return v2
; check: v4 = x86_pop.i64
; check: return v0, v1, v4
}
function u0:1(i128) -> i64, i64 fast {
block0(v0: i128):
; check: block0(v3: i64 [%rdi], v4: i64 [%rsi], v5: i64 [%rbp]):
v1, v2 = isplit v0
; check: regmove v3, %rdi -> %rax
; check: regmove v4, %rsi -> %rdx
return v1, v2
; check: v6 = x86_pop.i64
; check: return v3, v4, v6
}
function u0:2(i64, i128) fast {
; check: block0(v0: i64 [%rdi], v2: i64 [%rsi], v3: i64 [%rdx], v6: i64 [%rbp]):
block0(v0: i64, v1: i128):
; check: store v2, v0+8
; check: store v3, v0+16
store v1, v0+8
return
}
function u0:3(i64) -> i128 fast {
block0(v0: i64):
; check: v2 = load.i64 v0+8
; check: v3 = load.i64 v0+16
v1 = load.i128 v0+8
; check: return v2, v3, v5
return v1
}

View File

@@ -1,8 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i16) -> i8 fast {
block0(v0: i16):
v1 = ireduce.i8 v0
return v1
}

View File

@@ -1,20 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i64, i64) -> i128 system_v {
block0(v0: i64, v1: i64):
trap user0
block30:
v245 = iconst.i64 0
v246 = iconcat v245, v245
; The next instruction used to be legalized twice, causing a panic the second time.
v250, v251 = isplit.i128 v370
v252, v253 = isplit v246
trap user0
block45:
v369 = iconst.i64 0
v370 = load.i128 v369
trap user0
}

View File

@@ -1,14 +0,0 @@
test compile
set opt_level=speed_and_size
target x86_64 legacy
function u0:0(i8) -> i8 fast {
block0(v0: i8):
v1 = iconst.i8 0
v2 = isub v1, v0
; check: uextend.i32
; nextln: iconst.i32
; nextln: isub
; nextln: ireduce.i8
return v2
}

View File

@@ -1,10 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i128) system_v {
block0(v0: i128):
jump block1(v0)
block1(v1: i128):
return
}

View File

@@ -1,10 +0,0 @@
test compile
target x86_64 legacy
function u0:0() -> i8 fast {
block0:
v14 = bconst.b1 false
v15 = bint.i8 v14
return v15
}

View File

@@ -1,28 +0,0 @@
test compile
target x86_64 legacy
function u0:51(i64, i64) system_v {
ss0 = explicit_slot 0
ss1 = explicit_slot 1
ss2 = explicit_slot 1
ss3 = explicit_slot 1
block0(v0: i64, v1: i64):
v2 = stack_addr.i64 ss1
v3 = load.i8 v1
store v3, v2
v4 = stack_addr.i64 ss2
v5 = stack_addr.i64 ss3
jump block1
block1:
v6 = load.i8 v2
store v6, v5
v7 = load.i8 v5
v8 = bnot v7
store v8, v4
v9 = load.i8 v4
store v9, v0
return
}

View File

@@ -1,46 +0,0 @@
test legalizer
target x86_64 legacy
function %br_icmp(i64) fast {
block0(v0: i64):
v1 = iconst.i64 0
br_icmp eq v0, v1, block1
jump block1
block1:
return
}
; sameln: function %br_icmp(i64 [%rdi]) fast {
; nextln: block0(v0: i64):
; nextln: [RexOp1pu_id#b8] v1 = iconst.i64 0
; nextln: [RexOp1icscc#8039] v2 = icmp eq v0, v1
; nextln: [RexOp1t8jccb#75] brnz v2, block1
; nextln: [Op1jmpb#eb] jump block1
; nextln:
; nextln: block1:
; nextln: [Op1ret#c3] return
; nextln: }
function %br_icmp_args(i64) fast {
block0(v0: i64):
v1 = iconst.i64 0
br_icmp eq v0, v1, block1(v0)
jump block1(v0)
block1(v2: i64):
return
}
; sameln: function %br_icmp_args(i64 [%rdi]) fast {
; nextln: block0(v0: i64):
; nextln: [RexOp1pu_id#b8] v1 = iconst.i64 0
; nextln: [RexOp1icscc#8039] v3 = icmp eq v0, v1
; nextln: [RexOp1t8jccb#75] brnz v3, block1(v0)
; nextln: [Op1jmpb#eb] jump block1(v0)
; nextln:
; nextln: block1(v2: i64):
; nextln: [Op1ret#c3] return
; nextln: }

View File

@@ -1,31 +0,0 @@
test compile
set opt_level=speed_and_size
target x86_64 legacy
; regex: V=v\d+
; regex: BB=block\d+
function u0:0(i64) system_v {
ss0 = explicit_slot 1
jt0 = jump_table [block1]
block0(v0: i64):
v1 = stack_addr.i64 ss0
v2 = load.i8 v1
br_table v2, block2, jt0
; check: $(oob=$V) = ifcmp_imm $(idx=$V), 1
; block2 is replaced by block1 by fold_redundant_jump
; nextln: brif uge $oob, block1
; nextln: fallthrough $(inb=$BB)
; check: $inb:
; nextln: $(final_idx=$V) = uextend.i64 $idx
; nextln: $(base=$V) = jump_table_base.i64 jt0
; nextln: $(rel_addr=$V) = jump_table_entry $final_idx, $base, 4, jt0
; nextln: $(addr=$V) = iadd $base, $rel_addr
; nextln: indirect_jump_table_br $addr, jt0
block2:
jump block1
block1:
return
}
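; A Rust sketch (hypothetical names; the real lowering emits branches, not
; returns) of the br_table sequence the checks spell out: bounds check, table
; load, then an indirect jump to base plus entry:
;
;   fn br_table_target(index: u8, base: usize, entries: &[i32], default: usize) -> usize {
;       if index as usize >= entries.len() {        // ifcmp_imm + brif uge: out of bounds
;           return default;                         // here folded to block1
;       }
;       let rel = entries[index as usize] as isize; // jump_table_entry: 4-byte offsets
;       base.wrapping_add(rel as usize)             // iadd base, rel; indirect_jump_table_br
;   }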

View File

@@ -1,36 +0,0 @@
test compile
target x86_64 legacy
; regex: V=v\d+
function u0:0(i8, i8) fast {
fn0 = %black_box(i8)
ss0 = explicit_slot 1 ; black box
block0(v0: i8, v1: i8):
v99 = stack_addr.i64 ss0
; check: istore8 $(V), $(V)
v2 = band v0, v1
store v2, v99
v3 = bor v0, v1
store v3, v99
v4 = bxor v0, v1
store v4, v99
v5 = bnot v0
store v5, v99
v6 = band_not v0, v1
store v6, v99
v7 = bor_not v0, v1
store v7, v99
v8 = bxor_not v0, v1
store v8, v99
v9 = band_imm v0, 42
store v9, v99
v10 = bor_imm v0, 42
store v10, v99
v11 = bxor_imm v0, 42
store v11, v99
return
}

View File

@@ -1,14 +0,0 @@
; Test legalization of a non-colocated call in 64-bit non-PIC mode.
test legalizer
set opt_level=speed_and_size
target x86_64 legacy haswell
function %call() {
fn0 = %foo()
block0:
call fn0()
return
}
; check: v0 = func_addr.i64 fn0
; nextln: call_indirect sig0, v0()

View File

@@ -1,25 +0,0 @@
test compile
target x86_64 legacy
; regex: V=v\d+
function u0:0(i8) -> i8, i8 fast {
block0(v0: i8):
v1 = clz v0
; check: v3 = uextend.i32 v0
; nextln: v6 = iconst.i32 -1
; nextln: v7 = iconst.i32 31
; nextln: v8, v9 = x86_bsr v3
; nextln: v10 = selectif.i32 eq v9, v6, v8
; nextln: v4 = isub v7, v10
; nextln: v5 = iadd_imm v4, -24
; nextln: v1 = ireduce.i8 v5
v2 = ctz v0
; nextln: v11 = uextend.i32 v0
; nextln: v12 = bor_imm v11, 256
; nextln: v14 = iconst.i32 32
; nextln: v15, v16 = x86_bsf v12
; nextln: v13 = selectif.i32 eq v16, v14, v15
; nextln: v2 = ireduce.i8 v13
return v1, v2
}
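; A Rust model (hypothetical helper) of the clz path above: bsr reports the
; highest set bit, or sets ZF for a zero input, which selectif turns into -1;
; the final iadd_imm -24 discounts the bits added by widening i8 to i32:
;
;   fn clz8_via_bsr(x: u8) -> u8 {
;       let w = x as u32;                                                  // uextend.i32
;       let bsr = if w == 0 { -1 } else { 31 - w.leading_zeros() as i32 }; // x86_bsr + selectif
;       ((31 - bsr) - 24) as u8                                            // isub from 31, iadd_imm -24
;   }
;
; For example, clz8_via_bsr(1) is 7 and clz8_via_bsr(0) is 8.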

View File

@@ -1,133 +0,0 @@
; Test the custom legalizations.
test legalizer
target i686 legacy
target x86_64 legacy
; regex: V=v\d+
; regex: BB=block\d+
function %cond_trap(i32) {
block0(v1: i32):
trapz v1, user67
return
; check: block0(v1: i32
; nextln: $(f=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $f, user67
; nextln: return
}
function %cond_trap2(i32) {
block0(v1: i32):
trapnz v1, int_ovf
return
; check: block0(v1: i32
; nextln: $(f=$V) = ifcmp_imm v1, 0
; nextln: trapif ne $f, int_ovf
; nextln: return
}
function %cond_trap_b1(i32) {
block0(v1: i32):
v2 = icmp_imm eq v1, 6
trapz v2, user7
return
; check: block0(v1: i32
; check: brnz v2, $(new=$BB)
; check: jump $(trap=$BB)
; check: $trap:
; nextln: trap user7
; check: $new:
; nextln: return
}
function %cond_trap2_b1(i32) {
block0(v1: i32):
v2 = icmp_imm eq v1, 6
trapnz v2, user9
return
; check: block0(v1: i32
; check: brz v2, $(new=$BB)
; check: jump $(trap=$BB)
; check: $trap:
; nextln: trap user9
; check: $new:
; nextln: return
}
function %f32const() -> f32 {
block0:
v1 = f32const 0x1.0p1
; check: $(tmp=$V) = iconst.i32
; check: v1 = bitcast.f32 $tmp
return v1
}
function %select_f64(f64, f64, i32) -> f64 {
block0(v0: f64, v1: f64, v2: i32):
v3 = select v2, v0, v1
; check: brnz v2, $(new=$BB)(v0)
; nextln: jump $new(v1)
; check: $new(v3: f64):
; nextln: return v3
return v3
}
function %f32_min(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fmin v0, v1
return v2
; check: $(vnat=$V) = x86_fmin.f32 v0, v1
; nextln: jump $(done=$BB)($vnat)
; check: $(uno=$BB):
; nextln: $(vuno=$V) = fadd.f32 v0, v1
; nextln: jump $(done=$BB)($vuno)
; check: $(ueq=$BB):
; check: $(veq=$V) = bor.f32 v0, v1
; nextln: jump $(done=$BB)($veq)
; check: $done(v2: f32):
; nextln: return v2
}
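; %f32_min needs this expansion because minss alone returns its second operand
; for NaNs and for signed-zero ties; a hedged Rust sketch of the three paths:
;
;   fn fmin_expanded(a: f32, b: f32) -> f32 {
;       if a.is_nan() || b.is_nan() {
;           return a + b;                                     // uno path: fadd propagates a NaN
;       }
;       if a == b {
;           return f32::from_bits(a.to_bits() | b.to_bits()); // ueq path: bor prefers -0.0
;       }
;       if a < b { a } else { b }                             // ordered path: x86_fmin / minss
;   }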
function %ineg_legalized_i8() {
block0:
v0 = iconst.i8 1
v1 = ineg v0
; check: v2 = iconst.i32 1
; nextln: v0 = ireduce.i8 v2
; nextln: v3 = iconst.i8 0
; nextln: v4 = uextend.i32 v3
; nextln: v5 = uextend.i32 v0
; nextln: v6 = isub v4, v5
; nextln: v1 = ireduce.i8 v6
return
}
function %ineg_legalized_i16() {
block0:
v0 = iconst.i16 1
v1 = ineg v0
; check: v2 = iconst.i32 1
; nextln: v0 = ireduce.i16 v2
; nextln: v3 = iconst.i16 0
; nextln: v4 = uextend.i32 v3
; nextln: v5 = uextend.i32 v0
; nextln: v6 = isub v4, v5
; nextln: v1 = ireduce.i16 v6
return
}
function %ineg_legalized_i32() {
block0:
v0 = iconst.i32 1
v1 = ineg v0
; check: v0 = iconst.i32 1
; nextln: v2 = iconst.i32 0
; nextln: v1 = isub v2, v0
return
}

View File

@@ -1,192 +0,0 @@
; Test the division legalizations.
test legalizer
; See also legalize-div.clif.
set avoid_div_traps=1
target x86_64 legacy
; regex: V=v\d+
; regex: BB=block\d+
function %udiv(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
; check: block0(
v2 = udiv v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $d
}
function %udiv_0(i64) -> i64 {
block0(v0: i64):
; check: block0(
v1 = iconst.i64 0
; nextln: v1 = iconst.i64 0
v2 = udiv v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $d
}
function %udiv_minus_1(i64) -> i64 {
block0(v0: i64):
; check: block0(
v1 = iconst.i64 -1
; nextln: v1 = iconst.i64 -1
v2 = udiv v0, v1
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $d
}
function %urem(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
; check: block0(
v2 = urem v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $r
}
function %urem_0(i64) -> i64 {
block0(v0: i64):
; check: block0(
v1 = iconst.i64 0
; nextln: v1 = iconst.i64 0
v2 = urem v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $r
}
function %urem_minus_1(i64) -> i64 {
block0(v0: i64):
; check: block0(
v1 = iconst.i64 -1
; nextln: v1 = iconst.i64 -1
v2 = urem v0, v1
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $r
}
function %sdiv(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
; check: block0(
v2 = sdiv v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$BB)
; check: $(hi=$V) = sshr_imm
; nextln: $(q=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$BB)($q)
; check: $m1:
; nextln: $(imin=$V) = iconst.i64 0x8000_0000_0000_0000
; nextln: $(fm=$V) = ifcmp.i64 v0, $imin
; nextln: trapif eq $fm, int_ovf
; check: $done(v2: i64):
return v2
; nextln: return v2
}
function %sdiv_0(i64) -> i64 {
block0(v0: i64):
; check: block0(
v1 = iconst.i64 0
; nextln: v1 = iconst.i64 0
v2 = sdiv v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; check: $(hi=$V) = sshr_imm
; nextln: $(q=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
return v2
; nextln: return v2
}
function %sdiv_minus_1(i64) -> i64 {
block0(v0: i64):
; check: block0(
v1 = iconst.i64 -1
; nextln: v1 = iconst.i64 -1
v2 = sdiv v0, v1
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$BB)
; check: $(hi=$V) = sshr_imm
; nextln: $(q=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$BB)($q)
; check: $m1:
; nextln: $(imin=$V) = iconst.i64 0x8000_0000_0000_0000
; nextln: $(fm=$V) = ifcmp.i64 v0, $imin
; nextln: trapif eq $fm, int_ovf
; check: $done(v2: i64):
return v2
; nextln: return v2
}
; The srem expansion needs to special-case x % -1 since x86_sdivmodx traps on INT_MIN/-1.
; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern.
function %srem(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
; check: block0(
v2 = srem v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$BB)
; check: $(hi=$V) = sshr_imm
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$BB)($r)
; check: $m1:
; nextln: $(zero=$V) = iconst.i64 0
; nextln: jump $(done=$BB)($zero)
; check: $done(v2: i64):
return v2
; nextln: return v2
}
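; A minimal Rust model (hypothetical function, panics standing in for traps)
; of the guarded srem path that %srem above checks:
;
;   fn srem_expanded(x: i64, y: i64) -> i64 {
;       if y == 0 {
;           panic!("int_divz"); // trapif eq, int_divz (avoid_div_traps=1)
;       }
;       if y == -1 {
;           return 0;           // m1 block: i64::MIN % -1 would fault in idiv
;       }
;       x % y                   // sshr_imm fills the high half, then x86_sdivmodx
;   }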
function %srem_0(i64) -> i64 {
block0(v0: i64):
; check: block0(
v1 = iconst.i64 0
; nextln: v1 = iconst.i64 0
v2 = srem v0, v1
; nextln: $(fz=$V) = ifcmp_imm v1, 0
; nextln: trapif eq $fz, int_divz
; check: $(hi=$V) = sshr_imm
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
return v2
; nextln: return v2
}
function %srem_minus_1(i64) -> i64 {
block0(v0: i64):
; check: block0(
v1 = iconst.i64 -1
; nextln: v1 = iconst.i64 -1
v2 = srem v0, v1
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$BB)
; check: $(hi=$V) = sshr_imm
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$BB)($r)
; check: $m1:
; nextln: $(zero=$V) = iconst.i64 0
; nextln: jump $(done=$BB)($zero)
; check: $done(v2: i64):
return v2
; nextln: return v2
}

View File

@@ -1,57 +0,0 @@
; Test the division legalizations.
test legalizer
; See also legalize-div-traps.clif.
set avoid_div_traps=0
target x86_64 legacy
; regex: V=v\d+
; regex: BB=block\d+
function %udiv(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
; check: block0(
v2 = udiv v0, v1
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $d
}
function %urem(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
; check: block0(
v2 = urem v0, v1
; nextln: $(hi=$V) = iconst.i64 0
; nextln: $(d=$V), $(r=$V) = x86_udivmodx v0, $hi, v1
return v2
; nextln: return $r
}
function %sdiv(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
; check: block0(
v2 = sdiv v0, v1
; check: $(hi=$V) = sshr_imm
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
return v2
; nextln: return $d
}
; The srem expansion needs to special-case x % -1 since x86_sdivmodx traps on INT_MIN/-1.
; TODO: Add more explicit pattern matching once we've cleaned up the ifcmp+brif pattern.
function %srem(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
; check: block0(
v2 = srem v0, v1
; nextln: $(fm1=$V) = ifcmp_imm v1, -1
; nextln: brif eq $fm1, $(m1=$BB)
; check: $(hi=$V) = sshr_imm
; nextln: $(d=$V), $(r=$V) = x86_sdivmodx v0, $hi, v1
; nextln: jump $(done=$BB)($r)
; check: $m1:
; nextln: $(zero=$V) = iconst.i64 0
; nextln: jump $(done=$BB)($zero)
; check: $done(v2: i64):
return v2
; nextln: return v2
}

View File

@@ -1,13 +0,0 @@
; Test the legalization of f64const.
test legalizer
target x86_64 legacy
; regex: V=v\d+
function %f64const() -> f64 {
block0:
v1 = f64const 0x1.0p1
; check: $(tmp=$V) = iconst.i64
; check: v1 = bitcast.f64 $tmp
return v1
}

View File

@@ -1,14 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i16) -> f64 fast {
block0(v0: i16):
v1 = fcvt_from_uint.f64 v0
return v1
}
function u0:1(i16) -> f64 fast {
block0(v0: i16):
v1 = fcvt_from_sint.f64 v0
return v1
}

View File

@@ -1,123 +0,0 @@
test legalizer
set enable_heap_access_spectre_mitigation=false
target x86_64 legacy
; Test legalization for various forms of heap addresses.
; regex: BB=block\d+
function %heap_addrs(i32, i64, i64 vmctx) {
gv4 = vmctx
gv0 = iadd_imm.i64 gv4, 64
gv1 = iadd_imm.i64 gv4, 72
gv2 = iadd_imm.i64 gv4, 80
gv3 = load.i64 notrap aligned gv4+88
heap0 = static gv0, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
heap1 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i32
heap2 = static gv0, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i64
heap3 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i64
heap4 = dynamic gv1, min 0x1_0000, bound gv3, offset_guard 0x8000_0000, index_type i32
heap5 = dynamic gv1, bound gv3, offset_guard 0x1000, index_type i32
heap6 = dynamic gv1, min 0x1_0000, bound gv2, offset_guard 0x8000_0000, index_type i64
heap7 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i64
; check: heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
; check: heap1 = static gv0, min 0, bound 0x0001_0000, offset_guard 4096, index_type i32
; check: heap2 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i64
; check: heap3 = static gv0, min 0, bound 0x0001_0000, offset_guard 4096, index_type i64
; check: heap4 = dynamic gv1, min 0x0001_0000, bound gv3, offset_guard 0x8000_0000, index_type i32
; check: heap5 = dynamic gv1, min 0, bound gv3, offset_guard 4096, index_type i32
; check: heap6 = dynamic gv1, min 0x0001_0000, bound gv2, offset_guard 0x8000_0000, index_type i64
; check: heap7 = dynamic gv1, min 0, bound gv2, offset_guard 4096, index_type i64
block0(v0: i32, v1: i64, v3: i64):
; The fast path: 32-bit index, static heap with a sufficient bound, no bounds
; check needed! See the sketch after this function.
v4 = heap_addr.i64 heap0, v0, 0
; check: v12 = uextend.i64 v0
; check: v13 = iadd_imm v3, 64
; check: v4 = iadd v13, v12
v5 = heap_addr.i64 heap1, v0, 0
; check: v14 = uextend.i64 v0
; check: v15 = icmp_imm ugt v14, 0x0001_0000
; check: brz v15, $(resume_1=$BB)
; nextln: jump $(trap_1=$BB)
; check: $trap_1:
; nextln: trap heap_oob
; check: $resume_1:
; check: v16 = iadd_imm.i64 v3, 64
; check: v5 = iadd v16, v14
v6 = heap_addr.i64 heap2, v1, 0
; check: v19 = iconst.i64 0x0001_0000_0000
; check: v17 = icmp.i64 ugt v1, v19
; check: brz v17, $(resume_2=$BB)
; nextln: jump $(trap_2=$BB)
; check: $trap_2:
; nextln: trap heap_oob
; check: $resume_2:
; check: v18 = iadd_imm.i64 v3, 64
; check: v6 = iadd v18, v1
v7 = heap_addr.i64 heap3, v1, 0
; check: v20 = icmp_imm.i64 ugt v1, 0x0001_0000
; check: brz v20, $(resume_3=$BB)
; nextln: jump $(trap_3=$BB)
; check: $trap_3:
; nextln: trap heap_oob
; check: $resume_3:
; check: v21 = iadd_imm.i64 v3, 64
; check: v7 = iadd v21, v1
v8 = heap_addr.i64 heap4, v0, 0
; check: v22 = uextend.i64 v0
; check: v23 = load.i64 notrap aligned v3+88
; check: v24 = iadd_imm v23, 0
; check: v25 = icmp ugt v22, v24
; check: brz v25, $(resume_4=$BB)
; nextln: jump $(trap_4=$BB)
; check: $trap_4:
; nextln: trap heap_oob
; check: $resume_4:
; check: v26 = iadd_imm.i64 v3, 72
; check: v8 = iadd v26, v22
v9 = heap_addr.i64 heap5, v0, 0
; check: v27 = uextend.i64 v0
; check: v28 = load.i64 notrap aligned v3+88
; check: v29 = iadd_imm v28, 0
; check: v30 = icmp ugt v27, v29
; check: brz v30, $(resume_5=$BB)
; nextln: jump $(trap_5=$BB)
; check: $trap_5:
; nextln: trap heap_oob
; check: $resume_5:
; check: v31 = iadd_imm.i64 v3, 72
; check: v9 = iadd v31, v27
v10 = heap_addr.i64 heap6, v1, 0
; check: v32 = iadd_imm.i64 v3, 80
; check: v33 = iadd_imm v32, 0
; check: v34 = icmp.i64 ugt v1, v33
; check: brz v34, $(resume_6=$BB)
; nextln: jump $(trap_6=$BB)
; check: $trap_6:
; nextln: trap heap_oob
; check: $resume_6:
; check: v35 = iadd_imm.i64 v3, 72
; check: v10 = iadd v35, v1
v11 = heap_addr.i64 heap7, v1, 0
; check: v36 = iadd_imm.i64 v3, 80
; check: v37 = iadd_imm v36, 0
; check: v38 = icmp.i64 ugt v1, v37
; check: brz v38, $(resume_7=$BB)
; nextln: jump $(trap_7=$BB)
; check: $trap_7:
; nextln: trap heap_oob
; check: $resume_7:
; check: v39 = iadd_imm.i64 v3, 72
; check: v11 = iadd v39, v1
return
}
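; A hedged Rust sketch (hypothetical helpers, Err standing in for trap
; heap_oob) of the two shapes above: the static fast path emits no compare at
; all, while every other heap emits one compare and a conditional trap:
;
;   fn heap_addr_static_u32(heap_base: u64, index: u32) -> u64 {
;       // heap0: the bound is 2^32 and the index has only 32 bits, so no check can fail
;       heap_base.wrapping_add(index as u64) // uextend.i64 + iadd
;   }
;   fn heap_addr_checked(heap_base: u64, index: u64, bound: u64) -> Result<u64, ()> {
;       if index > bound {                   // icmp ugt index, bound
;           return Err(());                  // jump to the trap heap_oob block
;       }
;       Ok(heap_base.wrapping_add(index))
;   }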

View File

@@ -1,20 +0,0 @@
; Test the legalization of i128 instructions on x86_64.
test legalizer
target x86_64 legacy haswell
; regex: V=v\d+
function %imul(i128, i128) -> i128 {
block0(v1: i128, v2: i128):
v10 = imul v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(v11=$V) = imul $v1_msb, $v2_lsb
; nextln: $(v12=$V) = imul $v1_lsb, $v2_msb
; nextln: $(v13=$V) = iadd $v11, $v12
; nextln: $(v99=$V), $(v14=$V) = x86_umulx $v1_lsb, $v2_lsb
; nextln: $(v10_msb=$V) = iadd $v13, $v14
; nextln: $(v10_lsb=$V) = imul $v1_lsb, $v2_lsb
; nextln: v10 = iconcat $v10_lsb, $v10_msb
return v10
}
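; A Rust rendering (hypothetical helper) of the schoolbook product the checks
; trace: two cross products plus the high half of the low-by-low multiply form
; the upper word:
;
;   fn imul128_via_halves(a: u128, b: u128) -> u128 {
;       let (a_lo, a_hi) = (a as u64, (a >> 64) as u64);
;       let (b_lo, b_hi) = (b as u64, (b >> 64) as u64);
;       let cross = a_hi.wrapping_mul(b_lo)                // imul msb, lsb
;           .wrapping_add(a_lo.wrapping_mul(b_hi));        // imul lsb, msb; iadd
;       let wide = (a_lo as u128) * (b_lo as u128);        // x86_umulx: full 64x64 product
;       let msb = cross.wrapping_add((wide >> 64) as u64); // iadd with umulx's high half
;       ((msb as u128) << 64) | (wide as u64 as u128)      // iconcat lsb, msb
;   }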

View File

@@ -1,357 +0,0 @@
; Test the legalization of i64 instructions on x86_32.
test legalizer
target i686 legacy haswell
; regex: V=v\d+
function %iadd(i64, i64) -> i64 {
block0(v1: i64, v2: i64):
v10 = iadd v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(v10_lsb=$V), $(carry=$V) = iadd_ifcout $v1_lsb, $v2_lsb
; nextln: $(v10_msb=$V) = iadd_ifcin $v1_msb, $v2_msb, $carry
; nextln: v10 = iconcat $v10_lsb, $v10_msb
return v10
}
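; The same split in Rust (hypothetical helper): the low add exposes a carry
; that the high add consumes, mirroring iadd_ifcout and iadd_ifcin:
;
;   fn iadd64_via_halves(a: u64, b: u64) -> u64 {
;       let (a_lo, a_hi) = (a as u32, (a >> 32) as u32);
;       let (b_lo, b_hi) = (b as u32, (b >> 32) as u32);
;       let (lo, carry) = a_lo.overflowing_add(b_lo);                // iadd_ifcout
;       let hi = a_hi.wrapping_add(b_hi).wrapping_add(carry as u32); // iadd_ifcin
;       ((hi as u64) << 32) | lo as u64                              // iconcat
;   }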
function %isub(i64, i64) -> i64 {
block0(v1: i64, v2: i64):
v10 = isub v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(v10_lsb=$V), $(borrow=$V) = isub_ifbout $v1_lsb, $v2_lsb
; nextln: $(v10_msb=$V) = isub_ifbin $v1_msb, $v2_msb, $borrow
; nextln: v10 = iconcat $v10_lsb, $v10_msb
return v10
}
function %imul(i64, i64) -> i64 {
block0(v1: i64, v2: i64):
v10 = imul v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(v11=$V) = imul $v1_msb, $v2_lsb
; nextln: $(v12=$V) = imul $v1_lsb, $v2_msb
; nextln: $(v13=$V) = iadd $v11, $v12
; nextln: $(v99=$V), $(v14=$V) = x86_umulx $v1_lsb, $v2_lsb
; nextln: $(v10_msb=$V) = iadd $v13, $v14
; nextln: $(v10_lsb=$V) = imul $v1_lsb, $v2_lsb
; nextln: v10 = iconcat $v10_lsb, $v10_msb
return v10
}
function %icmp_eq(i64, i64) -> b1 {
block0(v1: i64, v2: i64):
v10 = icmp eq v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(v10_lsb=$V) = icmp eq $v1_lsb, $v2_lsb
; nextln: $(v10_msb=$V) = icmp eq $v1_msb, $v2_msb
; nextln: v10 = band $v10_lsb, $v10_msb
return v10
}
function %icmp_imm_eq(i64) -> b1 {
block0(v1: i64):
v10 = icmp_imm eq v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
; nextln: v1 = iconcat $(v1_lsb_a=$V), $(v1_msb_a=$V)
; nextln: $(v2_lsb=$V) = iconst.i32 0
; nextln: $(v2_msb=$V) = iconst.i32 0
; nextln: $(v10_lsb=$V) = icmp eq $v1_lsb, $v2_lsb
; nextln: $(v10_msb=$V) = icmp eq $v1_msb, $v2_msb
; nextln: v10 = band $v10_lsb, $v10_msb
return v10
}
function %icmp_ne(i64, i64) -> b1 {
block0(v1: i64, v2: i64):
v10 = icmp ne v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(v10_lsb=$V) = icmp ne $v1_lsb, $v2_lsb
; nextln: $(v10_msb=$V) = icmp ne $v1_msb, $v2_msb
; nextln: v10 = bor $v10_lsb, $v10_msb
return v10
}
function %icmp_imm_ne(i64) -> b1 {
block0(v1: i64):
v10 = icmp_imm ne v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
; nextln: v1 = iconcat $(v1_lsb_a=$V), $(v1_msb_a=$V)
; nextln: $(v2_lsb=$V) = iconst.i32 0
; nextln: $(v2_msb=$V) = iconst.i32 0
; nextln: $(v10_lsb=$V) = icmp ne $v1_lsb, $v2_lsb
; nextln: $(v10_msb=$V) = icmp ne $v1_msb, $v2_msb
; nextln: v10 = bor $v10_lsb, $v10_msb
return v10
}
function %icmp_sgt(i64, i64) -> b1 {
block0(v1: i64, v2: i64):
v10 = icmp sgt v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(b1=$V) = icmp sgt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp slt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ugt $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
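; The signed compare splits into three flag computations and two bit ops; in
; Rust (hypothetical helper; the remaining predicates in this file only swap
; which comparisons feed the three flags):
;
;   fn icmp_sgt64_via_halves(a: i64, b: i64) -> bool {
;       let (a_lo, a_hi) = (a as u32, (a >> 32) as i32);
;       let (b_lo, b_hi) = (b as u32, (b >> 32) as i32);
;       let gt_hi = a_hi > b_hi; // icmp sgt: high halves decide when unequal
;       let lt_hi = a_hi < b_hi; // icmp slt
;       let gt_lo = a_lo > b_lo; // icmp ugt: unsigned on the low halves
;       gt_hi | (!lt_hi & gt_lo) // bor(b1, band(bnot(b2), b3))
;   }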
function %icmp_imm_sgt(i64) -> b1 {
block0(v1: i64):
v10 = icmp_imm sgt v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
; nextln: v1 = iconcat $(v1_lsb_a=$V), $(v1_msb_a=$V)
; nextln: $(v2_lsb=$V) = iconst.i32 0
; nextln: $(v2_msb=$V) = iconst.i32 0
; nextln: $(b1=$V) = icmp sgt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp slt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ugt $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_sge(i64, i64) -> b1 {
block0(v1: i64, v2: i64):
v10 = icmp sge v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(b1=$V) = icmp sgt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp slt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp uge $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_imm_sge(i64) -> b1 {
block0(v1: i64):
v10 = icmp_imm sge v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
; nextln: v1 = iconcat $(v1_lsb_a=$V), $(v1_msb_a=$V)
; nextln: $(v2_lsb=$V) = iconst.i32 0
; nextln: $(v2_msb=$V) = iconst.i32 0
; nextln: $(b1=$V) = icmp sgt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp slt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp uge $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_slt(i64, i64) -> b1 {
block0(v1: i64, v2: i64):
v10 = icmp slt v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(b1=$V) = icmp slt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp sgt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ult $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_imm_slt(i64) -> b1 {
block0(v1: i64):
v10 = icmp_imm slt v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
; nextln: v1 = iconcat $(v1_lsb_a=$V), $(v1_msb_a=$V)
; nextln: $(v2_lsb=$V) = iconst.i32 0
; nextln: $(v2_msb=$V) = iconst.i32 0
; nextln: $(b1=$V) = icmp slt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp sgt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ult $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_sle(i64, i64) -> b1 {
block0(v1: i64, v2: i64):
v10 = icmp sle v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(b1=$V) = icmp slt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp sgt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ule $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_imm_sle(i64) -> b1 {
block0(v1: i64):
v10 = icmp_imm sle v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
; nextln: v1 = iconcat $(v1_lsb_a=$V), $(v1_msb_a=$V)
; nextln: $(v2_lsb=$V) = iconst.i32 0
; nextln: $(v2_msb=$V) = iconst.i32 0
; nextln: $(b1=$V) = icmp slt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp sgt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ule $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_ugt(i64, i64) -> b1 {
block0(v1: i64, v2: i64):
v10 = icmp ugt v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(b1=$V) = icmp ugt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp ult $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ugt $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_imm_ugt(i64) -> b1 {
block0(v1: i64):
v10 = icmp_imm ugt v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
; nextln: v1 = iconcat $(v1_lsb_a=$V), $(v1_msb_a=$V)
; nextln: $(v2_lsb=$V) = iconst.i32 0
; nextln: $(v2_msb=$V) = iconst.i32 0
; nextln: $(b1=$V) = icmp ugt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp ult $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ugt $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_uge(i64, i64) -> b1 {
block0(v1: i64, v2: i64):
v10 = icmp uge v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(b1=$V) = icmp ugt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp ult $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp uge $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_imm_uge(i64) -> b1 {
block0(v1: i64):
v10 = icmp_imm uge v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
; nextln: v1 = iconcat $(v1_lsb_a=$V), $(v1_msb_a=$V)
; nextln: $(v2_lsb=$V) = iconst.i32 0
; nextln: $(v2_msb=$V) = iconst.i32 0
; nextln: $(b1=$V) = icmp ugt $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp ult $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp uge $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_ult(i64, i64) -> b1 {
block0(v1: i64, v2: i64):
v10 = icmp ult v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(b1=$V) = icmp ult $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp ugt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ult $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_imm_ult(i64) -> b1 {
block0(v1: i64):
v10 = icmp_imm ult v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
; nextln: v1 = iconcat $(v1_lsb_a=$V), $(v1_msb_a=$V)
; nextln: $(v2_lsb=$V) = iconst.i32 0
; nextln: $(v2_msb=$V) = iconst.i32 0
; nextln: $(b1=$V) = icmp ult $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp ugt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ult $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_ule(i64, i64) -> b1 {
block0(v1: i64, v2: i64):
v10 = icmp ule v1, v2
; check: v1 = iconcat $(v1_lsb=$V), $(v1_msb=$V)
; nextln: v2 = iconcat $(v2_lsb=$V), $(v2_msb=$V)
; nextln: $(b1=$V) = icmp ult $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp ugt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ule $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %icmp_imm_ule(i64) -> b1 {
block0(v1: i64):
v10 = icmp_imm ule v1, 0
; check: $(v1_lsb=$V) -> $(v1_lsb_a=$V)
; nextln: $(v1_msb=$V) -> $(v1_msb_a=$V)
; nextln: v1 = iconcat $(v1_lsb_a=$V), $(v1_msb_a=$V)
; nextln: $(v2_lsb=$V) = iconst.i32 0
; nextln: $(v2_msb=$V) = iconst.i32 0
; nextln: $(b1=$V) = icmp ult $v1_msb, $v2_msb
; nextln: $(b2=$V) = icmp ugt $v1_msb, $v2_msb
; nextln: $(b3=$V) = icmp ule $v1_lsb, $v2_lsb
; nextln: $(c1=$V) = bnot $b2
; nextln: $(c2=$V) = band $c1, $b3
; nextln: v10 = bor $b1, $c2
return v10
}
function %ineg_legalized_i64() {
block0:
v0 = iconst.i64 1
v1 = ineg v0
; check: v2 = iconst.i32 1
; nextln: v3 = iconst.i32 0
; nextln: v0 = iconcat v2, v3
; nextln: v5 = iconst.i32 0
; nextln: v6 = iconst.i32 0
; nextln: v4 = iconcat v5, v6
; nextln: v7, v8 = isub_ifbout v5, v2
; nextln: v9 = isub_ifbin v6, v3, v8
; nextln: v1 = iconcat v7, v9
return
}

View File

@@ -1,19 +0,0 @@
test compile
target x86_64 legacy
; regex: V=v\d+
function u0:0(i8, i8) -> i8 fast {
block0(v0: i8, v1: i8):
v2 = icmp_imm sle v0, 0
; check: $(e1=$V) = sextend.i32 v0
; nextln: v2 = icmp_imm sle $e1, 0
v3 = bint.i8 v2
v4 = icmp eq v0, v1
; check: $(e2=$V) = uextend.i32 v0
; nextln: $(e3=$V) = uextend.i32 v1
; nextln: v4 = icmp eq $e2, $e3
v5 = bint.i8 v4
v6 = iadd v3, v5
return v6
}

View File

@@ -1,18 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i64) system_v {
ss0 = explicit_slot 0
block0(v0: i64):
jump block1
block1:
; _0 = const 42u8
v1 = iconst.i8 42
store v1, v0
;
; return
return
}

View File

@@ -1,11 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i64, i8, i8) system_v {
block0(v0: i64, v1: i8, v2: i8):
v11 = imul v1, v2
store v11, v0
return
}

View File

@@ -1,15 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i64, i8) system_v {
ss0 = explicit_slot 1
block0(v0: i64, v1: i8):
v3 = stack_addr.i64 ss0
v5 = load.i8 v3
v6 = iconst.i8 2
v7 = imul_imm v5, 42
store v7, v0
return
}

View File

@@ -1,13 +0,0 @@
; Test the custom legalization of ineg.i64 on x86_64.
test legalizer
target x86_64 legacy
function %ineg_legalized_i64() {
block0:
v0 = iconst.i64 1
v1 = ineg v0
; check: v0 = iconst.i64 1
; nextln: v2 = iconst.i64 0
; nextln: v1 = isub v2, v0
return
}

View File

@@ -1,11 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconcat v0, v1
v3 = ireduce.i64 v2
; check: v3 = copy v0
; check: return v3
return v3
}

View File

@@ -1,11 +0,0 @@
test compile
target i686 legacy
function u0:0(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = iconcat v0, v1
v3 = ireduce.i32 v2
; check: v3 = fill v0
; check: return v3
return v3
}

View File

@@ -1,24 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i128) -> i64, i64 fast {
; check: block0(v4: i64 [%rdi], v5: i64 [%rsi], v8: i64 [%rbp]):
block0(v0: i128):
jump block2
block1:
; When this `isplit` is legalized, the bnot below is not yet legalized,
; so there isn't a corresponding `iconcat` yet. We should try legalization
; for this `isplit` again once all instructions have been legalized.
v2, v3 = isplit.i128 v1
; return v6, v7
return v2, v3
block2:
; check: v6 = bnot.i64 v4
; check: v2 -> v6
; check: v7 = bnot.i64 v5
; check: v3 -> v7
v1 = bnot.i128 v0
jump block1
}

View File

@@ -1,15 +0,0 @@
test legalizer
; Pre-SSE 4.1, we need to use runtime library calls for floating point rounding operations.
set is_pic
target x86_64 legacy
function %floor(f32) -> f32 {
block0(v0: f32):
v1 = floor v0
return v1
}
; check: function %floor(f32 [%xmm0]) -> f32 [%xmm0] fast {
; check: sig0 = (f32 [%xmm0]) -> f32 [%xmm0] system_v
; check: fn0 = %FloorF32 sig0
; check: v1 = call fn0(v0)

View File

@@ -1,31 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i64, i8, i8) system_v {
ss0 = explicit_slot 0
ss1 = explicit_slot 1
ss2 = explicit_slot 1
ss3 = explicit_slot 1
ss4 = explicit_slot 1
block0(v0: i64, v1: i8, v2: i8):
v3 = stack_addr.i64 ss1
store v1, v3
v4 = stack_addr.i64 ss2
store v2, v4
v5 = stack_addr.i64 ss3
v6 = stack_addr.i64 ss4
jump block1
block1:
v7 = load.i8 v3
store v7, v5
v8 = load.i8 v4
store v8, v6
v9 = load.i8 v5
v10 = load.i8 v6
v11 = imul v9, v10
store v11, v0
return
}

View File

@@ -1,115 +0,0 @@
; Test the legalization of memory objects.
test legalizer
set enable_heap_access_spectre_mitigation=false
target x86_64 legacy
; regex: V=v\d+
; regex: BB=block\d+
function %vmctx(i64 vmctx) -> i64 {
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, -16
block1(v1: i64):
v2 = global_value.i64 gv1
; check: v2 = iadd_imm v1, -16
return v2
; check: return v2
}
function %load(i64 vmctx) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0-16
gv2 = iadd_imm.i64 gv1, 32
block1(v1: i64):
v2 = global_value.i64 gv2
; check: $(p1=$V) = load.i64 notrap aligned v1-16
; check: v2 = iadd_imm $p1, 32
return v2
; check: return v2
}
function %symbol() -> i64 {
gv0 = symbol %something
gv1 = symbol u123:456
block1:
v0 = global_value.i64 gv0
; check: v0 = symbol_value.i64 gv0
v1 = global_value.i64 gv1
; check: v1 = symbol_value.i64 gv1
v2 = bxor v0, v1
return v2
}
; SpiderMonkey VM-style static 4+2 GB heap.
; This eliminates bounds checks completely for offsets < 2GB.
function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v {
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 64
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v999: i64):
; check: block0(
v1 = heap_addr.i64 heap0, v0, 1
; The bounds check should be eliminated.
; Checks here assume that no peephole opts fold the load offsets.
; nextln: $(xoff=$V) = uextend.i64 v0
; check: $(hbase=$V) = iadd_imm v999, 64
; nextln: v1 = iadd $hbase, $xoff
v2 = load.f32 v1+16
; nextln: v2 = load.f32 v1+16
v3 = load.f32 v1+20
; nextln: v3 = load.f32 v1+20
v4 = fadd v2, v3
return v4
}
function %staticheap_static_oob_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v {
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 64
heap0 = static gv1, min 0x1000, bound 0x1000_0000, offset_guard 0x8000_0000
block0(v0: i32, v999: i64):
; Everything after the obviously OOB access should be eliminated, leaving
; the `trap heap_oob` instruction as the terminator of the block and moving
; the remainder of the instructions into an unreachable block.
; check: block0(
; nextln: trap heap_oob
; check: block1:
; nextln: v1 = iconst.i64 0
; nextln: v2 = load.f32 v1+16
; nextln: return v2
; nextln: }
v1 = heap_addr.i64 heap0, v0, 0x1000_0001
v2 = load.f32 v1+16
return v2
}
; SpiderMonkey VM-style static 4+2 GB heap.
; Offsets >= 2 GB do require a boundscheck.
function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash_system_v {
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 64
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v999: i64):
; check: block0(
v1 = heap_addr.i64 heap0, v0, 0x8000_0000
; Bounds-check code
; check: $(xoff=$V) = uextend.i64 v0
; check: $(oob=$V) = icmp
; nextln: brz $oob, $(ok=$BB)
; nextln: jump $(trap_oob=$BB)
; check: $trap_oob:
; nextln: trap heap_oob
; check: $ok:
; Checks here assume that no peephole opts fold the load offsets.
; check: $(hbase=$V) = iadd_imm.i64 v999, 64
; nextln: v1 = iadd $hbase, $xoff
v2 = load.f32 v1+0x7fff_ffff
; nextln: v2 = load.f32 v1+0x7fff_ffff
return v2
}
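
Taken together, the three heap functions above exercise a three-way decision. A hedged Rust sketch that reproduces their outcomes (simplified to a 32-bit index and a static bound, with Spectre mitigation off as in the test; the names are illustrative):

#[derive(Debug, PartialEq)]
enum HeapCheck {
    Elide,      // guard pages cover the access: no explicit check
    AlwaysTrap, // statically out of bounds: becomes `trap heap_oob`
    Dynamic,    // emit the icmp/brz sequence
}

/// `access` is the heap_addr size argument; the index is a 32-bit
/// value zero-extended to 64 bits before the address is formed.
fn static_heap_check(access: u64, bound: u64, guard: u64) -> HeapCheck {
    let max_index = u64::from(u32::MAX);
    if access > bound {
        HeapCheck::AlwaysTrap // even index 0 would be out of bounds
    } else if max_index < bound && access < guard {
        HeapCheck::Elide // index can't pass `bound`; offset stays inside the guard
    } else {
        HeapCheck::Dynamic
    }
}

fn main() {
    let (bound, guard) = (0x1_0000_0000u64, 0x8000_0000u64); // 4 GiB heap, 2 GiB guard
    assert_eq!(static_heap_check(1, bound, guard), HeapCheck::Elide);
    assert_eq!(static_heap_check(0x8000_0000, bound, guard), HeapCheck::Dynamic);
    assert_eq!(static_heap_check(0x1000_0001, 0x1000_0000, guard), HeapCheck::AlwaysTrap);
}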

View File

@@ -1,43 +0,0 @@
test compile
target x86_64 legacy baseline
; umulhi/smulhi on 64-bit operands
function %i64_umulhi(i64, i64) -> i64 {
block0(v10: i64, v11: i64):
v12 = umulhi v10, v11
; check: %rdi -> %rax
; check: x86_umulx
; check: %rdx -> %rax
return v12
}
function %i64_smulhi(i64, i64) -> i64 {
block0(v20: i64, v21: i64):
v22 = smulhi v20, v21
; check: %rdi -> %rax
; check: x86_smulx
; check: %rdx -> %rax
return v22
}
; umulhi/smulhi on 32-bit operands
function %i32_umulhi(i32, i32) -> i32 {
block0(v30: i32, v31: i32):
v32 = umulhi v30, v31
; check: %rdi -> %rax
; check: x86_umulx
; check: %rdx -> %rax
return v32
}
function %i32_smulhi(i32, i32) -> i32 {
block0(v40: i32, v41: i32):
v42 = smulhi v40, v41
; check: %rdi -> %rax
; check: x86_smulx
; check: %rdx -> %rax
return v42
}
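
What `x86_umulx`/`x86_smulx` compute is the high half of a widening multiply, which is why one operand is pinned to %rax and the result is read back from %rdx: x86's one-operand mul/imul write the double-width product to %rdx:%rax. The semantics in Rust:

/// High 64 bits of an unsigned 64x64 -> 128 multiply; umulhi
/// returns the %rdx half of the `mul` result.
fn umulhi64(a: u64, b: u64) -> u64 {
    ((u128::from(a) * u128::from(b)) >> 64) as u64
}

/// Signed counterpart (x86 one-operand `imul`).
fn smulhi64(a: i64, b: i64) -> i64 {
    ((i128::from(a) * i128::from(b)) >> 64) as i64
}

fn main() {
    assert_eq!(umulhi64(u64::MAX, 2), 1); // 2*(2^64-1) = 2^65-2
    assert_eq!(smulhi64(-1, 2), -1);      // -2 sign-extends into the high half
}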

View File

@@ -1,9 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i8) -> i8 fast {
block0(v0: i8):
v1 = popcnt v0
; not: sextend.i32 v0
return v1
}

View File

@@ -1,36 +0,0 @@
test compile
target x86_64 legacy
function u0:0(i64, i64, i64) system_v {
ss0 = explicit_slot 0
ss1 = explicit_slot 8
ss2 = explicit_slot 8
ss3 = explicit_slot 2
ss4 = explicit_slot 8
sig0 = (i64, i16, i64) system_v
fn0 = colocated u0:11 sig0
block0(v0: i64, v1: i64, v2: i64):
v3 = stack_addr.i64 ss1
store v1, v3
v4 = stack_addr.i64 ss2
store v2, v4
v5 = stack_addr.i64 ss3
v6 = stack_addr.i64 ss4
jump block1
block1:
v7 = load.i64 v3
v8 = load.i16 v7
store v8, v5
v9 = load.i64 v4
store v9, v6
v10 = load.i16 v5
v11 = load.i64 v6
call fn0(v0, v10, v11)
jump block2
block2:
return
}

View File

@@ -1,35 +0,0 @@
test compile
target x86_64 legacy
; regex: V=v\d+
; regex: R=%[a-z0-9]+
function %i32_rotr(i32, i32) -> i32 fast {
block0(v0: i32, v1: i32):
; check: regmove v1, $R -> %rcx
; check: v2 = rotr v0, v1
v2 = rotr v0, v1
return v2
}
function %i32_rotr_imm_1(i32) -> i32 fast {
block0(v0: i32):
; check: $V = rotr_imm v0, 1
v2 = rotr_imm v0, 1
return v2
}
function %i32_rotl(i32, i32) -> i32 fast {
block0(v0: i32, v1: i32):
; check: regmove v1, $R -> %rcx
; check: v2 = rotl v0, v1
v2 = rotl v0, v1
return v2
}
function %i32_rotl_imm_1(i32) -> i32 fast {
block0(v0: i32):
; check: $V = rotl_imm v0, 1
v2 = rotl_imm v0, 1
return v2
}
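
The regmove lines reflect an x86 constraint rather than anything about the rotate itself: a variable shift or rotate count must live in %cl, while the immediate forms need no such move. Semantically the operation is an ordinary rotate; a quick Rust check (Rust's rotate intrinsics mask the count exactly as `ror`/`rol` do for 32-bit operands):

/// What `rotr` computes; x86 `ror r/m32, %cl` masks the count to
/// 5 bits, which `rotate_right` matches.
fn rotr32(x: u32, count: u32) -> u32 {
    x.rotate_right(count) // count taken mod 32
}

fn main() {
    assert_eq!(rotr32(0x8000_0001, 1), 0xC000_0000);
    assert_eq!(rotr32(0x1234_5678, 32), 0x1234_5678); // count masks to 0
}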

View File

@@ -1,24 +0,0 @@
test compile
target x86_64 legacy
; regex: V=v\d+
function u0:0(i8, i8) -> i8 fast {
block0(v0: i8, v1: i8):
v2 = ishl v0, v1
; check: $(e1=$V) = uextend.i32 v0
; check: $(r1=$V) = ishl $e1, v1
; check: v2 = ireduce.i8 $r1
v3 = ushr v0, v1
; check: $(e2=$V) = uextend.i32 v0
; check: $(r2=$V) = ushr $e2, v1
; check: v3 = ireduce.i8 $r2
v4 = sshr v0, v1
; check: $(e3=$V) = sextend.i32 v0
; check: $(r3=$V) = sshr $e3, v1
; check: v4 = ireduce.i8 $r3
v5 = iadd v2, v3
v6 = iadd v4, v5
return v6
}
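
A sketch of the i8 shift expansion being checked: widen to 32 bits (zero-extend for the logical shifts, sign-extend for the arithmetic one), shift at native width, reduce back to 8 bits. In Rust, mirroring the check lines rather than the legalizer's code:

/// i8 logical left shift via the 32-bit expansion in the test:
/// uextend -> ishl.i32 -> ireduce.i8.
fn ishl_i8(x: u8, amt: u8) -> u8 {
    let wide = u32::from(x);                         // uextend.i32 v0
    wide.wrapping_shl(u32::from(amt)) as u8          // ishl, then ireduce.i8
}

/// i8 arithmetic right shift: sextend -> sshr.i32 -> ireduce.i8.
fn sshr_i8(x: i8, amt: u8) -> i8 {
    let wide = i32::from(x);                         // sextend.i32 v0
    wide.wrapping_shr(u32::from(amt)) as i8
}

fn main() {
    assert_eq!(ishl_i8(0x81, 1), 0x02);  // high bit falls off after the reduce
    assert_eq!(sshr_i8(-0x80, 1), -0x40); // sign bit is preserved
}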

View File

@@ -1,73 +0,0 @@
test legalizer
target x86_64 legacy
; Test legalization for various forms of table addresses.
; regex: BB=block\d+
function %table_addrs(i32, i64, i64 vmctx) {
gv4 = vmctx
gv0 = iadd_imm.i64 gv4, 72
gv1 = iadd_imm.i64 gv4, 80
gv2 = load.i32 notrap aligned gv4+88
table0 = dynamic gv0, min 0x1_0000, bound gv2, element_size 1, index_type i32
table1 = dynamic gv0, bound gv2, element_size 16, index_type i32
table2 = dynamic gv0, min 0x1_0000, bound gv1, element_size 1, index_type i64
table3 = dynamic gv0, bound gv1, element_size 16, index_type i64
; check: table0 = dynamic gv0, min 0x0001_0000, bound gv2, element_size 1, index_type i32
; check: table1 = dynamic gv0, min 0, bound gv2, element_size 16, index_type i32
; check: table2 = dynamic gv0, min 0x0001_0000, bound gv1, element_size 1, index_type i64
; check: table3 = dynamic gv0, min 0, bound gv1, element_size 16, index_type i64
block0(v0: i32, v1: i64, v3: i64):
v4 = table_addr.i64 table0, v0, +0
; check: v8 = load.i32 notrap aligned v3+88
; check: v9 = icmp uge v0, v8
; check: brz v9, $(resume_1=$BB)
; nextln: jump $(trap_1=$BB)
; check: $trap_1:
; nextln: trap table_oob
; check: $resume_1:
; check: v10 = uextend.i64 v0
; check: v11 = iadd_imm.i64 v3, 72
; check: v4 = iadd v11, v10
v5 = table_addr.i64 table1, v0, +0
; check: v12 = load.i32 notrap aligned v3+88
; check: v13 = icmp.i32 uge v0, v12
; check: brz v13, $(resume_2=$BB)
; nextln: jump $(trap_2=$BB)
; check: $trap_2:
; nextln: trap table_oob
; check: $resume_2:
; check: v14 = uextend.i64 v0
; check: v15 = iadd_imm.i64 v3, 72
; check: v16 = ishl_imm v14, 4
; check: v5 = iadd v15, v16
v6 = table_addr.i64 table2, v1, +0
; check: v17 = iadd_imm.i64 v3, 80
; check: v18 = icmp.i64 uge v1, v17
; check: brz v18, $(resume_3=$BB)
; nextln: jump $(trap_3=$BB)
; check: $trap_3:
; nextln: trap table_oob
; check: $resume_3:
; check: v19 = iadd_imm.i64 v3, 72
; check: v6 = iadd v19, v1
v7 = table_addr.i64 table3, v1, +0
; check: v20 = iadd_imm.i64 v3, 80
; check: v21 = icmp.i64 uge v1, v20
; check: brz v21, $(resume_4=$BB)
; nextln: jump $(trap_4=$BB)
; check: $trap_4:
; nextln: trap table_oob
; check: $resume_4:
; check: v22 = iadd_imm.i64 v3, 72
; check: v23 = ishl_imm.i64 v1, 4
; check: v7 = iadd v22, v23
return
}
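
Every table above legalizes to the same shape: fetch or compute the bound, trap unless the index is below it, then form base + (index << log2(element_size)); the ishl_imm by 4 is the element_size 16 case. Distilled into Rust pseudocode (illustrative signature, not an API):

/// A sketch of the legalized `table_addr` sequence: bounds check,
/// then address arithmetic.
fn table_addr(base: u64, bound: u64, index: u64,
              element_size_log2: u32) -> Result<u64, &'static str> {
    if index >= bound {
        return Err("trap table_oob"); // the brz/jump-to-trap pair
    }
    // iadd_imm vmctx, 72 computed `base`; ishl_imm scales the index.
    Ok(base + (index << element_size_log2))
}

fn main() {
    assert_eq!(table_addr(0x1000, 100, 3, 4), Ok(0x1030)); // 16-byte elements
    assert!(table_addr(0x1000, 100, 100, 0).is_err());     // index == bound traps
}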

View File

@@ -1,15 +0,0 @@
test compile
target x86_64 legacy
; regex: V=v\d+
function u0:0(i8, i8) -> i8 fast {
block0(v0: i8, v1: i8):
v2 = urem v0, v1
; check: $(a=$V) = uextend.i32 v0
; nextln: $(b=$V) = uextend.i32 v1
; nextln: $(c=$V) = iconst.i32 0
; nextln: $(V), $(r=$V) = x86_udivmodx $a, $c, $b
; nextln: v2 = ireduce.i8 $r
return v2
}
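
There is no usable 8-bit divide path here, so both operands are zero-extended, a 32-bit divide with a zero high word (the `x86_udivmodx`) produces the remainder, and the result is reduced back to 8 bits. The widening is exact, as a small Rust sketch can exhaustively confirm:

/// i8 urem via the 32-bit expansion in the test: zero-extend both
/// operands, divide with a zero high word, reduce the remainder.
fn urem_u8(a: u8, b: u8) -> u8 {
    let (a32, b32) = (u32::from(a), u32::from(b)); // uextend.i32
    (a32 % b32) as u8 // x86_udivmodx remainder, then ireduce.i8
}

fn main() {
    // The remainder always fits back in 8 bits, so the reduce is lossless.
    for a in 0..=255u16 {
        for b in 1..=255u16 {
            assert_eq!(u16::from(urem_u8(a as u8, b as u8)), a % b);
        }
    }
}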

View File

@@ -1,51 +0,0 @@
test compile
set enable_simd
target i686 legacy haswell
function u0:1(i32) -> i64 system_v {
block1(v0: i32):
v1 = load.i64 notrap aligned v0+0
v2 = load.i32 notrap aligned v0+16
v3 = ishl v1, v2
return v3
}
function u0:2(i32) -> i64 system_v {
block1(v0: i32):
v1 = load.i64 notrap aligned v0+0
v2 = load.i64 notrap aligned v0+16
v3 = ishl v1, v2
return v3
}
function u0:3(i32) -> i32 system_v {
block1(v0: i32):
v1 = load.i32 notrap aligned v0+0
v2 = load.i64 notrap aligned v0+16
v3 = ishl v1, v2
return v3
}
function u0:4(i32) -> i64 system_v {
block1(v0: i32):
v1 = load.i64 notrap aligned v0+0
v2 = load.i32 notrap aligned v0+16
v3 = ushr v1, v2
return v3
}
function u0:5(i32) -> i64 system_v {
block1(v0: i32):
v1 = load.i64 notrap aligned v0+0
v2 = load.i64 notrap aligned v0+16
v3 = ushr v1, v2
return v3
}
function u0:6(i32) -> i32 system_v {
block1(v0: i32):
v1 = load.i32 notrap aligned v0+0
v2 = load.i64 notrap aligned v0+16
v3 = ushr v1, v2
return v3
}

View File

@@ -1,16 +0,0 @@
test compile
target i686 legacy
function u0:0(i64, i32) system_v {
block0(v0: i64, v1: i32):
v2 = bor v0, v0
store v2, v1
return
}
function u0:1(i32) -> i64 system_v {
block0(v1: i32):
v0 = load.i64 v1
v2 = bor v0, v0
return v2
}

View File

@@ -1,10 +0,0 @@
test compile
target x86_64 legacy
function %test(i32) -> i32 system_v {
block0(v0: i32):
nop
v1 = iconst.i32 42
return v1
}

View File

@@ -1,52 +0,0 @@
; Check that floating-point and integer constants equal to zero are optimized correctly.
test binemit
set opt_level=speed_and_size
target i686 legacy
function %foo() -> f32 fast {
block0:
; asm: xorps %xmm0, %xmm0
[-,%xmm0] v0 = f32const 0.0 ; bin: 0f 57 c0
return v0
}
function %bar() -> f64 fast {
block0:
; asm: xorpd %xmm0, %xmm0
[-,%xmm0] v1 = f64const 0.0 ; bin: 66 0f 57 c0
return v1
}
function %zero_dword() -> i32 fast {
block0:
; asm: xor %eax, %eax
[-,%rax] v0 = iconst.i32 0 ; bin: 31 c0
; asm: xor %edi, %edi
[-,%rdi] v1 = iconst.i32 0 ; bin: 31 ff
return v0
}
function %zero_word() -> i16 fast {
block0:
; while you may expect this to be encoded as 66 31 c0, i.e.
; xor %ax, %ax, the upper 16 bits of the register used for
; i16 are left undefined, so it's not wrong to clear them.
;
; discarding the 66 prefix is shorter, so this test expects
; that we do so.
;
; asm: xor %eax, %eax
[-,%rax] v0 = iconst.i16 0 ; bin: 31 c0
; asm: xor %edi, %edi
[-,%rdi] v1 = iconst.i16 0 ; bin: 31 ff
return v0
}
function %zero_byte() -> i8 fast {
block0:
; asm: xor %eax, %eax
[-,%rax] v0 = iconst.i8 0 ; bin: 31 c0
; asm: xor %edi, %edi
[-,%rdi] v1 = iconst.i8 0 ; bin: 31 ff
return v0
}

View File

@@ -1,72 +0,0 @@
; Check that floating-point and integer constants equal to zero are optimized correctly.
test binemit
set opt_level=speed_and_size
target x86_64 legacy
function %zero_const_32bit_no_rex() -> f32 fast {
block0:
; asm: xorps %xmm0, %xmm0
[-,%xmm0] v0 = f32const 0.0 ; bin: 0f 57 c0
return v0
}
function %zero_const_32bit_rex() -> f32 fast {
block0:
; asm: xorps %xmm8, %xmm8
[-,%xmm8] v1 = f32const 0.0 ; bin: 45 0f 57 c0
return v1
}
function %zero_const_64bit_no_rex() -> f64 fast {
block0:
; asm: xorpd %xmm0, %xmm0
[-,%xmm0] v0 = f64const 0.0 ; bin: 66 0f 57 c0
return v0
}
function %zero_const_64bit_rex() -> f64 fast {
block0:
; asm: xorpd %xmm8, %xmm8
[-,%xmm8] v1 = f64const 0.0 ; bin: 66 45 0f 57 c0
return v1
}
function %imm_zero_register() -> i64 fast {
block0:
; asm: xor %eax, %eax
[-,%rax] v0 = iconst.i64 0 ; bin: 31 c0
; asm: xor %edi, %edi
[-,%rdi] v1 = iconst.i64 0 ; bin: 31 ff
; asm: xor %r8, %r8
[-,%r8] v2 = iconst.i64 0 ; bin: 45 31 c0
; asm: xor %r15, %r15
[-,%r15] v4 = iconst.i64 0 ; bin: 45 31 ff
return v0
}
function %zero_word() -> i16 fast {
block0:
; while you may expect this to be encoded as 66 31 c0, i.e.
; xor %ax, %ax, the upper 16 bits of the register used for
; i16 are left undefined, so it's not wrong to clear them.
;
; discarding the 66 prefix is shorter, so this test expects
; that we do so.
;
; asm: xor %eax, %eax
[-,%rax] v0 = iconst.i16 0 ; bin: 31 c0
; asm: xor %edi, %edi
[-,%rdi] v1 = iconst.i16 0 ; bin: 31 ff
return v0
}
function %zero_byte() -> i8 fast {
block0:
; asm: xor %r8d, %r8d
[-,%r15] v0 = iconst.i8 0 ; bin: 45 31 ff
; asm: xor %eax, %eax
[-,%rax] v1 = iconst.i8 0 ; bin: 31 c0
; asm: xor %edi, %edi
[-,%rdi] v2 = iconst.i8 0 ; bin: 31 ff
return v0
}
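
The bin: bytes above all follow one rule: zeroing a register is the 2-byte 31 /r xor-with-itself, plus a 45 REX prefix for %r8..%r15, and never a 66 prefix even for i16. A sketch that reproduces those encodings (hypothetical helper, not the old backend's emitter):

/// Emit `xor reg32, reg32` for register number 0..=15, the zeroing
/// idiom the tests above assert: REX.RB (0x45) only for r8..r15.
fn encode_xor_zero(reg: u8) -> Vec<u8> {
    assert!(reg < 16);
    let modrm = 0xC0 | ((reg & 7) << 3) | (reg & 7);
    if reg >= 8 { vec![0x45, 0x31, modrm] } else { vec![0x31, modrm] }
}

fn main() {
    assert_eq!(encode_xor_zero(0), vec![0x31, 0xC0]);       // %rax: 31 c0
    assert_eq!(encode_xor_zero(7), vec![0x31, 0xFF]);       // %rdi: 31 ff
    assert_eq!(encode_xor_zero(8), vec![0x45, 0x31, 0xC0]); // %r8:  45 31 c0
    assert_eq!(encode_xor_zero(15), vec![0x45, 0x31, 0xFF]); // %r15: 45 31 ff
}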

View File

@@ -1,74 +0,0 @@
test compile
set enable_pinned_reg=true
set use_pinned_reg_as_heap_base=true
set opt_level=speed_and_size
target x86_64 legacy
; regex: V=v\d+
; r15 is the pinned heap register. It must not be rewritten, so it must not be
; used as a tied output register.
function %tied_input() -> i64 system_v {
block0:
v1 = get_pinned_reg.i64
v2 = iadd_imm v1, 42
return v2
}
; check: ,%r15]
; sameln: v1 = get_pinned_reg.i64
; nextln: regmove v1, %r15 -> %rax
; nextln: ,%rax]
; sameln: iadd_imm v1, 42
;; It mustn't be used even if this is a tied input used twice.
function %tied_twice() -> i64 system_v {
block0:
v1 = get_pinned_reg.i64
v2 = iadd v1, v1
return v2
}
; check: ,%r15]
; sameln: v1 = get_pinned_reg.i64
; nextln: regmove v1, %r15 -> %rax
; nextln: ,%rax]
; sameln: iadd v1, v1
function %uses() -> i64 system_v {
block0:
v1 = get_pinned_reg.i64
v2 = iadd_imm v1, 42
v3 = get_pinned_reg.i64
v4 = iadd v2, v3
return v4
}
; check: ,%r15]
; sameln: v1 = get_pinned_reg.i64
; nextln: regmove v1, %r15 -> %rax
; nextln: ,%rax]
; sameln: iadd_imm v1, 42
; nextln: ,%r15
; sameln: v3 = get_pinned_reg.i64
; nextln: ,%rax]
; sameln: iadd v2, v3
; When the pinned register is used as the heap base, the final load instruction
; must use the %r15 register directly, since x86's complex addressing mode can
; fold it into the load.
function u0:1(i64 vmctx) -> i64 system_v {
gv0 = vmctx
heap0 = static gv0, min 0x000a_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v42: i64):
v5 = iconst.i32 42
v6 = heap_addr.i64 heap0, v5, 0
v7 = load.i64 v6
return v7
}
; check: ,%r15]
; sameln: $(heap_base=$V) = get_pinned_reg.i64
; nextln: load_complex.i64 $heap_base+
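
The invariant these checks encode: x86 two-operand arithmetic ties its output to the first input and overwrites it, so a value living in the pinned %r15 must first be moved out (the regmove to %rax) before feeding a tied operand. A tiny illustrative sketch (hypothetical types, not the register allocator's code):

/// x86 two-operand arithmetic overwrites its first input, so a value
/// in the pinned register must be copied out before such a use.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Reg(&'static str);
const PINNED: Reg = Reg("%r15");

/// Register the tied input should occupy; a conceptual `regmove`
/// happens when it currently sits in the pinned register.
fn tied_input_reg(current: Reg, scratch: Reg) -> Reg {
    if current == PINNED { scratch } else { current }
}

fn main() {
    assert_eq!(tied_input_reg(Reg("%r15"), Reg("%rax")), Reg("%rax"));
    assert_eq!(tied_input_reg(Reg("%rdi"), Reg("%rax")), Reg("%rdi"));
}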

View File

@@ -1,28 +0,0 @@
test compile
set use_colocated_libcalls=1
set probestack_func_adjusts_sp=1
target x86_64 legacy
; Like %big in probestack.clif, but with the probestack function adjusting
; the stack pointer itself.
function %big() system_v {
ss0 = explicit_slot 300000
block0:
return
}
; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v {
; nextln: ss0 = explicit_slot 300000, offset -300016
; nextln: ss1 = incoming_arg 16, offset -16
; nextln: sig0 = (i64 [%rax]) probestack
; nextln: fn0 = colocated %Probestack sig0
; nextln:
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1pu_id#b8,%rax] v1 = iconst.i64 0x0004_93e0
; nextln: [Op1call_id#e8] call fn0(v1)
; nextln: [RexOp1adjustsp_id#8081] adjust_sp_up_imm 0x0004_93e0
; nextln: [RexOp1popq#58,%rbp] v2 = x86_pop.i64
; nextln: [Op1ret#c3] return v2
; nextln: }

View File

@@ -1,24 +0,0 @@
test compile
set use_colocated_libcalls=1
set enable_probestack=0
target x86_64 legacy
; Like %big in probestack.clif, but with probes disabled.
function %big() system_v {
ss0 = explicit_slot 300000
block0:
return
}
; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v {
; nextln: ss0 = explicit_slot 300000, offset -300016
; nextln: ss1 = incoming_arg 16, offset -16
; nextln:
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1adjustsp_id#d081] adjust_sp_down_imm 0x0004_93e0
; nextln: [RexOp1adjustsp_id#8081] adjust_sp_up_imm 0x0004_93e0
; nextln: [RexOp1popq#58,%rbp] v1 = x86_pop.i64
; nextln: [Op1ret#c3] return v1
; nextln: }

View File

@@ -1,27 +0,0 @@
test compile
target x86_64 legacy
; Like %big in probestack.clif, but without a colocated libcall.
function %big() system_v {
ss0 = explicit_slot 300000
block0:
return
}
; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v {
; nextln: ss0 = explicit_slot 300000, offset -300016
; nextln: ss1 = incoming_arg 16, offset -16
; nextln: sig0 = (i64 [%rax]) -> i64 [%rax] probestack
; nextln: fn0 = %Probestack sig0
; nextln:
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1pu_id#b8,%rax] v1 = iconst.i64 0x0004_93e0
; nextln: [RexOp1fnaddr8#80b8,%r11] v2 = func_addr.i64 fn0
; nextln: [RexOp1call_r#20ff,%rax] v3 = call_indirect sig0, v2(v1)
; nextln: [RexOp1adjustsp#8029] adjust_sp_down v3
; nextln: [RexOp1adjustsp_id#8081] adjust_sp_up_imm 0x0004_93e0
; nextln: [RexOp1popq#58,%rbp] v4 = x86_pop.i64
; nextln: [Op1ret#c3] return v4
; nextln: }

View File

@@ -1,74 +0,0 @@
test compile
set use_colocated_libcalls=1
set probestack_size_log2=13
target x86_64 legacy
; Like %big in probestack.clif, but with a larger probe interval (8 KiB),
; so this frame no longer needs a probe.
function %big() system_v {
ss0 = explicit_slot 4097
block0:
return
}
; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v {
; nextln: ss0 = explicit_slot 4097, offset -4113
; nextln: ss1 = incoming_arg 16, offset -16
; nextln:
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1adjustsp_id#d081] adjust_sp_down_imm 4112
; nextln: [RexOp1adjustsp_id#8081] adjust_sp_up_imm 4112
; nextln: [RexOp1popq#58,%rbp] v1 = x86_pop.i64
; nextln: [Op1ret#c3] return v1
; nextln: }
; Like %big; still doesn't need a probe.
function %bigger() system_v {
ss0 = explicit_slot 8192
block0:
return
}
; check: function %bigger(i64 fp [%rbp]) -> i64 fp [%rbp] system_v {
; nextln: ss0 = explicit_slot 8192, offset -8208
; nextln: ss1 = incoming_arg 16, offset -16
; nextln:
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1adjustsp_id#d081] adjust_sp_down_imm 8192
; nextln: [RexOp1adjustsp_id#8081] adjust_sp_up_imm 8192
; nextln: [RexOp1popq#58,%rbp] v1 = x86_pop.i64
; nextln: [Op1ret#c3] return v1
; nextln: }
; Like %bigger; this needs a probe.
function %biggest() system_v {
ss0 = explicit_slot 8193
block0:
return
}
; check: function %biggest(i64 fp [%rbp]) -> i64 fp [%rbp] system_v {
; nextln: ss0 = explicit_slot 8193, offset -8209
; nextln: ss1 = incoming_arg 16, offset -16
; nextln: sig0 = (i64 [%rax]) -> i64 [%rax] probestack
; nextln: fn0 = colocated %Probestack sig0
; nextln:
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1pu_id#b8,%rax] v1 = iconst.i64 8208
; nextln: [Op1call_id#e8,%rax] v2 = call fn0(v1)
; nextln: [RexOp1adjustsp#8029] adjust_sp_down v2
; nextln: [RexOp1adjustsp_id#8081] adjust_sp_up_imm 8208
; nextln: [RexOp1popq#58,%rbp] v3 = x86_pop.i64
; nextln: [Op1ret#c3] return v3
; nextln: }

View File

@@ -1,49 +0,0 @@
test compile
set use_colocated_libcalls=1
target x86_64 legacy
; A function with a big stack frame. This should have a stack probe.
function %big() system_v {
ss0 = explicit_slot 4097
block0:
return
}
; check: function %big(i64 fp [%rbp]) -> i64 fp [%rbp] system_v {
; nextln: ss0 = explicit_slot 4097, offset -4113
; nextln: ss1 = incoming_arg 16, offset -16
; nextln: sig0 = (i64 [%rax]) -> i64 [%rax] probestack
; nextln: fn0 = colocated %Probestack sig0
; nextln:
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1pu_id#b8,%rax] v1 = iconst.i64 4112
; nextln: [Op1call_id#e8,%rax] v2 = call fn0(v1)
; nextln: [RexOp1adjustsp#8029] adjust_sp_down v2
; nextln: [RexOp1adjustsp_id#8081] adjust_sp_up_imm 4112
; nextln: [RexOp1popq#58,%rbp] v3 = x86_pop.i64
; nextln: [Op1ret#c3] return v3
; nextln: }
; A function with a small enough stack frame. This shouldn't have a stack probe.
function %small() system_v {
ss0 = explicit_slot 4096
block0:
return
}
; check: function %small(i64 fp [%rbp]) -> i64 fp [%rbp] system_v {
; nextln: ss0 = explicit_slot 4096, offset -4112
; nextln: ss1 = incoming_arg 16, offset -16
; nextln:
; nextln: block0(v0: i64 [%rbp]):
; nextln: [RexOp1pushq#50] x86_push v0
; nextln: [RexOp1copysp#8089] copy_special %rsp -> %rbp
; nextln: [RexOp1adjustsp_id#d081] adjust_sp_down_imm 4096
; nextln: [RexOp1adjustsp_id#8081] adjust_sp_up_imm 4096
; nextln: [RexOp1popq#58,%rbp] v1 = x86_pop.i64
; nextln: [Op1ret#c3] return v1
; nextln: }
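
Across all of the probestack tests the rule is uniform: a probe is emitted only when probes are enabled and the frame outgrows one probe interval, 1 << probestack_size_log2, which defaults to 4096. A sketch of that decision (flag names taken from the `set` lines above; the sizes in the asserts are the adjust_sp amounts from the checks):

/// Whether a frame gets a stack-probe call, per the tests above.
fn needs_stack_probe(frame_size: u64, enable_probestack: bool,
                     probestack_size_log2: u32) -> bool {
    enable_probestack && frame_size > (1u64 << probestack_size_log2)
}

fn main() {
    assert!(needs_stack_probe(4112, true, 12));        // %big: probes
    assert!(!needs_stack_probe(4096, true, 12));       // %small: no probe
    assert!(!needs_stack_probe(8192, true, 13));       // %bigger at 8 KiB interval
    assert!(needs_stack_probe(8208, true, 13));        // %biggest: probes
    assert!(!needs_stack_probe(0x0004_93e0, false, 12)); // enable_probestack=0
}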

Some files were not shown because too many files have changed in this diff.