Remove the old x86 backend

This commit is contained in:
bjorn3
2021-06-18 17:28:55 +02:00
parent e989caf337
commit 9e34df33b9
246 changed files with 76 additions and 28804 deletions

View File

@@ -1,68 +0,0 @@
test compile
target x86_64 legacy haswell
;; `b1` return values need to be legalized into bytes so that they can be stored
;; in memory.
;; Callee side: four `b1` return values do not fit the return registers, so
;; the legalizer rewrites the signature to take a hidden struct-return
;; pointer (v4 in %rdi, per the legacy x86-64 SysV convention shown below).
;; Each flag is widened to i8 via `bint`, zero-extended to i32, and stored
;; with `istore8` at byte offsets 0..3 of the sret buffer.
function %return_4_b1s(b1, b1, b1, b1) -> b1, b1, b1, b1 {
;; check: function %return_4_b1s(b1 [%rsi], b1 [%rdx], b1 [%rcx], b1 [%r8], i64 sret [%rdi], i64 fp [%rbp]) -> i64 sret [%rax], i64 fp [%rbp] fast {
block0(v0: b1, v1: b1, v2: b1, v3: b1):
; check: block0(v0: b1 [%rsi], v1: b1 [%rdx], v2: b1 [%rcx], v3: b1 [%r8], v4: i64 [%rdi], v13: i64 [%rbp]):
;; The single `return` below is expanded into one bint/uextend/istore8
;; triple per flag, writing consecutive bytes of the sret area.
return v0, v1, v2, v3
; check: v5 = bint.i8 v0
; nextln: v9 = uextend.i32 v5
; nextln: istore8 notrap aligned v9, v4
; nextln: v6 = bint.i8 v1
; nextln: v10 = uextend.i32 v6
; nextln: istore8 notrap aligned v10, v4+1
; nextln: v7 = bint.i8 v2
; nextln: v11 = uextend.i32 v7
; nextln: istore8 notrap aligned v11, v4+2
; nextln: v8 = bint.i8 v3
; nextln: v12 = uextend.i32 v8
; nextln: istore8 notrap aligned v12, v4+3
}
;; Caller side of the sret legalization: the call site gains a 4-byte
;; `sret_slot` (one byte per flag), passes its address as the extra
;; argument, and reloads each flag from the buffer afterwards via
;; uload8 -> ireduce.i8 -> raw_bitcast.b8 -> breduce.b1.
function %call_4_b1s() {
; check: function %call_4_b1s(i64 fp [%rbp], i64 csr [%rbx]) -> i64 fp [%rbp], i64 csr [%rbx] fast {
; nextln: ss0 = sret_slot 4, offset -28
fn0 = colocated %return_4_b1s(b1, b1, b1, b1) -> b1, b1, b1, b1
;; The callee signature is rewritten to carry the sret pointer in %rdi.
; check: sig0 = (b1 [%rsi], b1 [%rdx], b1 [%rcx], b1 [%r8], i64 sret [%rdi]) -> i64 sret [%rax] fast
block0:
; check: block0(v26: i64 [%rbp], v27: i64 [%rbx]):
v0 = bconst.b1 true
v1 = bconst.b1 false
v2 = bconst.b1 true
v3 = bconst.b1 false
;; The original 4-result call becomes a 1-result call taking the slot
;; address (v8); the four original results are aliased (vN -> vM) to the
;; values loaded back out of the buffer.
; check: v8 = stack_addr.i64 ss0
v4, v5, v6, v7 = call fn0(v0, v1, v2, v3)
; check: v9 = call fn0(v0, v1, v2, v3, v8)
; nextln: v22 = uload8.i32 notrap aligned v9
; nextln: v10 = ireduce.i8 v22
; nextln: v11 = raw_bitcast.b8 v10
; nextln: v12 = breduce.b1 v11
; nextln: v4 -> v12
; nextln: v23 = uload8.i32 notrap aligned v9+1
; nextln: v13 = ireduce.i8 v23
; nextln: v14 = raw_bitcast.b8 v13
; nextln: v15 = breduce.b1 v14
; nextln: v5 -> v15
; nextln: v24 = uload8.i32 notrap aligned v9+2
; nextln: v16 = ireduce.i8 v24
; nextln: v17 = raw_bitcast.b8 v16
; nextln: v18 = breduce.b1 v17
; nextln: v6 -> v18
; nextln: v25 = uload8.i32 notrap aligned v9+3
; nextln: v19 = ireduce.i8 v25
; nextln: v20 = raw_bitcast.b8 v19
; nextln: v21 = breduce.b1 v20
; nextln: v7 -> v21
return
}

View File

@@ -1,26 +0,0 @@
test legalizer
target x86_64 legacy haswell
;; Indirect calls with many returns.
;; An indirect call returning four i64s is legalized to pass a pointer to a
;; 32-byte sret stack slot; the four results are then loaded back from
;; offsets 0, 8, 16 and 24 of that slot and aliased to the original values.
function %call_indirect_many_rets(i64) {
; check: ss0 = sret_slot 32
sig0 = () -> i64, i64, i64, i64
; check: sig0 = (i64 sret [%rdi]) -> i64 sret [%rax] fast
block0(v0: i64):
v1, v2, v3, v4 = call_indirect sig0, v0()
; check: v5 = stack_addr.i64 ss0
; nextln: v6 = call_indirect sig0, v0(v5)
; nextln: v7 = load.i64 notrap aligned v6
; nextln: v1 -> v7
; nextln: v8 = load.i64 notrap aligned v6+8
; nextln: v2 -> v8
; nextln: v9 = load.i64 notrap aligned v6+16
; nextln: v3 -> v9
; nextln: v10 = load.i64 notrap aligned v6+24
; nextln: v4 -> v10
return
}

View File

@@ -1,24 +0,0 @@
test legalizer
target x86_64 legacy haswell
;; Test if arguments are legalized if function uses sret
;; Both argument and return legalization happen at the same call site: the
;; i128 argument is split back into its two i64 halves (v1, v2) instead of
;; being passed via `iconcat`, and the four i64 returns go through a
;; 32-byte sret slot as in the other tests.
function %call_indirect_with_split_arg(i64, i64, i64) {
; check: ss0 = sret_slot 32
sig0 = (i128) -> i64, i64, i64, i64
; check: sig0 = (i64 [%rsi], i64 [%rdx], i64 sret [%rdi]) -> i64 sret [%rax] fast
block0(v0: i64, v1: i64, v2: i64):
v3 = iconcat v1, v2
v4, v5, v6, v7 = call_indirect sig0, v0(v3)
; check: v8 = stack_addr.i64 ss0
; check: v9 = call_indirect sig0, v0(v1, v2, v8)
; check: v10 = load.i64 notrap aligned v9
; check: v4 -> v10
; check: v11 = load.i64 notrap aligned v9+8
; check: v5 -> v11
; check: v12 = load.i64 notrap aligned v9+16
; check: v6 -> v12
; check: v13 = load.i64 notrap aligned v9+24
; check: v7 -> v13
return
}

View File

@@ -1,61 +0,0 @@
test legalizer
target x86_64 legacy haswell
;; Test that we don't reuse `sret` stack slots for multiple calls. We could do
;; this one day, but it would require some care to ensure that we don't have
;; subsequent calls overwrite the results of previous calls.
;; Two multi-return calls in one function must each get their OWN sret slot
;; (ss0 and ss1, 20 bytes each: five 4-byte results) — the slot is not
;; reused between calls. The non-colocated direct calls are also rewritten
;; to `func_addr` + `call_indirect` pairs.
function %foo() -> i32, f32 {
; check: ss0 = sret_slot 20
; nextln: ss1 = sret_slot 20
fn0 = %f() -> i32, i32, i32, i32, i32
fn1 = %g() -> f32, f32, f32, f32, f32
; check: sig0 = (i64 sret [%rdi]) -> i64 sret [%rax] fast
; nextln: sig1 = (i64 sret [%rdi]) -> i64 sret [%rax] fast
; nextln: fn0 = %f sig0
; nextln: fn1 = %g sig1
block0:
;; First call: results reloaded from ss0 at 4-byte offsets 0..16.
v0, v1, v2, v3, v4 = call fn0()
; check: v18 = stack_addr.i64 ss0
; nextln: v25 = func_addr.i64 fn0
; nextln: v19 = call_indirect sig0, v25(v18)
; nextln: v20 = load.i32 notrap aligned v19
; nextln: v0 -> v20
; nextln: v21 = load.i32 notrap aligned v19+4
; nextln: v1 -> v21
; nextln: v22 = load.i32 notrap aligned v19+8
; nextln: v2 -> v22
; nextln: v23 = load.i32 notrap aligned v19+12
; nextln: v3 -> v23
; nextln: v24 = load.i32 notrap aligned v19+16
; nextln: v4 -> v24
;; Second call: a fresh slot ss1, so the i32 results above are not
;; clobbered by the f32 results written here.
v5, v6, v7, v8, v9 = call fn1()
; check: v26 = stack_addr.i64 ss1
; nextln: v33 = func_addr.i64 fn1
; nextln: v27 = call_indirect sig1, v33(v26)
; nextln: v28 = load.f32 notrap aligned v27
; nextln: v5 -> v28
; nextln: v29 = load.f32 notrap aligned v27+4
; nextln: v6 -> v29
; nextln: v30 = load.f32 notrap aligned v27+8
; nextln: v7 -> v30
; nextln: v31 = load.f32 notrap aligned v27+12
; nextln: v8 -> v31
; nextln: v32 = load.f32 notrap aligned v27+16
; nextln: v9 -> v32
;; Consume every result so none of the loads can be dead-code eliminated.
v10 = iadd v0, v1
v11 = iadd v2, v3
v12 = iadd v10, v11
v13 = iadd v12, v4
v14 = fadd v5, v6
v15 = fadd v7, v8
v16 = fadd v14, v15
v17 = fadd v16, v9
return v13, v17
}

View File

@@ -1,51 +0,0 @@
test legalizer
target x86_64 legacy haswell
;; Need to insert padding after the `i8`s so that the `i32` and `i64` are
;; aligned.
;; Callee side of a mixed-size sret layout: the stores land at offsets
;; 0 (i8), 4 (i32), 8 (i8) and 16 (i64), i.e. padding is inserted after
;; each i8 so the following i32/i64 field is naturally aligned.
function %returner() -> i8, i32, i8, i64 {
; check: function %returner(i64 sret [%rdi]) -> i64 sret [%rax] fast {
block0:
; check: block0(v4: i64):
v0 = iconst.i8 0
v1 = iconst.i32 1
v2 = iconst.i8 2
v3 = iconst.i64 3
;; i8 fields go through uextend + istore8; wider fields use plain stores.
;; The sret pointer itself is returned in %rax.
return v0, v1, v2, v3
; check: v6 = uextend.i32 v0
; nextln: istore8 notrap aligned v6, v4
; nextln: store notrap aligned v1, v4+4
; nextln: v7 = uextend.i32 v2
; nextln: istore8 notrap aligned v7, v4+8
; nextln: store notrap aligned v3, v4+16
; nextln: return v4
}
;; Caller side of the padded layout above: the slot is 24 bytes
;; (8-aligned i64 at offset 16 plus its 8-byte field), and each result is
;; reloaded from the same offsets the callee stored to — uload8/ireduce
;; for the i8 fields, plain loads for the i32 and i64.
function %caller() {
; check: ss0 = sret_slot 24
fn0 = %returner() -> i8, i32, i8, i64
; check: sig0 = (i64 sret [%rdi]) -> i64 sret [%rax] fast
; nextln: fn0 = %returner sig0
block0:
;; The non-colocated direct call is rewritten to func_addr +
;; call_indirect, passing the slot address v4.
v0, v1, v2, v3 = call fn0()
; check: v4 = stack_addr.i64 ss0
; nextln: v10 = func_addr.i64 fn0
; nextln: v5 = call_indirect sig0, v10(v4)
; nextln: v11 = uload8.i32 notrap aligned v5
; nextln: v6 = ireduce.i8 v11
; nextln: v0 -> v6
; nextln: v7 = load.i32 notrap aligned v5+4
; nextln: v1 -> v7
; nextln: v12 = uload8.i32 notrap aligned v5+8
; nextln: v8 = ireduce.i8 v12
; nextln: v2 -> v8
; nextln: v9 = load.i64 notrap aligned v5+16
; nextln: v3 -> v9
return
}