x64 backend: add lowerings with load-op-store fusion. (#4071)

These lowerings use the `OP [mem], reg` forms (or in AT&T syntax,
`OP %reg, (mem)`) -- i.e., x86 instructions that load from memory,
perform an ALU operation, and store the result, all in one
instruction. Using these instruction forms, we can merge three CLIF
ops together: a load, an arithmetic operation, and a store.
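
For example, taking the first case from the new filetests below, a CLIF
sequence of a load, an `iadd`, and a store to the same address:

    v2 = load.i32 v0+32
    v3 = iadd v2, v1
    store v3, v0+32

now lowers to a single read-modify-write instruction rather than a
separate load, add, and store:

    addl %esi, 32(%rdi)
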
Chris Fallin
2022-04-26 18:58:26 -07:00
committed by GitHub
parent 164bfeaf7e
commit dd45f44511
9 changed files with 1442 additions and 298 deletions

@@ -0,0 +1,147 @@
test compile precise-output
target x86_64

function %f0(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = iadd v2, v1
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; addl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f1(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = iadd v1, v2
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; addl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f2(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = isub v2, v1
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; subl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f3(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = band v2, v1
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; andl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f4(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = band v1, v2
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; andl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f5(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = bor v2, v1
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; orl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f6(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = bor v1, v2
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; orl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f7(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = bxor v2, v1
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; xorl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f8(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = bxor v1, v2
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; xorl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

@@ -0,0 +1,95 @@
test run
target x86_64
target aarch64

function %load_op_store_iadd_i64(i64 vmctx, i64, i64) -> i64 {
  gv0 = vmctx
  gv1 = load.i64 notrap aligned gv0+0
  heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i64):
  v3 = heap_addr.i64 heap0, v1, 8
  v4 = iconst.i64 42
  store.i64 v4, v3
  v5 = load.i64 v3
  v6 = iadd.i64 v5, v2
  store.i64 v6, v3
  v7 = load.i64 v3
  return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %load_op_store_iadd_i64(0, 1) == 43
; run: %load_op_store_iadd_i64(0, -1) == 41

function %load_op_store_iadd_i32(i64 vmctx, i64, i32) -> i32 {
  gv0 = vmctx
  gv1 = load.i64 notrap aligned gv0+0
  heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i32):
  v3 = heap_addr.i64 heap0, v1, 4
  v4 = iconst.i32 42
  store.i32 v4, v3
  v5 = load.i32 v3
  v6 = iadd.i32 v5, v2
  store.i32 v6, v3
  v7 = load.i32 v3
  return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %load_op_store_iadd_i32(0, 1) == 43
; run: %load_op_store_iadd_i32(0, -1) == 41

function %load_op_store_iadd_i8(i64 vmctx, i64, i8) -> i8 {
  gv0 = vmctx
  gv1 = load.i64 notrap aligned gv0+0
  heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i8):
  v3 = heap_addr.i64 heap0, v1, 4
  v4 = iconst.i8 42
  store.i8 v4, v3
  v5 = load.i8 v3
  v6 = iadd.i8 v5, v2
  store.i8 v6, v3
  v7 = load.i8 v3
  return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %load_op_store_iadd_i8(0, 1) == 43
; run: %load_op_store_iadd_i8(0, -1) == 41

function %load_op_store_iadd_isub_iand_ior_ixor_i64(i64 vmctx, i64, i64) -> i64 {
  gv0 = vmctx
  gv1 = load.i64 notrap aligned gv0+0
  heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64

block0(v0: i64, v1: i64, v2: i64):
  v3 = heap_addr.i64 heap0, v1, 8
  store.i64 v2, v3
  v4 = load.i64 v3
  v5 = iconst.i64 1
  v6 = iadd.i64 v5, v4
  store.i64 v6, v3
  v7 = load.i64 v3
  v8 = iconst.i64 2
  v9 = load.i64 v3
  v10 = isub.i64 v9, v8
  store.i64 v10, v3
  v11 = load.i64 v3
  v12 = iconst.i64 0xf
  v13 = band.i64 v12, v11
  store.i64 v13, v3
  v14 = iconst.i64 0x10
  v15 = load.i64 v3
  v16 = bor.i64 v15, v14
  store.i64 v16, v3
  v17 = load.i64 v3
  v18 = iconst.i64 0xff
  v19 = bxor.i64 v17, v18
  store.i64 v19, v3
  v20 = load.i64 v3
  return v20
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %load_op_store_iadd_isub_iand_ior_ixor_i64(0, 0x1234) == 236