x64 backend: add lowerings with load-op-store fusion. (#4071)

These lowerings use the `OP [mem], reg` forms (or in AT&T syntax, `OP
%reg, (mem)`) -- i.e., x86 instructions that load from memory, perform
an ALU operation, and store the result, all in one instruction. Using
these instruction forms, we can merge three CLIF ops together: a load,
an arithmetic operation, and a store.
Author: Chris Fallin
Date: 2022-04-26 18:58:26 -07:00 (committed by GitHub)
Parent: 164bfeaf7e
Commit: dd45f44511

9 changed files with 1442 additions and 298 deletions
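
To make the fused pattern concrete before the diff, here is a minimal Rust sketch of the read-modify-write idiom these lowerings target. It is illustrative only, not part of this commit; the function name and the exact assembly shown are assumptions.

// Hypothetical example. Lowered naively, `p[8] += x` takes three
// instructions: a load, an `add`, and a store. With load-op-store
// fusion the same update can become a single memory-destination
// instruction, e.g. something like `addl %esi, 32(%rdi)` (elements
// are 4 bytes, so index 8 is byte offset 32).
pub fn bump(p: &mut [i32; 16], x: i32) {
    p[8] += x;
}

The filetests below exercise exactly this shape at the CLIF level: a `load.i32`, an ALU op, and a `store` back to the same address collapse into one x86 instruction.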

@@ -0,0 +1,147 @@
test compile precise-output
target x86_64

function %f0(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = iadd v2, v1
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; addl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f1(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = iadd v1, v2
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; addl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f2(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = isub v2, v1
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; subl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f3(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = band v2, v1
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; andl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f4(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = band v1, v2
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; andl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f5(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = bor v2, v1
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; orl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f6(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = bor v1, v2
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; orl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f7(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = bxor v2, v1
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; xorl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret

function %f8(i64, i32) {
block0(v0: i64, v1: i32):
  v2 = load.i32 v0+32
  v3 = bxor v1, v2
  store v3, v0+32
  return
}

; pushq %rbp
; movq %rsp, %rbp
; block0:
; xorl %esi, 32(%rdi)
; movq %rbp, %rsp
; popq %rbp
; ret