* Add a pre-opt optimization to change constants into immediates. This converts `iadd` + `iconst` into `iadd_imm`, and so on.
* Optimize away redundant `bint` instructions. Cretonne has a concept of "Testable" values, which can be either boolean or integer. When an instruction needing a "Testable" value receives the result of a `bint` (a boolean-to-integer conversion), eliminate the `bint`, as it's redundant.
* Postopt: Optimize using CPU flags. This introduces a post-legalization optimization pass which converts compare+branch sequences to use flags values on CPUs that support them (see the sketch after this list).
* Define a form of x86's `urm` that doesn't clobber FLAGS. movzbl/movsbl/etc. don't clobber FLAGS; define a form of the `urm` recipe that represents this.
* Implement a DCE pass. This pass deletes instructions that have no side effects and whose results are unused.
* Clarify ambiguity about "32-bit" and "64-bit" in comments.
* Add x86 encodings for `icmp_imm`.
* Add a testcase for the postopt CPU flags optimization. This covers the basic functionality of transforming compare+branch sequences to use CPU flags.
* Pattern-match `irsub_imm` in preopt.
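As a rough sketch of the CPU-flags postopt (not taken from the testcase below, which exercises preopt only; the exact instructions chosen depend on the target ISA), a compare followed by a conditional branch is rewritten to branch on a flags value, here using Cretonne's `ifcmp`/`brif` pair:

    ; before postopt: materialize a boolean, then branch on it
    v2 = icmp slt v0, v1
    brnz v2, ebb1

    ; after postopt (illustrative): branch directly on the flags set by the compare
    v2 = ifcmp v0, v1
    brif slt v2, ebb1
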
test preopt
isa intel

function %iadd_imm(i32) -> i32 {
ebb0(v0: i32):
    v1 = iconst.i32 2
    v2 = iadd v0, v1
    return v2
}
; sameln: function %iadd_imm
; nextln: ebb0(v0: i32):
; nextln: v1 = iconst.i32 2
; nextln: v2 = iadd_imm v0, 2
; nextln: return v2
; nextln: }

function %isub_imm(i32) -> i32 {
ebb0(v0: i32):
    v1 = iconst.i32 2
    v2 = isub v0, v1
    return v2
}
; sameln: function %isub_imm
; nextln: ebb0(v0: i32):
; nextln: v1 = iconst.i32 2
; nextln: v2 = iadd_imm v0, -2
; nextln: return v2
; nextln: }

function %icmp_imm(i32) -> i32 {
ebb0(v0: i32):
    v1 = iconst.i32 2
    v2 = icmp slt v0, v1
    v3 = bint.i32 v2
    return v3
}
; sameln: function %icmp_imm
; nextln: ebb0(v0: i32):
; nextln: v1 = iconst.i32 2
; nextln: v2 = icmp_imm slt v0, 2
; nextln: v3 = bint.i32 v2
; nextln: return v3
; nextln: }

function %brz_bint(i32) {
ebb0(v0: i32):
    v3 = icmp_imm slt v0, 0
    v1 = bint.i32 v3
    v2 = select v1, v1, v1
    trapz v1, user0
    brz v1, ebb1
    jump ebb2

ebb1:
    return

ebb2:
    return
}
; sameln: function %brz_bint
; nextln: ebb0(v0: i32):
; nextln: v3 = icmp_imm slt v0, 0
; nextln: v1 = bint.i32 v3
; nextln: v2 = select v3, v1, v1
; nextln: trapz v3, user0
; nextln: brz v3, ebb1
; nextln: jump ebb2

function %irsub_imm(i32) -> i32 {
ebb0(v0: i32):
    v1 = iconst.i32 2
    v2 = isub v1, v0
    return v2
}
; sameln: function %irsub_imm
; nextln: ebb0(v0: i32):
; nextln: v1 = iconst.i32 2
; nextln: v2 = irsub_imm v0, 2
; nextln: return v2
; nextln: }