Legalize b{and,or,xor}_not into component instructions (#5709)

* Remove trailing whitespace in `lower.isle` files

* Legalize the `band_not` instruction into simpler form

This commit legalizes the `band_not` instruction into a `band`-of-`bnot`,
i.e. two separate instructions. This is intended to assist with
egraph-based optimizations where the `band_not` instruction doesn't have
to be specifically included in other bit-operation-patterns.

Lowerings of the `band_not` instruction have been moved to a
specialization of the `band` instruction.

* Legalize `bor_not` into components

Same as prior commit, but for the `bor_not` instruction.

* Legalize bxor_not into bxor-of-bnot

Same as prior commits. I think this also ended up fixing a bug in the
s390x backend where `bxor_not x y` was actually translated as `bnot
(bxor x y)` by accident, judging by the test update changes.

* Simplify not-fused operands for riscv64

Looks like some delegated-to rules have special-cases for "if this
feature is enabled use the fused instruction" so move the clause for
testing the feature up to the lowering phase to help trigger other rules
if the feature isn't enabled. This should make the riscv64 backend more
consistent with how other backends are implemented.

* Remove B{and,or,xor}Not from cost of egraph metrics

These shouldn't ever reach egraphs now that they're legalized away.

* Add an egraph optimization for `x^-1 => ~x`

This adds a simplification node to translate xor-against-minus-1 to a
`bnot` instruction. This helps trigger various other optimizations in
the egraph implementation and also various backend lowering rules for
instructions. This is chiefly useful as wasm doesn't have a `bnot`
equivalent, so it's encoded as `x^-1`.

* Add a wasm test for end-to-end bitwise lowerings

Test that end-to-end various optimizations are being applied for input
wasm modules.

* Specifically don't self-update rustup on CI

I forget why this was here originally, but this is failing on Windows
CI. In general there's no need to update rustup, so leave it as-is.

* Cleanup some aarch64 lowering rules

Previously a 32/64 split was necessary due to the `ALUOp` being different,
but that's been refactored away now, so there's no longer any need for
duplicate rules.

* Narrow a x64 lowering rule

This previously made more sense when it was `band_not` and rarely used,
but be more specific in the type-filter on this rule that it's only
applicable to SIMD types with lanes.

* Simplify xor-against-minus-1 rule

No need to have the commutative version since constants are already
shuffled to the right-hand side for egraphs.

* Optimize band-of-bnot when bnot is on the left

Use some more rules in the egraph algebraic optimizations to
canonicalize band/bor/bxor with a `bnot` operand to put the operand on
the right. That way the lowerings in the backends only have to list the
rule once, with the operand on the right, to optimize both styles of
input.

* Add commutative lowering rules

* Update cranelift/codegen/src/isa/x64/lower.isle

Co-authored-by: Jamey Sharp <jamey@minilop.net>

---------

Co-authored-by: Jamey Sharp <jamey@minilop.net>
This commit is contained in:
Alex Crichton
2023-02-06 13:53:40 -06:00
committed by GitHub
parent 99c3936616
commit de0e0bea3f
17 changed files with 506 additions and 277 deletions

View File

@@ -221,3 +221,33 @@ block0(v1: i8):
; check: v3 = iconst.i8 0
; check: return v3
function %bnot1(i8) -> i8 {
block0(v1: i8):
v2 = iconst.i8 -1
v3 = bxor v1, v2
return v3
}
; check: v4 = bnot v1
; check: return v4
function %bnot2(i64) -> i64 {
block0(v1: i64):
v2 = iconst.i64 -1
v3 = bxor v1, v2
return v3
}
; check: v4 = bnot v1
; check: return v4
function %bnot3(i64) -> i64 {
block0(v1: i64):
v2 = iconst.i64 -1
v3 = bxor v2, v1
return v3
}
; check: v5 = bnot v1
; check: return v5

View File

@@ -0,0 +1,37 @@
test compile precise-output
set unwind_info=false
set opt_level=speed
target aarch64
function %band_not_i32_reversed(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bnot v0
v3 = band v2, v1
return v3
}
; block0:
; bic w0, w1, w0
; ret
function %bor_not_i32_reversed(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bnot v0
v3 = bor v2, v1
return v3
}
; block0:
; orn w0, w1, w0
; ret
function %bxor_not_i32_reversed(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bnot v0
v3 = bxor v2, v1
return v3
}
; block0:
; eon w0, w1, w0
; ret

View File

@@ -0,0 +1,45 @@
test compile precise-output
set opt_level=speed
target riscv64 has_b
function %band_not_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = band_not.i32 v0, v1
return v2
}
; block0:
; andn a0,a0,a1
; ret
function %band_not_i32_reversed(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bnot v0
v3 = band v2, v1
return v3
}
; block0:
; andn a0,a1,a0
; ret
function %bor_not_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bor_not.i32 v0, v1
return v2
}
; block0:
; orn a0,a0,a1
; ret
function %bor_not_i32_reversed(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bnot v0
v3 = bor v2, v1
return v3
}
; block0:
; orn a0,a1,a0
; ret

View File

@@ -631,9 +631,9 @@ block0(v0: i128, v1: i128):
}
; block0:
; not a2,a2
; and a0,a0,a2
; not a4,a2
; not a6,a3
; and a0,a0,a4
; and a1,a1,a6
; ret
@@ -645,9 +645,9 @@ block0(v0: i64):
}
; block0:
; li t2,4
; not a1,t2
; and a0,a0,a1
; li a1,4
; not a2,a1
; and a0,a0,a2
; ret
function %band_not_i64_constant_shift(i64, i64) -> i64 {
@@ -660,8 +660,8 @@ block0(v0: i64, v1: i64):
; block0:
; slli a2,a1,4
; not a1,a2
; and a0,a0,a1
; not a2,a2
; and a0,a0,a2
; ret
function %bor_not_i32(i32, i32) -> i32 {
@@ -693,9 +693,9 @@ block0(v0: i128, v1: i128):
}
; block0:
; not a2,a2
; or a0,a0,a2
; not a4,a2
; not a6,a3
; or a0,a0,a4
; or a1,a1,a6
; ret
@@ -707,9 +707,9 @@ block0(v0: i64):
}
; block0:
; li t2,4
; not a1,t2
; or a0,a0,a1
; li a1,4
; not a2,a1
; or a0,a0,a2
; ret
function %bor_not_i64_constant_shift(i64, i64) -> i64 {
@@ -722,8 +722,8 @@ block0(v0: i64, v1: i64):
; block0:
; slli a2,a1,4
; not a1,a2
; or a0,a0,a1
; not a2,a2
; or a0,a0,a2
; ret
function %bxor_not_i32(i32, i32) -> i32 {
@@ -755,9 +755,9 @@ block0(v0: i128, v1: i128):
}
; block0:
; not a2,a2
; xor a0,a0,a2
; not a4,a2
; not a6,a3
; xor a0,a0,a4
; xor a1,a1,a6
; ret
@@ -769,9 +769,9 @@ block0(v0: i64):
}
; block0:
; li t2,4
; not a1,t2
; xor a0,a0,a1
; li a1,4
; not a2,a1
; xor a0,a0,a2
; ret
function %bxor_not_i64_constant_shift(i64, i64) -> i64 {
@@ -784,8 +784,8 @@ block0(v0: i64, v1: i64):
; block0:
; slli a2,a1,4
; not a1,a2
; xor a0,a0,a1
; not a2,a2
; xor a0,a0,a2
; ret
function %ishl_i128_i8(i128, i8) -> i128 {

View File

@@ -0,0 +1,66 @@
test compile precise-output
set opt_level=speed
target s390x has_mie2
function %band_not_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = band_not.i32 v0, v1
return v2
}
; block0:
; ncrk %r2, %r2, %r3
; br %r14
function %band_not_i32_reversed(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bnot v0
v3 = band v2, v1
return v3
}
; block0:
; ncrk %r2, %r3, %r2
; br %r14
function %bor_not_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bor_not.i32 v0, v1
return v2
}
; block0:
; ocrk %r2, %r2, %r3
; br %r14
function %bor_not_i32_reversed(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bnot v0
v3 = bor v2, v1
return v3
}
; block0:
; ocrk %r2, %r3, %r2
; br %r14
function %bxor_not_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bxor_not.i32 v0, v1
return v2
}
; block0:
; nxrk %r2, %r2, %r3
; br %r14
function %bxor_not_i32_reversed(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bnot v0
v3 = bxor v2, v1
return v3
}
; block0:
; nxrk %r2, %r3, %r2
; br %r14

View File

@@ -366,9 +366,8 @@ block0(v0: i32, v1: i32):
}
; block0:
; lgr %r5, %r3
; xilf %r5, 4294967295
; nr %r2, %r5
; xilf %r3, 4294967295
; nr %r2, %r3
; br %r14
function %band_not_i16(i16, i16) -> i16 {
@@ -378,9 +377,8 @@ block0(v0: i16, v1: i16):
}
; block0:
; lgr %r5, %r3
; xilf %r5, 4294967295
; nr %r2, %r5
; xilf %r3, 4294967295
; nr %r2, %r3
; br %r14
function %band_not_i8(i8, i8) -> i8 {
@@ -390,9 +388,8 @@ block0(v0: i8, v1: i8):
}
; block0:
; lgr %r5, %r3
; xilf %r5, 4294967295
; nr %r2, %r5
; xilf %r3, 4294967295
; nr %r2, %r3
; br %r14
function %bor_not_i128(i128, i128) -> i128 {
@@ -427,9 +424,8 @@ block0(v0: i32, v1: i32):
}
; block0:
; lgr %r5, %r3
; xilf %r5, 4294967295
; or %r2, %r5
; xilf %r3, 4294967295
; or %r2, %r3
; br %r14
function %bor_not_i16(i16, i16) -> i16 {
@@ -439,9 +435,8 @@ block0(v0: i16, v1: i16):
}
; block0:
; lgr %r5, %r3
; xilf %r5, 4294967295
; or %r2, %r5
; xilf %r3, 4294967295
; or %r2, %r3
; br %r14
function %bor_not_i8(i8, i8) -> i8 {
@@ -451,9 +446,8 @@ block0(v0: i8, v1: i8):
}
; block0:
; lgr %r5, %r3
; xilf %r5, 4294967295
; or %r2, %r5
; xilf %r3, 4294967295
; or %r2, %r3
; br %r14
function %bxor_not_i128(i128, i128) -> i128 {
@@ -476,9 +470,9 @@ block0(v0: i64, v1: i64):
}
; block0:
; xilf %r3, 4294967295
; xihf %r3, 4294967295
; xgr %r2, %r3
; xilf %r2, 4294967295
; xihf %r2, 4294967295
; br %r14
function %bxor_not_i32(i32, i32) -> i32 {
@@ -488,8 +482,8 @@ block0(v0: i32, v1: i32):
}
; block0:
; xilf %r3, 4294967295
; xr %r2, %r3
; xilf %r2, 4294967295
; br %r14
function %bxor_not_i16(i16, i16) -> i16 {
@@ -499,8 +493,8 @@ block0(v0: i16, v1: i16):
}
; block0:
; xilf %r3, 4294967295
; xr %r2, %r3
; xilf %r2, 4294967295
; br %r14
function %bxor_not_i8(i8, i8) -> i8 {
@@ -510,8 +504,8 @@ block0(v0: i8, v1: i8):
}
; block0:
; xilf %r3, 4294967295
; xr %r2, %r3
; xilf %r2, 4294967295
; br %r14
function %bnot_i128(i128) -> i128 {

View File

@@ -1,4 +1,5 @@
test compile precise-output
set opt_level=speed
target x86_64 has_bmi1
function %f1(i8, i8) -> i8 {
@@ -15,3 +16,19 @@ block0(v0: i8, v1: i8):
; popq %rbp
; ret
function %reversed_operands(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = bnot v0
v3 = band v2, v1
return v3
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; andn %eax, %edi, %esi
; movq %rbp, %rsp
; popq %rbp
; ret

View File

@@ -0,0 +1,46 @@
;;!target = "x86_64"
;;!compile = true
;;!settings = ["opt_level=speed", "has_bmi1=true"]
(module
;; this should get optimized to a `bnot` in clif
(func (param i32) (result i32)
i32.const -1
local.get 0
i32.xor)
;; this should get optimized to a single `andn` instruction
(func (param i32 i32) (result i32)
local.get 0
i32.const -1
local.get 1
i32.xor
i32.and)
)
;; function u0:0:
;; pushq %rbp
;; unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
;; movq %rsp, %rbp
;; unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
;; block0:
;; jmp label1
;; block1:
;; movq %rdi, %rax
;; notl %eax, %eax
;; movq %rbp, %rsp
;; popq %rbp
;; ret
;;
;; function u0:1:
;; pushq %rbp
;; unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
;; movq %rsp, %rbp
;; unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
;; block0:
;; jmp label1
;; block1:
;; andn %eax, %esi, %edi
;; movq %rbp, %rsp
;; popq %rbp
;; ret