wasmtime/cranelift/codegen/src/inst_predicates.rs
Chris Fallin 7b8854f803 egraphs: fix handling of effectful-but-idempotent ops and GVN. (#5800)
* Revert "egraphs: disable GVN of effectful idempotent ops (temporarily). (#5808)"

This reverts commit c7e2571866.

* egraphs: fix handling of effectful-but-idempotent ops and GVN.

This PR addresses #5796: ops that are effectful (i.e., that remain in
the side-effecting skeleton, which we keep in the `Layout` while the
egraph exists) but idempotent, and thus mergeable by a GVN pass, are
currently not handled properly.

GVN is still possible on effectful but idempotent ops precisely because
our GVN does not create partial redundancies: it removes an instruction
only when it is dominated by an identical instruction. An instruction
will not be "hoisted" to a point where it could execute in the optimized
code but not in the original.
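
As a minimal sketch of that invariant (illustrative names only, not the
actual Cranelift API):

```rust
/// Hypothetical stand-in for an instruction ID, for illustration only.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct Inst(u32);

/// The soundness rule sketched above: `dup` may be deleted only if an
/// identical instruction `first` dominates it. We never move `first`,
/// so every path reaching `dup` has already executed `first`, and no
/// effect or trap occurs on a path where it would not have originally.
fn may_merge(dominates: impl Fn(Inst, Inst) -> bool, first: Inst, dup: Inst) -> bool {
    dominates(first, dup)
}
```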

However, there are really two parts to the egraph implementation that
produce this effect: the deduplication on insertion into the egraph, and
the elaboration with a scoped hashmap. The deduplication lets us give a
single name (value ID) to all copies of an identical instruction, and
then elaboration will re-create duplicates if GVN should not hoist or
merge some of them.

Because deduplication need not worry about dominance or scopes, we use a
simple (non-scoped) hashmap to dedup/intern ops as "egraph nodes".

When we added support for GVN'ing effectful but idempotent ops (#5594),
we kept the use of this simple dedup'ing hashmap, but these ops do not
get elaborated; instead they stay in the side-effecting skeleton. Thus,
we inadvertently created the potential for unsound code-motion effects:
copies of such an op could be merged even when neither dominates the other.

The proposal in #5796 would solve this in a clean way by treating these
ops as pure again, and keeping them out of the skeleton, instead putting
"force" pseudo-ops in the skeleton. However, this is a little more
complex than I would like, and I've realized that @jameysharp's earlier
suggestion is much simpler: we can keep an actual scoped hashmap
separately just for the effectful-but-idempotent ops, and use it to GVN
while we build the egraph. In effect, we're fusing a separate GVN pass
with the egraph pass (but letting it interact corecursively with
egraph rewrites). This is in principle similar to how we keep a separate
map for loads and fuse this pass with the egraph rewrite pass as well.
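
As a rough sketch of that fused GVN (illustrative types and names only;
`ScopedMap`, `InstKey`, and `gvn_skeleton_inst` are hypothetical
stand-ins for this note, not the actual cranelift code, which has its
own `ScopedHashMap`):

```rust
use std::collections::HashMap;

/// Stand-in for a shallowly hashable `InstructionData`.
#[derive(Clone, PartialEq, Eq, Hash)]
struct InstKey(u64);

/// Stand-in for a Cranelift `Value` ID.
#[derive(Clone, Copy)]
struct Value(u32);

/// One layer per open dominator-tree scope; a hit in any open layer is
/// by construction at a dominating program point.
struct ScopedMap {
    layers: Vec<HashMap<InstKey, Value>>,
}

impl ScopedMap {
    /// Enter a child scope during the dominator-tree traversal.
    fn enter_scope(&mut self) {
        self.layers.push(HashMap::new());
    }

    /// Leave it again: bindings made in the child must not leak out.
    fn leave_scope(&mut self) {
        self.layers.pop();
    }

    fn get(&self, key: &InstKey) -> Option<Value> {
        self.layers.iter().rev().find_map(|layer| layer.get(key).copied())
    }

    fn insert(&mut self, key: InstKey, value: Value) {
        self.layers.last_mut().unwrap().insert(key, value);
    }
}

/// GVN an effectful-but-idempotent skeleton op while building the
/// egraph: reuse a dominating identical op if one exists, else keep
/// this op and remember it for the ops it dominates.
fn gvn_skeleton_inst(map: &mut ScopedMap, key: InstKey, result: Value) -> Value {
    match map.get(&key) {
        // An identical op dominates this one: alias our result to it
        // and drop this instruction from the skeleton.
        Some(existing) => existing,
        // First occurrence on this path of the domtree: keep the op.
        None => {
            map.insert(key, result);
            result
        }
    }
}
```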

Note that we can use a `ScopedHashMap` here without the "context" (as
needed by `CtxHashMap`) because, as noted by @jameysharp, in practice
the ops we want to GVN have all their args inline. Equality on the
`InstructionData` itself is conservative: two insts whose struct
contents compare shallowly equal are definitely identical, but identical
insts in a deep-equality sense may not compare shallowly equal, due to
list indirection. This is fine for GVN, because it is still sound to
skip any given GVN opportunity (and keep the original instructions).
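
A hypothetical illustration of that caveat (simplified shapes, not the
real `InstructionData` variants):

```rust
/// Simplified stand-ins for the two argument layouts (illustrative only).
#[allow(dead_code)]
enum InstData {
    /// Args stored inline: shallow equality is full, deep equality.
    Binary { opcode: u16, args: [u32; 2] },
    /// Args stored behind a handle into a separate value-list pool:
    /// shallow equality compares the *handle*, not the list contents.
    Call { opcode: u16, arg_list: u32 },
}
```

Two `Call`s whose `arg_list` handles differ but whose list contents
match would compare shallowly unequal; GVN then merely skips that
merge, which is sound.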

Fixes #5796.

* Add comments from review.
2023-03-02 02:10:42 +00:00

//! Instruction predicates/properties, shared by various analyses.

use crate::ir::immediates::Offset32;
use crate::ir::{self, Block, DataFlowGraph, Function, Inst, InstructionData, Opcode, Type, Value};
use cranelift_entity::EntityRef;

/// Preserve instructions with used result values.
pub fn any_inst_results_used(inst: Inst, live: &[bool], dfg: &DataFlowGraph) -> bool {
    dfg.inst_results(inst).iter().any(|v| live[v.index()])
}

/// Test whether the given opcode is unsafe to even consider as side-effect-free.
#[inline(always)]
fn trivially_has_side_effects(opcode: Opcode) -> bool {
    opcode.is_call()
        || opcode.is_branch()
        || opcode.is_terminator()
        || opcode.is_return()
        || opcode.can_trap()
        || opcode.other_side_effects()
        || opcode.can_store()
}

/// Load instructions without the `notrap` flag are defined to trap when
/// operating on inaccessible memory, so we can't treat them as
/// side-effect-free even if the loaded value is unused.
#[inline(always)]
fn is_load_with_defined_trapping(opcode: Opcode, data: &InstructionData) -> bool {
    if !opcode.can_load() {
        return false;
    }
    match *data {
        InstructionData::StackLoad { .. } => false,
        InstructionData::Load { flags, .. } => !flags.notrap(),
        _ => true,
    }
}

/// Does the given instruction have any side-effect that would preclude it
/// from being removed when its value is unused?
#[inline(always)]
pub fn has_side_effect(func: &Function, inst: Inst) -> bool {
    let data = &func.dfg.insts[inst];
    let opcode = data.opcode();
    trivially_has_side_effects(opcode) || is_load_with_defined_trapping(opcode, data)
}

/// Does the given instruction behave as a "pure" node with respect to
/// aegraph semantics?
///
/// - Actual pure nodes (arithmetic, etc)
/// - Loads with the `readonly` flag set
pub fn is_pure_for_egraph(func: &Function, inst: Inst) -> bool {
    let is_readonly_load = match func.dfg.insts[inst] {
        InstructionData::Load {
            opcode: Opcode::Load,
            flags,
            ..
        } => flags.readonly() && flags.notrap(),
        _ => false,
    };

    // Multi-value results do not play nicely with much of the egraph
    // infrastructure. They are in practice used only for multi-return
    // calls and some other odd instructions (e.g. iadd_cout) which,
    // for now, we can afford to leave in place as opaque
    // side-effecting ops. So if more than one result, then the inst
    // is "not pure". Similarly, ops with zero results can be used
    // only for their side-effects, so are never pure. (Or if they
    // are, we can always trivially eliminate them with no effect.)
    let has_one_result = func.dfg.inst_results(inst).len() == 1;

    let op = func.dfg.insts[inst].opcode();

    has_one_result && (is_readonly_load || (!op.can_load() && !trivially_has_side_effects(op)))
}

/// Can the given instruction be merged into another copy of itself?
/// These instructions may have side-effects, but as long as we retain
/// the first instance of the instruction, the second and further
/// instances are redundant if they would produce the same trap or
/// result.
pub fn is_mergeable_for_egraph(func: &Function, inst: Inst) -> bool {
    let op = func.dfg.insts[inst].opcode();
    // We can only merge one-result operators due to the way that GVN
    // is structured in the egraph implementation.
    let has_one_result = func.dfg.inst_results(inst).len() == 1;
    has_one_result
        // Loads/stores are handled by alias analysis and not
        // otherwise mergeable.
        && !op.can_load()
        && !op.can_store()
        // Can only have idempotent side-effects.
        && (!has_side_effect(func, inst) || op.side_effects_idempotent())
}

/// Does the given instruction have any side-effect as per
/// [has_side_effect], or else is a load, but not the get_pinned_reg opcode?
pub fn has_lowering_side_effect(func: &Function, inst: Inst) -> bool {
    let op = func.dfg.insts[inst].opcode();
    op != Opcode::GetPinnedReg && (has_side_effect(func, inst) || op.can_load())
}

/// Is the given instruction a constant value (`iconst`, `fconst`) that can be
/// represented in 64 bits?
pub fn is_constant_64bit(func: &Function, inst: Inst) -> Option<u64> {
    let data = &func.dfg.insts[inst];
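    // A `null` reference is represented here as the constant 0.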
    if data.opcode() == Opcode::Null {
        return Some(0);
    }
    match data {
        &InstructionData::UnaryImm { imm, .. } => Some(imm.bits() as u64),
        &InstructionData::UnaryIeee32 { imm, .. } => Some(imm.bits() as u64),
        &InstructionData::UnaryIeee64 { imm, .. } => Some(imm.bits()),
        _ => None,
    }
}

/// Get the address, offset, and access type from the given instruction, if any.
pub fn inst_addr_offset_type(func: &Function, inst: Inst) -> Option<(Value, Offset32, Type)> {
    let data = &func.dfg.insts[inst];
    match data {
        InstructionData::Load { arg, offset, .. } => {
            let ty = func.dfg.value_type(func.dfg.inst_results(inst)[0]);
            Some((*arg, *offset, ty))
        }
        InstructionData::LoadNoOffset { arg, .. } => {
            let ty = func.dfg.value_type(func.dfg.inst_results(inst)[0]);
            Some((*arg, 0.into(), ty))
        }
        InstructionData::Store { args, offset, .. } => {
            let ty = func.dfg.value_type(args[0]);
            Some((args[1], *offset, ty))
        }
        InstructionData::StoreNoOffset { args, .. } => {
            let ty = func.dfg.value_type(args[0]);
            Some((args[1], 0.into(), ty))
        }
        _ => None,
    }
}

/// Get the store data, if any, from an instruction.
pub fn inst_store_data(func: &Function, inst: Inst) -> Option<Value> {
    let data = &func.dfg.insts[inst];
    match data {
        InstructionData::Store { args, .. } | InstructionData::StoreNoOffset { args, .. } => {
            Some(args[0])
        }
        _ => None,
    }
}

/// Determine whether this opcode behaves as a memory fence, i.e.,
/// prohibits any moving of memory accesses across it.
pub fn has_memory_fence_semantics(op: Opcode) -> bool {
    match op {
        Opcode::AtomicRmw
        | Opcode::AtomicCas
        | Opcode::AtomicLoad
        | Opcode::AtomicStore
        | Opcode::Fence
        | Opcode::Debugtrap => true,
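        // Calls may read or write arbitrary memory, so they must also
        // order memory accesses.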
        Opcode::Call | Opcode::CallIndirect => true,
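        // A trapping op ends execution on the trapping path, so memory
        // accesses must not be reordered across it.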
        op if op.can_trap() => true,
        _ => false,
    }
}

/// Visit all successors of a block with a given visitor closure. The closure
/// arguments are the branch instruction that is used to reach the successor,
/// the successor block itself, and a flag indicating whether the block is
/// branched to via a table entry.
pub(crate) fn visit_block_succs<F: FnMut(Inst, Block, bool)>(
    f: &Function,
    block: Block,
    mut visit: F,
) {
    if let Some(inst) = f.layout.last_inst(block) {
        match &f.dfg.insts[inst] {
            ir::InstructionData::Jump {
                destination: dest, ..
            } => {
                visit(inst, dest.block(&f.dfg.value_lists), false);
            }

            ir::InstructionData::Brif {
                blocks: [block_then, block_else],
                ..
            } => {
                visit(inst, block_then.block(&f.dfg.value_lists), false);
                visit(inst, block_else.block(&f.dfg.value_lists), false);
            }

            ir::InstructionData::BranchTable { table, .. } => {
                let pool = &f.dfg.value_lists;
                let table = &f.stencil.dfg.jump_tables[*table];

                // The default block is reached via a direct conditional branch,
                // so it is not part of the table. We visit the default block
                // first explicitly, to mirror the traversal order of
                // `JumpTableData::all_branches`, and transitively the order of
                // `InstructionData::branch_destination`.
                //
                // Additionally, this case is why we are unable to replace this
                // whole function with a loop over `branch_destination`: we need
                // to report which branch targets come from the table vs. the
                // default.
                visit(inst, table.default_block().block(pool), false);

                for dest in table.as_slice() {
                    visit(inst, dest.block(pool), true);
                }
            }

            inst => debug_assert!(!inst.opcode().is_branch()),
        }
    }
}