egraphs: disable GVN of effectful idempotent ops (temporarily). (#5808)
This is a short-term fix for the same bug (#5796) that #5800 addresses, but with less risk: it simply turns off GVN'ing of effectful-but-idempotent ops. Because we have an upcoming release, and because this is a miscompile (albeit one to do with trapping behavior), we would like to make the simplest possible fix that avoids the bug and backport it. I will then rebase #5800 on top of a revert of this commit, followed by the more complete fix.
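To illustrate what is being disabled: an effectful-but-idempotent op is one that has a side effect (it can trap) but always produces the same result or the same trap for the same inputs, such as the uadd_overflow_trap bounds-check add that appears in the deleted test below. A minimal CLIF sketch (illustrative only, not a test from the repository) of the pattern the egraph GVN previously deduplicated:

function %repeated_trapping_add(i64, i64) -> i64, i64 {
block0(v0: i64, v1: i64):
    ;; Illustrative: traps on overflow but is idempotent, so the second
    ;; copy would previously have been GVN'd into the first. With this
    ;; change, both copies are retained.
    v2 = uadd_overflow_trap v0, v1, heap_oob
    v3 = uadd_overflow_trap v0, v1, heap_oob
    return v2, v3
}

Pure-op GVN via is_pure_for_egraph is unchanged; only the merging of these side-effecting-yet-idempotent instructions (the is_mergeable_for_egraph path removed below) is switched off.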
@@ -7,7 +7,7 @@ use crate::dominator_tree::DominatorTree;
 use crate::egraph::domtree::DomTreeWithChildren;
 use crate::egraph::elaborate::Elaborator;
 use crate::fx::FxHashSet;
-use crate::inst_predicates::{is_mergeable_for_egraph, is_pure_for_egraph};
+use crate::inst_predicates::is_pure_for_egraph;
 use crate::ir::{
     DataFlowGraph, Function, Inst, InstructionData, Type, Value, ValueDef, ValueListPool,
 };
@@ -285,41 +285,10 @@ where
     fn optimize_skeleton_inst(&mut self, inst: Inst) -> bool {
         self.stats.skeleton_inst += 1;

-        // First, can we try to deduplicate? We need to keep some copy
-        // of the instruction around because it's side-effecting, but
-        // we may be able to reuse an earlier instance of it.
-        if is_mergeable_for_egraph(self.func, inst) {
-            let result = self.func.dfg.inst_results(inst)[0];
-            trace!(" -> mergeable side-effecting op {}", inst);
-            let inst = NewOrExistingInst::Existing(inst);
-            let gvn_context = GVNContext {
-                union_find: self.eclasses,
-                value_lists: &self.func.dfg.value_lists,
-            };
-
-            // Does this instruction already exist? If so, add entries to
-            // the value-map to rewrite uses of its results to the results
-            // of the original (existing) instruction. If not, optimize
-            // the new instruction.
-            let key = inst.get_inst_key(&self.func.dfg);
-            if let Some(&orig_result) = self.gvn_map.get(&key, &gvn_context) {
-                // Hit in GVN map -- reuse value.
-                self.value_to_opt_value[result] = orig_result;
-                self.eclasses.union(orig_result, result);
-                trace!(" -> merges result {} to {}", result, orig_result);
-                true
-            } else {
-                // Otherwise, insert it into the value-map.
-                self.value_to_opt_value[result] = result;
-                self.gvn_map.insert(key, result, &gvn_context);
-                trace!(" -> inserts as new (no GVN)");
-                false
-            }
-        }
-        // Otherwise, if a load or store, process it with the alias
-        // analysis to see if we can optimize it (rewrite in terms of
-        // an earlier load or stored value).
-        else if let Some(new_result) =
+        // If a load or store, process it with the alias analysis to see
+        // if we can optimize it (rewrite in terms of an earlier load or
+        // stored value).
+        if let Some(new_result) =
             self.alias_analysis
                 .process_inst(self.func, self.alias_analysis_state, inst)
         {
@@ -73,25 +73,6 @@ pub fn is_pure_for_egraph(func: &Function, inst: Inst) -> bool {
     has_one_result && (is_readonly_load || (!op.can_load() && !trivially_has_side_effects(op)))
 }

-/// Can the given instruction be merged into another copy of itself?
-/// These instructions may have side-effects, but as long as we retain
-/// the first instance of the instruction, the second and further
-/// instances are redundant if they would produce the same trap or
-/// result.
-pub fn is_mergeable_for_egraph(func: &Function, inst: Inst) -> bool {
-    let op = func.dfg.insts[inst].opcode();
-    // We can only merge one-result operators due to the way that GVN
-    // is structured in the egraph implementation.
-    let has_one_result = func.dfg.inst_results(inst).len() == 1;
-    has_one_result
-        // Loads/stores are handled by alias analysis and not
-        // otherwise mergeable.
-        && !op.can_load()
-        && !op.can_store()
-        // Can only have idempotent side-effects.
-        && (!has_side_effect(func, inst) || op.side_effects_idempotent())
-}
-
 /// Does the given instruction have any side-effect as per [has_side_effect], or else is a load,
 /// but not the get_pinned_reg opcode?
 pub fn has_lowering_side_effect(func: &Function, inst: Inst) -> bool {
@@ -1,92 +0,0 @@
-;;! target = "x86_64"
-;;!
-;;! optimize = true
-;;!
-;;! settings = [
-;;!   "enable_heap_access_spectre_mitigation=true",
-;;!   "opt_level=speed_and_size",
-;;!   "use_egraphs=true"
-;;! ]
-;;!
-;;! [globals.vmctx]
-;;! type = "i64"
-;;! vmctx = true
-;;!
-;;! [globals.heap_base]
-;;! type = "i64"
-;;! load = { base = "vmctx", offset = 0 }
-;;!
-;;! [globals.heap_bound]
-;;! type = "i64"
-;;! load = { base = "vmctx", offset = 8 }
-;;!
-;;! [[heaps]]
-;;! base = "heap_base"
-;;! min_size = 0
-;;! offset_guard_size = 0xffffffff
-;;! index_type = "i32"
-;;! style = { kind = "dynamic", bound = "heap_bound" }
-
-(module
-  (memory (export "memory") 0)
-  (func (export "load-without-offset") (param i32) (result i32 i32)
-    local.get 0
-    i32.load
-    local.get 0
-    i32.load
-  )
-  (func (export "load-with-offset") (param i32) (result i32 i32)
-    local.get 0
-    i32.load offset=1234
-    local.get 0
-    i32.load offset=1234
-  )
-)
-
-;; function u0:0(i32, i64 vmctx) -> i32, i32 fast {
-;;     gv0 = vmctx
-;;     gv1 = load.i64 notrap aligned gv0+8
-;;     gv2 = load.i64 notrap aligned gv0
-;;
-;; block0(v0: i32, v1: i64):
-;; @0057    v4 = uextend.i64 v0
-;; @0057    v5 = iconst.i64 4
-;; @0057    v6 = uadd_overflow_trap v4, v5, heap_oob  ; v5 = 4
-;; @0057    v7 = load.i64 notrap aligned v1+8
-;; @0057    v8 = load.i64 notrap aligned v1
-;; @0057    v11 = icmp ugt v6, v7
-;; @0057    v10 = iconst.i64 0
-;; @0057    v9 = iadd v8, v4
-;; @0057    v12 = select_spectre_guard v11, v10, v9  ; v10 = 0
-;; @0057    v13 = load.i32 little heap v12
-;;          v2 -> v13
-;; @005f    jump block1
-;;
-;; block1:
-;; @005f    return v13, v13
-;; }
-;;
-;; function u0:1(i32, i64 vmctx) -> i32, i32 fast {
-;;     gv0 = vmctx
-;;     gv1 = load.i64 notrap aligned gv0+8
-;;     gv2 = load.i64 notrap aligned gv0
-;;
-;; block0(v0: i32, v1: i64):
-;; @0064    v4 = uextend.i64 v0
-;; @0064    v5 = iconst.i64 1238
-;; @0064    v6 = uadd_overflow_trap v4, v5, heap_oob  ; v5 = 1238
-;; @0064    v7 = load.i64 notrap aligned v1+8
-;; @0064    v8 = load.i64 notrap aligned v1
-;; @0064    v12 = icmp ugt v6, v7
-;; @0064    v11 = iconst.i64 0
-;; @0064    v9 = iadd v8, v4
-;;          v26 = iconst.i64 1234
-;; @0064    v10 = iadd v9, v26  ; v26 = 1234
-;; @0064    v13 = select_spectre_guard v12, v11, v10  ; v11 = 0
-;; @0064    v14 = load.i32 little heap v13
-;;          v2 -> v14
-;; @006e    jump block1
-;;
-;; block1:
-;; @006e    return v14, v14
-;; }