Spectre mitigation on heap access overflow checks.

This PR adds a conditional move following a heap bounds check through
which the address to be accessed flows. This conditional move ensures
that even if the branch is mispredicted (the access is actually out of
bounds, but speculation goes down the in-bounds path), the actually
accessed address is zero (a NULL pointer) rather than the out-of-bounds
address.
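
For intuition, here is a minimal standalone sketch of the idea
(hypothetical Rust, not code from this PR; the actual change emits a
`selectif_spectre_guard` IR instruction, as shown in the diffs below):

// Sketch only: the comparison that feeds the bounds-check trap also
// feeds a branch-free select, so even if the trap's branch is
// mispredicted, the address reaching the load is 0 rather than the
// attacker-controlled out-of-bounds address.
fn guarded_addr(base: u64, offset: u64, adjusted_bound: u64) -> u64 {
    // Same condition the bounds-check trap evaluates.
    let oob = offset > adjusted_bound;
    let addr = base.wrapping_add(offset);
    // All-ones mask when in bounds, all-zeros when out of bounds. The
    // real backend lowers the guard to a conditional move (cmov/csel),
    // so no new predictable branch is introduced.
    let in_bounds_mask = (oob as u64).wrapping_sub(1);
    addr & in_bounds_mask
}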

The mitigation is controlled by a flag that is off by default but can
be set by the embedding. Note that in order to turn it on by default,
we would need to add conditional-move support to the current x86
backend, which does not appear to be present. Once the deprecated
backend is removed in favor of the new backend, IMHO we should turn
this flag on by default.
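
For example, an embedding could opt in via Cranelift's settings builder
(a sketch; the flag name is the one added in this PR, the surrounding
code is illustrative):

use cranelift_codegen::settings::{self, Configurable};

fn flags_with_spectre_guard() -> settings::Flags {
    let mut builder = settings::builder();
    // Off by default; the embedding enables it explicitly.
    builder
        .enable("enable_heap_access_spectre_mitigation")
        .expect("flag exists");
    settings::Flags::new(builder)
}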

Note that the mitigation is unnecessary when we use the "huge heap"
technique on 64-bit systems, in which we allocate a range of virtual
address space large enough that no 32-bit offset can reach other data.
Hence, this only affects small-heap configurations.
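
As a back-of-the-envelope check (sizes are illustrative, not the exact
constants any runtime uses):

fn main() {
    // With a reservation larger than any possible 32-bit offset, a
    // misspeculated access can only hit inaccessible guard pages inside
    // the reservation, never other data, so no conditional move is needed.
    let max_offset = u32::MAX as u64; // largest wasm32 offset
    let reservation: u64 = 8 << 30; // e.g. an 8 GiB virtual reservation
    assert!(max_offset < reservation);
}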

commit e694fb1312
parent ae634417a0
Author: Chris Fallin
Date:   2020-06-29 14:04:26 -07:00

9 changed files with 148 additions and 47 deletions


@@ -1023,7 +1023,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             // Nothing.
         }
-        Opcode::Select | Opcode::Selectif => {
+        Opcode::Select | Opcode::Selectif | Opcode::SelectifSpectreGuard => {
             let cond = if op == Opcode::Select {
                 let (cmp_op, narrow_mode) = if ty_bits(ctx.input_ty(insn, 0)) > 32 {
                     (ALUOp::SubS64, NarrowValueMode::ZeroExtend64)


@@ -66,19 +66,14 @@ fn dynamic_addr(
     // Start with the bounds check. Trap if `offset + access_size > bound`.
     let bound = pos.ins().global_value(offset_ty, bound_gv);
-    let oob;
-    if access_size == 1 {
+    let (cc, lhs, bound) = if access_size == 1 {
         // `offset > bound - 1` is the same as `offset >= bound`.
-        oob = pos
-            .ins()
-            .icmp(IntCC::UnsignedGreaterThanOrEqual, offset, bound);
+        (IntCC::UnsignedGreaterThanOrEqual, offset, bound)
     } else if access_size <= min_size {
         // We know that bound >= min_size, so here we can compare `offset > bound - access_size`
         // without wrapping.
         let adj_bound = pos.ins().iadd_imm(bound, -(access_size as i64));
-        oob = pos
-            .ins()
-            .icmp(IntCC::UnsignedGreaterThan, offset, adj_bound);
+        (IntCC::UnsignedGreaterThan, offset, adj_bound)
     } else {
         // We need an overflow check for the adjusted offset.
         let access_size_val = pos.ins().iconst(offset_ty, access_size as i64);
@@ -88,13 +83,27 @@ fn dynamic_addr(
             overflow,
             ir::TrapCode::HeapOutOfBounds,
         );
-        oob = pos
-            .ins()
-            .icmp(IntCC::UnsignedGreaterThan, adj_offset, bound);
-    }
+        (IntCC::UnsignedGreaterThan, adj_offset, bound)
+    };
+    let oob = pos.ins().icmp(cc, lhs, bound);
     pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
 
-    compute_addr(isa, inst, heap, addr_ty, offset, offset_ty, pos.func);
+    let spectre_oob_comparison = if isa.flags().enable_heap_access_spectre_mitigation() {
+        Some((cc, lhs, bound))
+    } else {
+        None
+    };
+
+    compute_addr(
+        isa,
+        inst,
+        heap,
+        addr_ty,
+        offset,
+        offset_ty,
+        pos.func,
+        spectre_oob_comparison,
+    );
 }
 
 /// Expand a `heap_addr` for a static heap.
@@ -146,20 +155,35 @@ fn static_addr(
     // With that we have an optimization here where with 32-bit offsets and
     // `bound - access_size >= 4GB` we can omit a bounds check.
     let limit = bound - access_size;
+    let mut spectre_oob_comparison = None;
     if offset_ty != ir::types::I32 || limit < 0xffff_ffff {
-        let oob = if limit & 1 == 1 {
+        let (cc, lhs, limit_imm) = if limit & 1 == 1 {
             // Prefer testing `offset >= limit - 1` when limit is odd because an even number is
             // likely to be a convenient constant on ARM and other RISC architectures.
-            pos.ins()
-                .icmp_imm(IntCC::UnsignedGreaterThanOrEqual, offset, limit as i64 - 1)
+            let limit = limit as i64 - 1;
+            (IntCC::UnsignedGreaterThanOrEqual, offset, limit)
         } else {
-            pos.ins()
-                .icmp_imm(IntCC::UnsignedGreaterThan, offset, limit as i64)
+            let limit = limit as i64;
+            (IntCC::UnsignedGreaterThan, offset, limit)
         };
+        let oob = pos.ins().icmp_imm(cc, lhs, limit_imm);
         pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
+        if isa.flags().enable_heap_access_spectre_mitigation() {
+            let limit = pos.ins().iconst(offset_ty, limit_imm);
+            spectre_oob_comparison = Some((cc, lhs, limit));
+        }
     }
-    compute_addr(isa, inst, heap, addr_ty, offset, offset_ty, pos.func);
+    compute_addr(
+        isa,
+        inst,
+        heap,
+        addr_ty,
+        offset,
+        offset_ty,
+        pos.func,
+        spectre_oob_comparison,
+    );
 }
 
 /// Emit code for the base address computation of a `heap_addr` instruction.
@@ -171,6 +195,11 @@ fn compute_addr(
     mut offset: ir::Value,
     offset_ty: ir::Type,
     func: &mut ir::Function,
+    // If we are performing Spectre mitigation with conditional selects, the
+    // values to compare and the condition code that indicates an out-of-bounds
+    // condition; on this condition, the conditional move will choose a
+    // speculatively safe address (a zero / null pointer) instead.
+    spectre_oob_comparison: Option<(IntCC, ir::Value, ir::Value)>,
 ) {
     let mut pos = FuncCursor::new(func).at_inst(inst);
     pos.use_srcloc(inst);
@@ -198,5 +227,15 @@ fn compute_addr(
         pos.ins().global_value(addr_ty, base_gv)
     };
 
-    pos.func.dfg.replace(inst).iadd(base, offset);
+    if let Some((cc, a, b)) = spectre_oob_comparison {
+        let final_addr = pos.ins().iadd(base, offset);
+        let zero = pos.ins().iconst(addr_ty, 0);
+        let flags = pos.ins().ifcmp(a, b);
+        pos.func
+            .dfg
+            .replace(inst)
+            .selectif_spectre_guard(addr_ty, cc, flags, zero, final_addr);
+    } else {
+        pos.func.dfg.replace(inst).iadd(base, offset);
+    }
 }


@@ -399,6 +399,7 @@ emit_all_ones_funcaddrs = false
 enable_probestack = true
 probestack_func_adjusts_sp = false
 enable_jump_tables = true
+enable_heap_access_spectre_mitigation = true
 "#
     );
     assert_eq!(f.opt_level(), super::OptLevel::None);