Spectre mitigation on heap access overflow checks.

This PR adds a conditional move after each heap bounds check and routes
the address to be accessed through it. The conditional move ensures
that even if the branch is mispredicted (the access is actually out of
bounds, but speculation goes down the in-bounds path), the address
actually accessed is zero (a NULL pointer) rather than the
out-of-bounds address.
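
The shape of the guard, as a minimal Rust sketch (illustrative only: the
actual change emits CLIF icmp/trapnz plus a new selectif_spectre_guard
instruction; the function below is not part of the patch):

    /// Clamp a heap address to NULL when the bounds check fails, so that a
    /// misspeculated "in-bounds" path can only dereference address 0.
    fn guarded_heap_addr(base: u64, offset: u64, bound: u64) -> u64 {
        // The same comparison that feeds the trapping branch.
        let oob = offset >= bound;
        // Address the in-bounds path would access.
        let addr = base.wrapping_add(offset);
        // In the actual mitigation this select is a conditional move
        // (cmov/csel), which is not subject to branch prediction.
        if oob { 0 } else { addr }
    }

    fn main() {
        assert_eq!(guarded_heap_addr(0x1000, 8, 64), 0x1008);
        assert_eq!(guarded_heap_addr(0x1000, 64, 64), 0); // out of bounds -> NULL
    }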

The mitigation is controlled by a flag
(enable_heap_access_spectre_mitigation) that is on by default but can be
cleared by the embedding. To make the default possible on the current
x86 backend, the new selectif_spectre_guard instruction is given a cmov
encoding there; the new AArch64 backend lowers it through its existing
conditional-select path.

Note that the mitigation is unnecessary when we use the "huge heap"
technique on 64-bit systems, in which we allocate a range of virtual
address space such that no 32-bit offset can reach other data, so no
explicit bounds check is emitted. Hence, this only affects small-heap
configurations.
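
For concreteness, a hedged sketch of the reservation math behind the
huge-heap case (the constants are illustrative assumptions, not values
taken from this patch): if the reservation covers every address reachable
from a 32-bit index plus a 32-bit constant offset, out-of-bounds accesses
fall into unmapped guard pages and no explicit check, and therefore no
guard, is emitted.

    // Illustrative: size a reservation so that base + index + offset can never
    // leave it for any 32-bit index and 32-bit static offset.
    const MAX_INDEX: u64 = u32::MAX as u64; // largest 32-bit dynamic index
    const MAX_OFFSET: u64 = u32::MAX as u64; // largest 32-bit constant offset

    fn main() {
        let reservation = (MAX_INDEX + 1) + (MAX_OFFSET + 1); // 8 GiB
        assert_eq!(reservation, 8 << 30);
        println!("reserve {} GiB of address space per heap", reservation >> 30);
    }
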
Chris Fallin
2020-06-29 14:04:26 -07:00
parent ae634417a0
commit e694fb1312
9 changed files with 148 additions and 47 deletions


@@ -1355,6 +1355,7 @@ fn define_alu(
     let rotr = shared.by_name("rotr");
     let rotr_imm = shared.by_name("rotr_imm");
     let selectif = shared.by_name("selectif");
+    let selectif_spectre_guard = shared.by_name("selectif_spectre_guard");
     let sshr = shared.by_name("sshr");
     let sshr_imm = shared.by_name("sshr_imm");
     let trueff = shared.by_name("trueff");
@@ -1568,6 +1569,11 @@ fn define_alu(
     // Conditional move (a.k.a integer select).
     e.enc_i32_i64(selectif, rec_cmov.opcodes(&CMOV_OVERFLOW));
+    // A Spectre-guard integer select is exactly the same as a selectif, but
+    // is not associated with any other legalization rules and is not
+    // recognized by any optimizations, so it must arrive here unmodified
+    // and in its original place.
+    e.enc_i32_i64(selectif_spectre_guard, rec_cmov.opcodes(&CMOV_OVERFLOW));
 }

 #[inline(never)]


@@ -1748,6 +1748,34 @@ pub(crate) fn define(
         .operands_out(vec![a]),
     );

+    ig.push(
+        Inst::new(
+            "selectif_spectre_guard",
+            r#"
+        Conditional select intended for Spectre guards.
+
+        This operation is semantically equivalent to a selectif instruction.
+        However, it is guaranteed to not be removed or otherwise altered by any
+        optimization pass, and is guaranteed to result in a conditional-move
+        instruction, not a branch-based lowering. As such, it is suitable
+        for use when producing Spectre guards. For example, a bounds-check
+        may guard against unsafe speculation past a bounds-check conditional
+        branch by passing the address or index to be accessed through a
+        conditional move, also gated on the same condition. Because no
+        Spectre-vulnerable processors are known to perform speculation on
+        conditional move instructions, this is guaranteed to pick the
+        correct input. If the selected input in case of overflow is a "safe"
+        value, for example a null pointer that causes an exception in the
+        speculative path, this ensures that no Spectre vulnerability will
+        exist.
+        "#,
+            &formats.int_select,
+        )
+        .operands_in(vec![cc, flags, x, y])
+        .operands_out(vec![a])
+        .other_side_effects(true),
+    );
+
     let c = &Operand::new("c", Any).with_doc("Controlling value to test");
     ig.push(
         Inst::new(


@@ -264,5 +264,23 @@ pub(crate) fn define() -> SettingGroup {
         true,
     );

+    // Spectre options.
+
+    settings.add_bool(
+        "enable_heap_access_spectre_mitigation",
+        r#"
+        Enable Spectre mitigation on heap bounds checks.
+
+        This is a no-op for any heap that needs no bounds checks; e.g.,
+        if the limit is static and the guard region is large enough that
+        the index cannot reach past it.
+
+        This option is enabled by default because it is highly
+        recommended for secure sandboxing. The embedder should consider
+        the security implications carefully before disabling this option.
+        "#,
+        true,
+    );
+
     settings.build()
 }

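For reference, a sketch of how an embedder could override the new setting
through cranelift-codegen's settings builder (only the flag name comes from
this patch; the surrounding calls are assumed to be the usual
cranelift_codegen::settings API):

    use cranelift_codegen::settings::{self, Configurable, Flags};

    fn flags_without_spectre_guard() -> Flags {
        let mut builder = settings::builder();
        // The guard is on by default; clear it only if the heap layout already
        // makes explicit bounds checks unnecessary.
        builder
            .set("enable_heap_access_spectre_mitigation", "false")
            .expect("flag is defined");
        Flags::new(builder)
    }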

@@ -1023,7 +1023,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             // Nothing.
         }

-        Opcode::Select | Opcode::Selectif => {
+        Opcode::Select | Opcode::Selectif | Opcode::SelectifSpectreGuard => {
             let cond = if op == Opcode::Select {
                 let (cmp_op, narrow_mode) = if ty_bits(ctx.input_ty(insn, 0)) > 32 {
                     (ALUOp::SubS64, NarrowValueMode::ZeroExtend64)


@@ -66,19 +66,14 @@ fn dynamic_addr(
     // Start with the bounds check. Trap if `offset + access_size > bound`.
     let bound = pos.ins().global_value(offset_ty, bound_gv);
-    let oob;
-    if access_size == 1 {
+    let (cc, lhs, bound) = if access_size == 1 {
         // `offset > bound - 1` is the same as `offset >= bound`.
-        oob = pos
-            .ins()
-            .icmp(IntCC::UnsignedGreaterThanOrEqual, offset, bound);
+        (IntCC::UnsignedGreaterThanOrEqual, offset, bound)
     } else if access_size <= min_size {
         // We know that bound >= min_size, so here we can compare `offset > bound - access_size`
         // without wrapping.
         let adj_bound = pos.ins().iadd_imm(bound, -(access_size as i64));
-        oob = pos
-            .ins()
-            .icmp(IntCC::UnsignedGreaterThan, offset, adj_bound);
+        (IntCC::UnsignedGreaterThan, offset, adj_bound)
     } else {
         // We need an overflow check for the adjusted offset.
         let access_size_val = pos.ins().iconst(offset_ty, access_size as i64);
@@ -88,13 +83,27 @@ fn dynamic_addr(
             overflow,
             ir::TrapCode::HeapOutOfBounds,
         );
-        oob = pos
-            .ins()
-            .icmp(IntCC::UnsignedGreaterThan, adj_offset, bound);
-    }
+        (IntCC::UnsignedGreaterThan, adj_offset, bound)
+    };
+    let oob = pos.ins().icmp(cc, lhs, bound);
     pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
-    compute_addr(isa, inst, heap, addr_ty, offset, offset_ty, pos.func);
+
+    let spectre_oob_comparison = if isa.flags().enable_heap_access_spectre_mitigation() {
+        Some((cc, lhs, bound))
+    } else {
+        None
+    };
+
+    compute_addr(
+        isa,
+        inst,
+        heap,
+        addr_ty,
+        offset,
+        offset_ty,
+        pos.func,
+        spectre_oob_comparison,
+    );
 }

 /// Expand a `heap_addr` for a static heap.
@@ -146,20 +155,35 @@ fn static_addr(
     // With that we have an optimization here where with 32-bit offsets and
     // `bound - access_size >= 4GB` we can omit a bounds check.
     let limit = bound - access_size;
+    let mut spectre_oob_comparison = None;
     if offset_ty != ir::types::I32 || limit < 0xffff_ffff {
-        let oob = if limit & 1 == 1 {
+        let (cc, lhs, limit_imm) = if limit & 1 == 1 {
             // Prefer testing `offset >= limit - 1` when limit is odd because an even number is
             // likely to be a convenient constant on ARM and other RISC architectures.
-            pos.ins()
-                .icmp_imm(IntCC::UnsignedGreaterThanOrEqual, offset, limit as i64 - 1)
+            let limit = limit as i64 - 1;
+            (IntCC::UnsignedGreaterThanOrEqual, offset, limit)
         } else {
-            pos.ins()
-                .icmp_imm(IntCC::UnsignedGreaterThan, offset, limit as i64)
+            let limit = limit as i64;
+            (IntCC::UnsignedGreaterThan, offset, limit)
         };
+        let oob = pos.ins().icmp_imm(cc, lhs, limit_imm);
         pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
+        if isa.flags().enable_heap_access_spectre_mitigation() {
+            let limit = pos.ins().iconst(offset_ty, limit_imm);
+            spectre_oob_comparison = Some((cc, lhs, limit));
+        }
     }
-    compute_addr(isa, inst, heap, addr_ty, offset, offset_ty, pos.func);
+
+    compute_addr(
+        isa,
+        inst,
+        heap,
+        addr_ty,
+        offset,
+        offset_ty,
+        pos.func,
+        spectre_oob_comparison,
+    );
 }

 /// Emit code for the base address computation of a `heap_addr` instruction.
@@ -171,6 +195,11 @@ fn compute_addr(
     mut offset: ir::Value,
     offset_ty: ir::Type,
     func: &mut ir::Function,
+    // If we are performing Spectre mitigation with conditional selects, the
+    // values to compare and the condition code that indicates an out-of bounds
+    // condition; on this condition, the conditional move will choose a
+    // speculatively safe address (a zero / null pointer) instead.
+    spectre_oob_comparison: Option<(IntCC, ir::Value, ir::Value)>,
 ) {
     let mut pos = FuncCursor::new(func).at_inst(inst);
     pos.use_srcloc(inst);
@@ -198,5 +227,15 @@ fn compute_addr(
         pos.ins().global_value(addr_ty, base_gv)
     };

-    pos.func.dfg.replace(inst).iadd(base, offset);
+    if let Some((cc, a, b)) = spectre_oob_comparison {
+        let final_addr = pos.ins().iadd(base, offset);
+        let zero = pos.ins().iconst(addr_ty, 0);
+        let flags = pos.ins().ifcmp(a, b);
+        pos.func
+            .dfg
+            .replace(inst)
+            .selectif_spectre_guard(addr_ty, cc, flags, zero, final_addr);
+    } else {
+        pos.func.dfg.replace(inst).iadd(base, offset);
+    }
 }


@@ -399,6 +399,7 @@ emit_all_ones_funcaddrs = false
 enable_probestack = true
 probestack_func_adjusts_sp = false
 enable_jump_tables = true
+enable_heap_access_spectre_mitigation = true
 "#
 );
 assert_eq!(f.opt_level(), super::OptLevel::None);


@@ -1,4 +1,5 @@
 test legalizer
+set enable_heap_access_spectre_mitigation=false
 target x86_64

 ; Test legalization for various forms of heap addresses.


@@ -1,5 +1,6 @@
 ; Test the legalization of memory objects.
 test legalizer
+set enable_heap_access_spectre_mitigation=false
 target x86_64

 ; regex: V=v\d+


@@ -1,4 +1,5 @@
 test compile
+set enable_heap_access_spectre_mitigation=true
 target aarch64

 function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
@@ -11,21 +12,24 @@ block0(v0: i64, v1: i32):
   return v2
 }

+; check: Block 0:
 ; check: stp fp, lr, [sp, #-16]!
 ; nextln: mov fp, sp
 ; nextln: ldur w2, [x0]
 ; nextln: add w2, w2, #0
 ; nextln: subs wzr, w1, w2
 ; nextln: b.ls label1 ; b label2
-; nextln: Block 1:
+; check: Block 1:
 ; check: add x0, x0, x1, UXTW
+; nextln: subs wzr, w1, w2
+; nextln: movz x1, #0
+; nextln: csel x0, x1, x0, hi
 ; nextln: mov sp, fp
 ; nextln: ldp fp, lr, [sp], #16
 ; nextln: ret
-; nextln: Block 2:
+; check: Block 2:
 ; check: udf

 function %static_heap_check(i64 vmctx, i32) -> i64 {
   gv0 = vmctx
   heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
@@ -35,15 +39,18 @@ block0(v0: i64, v1: i32):
   return v2
 }

+; check: Block 0:
 ; check: stp fp, lr, [sp, #-16]!
 ; nextln: mov fp, sp
 ; nextln: subs wzr, w1, #65536
 ; nextln: b.ls label1 ; b label2
-; nextln: Block 1:
+; check: Block 1:
 ; check: add x0, x0, x1, UXTW
+; nextln: subs wzr, w1, #65536
+; nextln: movz x1, #0
+; nextln: csel x0, x1, x0, hi
 ; nextln: mov sp, fp
 ; nextln: ldp fp, lr, [sp], #16
 ; nextln: ret
-; nextln: Block 2:
+; check: Block 2:
 ; check: udf