Remove heaps from core Cranelift, push them into cranelift-wasm (#5386)
* cranelift-wasm: translate Wasm loads into lower-level CLIF operations
Rather than using `heap_{load,store,addr}` (see the sketch after this list).
* cranelift: Remove the `heap_{addr,load,store}` instructions
These are now legalized in the `cranelift-wasm` frontend.
* cranelift: Remove the `ir::Heap` entity from CLIF
* Port basic memory operation tests to .wat filetests
* Remove test for verifying CLIF heaps
* Remove `heap_addr` from replace_branching_instructions_and_cfg_predecessors.clif test
* Remove `heap_addr` from readonly.clif test
* Remove `heap_addr` from `table_addr.clif` test
* Remove `heap_addr` from the simd-fvpromote_low.clif test
* Remove `heap_addr` from simd-fvdemote.clif test
* Remove `heap_addr` from the load-op-store.clif test
* Remove the CLIF heap runtest
* Remove `heap_addr` from the global_value.clif test
* Remove `heap_addr` from fpromote.clif runtests
* Remove `heap_addr` from fdemote.clif runtests
* Remove `heap_addr` from memory.clif parser test
* Remove `heap_addr` from reject_load_readonly.clif test
* Remove `heap_addr` from reject_load_notrap.clif test
* Remove `heap_addr` from load_readonly_notrap.clif test
* Remove `static-heap-without-guard-pages.clif` test
Will be subsumed when we port `make-heap-load-store-tests.sh` to generating
`.wat` tests.
* Remove `static-heap-with-guard-pages.clif` test
Will be subsumed when we port `make-heap-load-store-tests.sh` over to `.wat`
tests.
* Remove more heap tests
These will be subsumed by porting `make-heap-load-store-tests.sh` over to `.wat`
tests.
* Remove `heap_addr` from `simple-alias.clif` test
* Remove `heap_addr` from partial-redundancy.clif test
* Remove `heap_addr` from multiple-blocks.clif test
* Remove `heap_addr` from fence.clif test
* Remove `heap_addr` from extends.clif test
* Remove runtests that rely on heaps
Heaps no longer exist in CLIF or the interpreter.
* Add generated load/store `.wat` tests
* Enable memory-related wasm features in `.wat` tests
* Remove CLIF heap from fcmp-mem-bug.clif test
* Add a mode for compiling `.wat` all the way to assembly in filetests
* Also generate WAT to assembly tests in `make-load-store-tests.sh`
* cargo fmt
* Reinstate `f{de,pro}mote.clif` tests without the heap bits
* Remove undefined doc link
* Remove outdated SVG and dot file from docs
* Add docs about `None` returns for base address computation helpers
* Factor out `env.heap_access_spectre_mitigation()` to a local
* Expand docs for `FuncEnvironment::heaps` trait method
* Restore f{de,pro}mote+load clif runtests with stack memory
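
As a rough sketch of the new translation path (a simplified illustration of the
code in this PR, not a verbatim excerpt; `memarg`, `builder`, `state`, and
`environ` are assumed to be in scope as in `translate_operator`), a Wasm
`i32.load` now becomes an explicit bounds check plus an ordinary CLIF load:

    // Pop the Wasm index, bounds check it, and compute the native address.
    // `prepare_addr` returns `None` when the access must unconditionally
    // trap; the macro then marks the current state unreachable and returns.
    let (flags, base) = unwrap_or_return_unreachable_state!(
        state,
        prepare_addr(memarg, 4, builder, state, environ)?
    );
    // The access itself is a plain CLIF load rather than `heap_load`.
    let loaded = builder.ins().load(ir::types::I32, flags, base, 0);
    state.push1(loaded);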
@@ -71,6 +71,8 @@
 //! <https://github.com/bytecodealliance/cranelift/pull/1236>
 //! ("Relax verification to allow I8X16 to act as a default vector type")
 
+mod bounds_checks;
+
 use super::{hash_map, HashMap};
 use crate::environ::{FuncEnvironment, GlobalVariable};
 use crate::state::{ControlStackFrame, ElseData, FuncTranslationState};
@@ -95,6 +97,24 @@ use std::convert::TryFrom;
 use std::vec::Vec;
 use wasmparser::{FuncValidator, MemArg, Operator, WasmModuleResources};
 
+/// Given an `Option<T>`, unwrap the inner `T` or, if the option is `None`, set
+/// the state to unreachable and return.
+///
+/// Used in combination with calling `prepare_addr` and `prepare_atomic_addr`
+/// when we can statically determine that a Wasm access will unconditionally
+/// trap.
+macro_rules! unwrap_or_return_unreachable_state {
+    ($state:ident, $value:expr) => {
+        match $value {
+            Some(x) => x,
+            None => {
+                $state.reachable = false;
+                return Ok(());
+            }
+        }
+    };
+}
+
 // Clippy warns about "align: _" but its important to document that the flags field is ignored
 #[cfg_attr(
     feature = "cargo-clippy",
@@ -695,32 +715,50 @@ pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
             translate_load(memarg, ir::Opcode::Load, I8X16, builder, state, environ)?;
         }
         Operator::V128Load8x8S { memarg } => {
-            let (flags, base) = prepare_addr(memarg, 8, builder, state, environ)?;
+            let (flags, base) = unwrap_or_return_unreachable_state!(
+                state,
+                prepare_addr(memarg, 8, builder, state, environ)?
+            );
             let loaded = builder.ins().sload8x8(flags, base, 0);
             state.push1(loaded);
         }
         Operator::V128Load8x8U { memarg } => {
-            let (flags, base) = prepare_addr(memarg, 8, builder, state, environ)?;
+            let (flags, base) = unwrap_or_return_unreachable_state!(
+                state,
+                prepare_addr(memarg, 8, builder, state, environ)?
+            );
             let loaded = builder.ins().uload8x8(flags, base, 0);
             state.push1(loaded);
         }
         Operator::V128Load16x4S { memarg } => {
-            let (flags, base) = prepare_addr(memarg, 8, builder, state, environ)?;
+            let (flags, base) = unwrap_or_return_unreachable_state!(
+                state,
+                prepare_addr(memarg, 8, builder, state, environ)?
+            );
             let loaded = builder.ins().sload16x4(flags, base, 0);
             state.push1(loaded);
         }
         Operator::V128Load16x4U { memarg } => {
-            let (flags, base) = prepare_addr(memarg, 8, builder, state, environ)?;
+            let (flags, base) = unwrap_or_return_unreachable_state!(
+                state,
+                prepare_addr(memarg, 8, builder, state, environ)?
+            );
             let loaded = builder.ins().uload16x4(flags, base, 0);
             state.push1(loaded);
         }
         Operator::V128Load32x2S { memarg } => {
-            let (flags, base) = prepare_addr(memarg, 8, builder, state, environ)?;
+            let (flags, base) = unwrap_or_return_unreachable_state!(
+                state,
+                prepare_addr(memarg, 8, builder, state, environ)?
+            );
             let loaded = builder.ins().sload32x2(flags, base, 0);
             state.push1(loaded);
         }
         Operator::V128Load32x2U { memarg } => {
-            let (flags, base) = prepare_addr(memarg, 8, builder, state, environ)?;
+            let (flags, base) = unwrap_or_return_unreachable_state!(
+                state,
+                prepare_addr(memarg, 8, builder, state, environ)?
+            );
             let loaded = builder.ins().uload32x2(flags, base, 0);
             state.push1(loaded);
         }
@@ -1070,7 +1108,7 @@ pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
             let effective_addr = if memarg.offset == 0 {
                 addr
             } else {
-                let index_type = builder.func.heaps[heap].index_type;
+                let index_type = environ.heaps()[heap].index_type;
                 let offset = builder.ins().iconst(index_type, memarg.offset as i64);
                 builder
                     .ins()
@@ -1096,7 +1134,7 @@ pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
             let effective_addr = if memarg.offset == 0 {
                 addr
             } else {
-                let index_type = builder.func.heaps[heap].index_type;
+                let index_type = environ.heaps()[heap].index_type;
                 let offset = builder.ins().iconst(index_type, memarg.offset as i64);
                 builder
                     .ins()
@@ -2193,14 +2231,19 @@ fn translate_unreachable_operator<FE: FuncEnvironment + ?Sized>(
 /// generate necessary IR to validate that the heap address is correctly
 /// in-bounds, and various parameters are returned describing the valid *native*
 /// heap address if execution reaches that point.
-fn prepare_addr<FE: FuncEnvironment + ?Sized>(
+///
+/// Returns `None` when the Wasm access will unconditionally trap.
+fn prepare_addr<FE>(
     memarg: &MemArg,
     access_size: u8,
     builder: &mut FunctionBuilder,
     state: &mut FuncTranslationState,
     environ: &mut FE,
-) -> WasmResult<(MemFlags, Value)> {
-    let addr = state.pop1();
+) -> WasmResult<Option<(MemFlags, Value)>>
+where
+    FE: FuncEnvironment + ?Sized,
+{
+    let index = state.pop1();
     let heap = state.get_heap(builder.func, memarg.memory, environ)?;
 
     // How exactly the bounds check is performed here and what it's performed
@@ -2276,11 +2319,14 @@ fn prepare_addr<FE: FuncEnvironment + ?Sized>(
     let addr = match u32::try_from(memarg.offset) {
         // If our offset fits within a u32, then we can place the it into the
        // offset immediate of the `heap_addr` instruction.
-        Ok(offset) => {
-            builder
-                .ins()
-                .heap_addr(environ.pointer_type(), heap, addr, offset, access_size)
-        }
+        Ok(offset) => bounds_checks::bounds_check_and_compute_addr(
+            builder,
+            &*environ,
+            &environ.heaps()[heap],
+            index,
+            offset,
+            access_size,
+        ),
 
         // If the offset doesn't fit within a u32, then we can't pass it
         // directly into `heap_addr`.
@@ -2309,17 +2355,26 @@ fn prepare_addr<FE: FuncEnvironment + ?Sized>(
         // relatively odd/rare. In the future if needed we can look into
         // optimizing this more.
         Err(_) => {
-            let index_type = builder.func.heaps[heap].index_type;
+            let index_type = environ.heaps()[heap].index_type;
             let offset = builder.ins().iconst(index_type, memarg.offset as i64);
-            let addr =
+            let adjusted_index =
                 builder
                     .ins()
-                    .uadd_overflow_trap(addr, offset, ir::TrapCode::HeapOutOfBounds);
-            builder
-                .ins()
-                .heap_addr(environ.pointer_type(), heap, addr, 0, access_size)
+                    .uadd_overflow_trap(index, offset, ir::TrapCode::HeapOutOfBounds);
+            bounds_checks::bounds_check_and_compute_addr(
+                builder,
+                &*environ,
+                &environ.heaps()[heap],
+                adjusted_index,
+                0,
+                access_size,
+            )
         }
     };
+    let addr = match addr {
+        None => return Ok(None),
+        Some(a) => a,
+    };
 
     // Note that we don't set `is_aligned` here, even if the load instruction's
     // alignment immediate may says it's aligned, because WebAssembly's
@@ -2334,7 +2389,7 @@ fn prepare_addr<FE: FuncEnvironment + ?Sized>(
     // vmctx, stack) accesses.
     flags.set_heap();
 
-    Ok((flags, addr))
+    Ok(Some((flags, addr)))
 }
 
 fn align_atomic_addr(
@@ -2372,13 +2427,16 @@ fn align_atomic_addr(
     }
 }
 
 /// Like `prepare_addr` but for atomic accesses.
+///
+/// Returns `None` when the Wasm access will unconditionally trap.
 fn prepare_atomic_addr<FE: FuncEnvironment + ?Sized>(
     memarg: &MemArg,
     loaded_bytes: u8,
     builder: &mut FunctionBuilder,
     state: &mut FuncTranslationState,
     environ: &mut FE,
-) -> WasmResult<(MemFlags, Value)> {
+) -> WasmResult<Option<(MemFlags, Value)>> {
     align_atomic_addr(memarg, loaded_bytes, builder, state);
     prepare_addr(memarg, loaded_bytes, builder, state, environ)
 }
@@ -2392,13 +2450,16 @@ fn translate_load<FE: FuncEnvironment + ?Sized>(
     state: &mut FuncTranslationState,
     environ: &mut FE,
 ) -> WasmResult<()> {
-    let (flags, base) = prepare_addr(
-        memarg,
-        mem_op_size(opcode, result_ty),
-        builder,
-        state,
-        environ,
-    )?;
+    let (flags, base) = unwrap_or_return_unreachable_state!(
+        state,
+        prepare_addr(
+            memarg,
+            mem_op_size(opcode, result_ty),
+            builder,
+            state,
+            environ,
+        )?
+    );
     let (load, dfg) = builder
         .ins()
         .Load(opcode, result_ty, flags, Offset32::new(0), base);
@@ -2417,7 +2478,10 @@ fn translate_store<FE: FuncEnvironment + ?Sized>(
     let val = state.pop1();
     let val_ty = builder.func.dfg.value_type(val);
 
-    let (flags, base) = prepare_addr(memarg, mem_op_size(opcode, val_ty), builder, state, environ)?;
+    let (flags, base) = unwrap_or_return_unreachable_state!(
+        state,
+        prepare_addr(memarg, mem_op_size(opcode, val_ty), builder, state, environ)?
+    );
     builder
         .ins()
         .Store(opcode, val_ty, flags, Offset32::new(0), val, base);
@@ -2474,13 +2538,16 @@ fn translate_atomic_rmw<FE: FuncEnvironment + ?Sized>(
         arg2 = builder.ins().ireduce(access_ty, arg2);
     }
 
-    let (flags, addr) = prepare_atomic_addr(
-        memarg,
-        u8::try_from(access_ty.bytes()).unwrap(),
-        builder,
-        state,
-        environ,
-    )?;
+    let (flags, addr) = unwrap_or_return_unreachable_state!(
+        state,
+        prepare_atomic_addr(
+            memarg,
+            u8::try_from(access_ty.bytes()).unwrap(),
+            builder,
+            state,
+            environ,
+        )?
+    );
 
     let mut res = builder.ins().atomic_rmw(access_ty, flags, op, addr, arg2);
     if access_ty != widened_ty {
@@ -2528,13 +2595,16 @@ fn translate_atomic_cas<FE: FuncEnvironment + ?Sized>(
         replacement = builder.ins().ireduce(access_ty, replacement);
     }
 
-    let (flags, addr) = prepare_atomic_addr(
-        memarg,
-        u8::try_from(access_ty.bytes()).unwrap(),
-        builder,
-        state,
-        environ,
-    )?;
+    let (flags, addr) = unwrap_or_return_unreachable_state!(
+        state,
+        prepare_atomic_addr(
+            memarg,
+            u8::try_from(access_ty.bytes()).unwrap(),
+            builder,
+            state,
+            environ,
+        )?
+    );
     let mut res = builder.ins().atomic_cas(flags, addr, expected, replacement);
     if access_ty != widened_ty {
         res = builder.ins().uextend(widened_ty, res);
@@ -2568,13 +2638,16 @@ fn translate_atomic_load<FE: FuncEnvironment + ?Sized>(
     };
     assert!(w_ty_ok && widened_ty.bytes() >= access_ty.bytes());
 
-    let (flags, addr) = prepare_atomic_addr(
-        memarg,
-        u8::try_from(access_ty.bytes()).unwrap(),
-        builder,
-        state,
-        environ,
-    )?;
+    let (flags, addr) = unwrap_or_return_unreachable_state!(
+        state,
+        prepare_atomic_addr(
+            memarg,
+            u8::try_from(access_ty.bytes()).unwrap(),
+            builder,
+            state,
+            environ,
+        )?
+    );
     let mut res = builder.ins().atomic_load(access_ty, flags, addr);
     if access_ty != widened_ty {
         res = builder.ins().uextend(widened_ty, res);
@@ -2614,13 +2687,16 @@ fn translate_atomic_store<FE: FuncEnvironment + ?Sized>(
         data = builder.ins().ireduce(access_ty, data);
     }
 
-    let (flags, addr) = prepare_atomic_addr(
-        memarg,
-        u8::try_from(access_ty.bytes()).unwrap(),
-        builder,
-        state,
-        environ,
-    )?;
+    let (flags, addr) = unwrap_or_return_unreachable_state!(
+        state,
+        prepare_atomic_addr(
+            memarg,
+            u8::try_from(access_ty.bytes()).unwrap(),
+            builder,
+            state,
+            environ,
+        )?
+    );
     builder.ins().atomic_store(flags, data, addr);
     Ok(())
 }
cranelift/wasm/src/code_translator/bounds_checks.rs (new file, 409 lines)
@@ -0,0 +1,409 @@
//! Implementation of Wasm to CLIF memory access translation.
//!
//! Given
//!
//! * a dynamic Wasm memory index operand,
//! * a static offset immediate, and
//! * a static access size,
//!
//! bounds check the memory access and translate it into a native memory access.

use crate::{HeapData, HeapStyle, TargetEnvironment};
use cranelift_codegen::{
    cursor::{Cursor, FuncCursor},
    ir::{self, condcodes::IntCC, InstBuilder, RelSourceLoc},
};
use cranelift_frontend::FunctionBuilder;

/// Helper used to emit bounds checks (as necessary) and compute the native
/// address of a heap access.
///
/// Returns the `ir::Value` holding the native address of the heap access, or
/// `None` if the heap access will unconditionally trap.
pub fn bounds_check_and_compute_addr<TE>(
    builder: &mut FunctionBuilder,
    env: &TE,
    heap: &HeapData,
    // Dynamic operand indexing into the heap.
    index: ir::Value,
    // Static immediate added to the index.
    offset: u32,
    // Static size of the heap access.
    access_size: u8,
) -> Option<ir::Value>
where
    TE: TargetEnvironment + ?Sized,
{
    let index = cast_index_to_pointer_ty(
        index,
        heap.index_type,
        env.pointer_type(),
        &mut builder.cursor(),
    );
    let offset_and_size = offset_plus_size(offset, access_size);
    let spectre_mitigations_enabled = env.heap_access_spectre_mitigation();

    // We need to emit code that will trap (or compute an address that will trap
    // when accessed) if
    //
    //     index + offset + access_size > bound
    //
    // or if the `index + offset + access_size` addition overflows.
    //
    // Note that we ultimately want a 64-bit integer (we only target 64-bit
    // architectures at the moment) and that `offset` is a `u32` and
    // `access_size` is a `u8`. This means that we can add the latter together
    // as `u64`s without fear of overflow, and we only have to be concerned with
    // whether adding in `index` will overflow.
    //
    // Finally, the following right-hand sides of the matches do have a little
    // bit of duplicated code across them, but I think writing it this way is
    // worth it for readability and seeing very clearly each of our cases for
    // different bounds checks and optimizations of those bounds checks. It is
    // intentionally written in a straightforward case-matching style that will
    // hopefully make it easy to port to ISLE one day.
    match heap.style {
        // ====== Dynamic Memories ======
        //
        // 1. First special case for when `offset + access_size == 1`:
        //
        //        index + 1 > bound
        //        ==> index >= bound
        //
        // 1.a. When Spectre mitigations are enabled, avoid duplicating
        //      bounds checks between the mitigations and the regular bounds
        //      checks.
        HeapStyle::Dynamic { bound_gv } if offset_and_size == 1 && spectre_mitigations_enabled => {
            let bound = builder.ins().global_value(env.pointer_type(), bound_gv);
            Some(compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                Some(SpectreOobComparison {
                    cc: IntCC::UnsignedGreaterThanOrEqual,
                    lhs: index,
                    rhs: bound,
                }),
            ))
        }
        // 1.b. Emit explicit `index >= bound` bounds checks.
        HeapStyle::Dynamic { bound_gv } if offset_and_size == 1 => {
            let bound = builder.ins().global_value(env.pointer_type(), bound_gv);
            let oob = builder
                .ins()
                .icmp(IntCC::UnsignedGreaterThanOrEqual, index, bound);
            builder.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
            Some(compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                None,
            ))
        }

        // 2. Second special case for when `offset + access_size <= min_size`.
        //
        //    We know that `bound >= min_size`, so we can do the following
        //    comparison, without fear of the right-hand side wrapping around:
        //
        //        index + offset + access_size > bound
        //        ==> index > bound - (offset + access_size)
        //
        // 2.a. Dedupe bounds checks with Spectre mitigations.
        HeapStyle::Dynamic { bound_gv }
            if offset_and_size <= heap.min_size.into() && spectre_mitigations_enabled =>
        {
            let bound = builder.ins().global_value(env.pointer_type(), bound_gv);
            let adjusted_bound = builder.ins().iadd_imm(bound, -(offset_and_size as i64));
            Some(compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                Some(SpectreOobComparison {
                    cc: IntCC::UnsignedGreaterThan,
                    lhs: index,
                    rhs: adjusted_bound,
                }),
            ))
        }
        // 2.b. Emit explicit `index > bound - (offset + access_size)` bounds
        //      checks.
        HeapStyle::Dynamic { bound_gv } if offset_and_size <= heap.min_size.into() => {
            let bound = builder.ins().global_value(env.pointer_type(), bound_gv);
            let adjusted_bound = builder.ins().iadd_imm(bound, -(offset_and_size as i64));
            let oob = builder
                .ins()
                .icmp(IntCC::UnsignedGreaterThan, index, adjusted_bound);
            builder.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
            Some(compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                None,
            ))
        }

        // 3. General case for dynamic memories:
        //
        //        index + offset + access_size > bound
        //
        //    And we have to handle the overflow case in the left-hand side.
        //
        // 3.a. Dedupe bounds checks with Spectre mitigations.
        HeapStyle::Dynamic { bound_gv } if spectre_mitigations_enabled => {
            let access_size_val = builder
                .ins()
                .iconst(env.pointer_type(), offset_and_size as i64);
            let adjusted_index = builder.ins().uadd_overflow_trap(
                index,
                access_size_val,
                ir::TrapCode::HeapOutOfBounds,
            );
            let bound = builder.ins().global_value(env.pointer_type(), bound_gv);
            Some(compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                Some(SpectreOobComparison {
                    cc: IntCC::UnsignedGreaterThan,
                    lhs: adjusted_index,
                    rhs: bound,
                }),
            ))
        }
        // 3.b. Emit an explicit `index + offset + access_size > bound`
        //      check.
        HeapStyle::Dynamic { bound_gv } => {
            let access_size_val = builder
                .ins()
                .iconst(env.pointer_type(), offset_and_size as i64);
            let adjusted_index = builder.ins().uadd_overflow_trap(
                index,
                access_size_val,
                ir::TrapCode::HeapOutOfBounds,
            );
            let bound = builder.ins().global_value(env.pointer_type(), bound_gv);
            let oob = builder
                .ins()
                .icmp(IntCC::UnsignedGreaterThan, adjusted_index, bound);
            builder.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
            Some(compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                None,
            ))
        }

        // ====== Static Memories ======
        //
        // With static memories we know the size of the heap bound at compile
        // time.
        //
        // 1. First special case: trap immediately if `offset + access_size >
        //    bound`, since we will end up being out-of-bounds regardless of the
        //    given `index`.
        HeapStyle::Static { bound } if offset_and_size > bound.into() => {
            builder.ins().trap(ir::TrapCode::HeapOutOfBounds);
            None
        }

        // 2. Second special case for when we can completely omit explicit
        //    bounds checks for 32-bit static memories.
        //
        //    First, let's rewrite our comparison to move all of the constants
        //    to one side:
        //
        //        index + offset + access_size > bound
        //        ==> index > bound - (offset + access_size)
        //
        //    We know the subtraction on the right-hand side won't wrap because
        //    we didn't hit the first special case.
        //
        //    Additionally, we add our guard pages (if any) to the right-hand
        //    side, since we can rely on the virtual memory subsystem at runtime
        //    to catch out-of-bound accesses within the range `bound .. bound +
        //    guard_size`. So now we are dealing with
        //
        //        index > bound + guard_size - (offset + access_size)
        //
        //    Note that `bound + guard_size` cannot overflow for
        //    correctly-configured heaps, as otherwise the heap wouldn't fit in
        //    a 64-bit memory space.
        //
        //    The complement of our should-this-trap comparison expression is
        //    the should-this-not-trap comparison expression:
        //
        //        index <= bound + guard_size - (offset + access_size)
        //
        //    If we know the right-hand side is greater than or equal to
        //    `u32::MAX`, then
        //
        //        index <= u32::MAX <= bound + guard_size - (offset + access_size)
        //
        //    This expression is always true when the heap is indexed with
        //    32-bit integers because `index` cannot be larger than
        //    `u32::MAX`. This means that `index` is always either in bounds or
        //    within the guard page region, neither of which require emitting an
        //    explicit bounds check.
        HeapStyle::Static { bound }
            if heap.index_type == ir::types::I32
                && u64::from(u32::MAX)
                    <= u64::from(bound) + u64::from(heap.offset_guard_size) - offset_and_size =>
        {
            Some(compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                None,
            ))
        }

        // 3. General case for static memories.
        //
        //    We have to explicitly test whether
        //
        //        index > bound - (offset + access_size)
        //
        //    and trap if so.
        //
        //    Since we have to emit explicit bounds checks, we might as well be
        //    precise, not rely on the virtual memory subsystem at all, and not
        //    factor in the guard pages here.
        //
        // 3.a. Dedupe the Spectre mitigation and the explicit bounds check.
        HeapStyle::Static { bound } if spectre_mitigations_enabled => {
            // NB: this subtraction cannot wrap because we didn't hit the first
            // special case.
            let adjusted_bound = u64::from(bound) - offset_and_size;
            let adjusted_bound = builder
                .ins()
                .iconst(env.pointer_type(), adjusted_bound as i64);
            Some(compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                Some(SpectreOobComparison {
                    cc: IntCC::UnsignedGreaterThan,
                    lhs: index,
                    rhs: adjusted_bound,
                }),
            ))
        }
        // 3.b. Emit the explicit `index > bound - (offset + access_size)`
        //      check.
        HeapStyle::Static { bound } => {
            // See comment in 3.a. above.
            let adjusted_bound = u64::from(bound) - offset_and_size;
            let oob =
                builder
                    .ins()
                    .icmp_imm(IntCC::UnsignedGreaterThan, index, adjusted_bound as i64);
            builder.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
            Some(compute_addr(
                &mut builder.cursor(),
                heap,
                env.pointer_type(),
                index,
                offset,
                None,
            ))
        }
    }
}

fn cast_index_to_pointer_ty(
    index: ir::Value,
    index_ty: ir::Type,
    pointer_ty: ir::Type,
    pos: &mut FuncCursor,
) -> ir::Value {
    if index_ty == pointer_ty {
        return index;
    }
    // Note that using 64-bit heaps on a 32-bit host is not currently supported,
    // would require at least a bounds check here to ensure that the truncation
    // from 64-to-32 bits doesn't lose any upper bits. For now though we're
    // mostly interested in the 32-bit-heaps-on-64-bit-hosts cast.
    assert!(index_ty.bits() < pointer_ty.bits());

    // Convert `index` to `addr_ty`.
    let extended_index = pos.ins().uextend(pointer_ty, index);

    // Add debug value-label alias so that debuginfo can name the extended
    // value as the address
    let loc = pos.srcloc();
    let loc = RelSourceLoc::from_base_offset(pos.func.params.base_srcloc(), loc);
    pos.func
        .stencil
        .dfg
        .add_value_label_alias(extended_index, loc, index);

    extended_index
}

struct SpectreOobComparison {
    cc: IntCC,
    lhs: ir::Value,
    rhs: ir::Value,
}

/// Emit code for the base address computation of a `heap_addr` instruction,
/// without any bounds checks (other than optional Spectre mitigations).
fn compute_addr(
    pos: &mut FuncCursor,
    heap: &HeapData,
    addr_ty: ir::Type,
    index: ir::Value,
    offset: u32,
    // If we are performing Spectre mitigation with conditional selects, the
    // values to compare and the condition code that indicates an out-of bounds
    // condition; on this condition, the conditional move will choose a
    // speculatively safe address (a zero / null pointer) instead.
    spectre_oob_comparison: Option<SpectreOobComparison>,
) -> ir::Value {
    debug_assert_eq!(pos.func.dfg.value_type(index), addr_ty);

    // Add the heap base address base
    let base = pos.ins().global_value(addr_ty, heap.base);

    let final_base = pos.ins().iadd(base, index);
    let final_addr = if offset == 0 {
        final_base
    } else {
        // NB: The addition of the offset immediate must happen *before* the
        // `select_spectre_guard`. If it happens after, then we potentially are
        // letting speculative execution read the whole first 4GiB of memory.
        pos.ins().iadd_imm(final_base, offset as i64)
    };

    if let Some(SpectreOobComparison { cc, lhs, rhs }) = spectre_oob_comparison {
        let null = pos.ins().iconst(addr_ty, 0);
        let cmp = pos.ins().icmp(cc, lhs, rhs);
        pos.ins().select_spectre_guard(cmp, null, final_addr)
    } else {
        final_addr
    }
}

#[inline]
fn offset_plus_size(offset: u32, size: u8) -> u64 {
    // Cannot overflow because we are widening to `u64`.
    offset as u64 + size as u64
}
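
To make the guard-page elision condition in case 2 of the static-memory arms
concrete, here is a small worked check (the numbers are illustrative
assumptions, not taken from this PR: a 4 GiB static bound, 2 GiB of
offset-guard pages, and `offset + access_size == 8`):

    fn main() {
        let bound: u64 = 0x1_0000_0000; // 4 GiB static heap bound
        let guard_size: u64 = 0x8000_0000; // 2 GiB offset-guard region
        let offset_and_size: u64 = 8; // e.g. offset = 4, access_size = 4
        // If u32::MAX <= bound + guard_size - (offset + access_size), then
        // every 32-bit index is either in bounds or lands in the guard
        // pages, so no explicit bounds check needs to be emitted.
        assert!(u64::from(u32::MAX) <= bound + guard_size - offset_and_size);
    }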
@@ -10,8 +10,8 @@ use crate::func_translator::FuncTranslator
 use crate::state::FuncTranslationState;
 use crate::WasmType;
 use crate::{
-    DataIndex, DefinedFuncIndex, ElemIndex, FuncIndex, Global, GlobalIndex, Memory, MemoryIndex,
-    Table, TableIndex, TypeIndex, WasmFuncType, WasmResult,
+    DataIndex, DefinedFuncIndex, ElemIndex, FuncIndex, Global, GlobalIndex, Heap, HeapData,
+    HeapStyle, Memory, MemoryIndex, Table, TableIndex, TypeIndex, WasmFuncType, WasmResult,
 };
 use core::convert::TryFrom;
 use cranelift_codegen::cursor::FuncCursor;
@@ -212,6 +212,9 @@ pub struct DummyFuncEnvironment<'dummy_environment> {
 
     /// Expected reachability data (before/after for each op) to assert. This is used for testing.
     expected_reachability: Option<ExpectedReachability>,
+
+    /// Heaps we have created to implement Wasm linear memories.
+    pub heaps: PrimaryMap<Heap, HeapData>,
 }
 
 impl<'dummy_environment> DummyFuncEnvironment<'dummy_environment> {
@@ -223,6 +226,7 @@ impl<'dummy_environment> DummyFuncEnvironment<'dummy_environment> {
         Self {
             mod_info,
             expected_reachability,
+            heaps: Default::default(),
         }
     }
 
@@ -250,6 +254,10 @@ impl<'dummy_environment> TargetEnvironment for DummyFuncEnvironment<'dummy_envir
     fn target_config(&self) -> TargetFrontendConfig {
         self.mod_info.config
     }
+
+    fn heap_access_spectre_mitigation(&self) -> bool {
+        false
+    }
 }
 
 impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environment> {
@@ -275,7 +283,11 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
         })
     }
 
-    fn make_heap(&mut self, func: &mut ir::Function, _index: MemoryIndex) -> WasmResult<ir::Heap> {
+    fn heaps(&self) -> &PrimaryMap<Heap, HeapData> {
+        &self.heaps
+    }
+
+    fn make_heap(&mut self, func: &mut ir::Function, _index: MemoryIndex) -> WasmResult<Heap> {
         // Create a static heap whose base address is stored at `vmctx+0`.
         let addr = func.create_global_value(ir::GlobalValueData::VMContext);
         let gv = func.create_global_value(ir::GlobalValueData::Load {
@@ -285,12 +297,12 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
             readonly: true,
         });
 
-        Ok(func.create_heap(ir::HeapData {
+        Ok(self.heaps.push(HeapData {
             base: gv,
-            min_size: 0.into(),
-            offset_guard_size: 0x8000_0000.into(),
-            style: ir::HeapStyle::Static {
-                bound: 0x1_0000_0000.into(),
+            min_size: 0,
+            offset_guard_size: 0x8000_0000,
+            style: HeapStyle::Static {
+                bound: 0x1_0000_0000,
             },
             index_type: I32,
         }))
@@ -455,7 +467,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
         &mut self,
         mut pos: FuncCursor,
         _index: MemoryIndex,
-        _heap: ir::Heap,
+        _heap: Heap,
         _val: ir::Value,
     ) -> WasmResult<ir::Value> {
         Ok(pos.ins().iconst(I32, -1))
@@ -465,7 +477,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
         &mut self,
         mut pos: FuncCursor,
         _index: MemoryIndex,
-        _heap: ir::Heap,
+        _heap: Heap,
     ) -> WasmResult<ir::Value> {
         Ok(pos.ins().iconst(I32, -1))
     }
@@ -474,9 +486,9 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
         &mut self,
         _pos: FuncCursor,
         _src_index: MemoryIndex,
-        _src_heap: ir::Heap,
+        _src_heap: Heap,
         _dst_index: MemoryIndex,
-        _dst_heap: ir::Heap,
+        _dst_heap: Heap,
         _dst: ir::Value,
         _src: ir::Value,
         _len: ir::Value,
@@ -488,7 +500,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
         &mut self,
         _pos: FuncCursor,
         _index: MemoryIndex,
-        _heap: ir::Heap,
+        _heap: Heap,
         _dst: ir::Value,
         _val: ir::Value,
         _len: ir::Value,
@@ -500,7 +512,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
         &mut self,
         _pos: FuncCursor,
         _index: MemoryIndex,
-        _heap: ir::Heap,
+        _heap: Heap,
         _seg_index: u32,
         _dst: ir::Value,
         _src: ir::Value,
@@ -625,7 +637,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
         &mut self,
         mut pos: FuncCursor,
         _index: MemoryIndex,
-        _heap: ir::Heap,
+        _heap: Heap,
         _addr: ir::Value,
         _expected: ir::Value,
         _timeout: ir::Value,
@@ -637,7 +649,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
         &mut self,
         mut pos: FuncCursor,
         _index: MemoryIndex,
-        _heap: ir::Heap,
+        _heap: Heap,
         _addr: ir::Value,
         _count: ir::Value,
     ) -> WasmResult<ir::Value> {
@@ -653,6 +665,10 @@ impl TargetEnvironment for DummyEnvironment {
     fn target_config(&self) -> TargetFrontendConfig {
         self.info.config
     }
+
+    fn heap_access_spectre_mitigation(&self) -> bool {
+        false
+    }
 }
 
 impl<'data> ModuleEnvironment<'data> for DummyEnvironment {
@@ -8,14 +8,16 @@
 
 use crate::state::FuncTranslationState;
 use crate::{
-    DataIndex, ElemIndex, FuncIndex, Global, GlobalIndex, Memory, MemoryIndex, SignatureIndex,
-    Table, TableIndex, Tag, TagIndex, TypeIndex, WasmError, WasmFuncType, WasmResult, WasmType,
+    DataIndex, ElemIndex, FuncIndex, Global, GlobalIndex, Heap, HeapData, Memory, MemoryIndex,
+    SignatureIndex, Table, TableIndex, Tag, TagIndex, TypeIndex, WasmError, WasmFuncType,
+    WasmResult, WasmType,
 };
 use core::convert::From;
 use cranelift_codegen::cursor::FuncCursor;
 use cranelift_codegen::ir::immediates::Offset32;
 use cranelift_codegen::ir::{self, InstBuilder};
 use cranelift_codegen::isa::TargetFrontendConfig;
+use cranelift_entity::PrimaryMap;
 use cranelift_frontend::FunctionBuilder;
 use std::boxed::Box;
 use std::string::ToString;
@@ -46,6 +48,9 @@ pub trait TargetEnvironment {
     /// Get the information needed to produce Cranelift IR for the given target.
     fn target_config(&self) -> TargetFrontendConfig;
 
+    /// Whether to enable Spectre mitigations for heap accesses.
+    fn heap_access_spectre_mitigation(&self) -> bool;
+
     /// Get the Cranelift integer type to use for native pointers.
     ///
     /// This returns `I64` for 64-bit architectures and `I32` for 32-bit architectures.
@@ -112,11 +117,20 @@ pub trait FuncEnvironment: TargetEnvironment {
         index: GlobalIndex,
     ) -> WasmResult<GlobalVariable>;
 
+    /// Get the heaps for this function environment.
+    ///
+    /// The returned map should provide heap format details (encoded in
+    /// `HeapData`) for each `Heap` that was previously returned by
+    /// `make_heap()`. The translator will first call make_heap for each Wasm
+    /// memory, and then later when translating code, will invoke `heaps()` to
+    /// learn how to access the environment's implementation of each memory.
+    fn heaps(&self) -> &PrimaryMap<Heap, HeapData>;
+
     /// Set up the necessary preamble definitions in `func` to access the linear memory identified
     /// by `index`.
     ///
     /// The index space covers both imported and locally declared memories.
-    fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult<ir::Heap>;
+    fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult<Heap>;
 
     /// Set up the necessary preamble definitions in `func` to access the table identified
     /// by `index`.
@@ -206,7 +220,7 @@ pub trait FuncEnvironment: TargetEnvironment {
         &mut self,
         pos: FuncCursor,
         index: MemoryIndex,
-        heap: ir::Heap,
+        heap: Heap,
         val: ir::Value,
     ) -> WasmResult<ir::Value>;
 
@@ -220,7 +234,7 @@ pub trait FuncEnvironment: TargetEnvironment {
         &mut self,
         pos: FuncCursor,
         index: MemoryIndex,
-        heap: ir::Heap,
+        heap: Heap,
     ) -> WasmResult<ir::Value>;
 
     /// Translate a `memory.copy` WebAssembly instruction.
@@ -231,9 +245,9 @@ pub trait FuncEnvironment: TargetEnvironment {
         &mut self,
         pos: FuncCursor,
         src_index: MemoryIndex,
-        src_heap: ir::Heap,
+        src_heap: Heap,
         dst_index: MemoryIndex,
-        dst_heap: ir::Heap,
+        dst_heap: Heap,
         dst: ir::Value,
         src: ir::Value,
         len: ir::Value,
@@ -247,7 +261,7 @@ pub trait FuncEnvironment: TargetEnvironment {
         &mut self,
         pos: FuncCursor,
         index: MemoryIndex,
-        heap: ir::Heap,
+        heap: Heap,
         dst: ir::Value,
         val: ir::Value,
         len: ir::Value,
@@ -263,7 +277,7 @@ pub trait FuncEnvironment: TargetEnvironment {
         &mut self,
         pos: FuncCursor,
         index: MemoryIndex,
-        heap: ir::Heap,
+        heap: Heap,
         seg_index: u32,
         dst: ir::Value,
         src: ir::Value,
@@ -420,7 +434,7 @@ pub trait FuncEnvironment: TargetEnvironment {
         &mut self,
         pos: FuncCursor,
         index: MemoryIndex,
-        heap: ir::Heap,
+        heap: Heap,
         addr: ir::Value,
         expected: ir::Value,
         timeout: ir::Value,
@@ -440,7 +454,7 @@ pub trait FuncEnvironment: TargetEnvironment {
         &mut self,
         pos: FuncCursor,
         index: MemoryIndex,
-        heap: ir::Heap,
+        heap: Heap,
         addr: ir::Value,
         count: ir::Value,
     ) -> WasmResult<ir::Value>;
cranelift/wasm/src/heap.rs (new file, 99 lines)
@@ -0,0 +1,99 @@
//! Heaps to implement WebAssembly linear memories.

use cranelift_codegen::ir::{GlobalValue, Type};
use cranelift_entity::entity_impl;

/// An opaque reference to a [`HeapData`][crate::HeapData].
///
/// While the order is stable, it is arbitrary.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[cfg_attr(feature = "enable-serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Heap(u32);
entity_impl!(Heap, "heap");

/// A heap implementing a WebAssembly linear memory.
///
/// Code compiled from WebAssembly runs in a sandbox where it can't access all
/// process memory. Instead, it is given a small set of memory areas to work in,
/// and all accesses are bounds checked. `cranelift-wasm` models this through
/// the concept of *heaps*.
///
/// Heap addresses can be smaller than the native pointer size, for example
/// unsigned `i32` offsets on a 64-bit architecture.
///
/// A heap appears as three consecutive ranges of address space:
///
/// 1. The *mapped pages* are the accessible memory range in the heap. A heap
///    may have a minimum guaranteed size which means that some mapped pages are
///    always present.
///
/// 2. The *unmapped pages* is a possibly empty range of address space that may
///    be mapped in the future when the heap is grown. They are addressable but
///    not accessible.
///
/// 3. The *offset-guard pages* is a range of address space that is guaranteed
///    to always cause a trap when accessed. It is used to optimize bounds
///    checking for heap accesses with a shared base pointer. They are
///    addressable but not accessible.
///
/// The *heap bound* is the total size of the mapped and unmapped pages. This is
/// the bound that `heap_addr` checks against. Memory accesses inside the heap
/// bounds can trap if they hit an unmapped page (which is not accessible).
///
/// Two styles of heaps are supported, *static* and *dynamic*. They behave
/// differently when resized.
///
/// #### Static heaps
///
/// A *static heap* starts out with all the address space it will ever need, so
/// it never moves to a different address. At the base address is a number of
/// mapped pages corresponding to the heap's current size. Then follows a number
/// of unmapped pages where the heap can grow up to its maximum size. After the
/// unmapped pages follow the offset-guard pages which are also guaranteed to
/// generate a trap when accessed.
///
/// #### Dynamic heaps
///
/// A *dynamic heap* can be relocated to a different base address when it is
/// resized, and its bound can move dynamically. The offset-guard pages move
/// when the heap is resized. The bound of a dynamic heap is stored in a global
/// value.
#[derive(Clone, PartialEq, Hash)]
#[cfg_attr(feature = "enable-serde", derive(serde::Serialize, serde::Deserialize))]
pub struct HeapData {
    /// The address of the start of the heap's storage.
    pub base: GlobalValue,

    /// Guaranteed minimum heap size in bytes. Heap accesses before `min_size`
    /// don't need bounds checking.
    pub min_size: u64,

    /// Size in bytes of the offset-guard pages following the heap.
    pub offset_guard_size: u64,

    /// Heap style, with additional style-specific info.
    pub style: HeapStyle,

    /// The index type for the heap.
    pub index_type: Type,
}

/// Style of heap including style-specific information.
#[derive(Clone, PartialEq, Hash)]
#[cfg_attr(feature = "enable-serde", derive(serde::Serialize, serde::Deserialize))]
pub enum HeapStyle {
    /// A dynamic heap can be relocated to a different base address when it is
    /// grown.
    Dynamic {
        /// Global value providing the current bound of the heap in bytes.
        bound_gv: GlobalValue,
    },

    /// A static heap has a fixed base address and a number of not-yet-allocated
    /// pages before the offset-guard pages.
    Static {
        /// Heap bound in bytes. The offset-guard pages are allocated after the
        /// bound.
        bound: u64,
    },
}
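
For comparison with the static heap that `DummyFuncEnvironment::make_heap`
builds in the diff above, a dynamic heap carries its bound in a global value.
A minimal sketch, assuming a hypothetical vmctx layout with the base pointer
at offset 0 and the current byte bound at offset 8 (neither offset is from
this PR):

    use cranelift_codegen::ir::{self, immediates::Offset32};
    use cranelift_entity::PrimaryMap;

    fn make_dynamic_heap(
        func: &mut ir::Function,
        heaps: &mut PrimaryMap<Heap, HeapData>,
        pointer_type: ir::Type,
    ) -> Heap {
        let vmctx = func.create_global_value(ir::GlobalValueData::VMContext);
        let base = func.create_global_value(ir::GlobalValueData::Load {
            base: vmctx,
            offset: Offset32::new(0),
            global_type: pointer_type,
            // Not readonly: a dynamic heap may be relocated when grown.
            readonly: false,
        });
        let bound_gv = func.create_global_value(ir::GlobalValueData::Load {
            base: vmctx,
            offset: Offset32::new(8),
            global_type: pointer_type,
            readonly: false, // the bound changes as the heap grows
        });
        heaps.push(HeapData {
            base,
            min_size: 0,
            offset_guard_size: 0,
            style: HeapStyle::Dynamic { bound_gv },
            index_type: ir::types::I32,
        })
    }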
@@ -51,6 +51,7 @@ use std::collections::{
 mod code_translator;
 mod environ;
 mod func_translator;
+mod heap;
 mod module_translator;
 mod sections_translator;
 mod state;
@@ -61,6 +62,7 @@ pub use crate::environ::{
     GlobalVariable, ModuleEnvironment, TargetEnvironment,
 };
 pub use crate::func_translator::FuncTranslator;
+pub use crate::heap::{Heap, HeapData, HeapStyle};
 pub use crate::module_translator::translate_module;
 pub use crate::state::FuncTranslationState;
 pub use crate::translation_utils::*;
@@ -4,7 +4,7 @@
 //! value and control stacks during the translation of a single function.
 
 use crate::environ::{FuncEnvironment, GlobalVariable};
-use crate::{FuncIndex, GlobalIndex, MemoryIndex, TableIndex, TypeIndex, WasmResult};
+use crate::{FuncIndex, GlobalIndex, Heap, MemoryIndex, TableIndex, TypeIndex, WasmResult};
 use crate::{HashMap, Occupied, Vacant};
 use cranelift_codegen::ir::{self, Block, Inst, Value};
 use std::vec::Vec;
@@ -225,7 +225,7 @@ pub struct FuncTranslationState {
     globals: HashMap<GlobalIndex, GlobalVariable>,
 
     // Map of heaps that have been created by `FuncEnvironment::make_heap`.
-    heaps: HashMap<MemoryIndex, ir::Heap>,
+    memory_to_heap: HashMap<MemoryIndex, Heap>,
 
     // Map of tables that have been created by `FuncEnvironment::make_table`.
     pub(crate) tables: HashMap<TableIndex, ir::Table>,
@@ -258,7 +258,7 @@ impl FuncTranslationState {
             control_stack: Vec::new(),
             reachable: true,
             globals: HashMap::new(),
-            heaps: HashMap::new(),
+            memory_to_heap: HashMap::new(),
             tables: HashMap::new(),
             signatures: HashMap::new(),
             functions: HashMap::new(),
@@ -270,7 +270,7 @@ impl FuncTranslationState {
         debug_assert!(self.control_stack.is_empty());
         self.reachable = true;
         self.globals.clear();
-        self.heaps.clear();
+        self.memory_to_heap.clear();
         self.tables.clear();
         self.signatures.clear();
         self.functions.clear();
@@ -462,9 +462,9 @@ impl FuncTranslationState {
         func: &mut ir::Function,
         index: u32,
         environ: &mut FE,
-    ) -> WasmResult<ir::Heap> {
+    ) -> WasmResult<Heap> {
         let index = MemoryIndex::from_u32(index);
-        match self.heaps.entry(index) {
+        match self.memory_to_heap.entry(index) {
             Occupied(entry) => Ok(*entry.get()),
             Vacant(entry) => Ok(*entry.insert(environ.make_heap(func, index)?)),
         }