Implement RFC 11: Redesigning Wasmtime's APIs (#2897)

Implement Wasmtime's new API as designed by RFC 11. This is quite a large commit that has seen a lot of external discussion, so for more information it's best to read the RFC thread and the PR thread.
Alex Crichton
2021-06-03 09:10:53 -05:00
committed by GitHub
parent a5a28b1c5b
commit 7a1b7cdf92
233 changed files with 13349 additions and 11997 deletions
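
As a rough sketch of the direction this redesign takes, the embedder-facing API after RFC 11 looks approximately like the following (assuming the post-RFC-11 `wasmtime` crate plus `anyhow` for error handling; the names and signatures are illustrative of that era's public API, not taken from this diff):

use wasmtime::{Engine, Instance, Module, Store};

fn main() -> anyhow::Result<()> {
    let engine = Engine::default();
    let module = Module::new(&engine, r#"(module (func (export "run")))"#)?;

    // Host state now lives inside the `Store` itself rather than behind
    // `Rc`/`RefCell`, and most operations take the store explicitly.
    let mut store = Store::new(&engine, ());

    let instance = Instance::new(&mut store, &module, &[])?;
    let run = instance.get_typed_func::<(), (), _>(&mut store, "run")?;
    run.call(&mut store, ())?;
    Ok(())
}

The recurring theme in the diff below follows from this shape: interior mutability (`RefCell`, `Cell`) and single-threaded reference counting (`Rc`) are replaced with `&mut self` methods, atomics, and `Send + Sync` bounds so that a `Store` and everything it owns can move between threads.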


@@ -1,8 +1,6 @@
use crate::vmcontext::{
VMCallerCheckedAnyfunc, VMContext, VMGlobalDefinition, VMMemoryDefinition, VMTableDefinition,
};
use crate::RuntimeInstance;
use std::any::Any;
use std::ptr::NonNull;
use wasmtime_environ::wasm::Global;
use wasmtime_environ::{MemoryPlan, TablePlan};
@@ -20,16 +18,10 @@ pub enum Export {
/// A global export value.
Global(ExportGlobal),
/// An instance
Instance(RuntimeInstance),
/// A module
Module(Box<dyn Any>),
}
/// A function export value.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Copy)]
pub struct ExportFunction {
/// The `VMCallerCheckedAnyfunc` for this exported function.
///
@@ -38,6 +30,12 @@ pub struct ExportFunction {
pub anyfunc: NonNull<VMCallerCheckedAnyfunc>,
}
// It's part of the contract of using `ExportFunction` that synchronization
// properties are upheld, so declare that, despite the raw pointers inside,
// this is send/sync.
unsafe impl Send for ExportFunction {}
unsafe impl Sync for ExportFunction {}
impl From<ExportFunction> for Export {
fn from(func: ExportFunction) -> Export {
Export::Function(func)
@@ -55,6 +53,10 @@ pub struct ExportTable {
pub table: TablePlan,
}
// See docs on send/sync for `ExportFunction` above.
unsafe impl Send for ExportTable {}
unsafe impl Sync for ExportTable {}
impl From<ExportTable> for Export {
fn from(func: ExportTable) -> Export {
Export::Table(func)
@@ -72,6 +74,10 @@ pub struct ExportMemory {
pub memory: MemoryPlan,
}
// See docs on send/sync for `ExportFunction` above.
unsafe impl Send for ExportMemory {}
unsafe impl Sync for ExportMemory {}
impl From<ExportMemory> for Export {
fn from(func: ExportMemory) -> Export {
Export::Memory(func)
@@ -89,6 +95,10 @@ pub struct ExportGlobal {
pub global: Global,
}
// See docs on send/sync for `ExportFunction` above.
unsafe impl Send for ExportGlobal {}
unsafe impl Sync for ExportGlobal {}
impl From<ExportGlobal> for Export {
fn from(func: ExportGlobal) -> Export {
Export::Global(func)
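
// A compile-time check in the same `_assert_send_sync` style this commit uses
// elsewhere, confirming the export wrappers above are Send + Sync once the
// unsafe impls are in place. This particular function is illustrative and not
// necessarily present in the real file.
fn _assert_exports_send_sync() {
    fn _assert<T: Send + Sync>() {}
    _assert::<ExportFunction>();
    _assert::<ExportTable>();
    _assert::<ExportMemory>();
    _assert::<ExportGlobal>();
}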


@@ -100,13 +100,14 @@
//! <https://openresearch-repository.anu.edu.au/bitstream/1885/42030/2/hon-thesis.pdf>
use std::any::Any;
use std::cell::{Cell, RefCell, UnsafeCell};
use std::cmp::Ordering;
use std::cell::UnsafeCell;
use std::cmp;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::Deref;
use std::ptr::{self, NonNull};
use std::sync::atomic::{self, AtomicUsize, Ordering};
use std::{alloc::Layout, sync::Arc};
use wasmtime_environ::ir::StackMap;
@@ -145,17 +146,16 @@ use wasmtime_environ::ir::StackMap;
/// let file = std::fs::File::create("some/file/path")?;
///
/// // Wrap the file up as an `VMExternRef` that can be passed to Wasm.
/// let extern_ref_to_file = VMExternRef::new(RefCell::new(file));
/// let extern_ref_to_file = VMExternRef::new(file);
///
/// // `VMExternRef`s dereference to `dyn Any`, so you can use `Any` methods to
/// // perform runtime type checks and downcasts.
///
/// assert!(extern_ref_to_file.is::<RefCell<std::fs::File>>());
/// assert!(extern_ref_to_file.is::<std::fs::File>());
/// assert!(!extern_ref_to_file.is::<String>());
///
/// if let Some(file) = extern_ref_to_file.downcast_ref::<RefCell<std::fs::File>>() {
/// if let Some(mut file) = extern_ref_to_file.downcast_ref::<std::fs::File>() {
/// use std::io::Write;
/// let mut file = file.borrow_mut();
/// writeln!(&mut file, "Hello, `VMExternRef`!")?;
/// }
/// # Ok(())
@@ -165,6 +165,10 @@ use wasmtime_environ::ir::StackMap;
#[repr(transparent)]
pub struct VMExternRef(NonNull<VMExternData>);
// Data contained is always Send+Sync so these should be safe.
unsafe impl Send for VMExternRef {}
unsafe impl Sync for VMExternRef {}
#[repr(C)]
pub(crate) struct VMExternData {
// Implicit, dynamically-sized member that always preceded an
@@ -180,11 +184,11 @@ pub(crate) struct VMExternData {
/// Note: this field's offset must be kept in sync with
/// `wasmtime_environ::VMOffsets::vm_extern_data_ref_count()` which is
/// currently always zero.
ref_count: UnsafeCell<usize>,
ref_count: AtomicUsize,
/// Always points to the implicit, dynamically-sized `value` member that
/// precedes this `VMExternData`.
value_ptr: NonNull<dyn Any>,
value_ptr: NonNull<dyn Any + Send + Sync>,
}
impl Clone for VMExternRef {
@@ -199,13 +203,23 @@ impl Drop for VMExternRef {
#[inline]
fn drop(&mut self) {
let data = self.extern_data();
data.decrement_ref_count();
if data.get_ref_count() == 0 {
// Drop our live reference to `data` before we drop it itself.
drop(data);
unsafe {
VMExternData::drop_and_dealloc(self.0);
}
// Note that the memory orderings here also match the standard library
// itself. More documentation is available in the implementation of
// `Arc`, but the general idea is that this is a special pattern allowed
// by the C standard with atomic orderings where we "release" for all
// the decrements and only the final decrementer performs an acquire
// fence. This properly ensures that the final thread, which actually
// destroys the data, sees all the updates from all other threads.
if data.ref_count.fetch_sub(1, Ordering::Release) != 1 {
return;
}
atomic::fence(Ordering::Acquire);
// Drop our live reference to `data` before we drop it itself.
drop(data);
unsafe {
VMExternData::drop_and_dealloc(self.0);
}
}
}
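
// A self-contained toy of the `Arc`-style ordering pattern the comments in the
// new `drop` above describe: `Relaxed` increments, `Release` decrements, and a
// single `Acquire` fence performed only by whichever thread drops the last
// reference. The type and method names here are illustrative, not part of the
// diff.
use std::sync::atomic::{fence, AtomicUsize, Ordering};

struct ToyRefCount {
    count: AtomicUsize,
}

impl ToyRefCount {
    fn new() -> ToyRefCount {
        ToyRefCount { count: AtomicUsize::new(1) }
    }

    fn clone_ref(&self) {
        // A new handle for our own thread needs no cross-thread ordering.
        self.count.fetch_add(1, Ordering::Relaxed);
    }

    // Returns `true` if the caller released the final reference and is now
    // responsible for destroying the shared data.
    fn drop_ref(&self) -> bool {
        if self.count.fetch_sub(1, Ordering::Release) != 1 {
            return false;
        }
        // Synchronize with every earlier `Release` decrement before teardown.
        fence(Ordering::Acquire);
        true
    }
}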
@@ -241,7 +255,7 @@ impl VMExternData {
// resides within after this block.
let (alloc_ptr, layout) = {
let data = data.as_mut();
debug_assert_eq!(data.get_ref_count(), 0);
debug_assert_eq!(data.ref_count.load(Ordering::SeqCst), 0);
// Same thing, but for the dropping the reference to `value` before
// we drop it itself.
@@ -260,25 +274,16 @@ impl VMExternData {
std::alloc::dealloc(alloc_ptr.as_ptr(), layout);
}
#[inline]
fn get_ref_count(&self) -> usize {
unsafe { *self.ref_count.get() }
}
#[inline]
fn increment_ref_count(&self) {
unsafe {
let count = self.ref_count.get();
*count += 1;
}
}
#[inline]
fn decrement_ref_count(&self) {
unsafe {
let count = self.ref_count.get();
*count -= 1;
}
// This is only used during cloning operations, and like the standard
// library we use `Relaxed` here. The rationale is better documented in
// libstd's implementation of `Arc`, but the general gist is that we're
// creating a new pointer for our own thread, so there's no need to have
// any synchronization with orderings. The synchronization with other
// threads with respect to orderings happens when the pointer is sent to
// another thread.
self.ref_count.fetch_add(1, Ordering::Relaxed);
}
}
@@ -293,7 +298,7 @@ impl VMExternRef {
/// Wrap the given value inside an `VMExternRef`.
pub fn new<T>(value: T) -> VMExternRef
where
T: 'static + Any,
T: 'static + Any + Send + Sync,
{
VMExternRef::new_with(|| value)
}
@@ -301,7 +306,7 @@ impl VMExternRef {
/// Construct a new `VMExternRef` in place by invoking `make_value`.
pub fn new_with<T>(make_value: impl FnOnce() -> T) -> VMExternRef
where
T: 'static + Any,
T: 'static + Any + Send + Sync,
{
unsafe {
let (layout, footer_offset) =
@@ -315,19 +320,14 @@ impl VMExternRef {
let value_ptr = alloc_ptr.cast::<T>();
ptr::write(value_ptr.as_ptr(), make_value());
let value_ref: &T = value_ptr.as_ref();
let value_ref: &dyn Any = value_ref as _;
let value_ptr: *const dyn Any = value_ref as _;
let value_ptr: *mut dyn Any = value_ptr as _;
let value_ptr = NonNull::new_unchecked(value_ptr);
let extern_data_ptr =
alloc_ptr.cast::<u8>().as_ptr().add(footer_offset) as *mut VMExternData;
ptr::write(
extern_data_ptr,
VMExternData {
ref_count: UnsafeCell::new(1),
value_ptr,
ref_count: AtomicUsize::new(1),
// Cast from `*mut T` to `*mut dyn Any` here.
value_ptr: NonNull::new_unchecked(value_ptr.as_ptr()),
},
);
@@ -395,8 +395,11 @@ impl VMExternRef {
}
/// Get the strong reference count for this `VMExternRef`.
///
/// Note that this loads with a `SeqCst` ordering to synchronize with other
/// threads.
pub fn strong_count(&self) -> usize {
self.extern_data().get_ref_count()
self.extern_data().ref_count.load(Ordering::SeqCst)
}
#[inline]
@@ -439,7 +442,7 @@ impl VMExternRef {
/// semantics, and so only pointers are compared, and doesn't use any `Cmp`
/// or `PartialCmp` implementation of the pointed-to values.
#[inline]
pub fn cmp(a: &Self, b: &Self) -> Ordering {
pub fn cmp(a: &Self, b: &Self) -> cmp::Ordering {
let a = a.0.as_ptr() as usize;
let b = b.0.as_ptr() as usize;
a.cmp(&b)
@@ -486,8 +489,42 @@ type TableElem = UnsafeCell<Option<VMExternRef>>;
///
/// Under the covers, this is a simple bump allocator that allows duplicate
/// entries. Deduplication happens at GC time.
#[repr(C)]
#[repr(C)] // `alloc` must be the first member, it's accessed from JIT code
pub struct VMExternRefActivationsTable {
/// Structures used to perform fast bump allocation of storage of externref
/// values.
///
/// This is the only member of this structure accessed from JIT code.
alloc: VMExternRefTableAlloc,
/// When unioned with `chunk`, this is an over-approximation of the GC roots
/// on the stack, inside Wasm frames.
///
/// This is used by slow-path insertion, and when a GC cycle finishes, is
/// re-initialized to the just-discovered precise set of stack roots (which
/// immediately becomes an over-approximation again as soon as Wasm runs and
/// potentially drops references).
over_approximated_stack_roots: HashSet<VMExternRefWithTraits>,
/// The precise set of on-stack, inside-Wasm GC roots that we discover via
/// walking the stack and interpreting stack maps.
///
/// This is *only* used inside the `gc` function, and is empty otherwise. It
/// is just part of this struct so that we can reuse the allocation, rather
/// than create a new hash set every GC.
precise_stack_roots: HashSet<VMExternRefWithTraits>,
/// A pointer to the youngest host stack frame before we called
/// into Wasm for the first time. When walking the stack in garbage
/// collection, if we don't find this frame, then we failed to walk every
/// Wasm stack frame, which means we failed to find all on-stack,
/// inside-a-Wasm-frame roots, and doing a GC could lead to freeing one of
/// those missed roots, and use after free.
stack_canary: Option<usize>,
}
#[repr(C)] // this is accessed from JIT code
struct VMExternRefTableAlloc {
/// Bump-allocation finger within the `chunk`.
///
/// NB: this is an `UnsafeCell` because it is written to by compiled Wasm
@@ -501,32 +538,21 @@ pub struct VMExternRefActivationsTable {
end: NonNull<TableElem>,
/// Bump allocation chunk that stores fast-path insertions.
///
/// This is not accessed from JIT code.
chunk: Box<[TableElem]>,
}
/// When unioned with `chunk`, this is an over-approximation of the GC roots
/// on the stack, inside Wasm frames.
///
/// This is used by slow-path insertion, and when a GC cycle finishes, is
/// re-initialized to the just-discovered precise set of stack roots (which
/// immediately becomes an over-approximation again as soon as Wasm runs and
/// potentially drops references).
over_approximated_stack_roots: RefCell<HashSet<VMExternRefWithTraits>>,
// This gets around the usage of `UnsafeCell` throughout the internals of this
// allocator, but the storage should all be Send/Sync and synchronization isn't
// necessary since operations require `&mut self`.
unsafe impl Send for VMExternRefTableAlloc {}
unsafe impl Sync for VMExternRefTableAlloc {}
/// The precise set of on-stack, inside-Wasm GC roots that we discover via
/// walking the stack and interpreting stack maps.
///
/// This is *only* used inside the `gc` function, and is empty otherwise. It
/// is just part of this struct so that we can reuse the allocation, rather
/// than create a new hash set every GC.
precise_stack_roots: RefCell<HashSet<VMExternRefWithTraits>>,
/// A pointer to the youngest host stack frame before we called
/// into Wasm for the first time. When walking the stack in garbage
/// collection, if we don't find this frame, then we failed to walk every
/// Wasm stack frame, which means we failed to find all on-stack,
/// inside-a-Wasm-frame roots, and doing a GC could lead to freeing one of
/// those missed roots, and use after free.
stack_canary: Cell<Option<usize>>,
fn _assert_send_sync() {
fn _assert<T: Send + Sync>() {}
_assert::<VMExternRefActivationsTable>();
_assert::<VMExternRef>();
}
impl VMExternRefActivationsTable {
@@ -539,12 +565,14 @@ impl VMExternRefActivationsTable {
let end = unsafe { next.add(chunk.len()) };
VMExternRefActivationsTable {
next: UnsafeCell::new(NonNull::new(next).unwrap()),
end: NonNull::new(end).unwrap(),
chunk,
over_approximated_stack_roots: RefCell::new(HashSet::with_capacity(Self::CHUNK_SIZE)),
precise_stack_roots: RefCell::new(HashSet::with_capacity(Self::CHUNK_SIZE)),
stack_canary: Cell::new(None),
alloc: VMExternRefTableAlloc {
next: UnsafeCell::new(NonNull::new(next).unwrap()),
end: NonNull::new(end).unwrap(),
chunk,
},
over_approximated_stack_roots: HashSet::with_capacity(Self::CHUNK_SIZE),
precise_stack_roots: HashSet::with_capacity(Self::CHUNK_SIZE),
stack_canary: None,
}
}
@@ -563,10 +591,10 @@ impl VMExternRefActivationsTable {
/// `insert_slow_path` to infallibly insert the reference (potentially
/// allocating additional space in the table to hold it).
#[inline]
pub fn try_insert(&self, externref: VMExternRef) -> Result<(), VMExternRef> {
pub fn try_insert(&mut self, externref: VMExternRef) -> Result<(), VMExternRef> {
unsafe {
let next = *self.next.get();
if next == self.end {
let next = *self.alloc.next.get();
if next == self.alloc.end {
return Err(externref);
}
@@ -577,8 +605,8 @@ impl VMExternRefActivationsTable {
ptr::write(next.as_ptr(), UnsafeCell::new(Some(externref)));
let next = NonNull::new_unchecked(next.as_ptr().add(1));
debug_assert!(next <= self.end);
*self.next.get() = next;
debug_assert!(next <= self.alloc.end);
*self.alloc.next.get() = next;
Ok(())
}
@@ -592,7 +620,7 @@ impl VMExternRefActivationsTable {
/// The same as `gc`.
#[inline]
pub unsafe fn insert_with_gc(
&self,
&mut self,
externref: VMExternRef,
module_info_lookup: &dyn ModuleInfoLookup,
) {
@@ -603,7 +631,7 @@ impl VMExternRefActivationsTable {
#[inline(never)]
unsafe fn gc_and_insert_slow(
&self,
&mut self,
externref: VMExternRef,
module_info_lookup: &dyn ModuleInfoLookup,
) {
@@ -612,27 +640,26 @@ impl VMExternRefActivationsTable {
// Might as well insert right into the hash set, rather than the bump
// chunk, since we are already on a slow path and we get de-duplication
// this way.
let mut roots = self.over_approximated_stack_roots.borrow_mut();
roots.insert(VMExternRefWithTraits(externref));
self.over_approximated_stack_roots
.insert(VMExternRefWithTraits(externref));
}
fn num_filled_in_bump_chunk(&self) -> usize {
let next = unsafe { *self.next.get() };
let bytes_unused = (self.end.as_ptr() as usize) - (next.as_ptr() as usize);
let next = unsafe { *self.alloc.next.get() };
let bytes_unused = (self.alloc.end.as_ptr() as usize) - (next.as_ptr() as usize);
let slots_unused = bytes_unused / mem::size_of::<TableElem>();
self.chunk.len().saturating_sub(slots_unused)
self.alloc.chunk.len().saturating_sub(slots_unused)
}
fn elements(&self, mut f: impl FnMut(&VMExternRef)) {
let roots = self.over_approximated_stack_roots.borrow();
for elem in roots.iter() {
for elem in self.over_approximated_stack_roots.iter() {
f(&elem.0);
}
// The bump chunk is not all the way full, so we only iterate over its
// filled-in slots.
let num_filled = self.num_filled_in_bump_chunk();
for slot in self.chunk.iter().take(num_filled) {
for slot in self.alloc.chunk.iter().take(num_filled) {
if let Some(elem) = unsafe { &*slot.get() } {
f(elem);
}
@@ -649,36 +676,20 @@ impl VMExternRefActivationsTable {
/// Sweep the bump allocation table after we've discovered our precise stack
/// roots.
fn sweep(&self, precise_stack_roots: &mut HashSet<VMExternRefWithTraits>) {
// Swap out the over-approximated set so we can distinguish between the
// over-approximation before we started sweeping, and any new elements
// we might insert into the table because of re-entering Wasm via an
// `externref`'s destructor. The new elements must be kept alive for
// memory safety, but we keep this set around because we likely want to
// reuse its allocation/capacity for the new `precise_stack_roots` in
// the next GC cycle.
let mut old_over_approximated = mem::replace(
&mut *self.over_approximated_stack_roots.borrow_mut(),
Default::default(),
);
fn sweep(&mut self) {
// Sweep our bump chunk.
//
// Just in case an `externref` destructor calls back into Wasm, passing
// more `externref`s into that Wasm, which requires the `externref`s to
// be inserted into this `VMExternRefActivationsTable`, make sure `next
// == end` so that they go into the over-approximation hash set.
let num_filled = self.num_filled_in_bump_chunk();
unsafe {
*self.next.get() = self.end;
*self.alloc.next.get() = self.alloc.end;
}
for slot in self.chunk.iter().take(num_filled) {
for slot in self.alloc.chunk.iter().take(num_filled) {
unsafe {
*slot.get() = None;
}
}
debug_assert!(
self.chunk
self.alloc
.chunk
.iter()
.all(|slot| unsafe { (*slot.get()).as_ref().is_none() }),
"after sweeping the bump chunk, all slots should be `None`"
@@ -686,33 +697,25 @@ impl VMExternRefActivationsTable {
// Reset our `next` finger to the start of the bump allocation chunk.
unsafe {
let next = self.chunk.as_ptr() as *mut TableElem;
let next = self.alloc.chunk.as_ptr() as *mut TableElem;
debug_assert!(!next.is_null());
*self.next.get() = NonNull::new_unchecked(next);
*self.alloc.next.get() = NonNull::new_unchecked(next);
}
// The current `precise_stack_roots` becomes our new over-approximated
// set for the next GC cycle.
let mut over_approximated = self.over_approximated_stack_roots.borrow_mut();
mem::swap(&mut *precise_stack_roots, &mut *over_approximated);
mem::swap(
&mut self.precise_stack_roots,
&mut self.over_approximated_stack_roots,
);
// And finally, the new `precise_stack_roots` should be cleared and
// remain empty until the next GC cycle.
//
// However, if an `externref` destructor called re-entered Wasm with
// more `externref`s, then the temp over-approximated set we were using
// during sweeping (now `precise_stack_roots`) is not empty, and we need
// to keep its references alive in our new over-approximated set.
over_approximated.extend(precise_stack_roots.drain());
// If we didn't re-enter Wasm during destructors (likely),
// `precise_stack_roots` has zero capacity, and the old
// over-approximated has a bunch of capacity. Reuse whichever set has
// most capacity.
if old_over_approximated.capacity() > precise_stack_roots.capacity() {
old_over_approximated.clear();
*precise_stack_roots = old_over_approximated;
}
// Note that this may run arbitrary code as we run externref
// destructors. Because of our `&mut` borrow above on this table,
// though, we're guaranteed that nothing will touch this table.
self.precise_stack_roots.clear();
}
/// Fetches the current value of this table's stack canary.
@@ -724,7 +727,7 @@ impl VMExternRefActivationsTable {
/// For more information on canaries see the gc functions below.
#[inline]
pub fn stack_canary(&self) -> Option<usize> {
self.stack_canary.get()
self.stack_canary
}
/// Sets the current value of the stack canary.
@@ -736,14 +739,14 @@ impl VMExternRefActivationsTable {
///
/// For more information on canaries see the gc functions below.
#[inline]
pub fn set_stack_canary(&self, canary: Option<usize>) {
self.stack_canary.set(canary);
pub fn set_stack_canary(&mut self, canary: Option<usize>) {
self.stack_canary = canary;
}
}
/// Used by the runtime to lookup information about a module given a
/// program counter value.
pub trait ModuleInfoLookup: 'static {
pub trait ModuleInfoLookup {
/// Lookup the module information from a program counter value.
fn lookup(&self, pc: usize) -> Option<Arc<dyn ModuleInfo>>;
}
@@ -754,16 +757,6 @@ pub trait ModuleInfo {
fn lookup_stack_map(&self, pc: usize) -> Option<&StackMap>;
}
pub(crate) struct EmptyModuleInfoLookup;
impl ModuleInfoLookup for EmptyModuleInfoLookup {
fn lookup(&self, _pc: usize) -> Option<Arc<dyn ModuleInfo>> {
None
}
}
pub(crate) const EMPTY_MODULE_LOOKUP: EmptyModuleInfoLookup = EmptyModuleInfoLookup;
#[derive(Debug, Default)]
struct DebugOnly<T> {
inner: T,
@@ -810,22 +803,8 @@ impl<T> std::ops::DerefMut for DebugOnly<T> {
/// that has frames on the stack with the given `stack_maps_registry`.
pub unsafe fn gc(
module_info_lookup: &dyn ModuleInfoLookup,
externref_activations_table: &VMExternRefActivationsTable,
externref_activations_table: &mut VMExternRefActivationsTable,
) {
// We borrow the precise stack roots `RefCell` for the whole duration of
// GC. Whether it is dynamically borrowed serves as a flag for detecting
// re-entrancy into GC. Re-entrancy can occur if we do a GC, drop an
// `externref`, and that `externref`'s destructor then triggers another
// GC. Whenever we detect re-entrancy, we return and give the first,
// outermost GC call priority.
let mut precise_stack_roots = match externref_activations_table
.precise_stack_roots
.try_borrow_mut()
{
Err(_) => return,
Ok(roots) => roots,
};
log::debug!("start GC");
debug_assert!({
@@ -834,7 +813,7 @@ pub unsafe fn gc(
// into the activations table's bump-allocated space at the
// end. Therefore, it should always be empty upon entering this
// function.
precise_stack_roots.is_empty()
externref_activations_table.precise_stack_roots.is_empty()
});
// Whenever we call into Wasm from host code for the first time, we set a
@@ -842,7 +821,7 @@ pub unsafe fn gc(
// canary. If there is *not* a stack canary, then there must be zero Wasm
// frames on the stack. Therefore, we can simply reset the table without
// walking the stack.
let stack_canary = match externref_activations_table.stack_canary.get() {
let stack_canary = match externref_activations_table.stack_canary {
None => {
if cfg!(debug_assertions) {
// Assert that there aren't any Wasm frames on the stack.
@@ -851,7 +830,7 @@ pub unsafe fn gc(
true
});
}
externref_activations_table.sweep(&mut precise_stack_roots);
externref_activations_table.sweep();
log::debug!("end GC");
return;
}
@@ -911,7 +890,7 @@ pub unsafe fn gc(
);
if let Some(r) = NonNull::new(r) {
VMExternRefActivationsTable::insert_precise_stack_root(
&mut precise_stack_roots,
&mut externref_activations_table.precise_stack_roots,
r,
);
}
@@ -941,10 +920,10 @@ pub unsafe fn gc(
// would free those missing roots while they are still in use, leading to
// use-after-free.
if found_canary {
externref_activations_table.sweep(&mut precise_stack_roots);
externref_activations_table.sweep();
} else {
log::warn!("did not find stack canary; skipping GC sweep");
precise_stack_roots.clear();
externref_activations_table.precise_stack_roots.clear();
}
log::debug!("end GC");
@@ -972,12 +951,12 @@ mod tests {
#[test]
fn ref_count_is_at_correct_offset() {
let s = "hi";
let s: &dyn Any = &s as _;
let s: *const dyn Any = s as _;
let s: *mut dyn Any = s as _;
let s: &(dyn Any + Send + Sync) = &s as _;
let s: *const (dyn Any + Send + Sync) = s as _;
let s: *mut (dyn Any + Send + Sync) = s as _;
let extern_data = VMExternData {
ref_count: UnsafeCell::new(0),
ref_count: AtomicUsize::new(0),
value_ptr: NonNull::new(s).unwrap(),
};
@@ -997,7 +976,7 @@ mod tests {
let table = VMExternRefActivationsTable::new();
let table_ptr = &table as *const _;
let next_ptr = &table.next as *const _;
let next_ptr = &table.alloc.next as *const _;
let actual_offset = (next_ptr as usize) - (table_ptr as usize);
@@ -1024,7 +1003,7 @@ mod tests {
let table = VMExternRefActivationsTable::new();
let table_ptr = &table as *const _;
let end_ptr = &table.end as *const _;
let end_ptr = &table.alloc.end as *const _;
let actual_offset = (end_ptr as usize) - (table_ptr as usize);


@@ -16,14 +16,15 @@
int RegisterSetjmp(
void **buf_storage,
void (*body)(void*),
void *payload) {
void (*body)(void*, void*),
void *payload,
void *callee) {
platform_jmp_buf buf;
if (platform_setjmp(buf) != 0) {
return 0;
}
*buf_storage = &buf;
body(payload);
body(payload, callee);
return 1;
}
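
// (Rust, not part of the C file above.) A sketch of how the Rust side might
// declare this shim after the signature change, derived only from the C
// prototype shown here; the real declaration in the runtime may differ.
use std::os::raw::{c_int, c_void};

extern "C" {
    fn RegisterSetjmp(
        buf_storage: *mut *mut c_void,
        // `body` now receives both the payload and the callee pointer.
        body: extern "C" fn(*mut c_void, *mut c_void),
        payload: *mut c_void,
        callee: *mut c_void,
    ) -> c_int;
}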


@@ -3,7 +3,7 @@
//! `InstanceHandle` is a reference-counting handle for an `Instance`.
use crate::export::Export;
use crate::externref::{ModuleInfoLookup, VMExternRefActivationsTable};
use crate::externref::VMExternRefActivationsTable;
use crate::memory::{Memory, RuntimeMemoryCreator};
use crate::table::{Table, TableElement};
use crate::traphandlers::Trap;
@@ -12,24 +12,21 @@ use crate::vmcontext::{
VMGlobalDefinition, VMGlobalImport, VMInterrupts, VMMemoryDefinition, VMMemoryImport,
VMSharedSignatureIndex, VMTableDefinition, VMTableImport,
};
use crate::{ExportFunction, ExportGlobal, ExportMemory, ExportTable};
use indexmap::IndexMap;
use crate::{ExportFunction, ExportGlobal, ExportMemory, ExportTable, Store};
use memoffset::offset_of;
use more_asserts::assert_lt;
use std::alloc::Layout;
use std::any::Any;
use std::cell::RefCell;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::hash::Hash;
use std::ptr::NonNull;
use std::rc::Rc;
use std::sync::Arc;
use std::{mem, ptr, slice};
use wasmtime_environ::entity::{packed_option::ReservedValue, EntityRef, EntitySet, PrimaryMap};
use wasmtime_environ::wasm::{
DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, ElemIndex, EntityIndex,
FuncIndex, GlobalIndex, MemoryIndex, TableElementType, TableIndex,
FuncIndex, GlobalIndex, MemoryIndex, TableElementType, TableIndex, WasmType,
};
use wasmtime_environ::{ir, Module, VMOffsets};
@@ -41,7 +38,7 @@ pub use allocator::*;
///
/// An instance can be created with a resource limiter so that hosts can take into account
/// non-WebAssembly resource usage to determine if a linear memory or table should grow.
pub trait ResourceLimiter {
pub trait ResourceLimiter: Send + Sync + 'static {
/// Notifies the resource limiter that an instance's linear memory has been requested to grow.
///
/// * `current` is the current size of the linear memory in WebAssembly page units.
@@ -53,7 +50,7 @@ pub trait ResourceLimiter {
/// This function should return `true` to indicate that the growing operation is permitted or
/// `false` if not permitted. Returning `true` when a maximum has been exceeded will have no
/// effect as the linear memory will not grow.
fn memory_growing(&self, current: u32, desired: u32, maximum: Option<u32>) -> bool;
fn memory_growing(&mut self, current: u32, desired: u32, maximum: Option<u32>) -> bool;
/// Notifies the resource limiter that an instance's table has been requested to grow.
///
@@ -65,7 +62,7 @@ pub trait ResourceLimiter {
/// This function should return `true` to indicate that the growing operation is permitted or
/// `false` if not permitted. Returning `true` when a maximum has been exceeded will have no
/// effect as the table will not grow.
fn table_growing(&self, current: u32, desired: u32, maximum: Option<u32>) -> bool;
fn table_growing(&mut self, current: u32, desired: u32, maximum: Option<u32>) -> bool;
/// The maximum number of instances that can be created for a `Store`.
///
@@ -83,10 +80,6 @@ pub trait ResourceLimiter {
fn memories(&self) -> usize;
}
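
// A hypothetical limiter against the post-change trait above (`&mut self`
// receivers), capping total linear-memory growth while always permitting
// table growth. The struct name and field are illustrative, and this assumes
// the trait's remaining required methods are the `instances`/`tables`/
// `memories` accessors (only `memories` appears in the hunk shown here).
struct ToyLimiter {
    memory_pages_remaining: u32,
}

impl ResourceLimiter for ToyLimiter {
    fn memory_growing(&mut self, current: u32, desired: u32, _maximum: Option<u32>) -> bool {
        let delta = desired.saturating_sub(current);
        if delta <= self.memory_pages_remaining {
            // `&mut self` lets the limiter keep running totals without
            // interior mutability, which is part of what this commit changes.
            self.memory_pages_remaining -= delta;
            true
        } else {
            false
        }
    }

    fn table_growing(&mut self, _current: u32, _desired: u32, _maximum: Option<u32>) -> bool {
        true
    }

    fn instances(&self) -> usize {
        10_000
    }

    fn tables(&self) -> usize {
        10_000
    }

    fn memories(&self) -> usize {
        10_000
    }
}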
/// Runtime representation of an instance value, which erases all `Instance`
/// information since instances are just a collection of values.
pub type RuntimeInstance = Rc<IndexMap<String, Export>>;
/// A WebAssembly instance.
///
/// This is repr(C) to ensure that the vmctx field is last.
@@ -106,14 +99,14 @@ pub(crate) struct Instance {
/// Stores the dropped passive element segments in this instantiation by index.
/// If the index is present in the set, the segment has been dropped.
dropped_elements: RefCell<EntitySet<ElemIndex>>,
dropped_elements: EntitySet<ElemIndex>,
/// Stores the dropped passive data segments in this instantiation by index.
/// If the index is present in the set, the segment has been dropped.
dropped_data: RefCell<EntitySet<DataIndex>>,
dropped_data: EntitySet<DataIndex>,
/// Hosts can store arbitrary per-instance information here.
host_state: Box<dyn Any>,
host_state: Box<dyn Any + Send + Sync>,
/// Additional context used by compiled wasm code. This field is last, and
/// represents a dynamically-sized array that extends beyond the nominal
@@ -242,16 +235,8 @@ impl Instance {
}
/// Return the indexed `VMGlobalDefinition`.
fn global(&self, index: DefinedGlobalIndex) -> VMGlobalDefinition {
unsafe { *self.global_ptr(index) }
}
/// Set the indexed global to `VMGlobalDefinition`.
#[allow(dead_code)]
fn set_global(&self, index: DefinedGlobalIndex, global: VMGlobalDefinition) {
unsafe {
*self.global_ptr(index) = global;
}
fn global(&self, index: DefinedGlobalIndex) -> &VMGlobalDefinition {
unsafe { &*self.global_ptr(index) }
}
/// Return the indexed `VMGlobalDefinition`.
@@ -295,17 +280,35 @@ impl Instance {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_externref_activations_table()) }
}
/// Return a pointer to the `ModuleInfoLookup`.
pub fn module_info_lookup(&self) -> *mut *const dyn ModuleInfoLookup {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_module_info_lookup()) }
/// Gets a pointer to this instance's `Store` which was originally
/// configured on creation.
///
/// # Panics
///
/// This will panic if the originally configured store was `None`. That can
/// happen for host functions, so host functions can't be queried for their
/// original `Store`; the pointer is just retained as null (since host
/// functions are shared amongst threads and don't all share the same
/// store).
#[inline]
pub fn store(&self) -> *mut dyn Store {
let ptr = unsafe { *self.vmctx_plus_offset::<*mut dyn Store>(self.offsets.vmctx_store()) };
assert!(!ptr.is_null());
ptr
}
pub unsafe fn set_store(&mut self, store: *mut dyn Store) {
*self.vmctx_plus_offset(self.offsets.vmctx_store()) = store;
}
/// Return a reference to the vmctx used by compiled wasm code.
#[inline]
pub fn vmctx(&self) -> &VMContext {
&self.vmctx
}
/// Return a raw pointer to the vmctx used by compiled wasm code.
#[inline]
pub fn vmctx_ptr(&self) -> *mut VMContext {
self.vmctx() as *const VMContext as *mut VMContext
}
@@ -423,13 +426,18 @@ impl Instance {
///
/// Returns `None` if memory can't be grown by the specified amount
/// of pages.
pub(crate) fn memory_grow(&self, memory_index: DefinedMemoryIndex, delta: u32) -> Option<u32> {
pub(crate) fn memory_grow(
&mut self,
memory_index: DefinedMemoryIndex,
delta: u32,
) -> Option<u32> {
let limiter = unsafe { (*self.store()).limiter() };
let memory = self
.memories
.get(memory_index)
.get_mut(memory_index)
.unwrap_or_else(|| panic!("no memory for index {}", memory_index.index()));
let result = unsafe { memory.grow(delta) };
let result = unsafe { memory.grow(delta, limiter) };
// Keep current the VMContext pointers used by compiled wasm code.
self.set_memory(memory_index, self.memories[memory_index].vmmemory());
@@ -446,12 +454,12 @@ impl Instance {
/// This and `imported_memory_size` are currently unsafe because they
/// dereference the memory import's pointers.
pub(crate) unsafe fn imported_memory_grow(
&self,
&mut self,
memory_index: MemoryIndex,
delta: u32,
) -> Option<u32> {
let import = self.imported_memory(memory_index);
let foreign_instance = (&*import.vmctx).instance();
let foreign_instance = (*import.vmctx).instance_mut();
let foreign_memory = &*import.from;
let foreign_index = foreign_instance.memory_index(foreign_memory);
@@ -480,9 +488,8 @@ impl Instance {
foreign_instance.memory_size(foreign_index)
}
pub(crate) fn table_element_type(&self, table_index: TableIndex) -> TableElementType {
let table = self.get_table(table_index);
table.element_type()
pub(crate) fn table_element_type(&mut self, table_index: TableIndex) -> TableElementType {
unsafe { (*self.get_table(table_index)).element_type() }
}
/// Grow table by the specified amount of elements, filling them with
@@ -491,7 +498,7 @@ impl Instance {
/// Returns `None` if table can't be grown by the specified amount of
/// elements, or if `init_value` is the wrong type of table element.
pub(crate) fn table_grow(
&self,
&mut self,
table_index: TableIndex,
delta: u32,
init_value: TableElement,
@@ -502,17 +509,18 @@ impl Instance {
}
fn defined_table_grow(
&self,
&mut self,
table_index: DefinedTableIndex,
delta: u32,
init_value: TableElement,
) -> Option<u32> {
let limiter = unsafe { (*self.store()).limiter() };
let table = self
.tables
.get(table_index)
.get_mut(table_index)
.unwrap_or_else(|| panic!("no table for index {}", table_index.index()));
let result = unsafe { table.grow(delta, init_value) };
let result = unsafe { table.grow(delta, init_value, limiter) };
// Keep the `VMContext` pointers used by compiled Wasm code up to
// date.
@@ -521,36 +529,6 @@ impl Instance {
result
}
pub(crate) fn defined_table_fill(
&self,
table_index: DefinedTableIndex,
dst: u32,
val: TableElement,
len: u32,
) -> Result<(), Trap> {
self.tables.get(table_index).unwrap().fill(dst, val, len)
}
// Get table element by index.
fn table_get(&self, table_index: DefinedTableIndex, index: u32) -> Option<TableElement> {
self.tables
.get(table_index)
.unwrap_or_else(|| panic!("no table for index {}", table_index.index()))
.get(index)
}
fn table_set(
&self,
table_index: DefinedTableIndex,
index: u32,
val: TableElement,
) -> Result<(), ()> {
self.tables
.get(table_index)
.unwrap_or_else(|| panic!("no table for index {}", table_index.index()))
.set(index, val)
}
fn alloc_layout(&self) -> Layout {
let size = mem::size_of_val(self)
.checked_add(usize::try_from(self.offsets.size_of_vmctx()).unwrap())
@@ -584,14 +562,14 @@ impl Instance {
index: I,
index_map: &HashMap<I, usize>,
data: &'a Vec<D>,
dropped: &RefCell<EntitySet<I>>,
dropped: &EntitySet<I>,
) -> &'a [T]
where
D: AsRef<[T]>,
I: EntityRef + Hash,
{
match index_map.get(&index) {
Some(index) if !dropped.borrow().contains(I::new(*index)) => data[*index].as_ref(),
Some(index) if !dropped.contains(I::new(*index)) => data[*index].as_ref(),
_ => &[],
}
}
@@ -604,24 +582,28 @@ impl Instance {
/// Returns a `Trap` error when the range within the table is out of bounds
/// or the range within the passive element is out of bounds.
pub(crate) fn table_init(
&self,
&mut self,
table_index: TableIndex,
elem_index: ElemIndex,
dst: u32,
src: u32,
len: u32,
) -> Result<(), Trap> {
// TODO: this `clone()` shouldn't be necessary but is used for now to
// inform `rustc` that the lifetime of the elements here is
// disconnected from the lifetime of `self`.
let module = self.module.clone();
let elements = Self::find_passive_segment(
elem_index,
&self.module.passive_elements_map,
&self.module.passive_elements,
&module.passive_elements_map,
&module.passive_elements,
&self.dropped_elements,
);
self.table_init_segment(table_index, elements, dst, src, len)
}
pub(crate) fn table_init_segment(
&self,
&mut self,
table_index: TableIndex,
elements: &[FuncIndex],
dst: u32,
@@ -630,7 +612,7 @@ impl Instance {
) -> Result<(), Trap> {
// https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init
let table = self.get_table(table_index);
let table = unsafe { &mut *self.get_table(table_index) };
let elements = match elements
.get(usize::try_from(src).unwrap()..)
@@ -665,19 +647,22 @@ impl Instance {
}
/// Drop an element.
pub(crate) fn elem_drop(&self, elem_index: ElemIndex) {
pub(crate) fn elem_drop(&mut self, elem_index: ElemIndex) {
// https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop
if let Some(index) = self.module.passive_elements_map.get(&elem_index) {
self.dropped_elements
.borrow_mut()
.insert(ElemIndex::new(*index));
self.dropped_elements.insert(ElemIndex::new(*index));
}
// Note that we don't check that we actually removed a segment because
// dropping a non-passive segment is a no-op (not a trap).
}
/// Get a locally-defined memory.
pub(crate) fn get_defined_memory(&mut self, index: DefinedMemoryIndex) -> *mut Memory {
ptr::addr_of_mut!(self.memories[index])
}
/// Do a `memory.copy`
///
/// # Errors
@@ -685,7 +670,7 @@ impl Instance {
/// Returns a `Trap` error when the source or destination ranges are out of
/// bounds.
pub(crate) fn memory_copy(
&self,
&mut self,
dst_index: MemoryIndex,
dst: u32,
src_index: MemoryIndex,
@@ -784,24 +769,28 @@ impl Instance {
/// memory's bounds or if the source range is outside the data segment's
/// bounds.
pub(crate) fn memory_init(
&self,
&mut self,
memory_index: MemoryIndex,
data_index: DataIndex,
dst: u32,
src: u32,
len: u32,
) -> Result<(), Trap> {
// TODO: this `clone()` shouldn't be necessary but is used for now to
// inform `rustc` that the lifetime of the elements here is
// disconnected from the lifetime of `self`.
let module = self.module.clone();
let data = Self::find_passive_segment(
data_index,
&self.module.passive_data_map,
&self.module.passive_data,
&module.passive_data_map,
&module.passive_data,
&self.dropped_data,
);
self.memory_init_segment(memory_index, &data, dst, src, len)
}
pub(crate) fn memory_init_segment(
&self,
&mut self,
memory_index: MemoryIndex,
data: &[u8],
dst: u32,
@@ -834,11 +823,9 @@ impl Instance {
}
/// Drop the given data segment, truncating its length to zero.
pub(crate) fn data_drop(&self, data_index: DataIndex) {
pub(crate) fn data_drop(&mut self, data_index: DataIndex) {
if let Some(index) = self.module.passive_data_map.get(&data_index) {
self.dropped_data
.borrow_mut()
.insert(DataIndex::new(*index));
self.dropped_data.insert(DataIndex::new(*index));
}
// Note that we don't check that we actually removed a segment because
@@ -847,7 +834,7 @@ impl Instance {
/// Get a table by index regardless of whether it is locally-defined or an
/// imported, foreign table.
pub(crate) fn get_table(&self, table_index: TableIndex) -> &Table {
pub(crate) fn get_table(&mut self, table_index: TableIndex) -> *mut Table {
if let Some(defined_table_index) = self.module.defined_table_index(table_index) {
self.get_defined_table(defined_table_index)
} else {
@@ -856,33 +843,56 @@ impl Instance {
}
/// Get a locally-defined table.
pub(crate) fn get_defined_table(&self, index: DefinedTableIndex) -> &Table {
&self.tables[index]
pub(crate) fn get_defined_table(&mut self, index: DefinedTableIndex) -> *mut Table {
ptr::addr_of_mut!(self.tables[index])
}
/// Get an imported, foreign table.
pub(crate) fn get_foreign_table(&self, index: TableIndex) -> &Table {
pub(crate) fn get_foreign_table(&mut self, index: TableIndex) -> *mut Table {
let import = self.imported_table(index);
let foreign_instance = unsafe { (&mut *(import).vmctx).instance() };
let foreign_table = unsafe { &mut *(import).from };
let foreign_instance = unsafe { (*import.vmctx).instance_mut() };
let foreign_table = unsafe { &*import.from };
let foreign_index = foreign_instance.table_index(foreign_table);
&foreign_instance.tables[foreign_index]
ptr::addr_of_mut!(foreign_instance.tables[foreign_index])
}
pub(crate) fn get_defined_table_index_and_instance(
&self,
&mut self,
index: TableIndex,
) -> (DefinedTableIndex, &Instance) {
) -> (DefinedTableIndex, &mut Instance) {
if let Some(defined_table_index) = self.module.defined_table_index(index) {
(defined_table_index, self)
} else {
let import = self.imported_table(index);
let foreign_instance = unsafe { (&mut *(import).vmctx).instance() };
let foreign_table_def = unsafe { &mut *(import).from };
let foreign_instance = unsafe { (*import.vmctx).instance_mut() };
let foreign_table_def = unsafe { &*import.from };
let foreign_table_index = foreign_instance.table_index(foreign_table_def);
(foreign_table_index, foreign_instance)
}
}
fn drop_globals(&mut self) {
for (idx, global) in self.module.globals.iter() {
let idx = match self.module.defined_global_index(idx) {
Some(idx) => idx,
None => continue,
};
match global.wasm_ty {
// For now only externref globals need to get destroyed
WasmType::ExternRef => {}
_ => continue,
}
unsafe {
drop((*self.global_ptr(idx)).as_externref_mut().take());
}
}
}
}
impl Drop for Instance {
fn drop(&mut self) {
self.drop_globals();
}
}
/// A handle holding an `Instance` of a WebAssembly module.
@@ -891,6 +901,16 @@ pub struct InstanceHandle {
instance: *mut Instance,
}
// These are only valid if the `Instance` type is send/sync, hence the
// assertion below.
unsafe impl Send for InstanceHandle {}
unsafe impl Sync for InstanceHandle {}
fn _assert_send_sync() {
fn _assert<T: Send + Sync>() {}
_assert::<Instance>();
}
impl InstanceHandle {
/// Create a new `InstanceHandle` pointing at the instance
/// pointed to by the given `VMContext` pointer.
@@ -898,6 +918,7 @@ impl InstanceHandle {
/// # Safety
/// This is unsafe because it doesn't work on just any `VMContext`, it must
/// be a `VMContext` allocated as part of an `Instance`.
#[inline]
pub unsafe fn from_vmctx(vmctx: *mut VMContext) -> Self {
let instance = (&mut *vmctx).instance();
Self {
@@ -911,6 +932,7 @@ impl InstanceHandle {
}
/// Return a raw pointer to the vmctx used by compiled wasm code.
#[inline]
pub fn vmctx_ptr(&self) -> *mut VMContext {
self.instance().vmctx_ptr()
}
@@ -944,12 +966,9 @@ impl InstanceHandle {
self.instance().memory_index(memory)
}
/// Grow memory in this instance by the specified amount of pages.
///
/// Returns `None` if memory can't be grown by the specified amount
/// of pages.
pub fn memory_grow(&self, memory_index: DefinedMemoryIndex, delta: u32) -> Option<u32> {
self.instance().memory_grow(memory_index, delta)
/// Get a memory defined locally within this module.
pub fn get_defined_memory(&mut self, index: DefinedMemoryIndex) -> *mut Memory {
self.instance_mut().get_defined_memory(index)
}
/// Return the table index for the given `VMTableDefinition` in this instance.
@@ -957,83 +976,35 @@ impl InstanceHandle {
self.instance().table_index(table)
}
/// Grow table in this instance by the specified amount of elements.
///
/// When the table is successfully grown, returns the original size of the
/// table.
///
/// Returns `None` if memory can't be grown by the specified amount of pages
/// or if the `init_value` is the incorrect table element type.
pub fn table_grow(
&self,
table_index: TableIndex,
delta: u32,
init_value: TableElement,
) -> Option<u32> {
self.instance().table_grow(table_index, delta, init_value)
}
/// Grow table in this instance by the specified amount of elements.
///
/// When the table is successfully grown, returns the original size of the
/// table.
///
/// Returns `None` if memory can't be grown by the specified amount of pages
/// or if the `init_value` is the incorrect table element type.
pub fn defined_table_grow(
&self,
table_index: DefinedTableIndex,
delta: u32,
init_value: TableElement,
) -> Option<u32> {
self.instance()
.defined_table_grow(table_index, delta, init_value)
}
/// Get table element reference.
///
/// Returns `None` if index is out of bounds.
pub fn table_get(&self, table_index: DefinedTableIndex, index: u32) -> Option<TableElement> {
self.instance().table_get(table_index, index)
}
/// Set table element reference.
///
/// Returns an error if the index is out of bounds
pub fn table_set(
&self,
table_index: DefinedTableIndex,
index: u32,
val: TableElement,
) -> Result<(), ()> {
self.instance().table_set(table_index, index, val)
}
/// Fill a region of the table.
///
/// Returns an error if the region is out of bounds or val is not of the
/// correct type.
pub fn defined_table_fill(
&self,
table_index: DefinedTableIndex,
dst: u32,
val: TableElement,
len: u32,
) -> Result<(), Trap> {
self.instance()
.defined_table_fill(table_index, dst, val, len)
}
/// Get a table defined locally within this module.
pub fn get_defined_table(&self, index: DefinedTableIndex) -> &Table {
self.instance().get_defined_table(index)
pub fn get_defined_table(&mut self, index: DefinedTableIndex) -> *mut Table {
self.instance_mut().get_defined_table(index)
}
/// Return a reference to the contained `Instance`.
#[inline]
pub(crate) fn instance(&self) -> &Instance {
unsafe { &*(self.instance as *const Instance) }
}
pub(crate) fn instance_mut(&mut self) -> &mut Instance {
unsafe { &mut *self.instance }
}
/// Returns the `Store` pointer that was stored on creation
#[inline]
pub fn store(&self) -> *mut dyn Store {
self.instance().store()
}
/// Configure the `*mut dyn Store` internal pointer after-the-fact.
///
/// This is provided for the original `Store` itself to configure the first
/// self-pointer after the original `Box` has been initialized.
pub unsafe fn set_store(&mut self, store: *mut dyn Store) {
self.instance_mut().set_store(store);
}
/// Returns a clone of this instance.
///
/// This is unsafe because the returned handle here is just a cheap clone


@@ -1,4 +1,3 @@
use crate::externref::{ModuleInfoLookup, VMExternRefActivationsTable, EMPTY_MODULE_LOOKUP};
use crate::imports::Imports;
use crate::instance::{Instance, InstanceHandle, ResourceLimiter, RuntimeMemoryCreator};
use crate::memory::{DefaultMemoryCreator, Memory};
@@ -6,16 +5,15 @@ use crate::table::Table;
use crate::traphandlers::Trap;
use crate::vmcontext::{
VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport,
VMGlobalDefinition, VMGlobalImport, VMInterrupts, VMMemoryImport, VMSharedSignatureIndex,
VMTableImport,
VMGlobalDefinition, VMGlobalImport, VMMemoryImport, VMSharedSignatureIndex, VMTableImport,
};
use crate::Store;
use anyhow::Result;
use std::alloc;
use std::any::Any;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::marker;
use std::ptr::{self, NonNull};
use std::rc::Rc;
use std::slice;
use std::sync::Arc;
use thiserror::Error;
@@ -49,19 +47,23 @@ pub struct InstanceAllocationRequest<'a> {
pub shared_signatures: SharedSignatures<'a>,
/// The host state to associate with the instance.
pub host_state: Box<dyn Any>,
pub host_state: Box<dyn Any + Send + Sync>,
/// The pointer to the VM interrupts structure to use for the instance.
pub interrupts: *const VMInterrupts,
/// The pointer to the reference activations table to use for the instance.
pub externref_activations_table: *mut VMExternRefActivationsTable,
/// The pointer to the module info lookup to use for the instance.
pub module_info_lookup: Option<*const dyn ModuleInfoLookup>,
/// The resource limiter to use for the instance.
pub limiter: Option<&'a Rc<dyn ResourceLimiter>>,
/// A pointer to the "store" for this instance to be allocated. The store
/// correlates with the `Store` in wasmtime itself, and lots of contextual
/// information about the execution of wasm can be learned through the store.
///
/// Note that this is a raw pointer and has a static lifetime, both of which
/// are a bit of a lie. This is done purely so a store can learn about
/// itself when it gets called as a host function, and additionally so this
/// runtime can access internals as necessary (such as the
/// VMExternRefActivationsTable or the ResourceLimiter).
///
/// Note that this ends up being a self-pointer to the instance when stored.
/// The reason is that the instance itself is then stored within the store.
/// We use a number of `PhantomPinned` declarations to indicate this to the
/// compiler. More info on this in `wasmtime/src/store.rs`
pub store: Option<*mut dyn Store>,
}
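
// A small, self-contained illustration of why `PhantomPinned` shows up on
// `VMContext` in this commit: embedding the marker removes the automatic
// `Unpin` impl, flagging that the value must not be moved once a self-pointer
// (like the store pointer described above) has been handed out. The type name
// here is hypothetical.
use std::marker::PhantomPinned;

struct HasSelfPointer {
    value: u32,
    _marker: PhantomPinned,
}

fn assert_unpin<T: Unpin>() {}

fn main() {
    assert_unpin::<u32>(); // plain data can still be moved freely
    // assert_unpin::<HasSelfPointer>(); // would not compile: `PhantomPinned` is `!Unpin`

    let s = HasSelfPointer { value: 7, _marker: PhantomPinned };
    println!("value = {}", s.value);
}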
/// A link error while instantiating a module.
@@ -141,7 +143,8 @@ pub unsafe trait InstanceAllocator: Send + Sync {
/// This method is only safe to call immediately after an instance has been allocated.
unsafe fn initialize(
&self,
handle: &InstanceHandle,
handle: &mut InstanceHandle,
module: &Module,
is_bulk_memory: bool,
) -> Result<(), InstantiationError>;
@@ -232,9 +235,12 @@ fn get_table_init_start(
}
}
fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
for init in &instance.module.table_initializers {
let table = instance.get_table(init.table_index);
fn check_table_init_bounds(
instance: &mut Instance,
module: &Module,
) -> Result<(), InstantiationError> {
for init in &module.table_initializers {
let table = unsafe { &*instance.get_table(init.table_index) };
let start = get_table_init_start(init, instance)?;
let start = usize::try_from(start).unwrap();
let end = start.checked_add(init.elements.len());
@@ -254,8 +260,8 @@ fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError
Ok(())
}
fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
for init in &instance.module.table_initializers {
fn initialize_tables(instance: &mut Instance, module: &Module) -> Result<(), InstantiationError> {
for init in &module.table_initializers {
instance
.table_init_segment(
init.table_index,
@@ -318,7 +324,7 @@ fn check_memory_init_bounds(
}
fn initialize_memories(
instance: &Instance,
instance: &mut Instance,
initializers: &[MemoryInitializer],
) -> Result<(), InstantiationError> {
for init in initializers {
@@ -336,8 +342,8 @@ fn initialize_memories(
Ok(())
}
fn check_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
check_table_init_bounds(instance)?;
fn check_init_bounds(instance: &mut Instance, module: &Module) -> Result<(), InstantiationError> {
check_table_init_bounds(instance, module)?;
match &instance.module.memory_initialization {
MemoryInitialization::Paged { out_of_bounds, .. } => {
@@ -356,7 +362,8 @@ fn check_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
}
fn initialize_instance(
instance: &Instance,
instance: &mut Instance,
module: &Module,
is_bulk_memory: bool,
) -> Result<(), InstantiationError> {
// If bulk memory is not enabled, bounds check the data and element segments before
@@ -364,14 +371,14 @@ fn initialize_instance(
// in-order and side effects are observed up to the point of an out-of-bounds
// initializer, so the early checking is not desired.
if !is_bulk_memory {
check_init_bounds(instance)?;
check_init_bounds(instance, module)?;
}
// Initialize the tables
initialize_tables(instance)?;
initialize_tables(instance, module)?;
// Initialize the memories
match &instance.module.memory_initialization {
match &module.memory_initialization {
MemoryInitialization::Paged { map, out_of_bounds } => {
for (index, pages) in map {
let memory = instance.memory(index);
@@ -404,12 +411,14 @@ fn initialize_instance(
Ok(())
}
unsafe fn initialize_vmcontext(instance: &Instance, req: InstanceAllocationRequest) {
let module = &instance.module;
unsafe fn initialize_vmcontext(instance: &mut Instance, req: InstanceAllocationRequest) {
if let Some(store) = req.store {
*instance.interrupts() = (*store).vminterrupts();
*instance.externref_activations_table() = (*store).externref_activations_table().0;
instance.set_store(store);
}
*instance.interrupts() = req.interrupts;
*instance.externref_activations_table() = req.externref_activations_table;
*instance.module_info_lookup() = req.module_info_lookup.unwrap_or(&EMPTY_MODULE_LOOKUP);
let module = &instance.module;
// Initialize shared signatures
let mut ptr = instance.signature_ids_ptr();
@@ -520,17 +529,24 @@ unsafe fn initialize_vmcontext_globals(instance: &Instance) {
let from = if let Some(def_x) = module.defined_global_index(x) {
instance.global(def_x)
} else {
*instance.imported_global(x).from
&*instance.imported_global(x).from
};
*to = from;
// Globals of type `externref` need to manage the reference
// count as values move between globals, everything else is just
// copy-able bits.
match global.wasm_ty {
WasmType::ExternRef => *(*to).as_externref_mut() = from.as_externref().clone(),
_ => ptr::copy_nonoverlapping(from, to, 1),
}
}
GlobalInit::RefFunc(f) => {
*(*to).as_anyfunc_mut() = instance.get_caller_checked_anyfunc(f).unwrap()
as *const VMCallerCheckedAnyfunc;
}
GlobalInit::RefNullConst => match global.wasm_ty {
WasmType::FuncRef => *(*to).as_anyfunc_mut() = ptr::null(),
WasmType::ExternRef => *(*to).as_externref_mut() = None,
// `VMGlobalDefinition::new()` already zeroed out the bits
WasmType::FuncRef => {}
WasmType::ExternRef => {}
ty => panic!("unsupported reference type for global: {:?}", ty),
},
GlobalInit::Import => panic!("locally-defined global initialized as import"),
@@ -545,6 +561,17 @@ pub struct OnDemandInstanceAllocator {
stack_size: usize,
}
// rustc is quite strict with the lifetimes when dealing with mutable borrows,
// so this is a little helper to get a shorter lifetime on `Option<&mut T>`
fn borrow_limiter<'a>(
limiter: &'a mut Option<&mut dyn ResourceLimiter>,
) -> Option<&'a mut dyn ResourceLimiter> {
match limiter {
Some(limiter) => Some(&mut **limiter),
None => None,
}
}
impl OnDemandInstanceAllocator {
/// Creates a new on-demand instance allocator.
pub fn new(mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>, stack_size: usize) -> Self {
@@ -556,13 +583,16 @@ impl OnDemandInstanceAllocator {
fn create_tables(
module: &Module,
limiter: Option<&Rc<dyn ResourceLimiter>>,
mut limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<PrimaryMap<DefinedTableIndex, Table>, InstantiationError> {
let num_imports = module.num_imported_tables;
let mut tables: PrimaryMap<DefinedTableIndex, _> =
PrimaryMap::with_capacity(module.table_plans.len() - num_imports);
for table in &module.table_plans.values().as_slice()[num_imports..] {
tables.push(Table::new_dynamic(table, limiter).map_err(InstantiationError::Resource)?);
tables.push(
Table::new_dynamic(table, borrow_limiter(&mut limiter))
.map_err(InstantiationError::Resource)?,
);
}
Ok(tables)
}
@@ -570,7 +600,7 @@ impl OnDemandInstanceAllocator {
fn create_memories(
&self,
module: &Module,
limiter: Option<&Rc<dyn ResourceLimiter>>,
mut limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<PrimaryMap<DefinedMemoryIndex, Memory>, InstantiationError> {
let creator = self
.mem_creator
@@ -581,7 +611,7 @@ impl OnDemandInstanceAllocator {
PrimaryMap::with_capacity(module.memory_plans.len() - num_imports);
for plan in &module.memory_plans.values().as_slice()[num_imports..] {
memories.push(
Memory::new_dynamic(plan, creator, limiter)
Memory::new_dynamic(plan, creator, borrow_limiter(&mut limiter))
.map_err(InstantiationError::Resource)?,
);
}
@@ -603,23 +633,24 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
&self,
mut req: InstanceAllocationRequest,
) -> Result<InstanceHandle, InstantiationError> {
let memories = self.create_memories(&req.module, req.limiter)?;
let tables = Self::create_tables(&req.module, req.limiter)?;
let mut limiter = req.store.and_then(|s| (*s).limiter());
let memories = self.create_memories(&req.module, borrow_limiter(&mut limiter))?;
let tables = Self::create_tables(&req.module, borrow_limiter(&mut limiter))?;
let host_state = std::mem::replace(&mut req.host_state, Box::new(()));
let handle = {
let mut handle = {
let instance = Instance {
module: req.module.clone(),
offsets: VMOffsets::new(std::mem::size_of::<*const u8>() as u8, &req.module),
memories,
tables,
dropped_elements: RefCell::new(EntitySet::with_capacity(
req.module.passive_elements.len(),
)),
dropped_data: RefCell::new(EntitySet::with_capacity(req.module.passive_data.len())),
dropped_elements: EntitySet::with_capacity(req.module.passive_elements.len()),
dropped_data: EntitySet::with_capacity(req.module.passive_data.len()),
host_state,
vmctx: VMContext {},
vmctx: VMContext {
_marker: marker::PhantomPinned,
},
};
let layout = instance.alloc_layout();
let instance_ptr = alloc::alloc(layout) as *mut Instance;
@@ -632,17 +663,18 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
}
};
initialize_vmcontext(handle.instance(), req);
initialize_vmcontext(handle.instance_mut(), req);
Ok(handle)
}
unsafe fn initialize(
&self,
handle: &InstanceHandle,
handle: &mut InstanceHandle,
module: &Module,
is_bulk_memory: bool,
) -> Result<(), InstantiationError> {
initialize_instance(handle.instance(), is_bulk_memory)
initialize_instance(handle.instance_mut(), module, is_bulk_memory)
}
unsafe fn deallocate(&self, handle: &InstanceHandle) {


@@ -7,6 +7,7 @@
//! Using the pooling instance allocator can speed up module instantiation
//! when modules can be constrained based on configurable limits.
use super::borrow_limiter;
use super::{
initialize_instance, initialize_vmcontext, InstanceAllocationRequest, InstanceAllocator,
InstanceHandle, InstantiationError, ResourceLimiter,
@@ -14,11 +15,10 @@ use super::{
use crate::{instance::Instance, Memory, Mmap, Table, VMContext};
use anyhow::{anyhow, bail, Context, Result};
use rand::Rng;
use std::cell::RefCell;
use std::cmp::min;
use std::convert::TryFrom;
use std::marker;
use std::mem;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use wasmtime_environ::{
entity::{EntitySet, PrimaryMap},
@@ -290,12 +290,12 @@ impl Default for PoolingAllocationStrategy {
#[derive(Debug)]
struct InstancePool {
mapping: Mmap,
offsets: VMOffsets,
instance_size: usize,
max_instances: usize,
free_list: Mutex<Vec<usize>>,
memories: MemoryPool,
tables: TablePool,
empty_module: Arc<Module>,
}
impl InstancePool {
@@ -334,18 +334,17 @@ impl InstancePool {
let pool = Self {
mapping,
offsets,
instance_size,
max_instances,
free_list: Mutex::new((0..max_instances).collect()),
memories: MemoryPool::new(module_limits, instance_limits)?,
tables: TablePool::new(module_limits, instance_limits)?,
empty_module: Arc::new(Module::default()),
};
// Use a default module to initialize the instances to start
let module = Arc::new(Module::default());
for i in 0..instance_limits.count as usize {
pool.initialize(i, &module);
pool.initialize(module_limits, i);
}
Ok(pool)
@@ -356,7 +355,7 @@ impl InstancePool {
&mut *(self.mapping.as_mut_ptr().add(index * self.instance_size) as *mut Instance)
}
fn initialize(&self, index: usize, module: &Arc<Module>) {
fn initialize(&self, limits: &ModuleLimits, index: usize) {
unsafe {
let instance = self.instance(index);
@@ -364,14 +363,19 @@ impl InstancePool {
std::ptr::write(
instance as _,
Instance {
module: module.clone(),
offsets: self.offsets,
memories: PrimaryMap::with_capacity(self.offsets.num_defined_memories as usize),
tables: PrimaryMap::with_capacity(self.offsets.num_defined_tables as usize),
dropped_elements: RefCell::new(EntitySet::new()),
dropped_data: RefCell::new(EntitySet::new()),
module: self.empty_module.clone(),
offsets: VMOffsets::new(
std::mem::size_of::<*const u8>() as u8,
&self.empty_module,
),
memories: PrimaryMap::with_capacity(limits.memories as usize),
tables: PrimaryMap::with_capacity(limits.tables as usize),
dropped_elements: EntitySet::new(),
dropped_data: EntitySet::new(),
host_state: Box::new(()),
vmctx: VMContext {},
vmctx: VMContext {
_marker: marker::PhantomPinned,
},
},
);
}
@@ -391,18 +395,19 @@ impl InstancePool {
);
instance.host_state = std::mem::replace(&mut req.host_state, Box::new(()));
let mut limiter = req.store.and_then(|s| (*s).limiter());
Self::set_instance_memories(
instance,
self.memories.get(index),
self.memories.max_wasm_pages,
req.limiter,
borrow_limiter(&mut limiter),
)?;
Self::set_instance_tables(
instance,
self.tables.get(index),
self.tables.get(index).map(|x| x as *mut usize),
self.tables.max_elements,
req.limiter,
borrow_limiter(&mut limiter),
)?;
initialize_vmcontext(instance, req);
@@ -452,7 +457,7 @@ impl InstancePool {
// Decommit any linear memories that were used
for (memory, base) in instance.memories.values_mut().zip(self.memories.get(index)) {
let memory = mem::take(memory);
let mut memory = mem::take(memory);
debug_assert!(memory.is_static());
// Reset any faulted guard pages as the physical memory may be reused for another instance in the future
@@ -460,14 +465,15 @@ impl InstancePool {
memory
.reset_guard_pages()
.expect("failed to reset guard pages");
drop(&mut memory); // take a mutable borrow so `mut` is required on all platforms, not just with uffd
let size = (memory.size() * WASM_PAGE_SIZE) as usize;
let size = (memory.size() as usize) * (WASM_PAGE_SIZE as usize);
drop(memory);
decommit_memory_pages(base, size).expect("failed to decommit linear memory pages");
}
instance.memories.clear();
instance.dropped_data.borrow_mut().clear();
instance.dropped_data.clear();
// Decommit any tables that were used
for (table, base) in instance.tables.values_mut().zip(self.tables.get(index)) {
@@ -484,11 +490,22 @@ impl InstancePool {
}
instance.tables.clear();
instance.dropped_elements.borrow_mut().clear();
instance.dropped_elements.clear();
// Drop all `global` values which need a destructor, such as externref
// values which now need their reference count dropped.
instance.drop_globals();
// Drop any host state
instance.host_state = Box::new(());
// And finally reset the module/offsets back to their original. This
// should put everything back in a relatively pristine state for each
// fresh allocation later on.
instance.module = self.empty_module.clone();
instance.offsets =
VMOffsets::new(std::mem::size_of::<*const u8>() as u8, &self.empty_module);
self.free_list.lock().unwrap().push(index);
}
@@ -496,7 +513,7 @@ impl InstancePool {
instance: &mut Instance,
mut memories: impl Iterator<Item = *mut u8>,
max_pages: u32,
limiter: Option<&Rc<dyn ResourceLimiter>>,
mut limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<(), InstantiationError> {
let module = instance.module.as_ref();
@@ -505,30 +522,34 @@ impl InstancePool {
for plan in
(&module.memory_plans.values().as_slice()[module.num_imported_memories..]).iter()
{
let memory = unsafe {
std::slice::from_raw_parts_mut(
memories.next().unwrap(),
(max_pages as usize) * (WASM_PAGE_SIZE as usize),
)
};
instance.memories.push(
Memory::new_static(
plan,
memories.next().unwrap(),
max_pages,
memory,
commit_memory_pages,
limiter,
borrow_limiter(&mut limiter),
)
.map_err(InstantiationError::Resource)?,
);
}
let mut dropped_data = instance.dropped_data.borrow_mut();
debug_assert!(dropped_data.is_empty());
dropped_data.resize(module.passive_data.len());
debug_assert!(instance.dropped_data.is_empty());
instance.dropped_data.resize(module.passive_data.len());
Ok(())
}
fn set_instance_tables(
instance: &mut Instance,
mut tables: impl Iterator<Item = *mut u8>,
mut tables: impl Iterator<Item = *mut usize>,
max_elements: u32,
limiter: Option<&Rc<dyn ResourceLimiter>>,
mut limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<(), InstantiationError> {
let module = instance.module.as_ref();
@@ -537,18 +558,23 @@ impl InstancePool {
for plan in (&module.table_plans.values().as_slice()[module.num_imported_tables..]).iter() {
let base = tables.next().unwrap();
commit_table_pages(base, max_elements as usize * mem::size_of::<*mut u8>())
.map_err(InstantiationError::Resource)?;
commit_table_pages(
base as *mut u8,
max_elements as usize * mem::size_of::<*mut u8>(),
)
.map_err(InstantiationError::Resource)?;
let table = unsafe { std::slice::from_raw_parts_mut(base, max_elements as usize) };
instance.tables.push(
Table::new_static(plan, base as _, max_elements, limiter)
Table::new_static(plan, table, borrow_limiter(&mut limiter))
.map_err(InstantiationError::Resource)?,
);
}
let mut dropped_elements = instance.dropped_elements.borrow_mut();
debug_assert!(dropped_elements.is_empty());
dropped_elements.resize(module.passive_elements.len());
debug_assert!(instance.dropped_elements.is_empty());
instance
.dropped_elements
.resize(module.passive_elements.len());
Ok(())
}
@@ -595,7 +621,7 @@ impl MemoryPool {
}
// The maximum module memory page count cannot exceed the memory reservation size
if (module_limits.memory_pages * WASM_PAGE_SIZE) as u64
if u64::from(module_limits.memory_pages) * u64::from(WASM_PAGE_SIZE)
> instance_limits.memory_reservation_size
{
bail!(
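Why the widening to `u64` matters here, as a standalone illustration (the numbers are hypothetical limits, not values taken from this patch):

#[test]
fn widen_before_multiplying() {
    // 65_536 pages of 64 KiB each is exactly 4 GiB: the product wraps to 0 in
    // u32 arithmetic but is computed correctly once both operands are widened.
    let (pages, page_size): (u32, u32) = (65_536, 65_536);
    assert_eq!(pages.wrapping_mul(page_size), 0);
    assert_eq!(u64::from(pages) * u64::from(page_size), 1u64 << 32);
}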
@@ -957,21 +983,22 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
unsafe fn initialize(
&self,
handle: &InstanceHandle,
handle: &mut InstanceHandle,
module: &Module,
is_bulk_memory: bool,
) -> Result<(), InstantiationError> {
let instance = handle.instance();
let instance = handle.instance_mut();
cfg_if::cfg_if! {
if #[cfg(all(feature = "uffd", target_os = "linux"))] {
match &instance.module.memory_initialization {
match &module.memory_initialization {
wasmtime_environ::MemoryInitialization::Paged{ out_of_bounds, .. } => {
if !is_bulk_memory {
super::check_init_bounds(instance)?;
super::check_init_bounds(instance, module)?;
}
// Initialize the tables
super::initialize_tables(instance)?;
super::initialize_tables(instance, module)?;
// Don't initialize the memory; the fault handler will back the pages when accessed
@@ -984,10 +1011,10 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
Ok(())
},
_ => initialize_instance(instance, is_bulk_memory)
_ => initialize_instance(instance, module, is_bulk_memory)
}
} else {
initialize_instance(instance, is_bulk_memory)
initialize_instance(instance, module, is_bulk_memory)
}
}
}
@@ -1355,19 +1382,6 @@ mod test {
let instances = InstancePool::new(&module_limits, &instance_limits)?;
assert_eq!(
instances.offsets.pointer_size,
std::mem::size_of::<*const u8>() as u8
);
assert_eq!(instances.offsets.num_signature_ids, 0);
assert_eq!(instances.offsets.num_imported_functions, 0);
assert_eq!(instances.offsets.num_imported_tables, 0);
assert_eq!(instances.offsets.num_imported_memories, 0);
assert_eq!(instances.offsets.num_imported_globals, 0);
assert_eq!(instances.offsets.num_defined_functions, 0);
assert_eq!(instances.offsets.num_defined_tables, 1);
assert_eq!(instances.offsets.num_defined_memories, 1);
assert_eq!(instances.offsets.num_defined_globals, 0);
// As of April 2021, the instance struct's size is well below the size of a single page,
// so it's safe to assume it's been rounded up to exactly one memory page here.
assert_eq!(instances.instance_size, region::page::size());
@@ -1395,10 +1409,7 @@ mod test {
},
shared_signatures: VMSharedSignatureIndex::default().into(),
host_state: Box::new(()),
interrupts: std::ptr::null(),
externref_activations_table: std::ptr::null_mut(),
module_info_lookup: None,
limiter: None,
store: None,
},
)
.expect("allocation should succeed"),
@@ -1420,10 +1431,7 @@ mod test {
},
shared_signatures: VMSharedSignatureIndex::default().into(),
host_state: Box::new(()),
interrupts: std::ptr::null(),
externref_activations_table: std::ptr::null_mut(),
module_info_lookup: None,
limiter: None,
store: None,
},
) {
Err(InstantiationError::Limit(3)) => {}

View File

@@ -130,7 +130,7 @@ fn reset_guard_page(addr: *mut u8, len: usize) -> Result<()> {
}
/// Represents a location of a page fault within monitored regions of memory.
enum FaultLocation<'a> {
enum FaultLocation {
/// The address location is in a WebAssembly linear memory page.
/// The fault handler will copy the pages from initialization data if necessary.
MemoryPage {
@@ -139,7 +139,7 @@ enum FaultLocation<'a> {
/// The length of the page being accessed.
len: usize,
/// The instance related to the memory page that was accessed.
instance: &'a Instance,
instance: *mut Instance,
/// The index of the memory that was accessed.
memory_index: DefinedMemoryIndex,
/// The Wasm page index to initialize if the access was not a guard page.
@@ -194,9 +194,9 @@ impl FaultLocator {
///
/// If the assumption holds true, accessing the instance data from the handler thread
/// should, in theory, be safe.
unsafe fn get_instance(&self, index: usize) -> &Instance {
unsafe fn get_instance(&self, index: usize) -> *mut Instance {
debug_assert!(index < self.max_instances);
&*((self.instances_start + (index * self.instance_size)) as *const Instance)
(self.instances_start + (index * self.instance_size)) as *mut Instance
}
unsafe fn locate(&self, addr: usize) -> Option<FaultLocation> {
@@ -208,7 +208,7 @@ impl FaultLocator {
let page_index = (addr - memory_start) / WASM_PAGE_SIZE;
let instance = self.get_instance(index / self.max_memories);
let init_page_index = instance.memories.get(memory_index).and_then(|m| {
let init_page_index = (*instance).memories.get(memory_index).and_then(|m| {
if page_index < m.size() as usize {
Some(page_index)
} else {
@@ -310,13 +310,13 @@ unsafe fn handle_page_fault(
match page_index {
Some(page_index) => {
initialize_wasm_page(&uffd, instance, page_addr, memory_index, page_index)?;
initialize_wasm_page(&uffd, &*instance, page_addr, memory_index, page_index)?;
}
None => {
log::trace!("out of bounds memory access at {:p}", addr);
// Record the guard page fault so the page protection level can be reset later
instance.memories[memory_index].record_guard_page_fault(
(*instance).memories[memory_index].record_guard_page_fault(
page_addr,
len,
reset_guard_page,
@@ -436,7 +436,6 @@ mod test {
Imports, InstanceAllocationRequest, InstanceLimits, ModuleLimits,
PoolingAllocationStrategy, VMSharedSignatureIndex,
};
use std::ptr;
use std::sync::Arc;
use wasmtime_environ::{entity::PrimaryMap, wasm::Memory, MemoryPlan, MemoryStyle, Module};
@@ -521,10 +520,7 @@ mod test {
},
shared_signatures: VMSharedSignatureIndex::default().into(),
host_state: Box::new(()),
interrupts: ptr::null(),
externref_activations_table: ptr::null_mut(),
module_info_lookup: None,
limiter: None,
store: None,
},
)
.expect("instance should allocate"),

View File

@@ -20,6 +20,8 @@
)
)]
use std::error::Error;
mod export;
mod externref;
mod imports;
@@ -40,15 +42,15 @@ pub use crate::imports::Imports;
pub use crate::instance::{
InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstanceLimits,
InstantiationError, LinkError, ModuleLimits, OnDemandInstanceAllocator,
PoolingAllocationStrategy, PoolingInstanceAllocator, ResourceLimiter, RuntimeInstance,
PoolingAllocationStrategy, PoolingInstanceAllocator, ResourceLimiter,
};
pub use crate::jit_int::GdbJitImageRegistration;
pub use crate::memory::{Memory, RuntimeLinearMemory, RuntimeMemoryCreator};
pub use crate::mmap::Mmap;
pub use crate::table::{Table, TableElement};
pub use crate::traphandlers::{
catch_traps, init_traps, raise_lib_trap, raise_user_trap, resume_panic, with_last_info,
SignalHandler, TlsRestore, Trap, TrapInfo,
catch_traps, init_traps, raise_lib_trap, raise_user_trap, resume_panic, SignalHandler,
TlsRestore, Trap,
};
pub use crate::vmcontext::{
VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport, VMGlobalDefinition,
@@ -80,3 +82,41 @@ pub fn pointer_type() -> wasmtime_environ::ir::Type {
unreachable!()
}
}
/// Dynamic runtime functionality needed by this crate throughout the execution
/// of a wasm instance.
///
/// This trait is used to store a raw pointer trait object within each
/// `VMContext`. This raw pointer trait object points back to the
/// `wasmtime::Store` internally but is type-erased so this `wasmtime_runtime`
/// crate doesn't need the entire `wasmtime` crate to build.
///
/// Note that this is an extra-unsafe trait because no heed is paid to the
/// lifetime of this store or the Send/Sync-ness of this store. All of that must
/// be respected by embedders (e.g. the `wasmtime::Store` structure). The theory
/// is that `wasmtime::Store` handles all this correctly.
pub unsafe trait Store {
/// Returns the raw pointer in memory where this store's shared
/// `VMInterrupts` structure is located.
///
/// Used to configure `VMContext` initialization and store the right pointer
/// in the `VMContext`.
fn vminterrupts(&self) -> *mut VMInterrupts;
/// Returns the externref management structures necessary for this store.
///
/// The first element returned is the table in which externrefs are stored
/// throughout wasm execution, and the second element is how to look up
/// module information for gc requests.
fn externref_activations_table(
&mut self,
) -> (&mut VMExternRefActivationsTable, &dyn ModuleInfoLookup);
/// Returns a reference to the store's limiter for limiting resources, if any.
fn limiter(&mut self) -> Option<&mut dyn ResourceLimiter>;
/// Callback invoked whenever a wasm instance runs out of fuel. If an error
/// is returned, it is raised as a trap. Otherwise wasm execution will
/// continue as normal.
fn out_of_gas(&mut self) -> Result<(), Box<dyn Error + Send + Sync>>;
}
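As a rough illustration of how an embedder might satisfy this trait (a minimal sketch; `MyStore` and `MyModuleLookup` are hypothetical types invented for the example, and the real implementation lives in `wasmtime::Store`):

// Hypothetical embedder-side store; the field layout is an assumption made for
// this sketch only.
struct MyStore {
    interrupts: *mut VMInterrupts,
    activations: VMExternRefActivationsTable,
    modules: MyModuleLookup, // assumed to implement `ModuleInfoLookup`
    limiter: Option<Box<dyn ResourceLimiter>>,
}

unsafe impl Store for MyStore {
    fn vminterrupts(&self) -> *mut VMInterrupts {
        self.interrupts
    }
    fn externref_activations_table(
        &mut self,
    ) -> (&mut VMExternRefActivationsTable, &dyn ModuleInfoLookup) {
        // Disjoint field borrows keep this a single `&mut self` method.
        (&mut self.activations, &self.modules)
    }
    fn limiter(&mut self) -> Option<&mut dyn ResourceLimiter> {
        self.limiter.as_deref_mut()
    }
    fn out_of_gas(&mut self) -> Result<(), Box<dyn Error + Send + Sync>> {
        Err("all fuel consumed".into())
    }
}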

View File

@@ -191,7 +191,7 @@ pub unsafe extern "C" fn wasmtime_memory32_grow(
delta: u32,
memory_index: u32,
) -> u32 {
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance_mut();
let memory_index = DefinedMemoryIndex::from_u32(memory_index);
instance
@@ -205,7 +205,7 @@ pub unsafe extern "C" fn wasmtime_imported_memory32_grow(
delta: u32,
memory_index: u32,
) -> u32 {
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance_mut();
let memory_index = MemoryIndex::from_u32(memory_index);
instance
@@ -215,7 +215,7 @@ pub unsafe extern "C" fn wasmtime_imported_memory32_grow(
/// Implementation of memory.size for locally-defined 32-bit memories.
pub unsafe extern "C" fn wasmtime_memory32_size(vmctx: *mut VMContext, memory_index: u32) -> u32 {
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance();
let memory_index = DefinedMemoryIndex::from_u32(memory_index);
instance.memory_size(memory_index)
@@ -226,7 +226,7 @@ pub unsafe extern "C" fn wasmtime_imported_memory32_size(
vmctx: *mut VMContext,
memory_index: u32,
) -> u32 {
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance();
let memory_index = MemoryIndex::from_u32(memory_index);
instance.imported_memory_size(memory_index)
@@ -241,7 +241,7 @@ pub unsafe extern "C" fn wasmtime_table_grow(
// or is a `VMExternRef` until we look at the table type.
init_value: *mut u8,
) -> u32 {
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance_mut();
let table_index = TableIndex::from_u32(table_index);
match instance.table_element_type(table_index) {
TableElementType::Func => {
@@ -277,9 +277,9 @@ pub unsafe extern "C" fn wasmtime_table_fill(
len: u32,
) {
let result = {
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance_mut();
let table_index = TableIndex::from_u32(table_index);
let table = instance.get_table(table_index);
let table = &mut *instance.get_table(table_index);
match table.element_type() {
TableElementType::Func => {
let val = val as *mut VMCallerCheckedAnyfunc;
@@ -313,7 +313,7 @@ pub unsafe extern "C" fn wasmtime_table_copy(
let result = {
let dst_table_index = TableIndex::from_u32(dst_table_index);
let src_table_index = TableIndex::from_u32(src_table_index);
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance_mut();
let dst_table = instance.get_table(dst_table_index);
let src_table = instance.get_table(src_table_index);
Table::copy(dst_table, src_table, dst, src, len)
@@ -335,7 +335,7 @@ pub unsafe extern "C" fn wasmtime_table_init(
let result = {
let table_index = TableIndex::from_u32(table_index);
let elem_index = ElemIndex::from_u32(elem_index);
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance_mut();
instance.table_init(table_index, elem_index, dst, src, len)
};
if let Err(trap) = result {
@@ -346,7 +346,7 @@ pub unsafe extern "C" fn wasmtime_table_init(
/// Implementation of `elem.drop`.
pub unsafe extern "C" fn wasmtime_elem_drop(vmctx: *mut VMContext, elem_index: u32) {
let elem_index = ElemIndex::from_u32(elem_index);
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance_mut();
instance.elem_drop(elem_index);
}
@@ -362,7 +362,7 @@ pub unsafe extern "C" fn wasmtime_memory_copy(
let result = {
let src_index = MemoryIndex::from_u32(src_index);
let dst_index = MemoryIndex::from_u32(dst_index);
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance_mut();
instance.memory_copy(dst_index, dst, src_index, src, len)
};
if let Err(trap) = result {
@@ -380,7 +380,7 @@ pub unsafe extern "C" fn wasmtime_memory_fill(
) {
let result = {
let memory_index = DefinedMemoryIndex::from_u32(memory_index);
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance();
instance.defined_memory_fill(memory_index, dst, val, len)
};
if let Err(trap) = result {
@@ -398,7 +398,7 @@ pub unsafe extern "C" fn wasmtime_imported_memory_fill(
) {
let result = {
let memory_index = MemoryIndex::from_u32(memory_index);
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance_mut();
instance.imported_memory_fill(memory_index, dst, val, len)
};
if let Err(trap) = result {
@@ -418,7 +418,7 @@ pub unsafe extern "C" fn wasmtime_memory_init(
let result = {
let memory_index = MemoryIndex::from_u32(memory_index);
let data_index = DataIndex::from_u32(data_index);
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance_mut();
instance.memory_init(memory_index, data_index, dst, src, len)
};
if let Err(trap) = result {
@@ -429,7 +429,7 @@ pub unsafe extern "C" fn wasmtime_memory_init(
/// Implementation of `data.drop`.
pub unsafe extern "C" fn wasmtime_data_drop(vmctx: *mut VMContext, data_index: u32) {
let data_index = DataIndex::from_u32(data_index);
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance_mut();
instance.data_drop(data_index)
}
@@ -447,9 +447,8 @@ pub unsafe extern "C" fn wasmtime_activations_table_insert_with_gc(
externref: *mut u8,
) {
let externref = VMExternRef::clone_from_raw(externref);
let instance = (&mut *vmctx).instance();
let activations_table = &**instance.externref_activations_table();
let module_info_lookup = &**instance.module_info_lookup();
let instance = (*vmctx).instance();
let (activations_table, module_info_lookup) = (*instance.store()).externref_activations_table();
activations_table.insert_with_gc(externref, module_info_lookup);
}
@@ -459,14 +458,14 @@ pub unsafe extern "C" fn wasmtime_externref_global_get(
index: u32,
) -> *mut u8 {
let index = GlobalIndex::from_u32(index);
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance();
let global = instance.defined_or_imported_global_ptr(index);
match (*global).as_externref().clone() {
None => ptr::null_mut(),
Some(externref) => {
let raw = externref.as_raw();
let activations_table = &**instance.externref_activations_table();
let module_info_lookup = &**instance.module_info_lookup();
let (activations_table, module_info_lookup) =
(*instance.store()).externref_activations_table();
activations_table.insert_with_gc(externref, module_info_lookup);
raw
}
@@ -486,7 +485,7 @@ pub unsafe extern "C" fn wasmtime_externref_global_set(
};
let index = GlobalIndex::from_u32(index);
let instance = (&mut *vmctx).instance();
let instance = (*vmctx).instance();
let global = instance.defined_or_imported_global_ptr(index);
// Swap the new `externref` value into the global before we drop the old
@@ -583,6 +582,9 @@ pub unsafe extern "C" fn wasmtime_imported_memory_atomic_wait64(
}
/// Hook for when an instance runs out of fuel.
pub unsafe extern "C" fn wasmtime_out_of_gas(_vmctx: *mut VMContext) {
crate::traphandlers::out_of_gas()
pub unsafe extern "C" fn wasmtime_out_of_gas(vmctx: *mut VMContext) {
match (*(*vmctx).instance().store()).out_of_gas() {
Ok(()) => {}
Err(err) => crate::traphandlers::raise_user_trap(err),
}
}

View File

@@ -7,11 +7,7 @@ use crate::vmcontext::VMMemoryDefinition;
use crate::ResourceLimiter;
use anyhow::{bail, Result};
use more_asserts::{assert_ge, assert_le};
use std::cell::{Cell, RefCell};
use std::cmp::min;
use std::convert::TryFrom;
use std::ptr;
use std::rc::Rc;
use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};
/// A memory allocator
@@ -31,7 +27,7 @@ impl RuntimeMemoryCreator for DefaultMemoryCreator {
}
/// A linear memory
pub trait RuntimeLinearMemory {
pub trait RuntimeLinearMemory: Send + Sync {
/// Returns the number of allocated wasm pages.
fn size(&self) -> u32;
@@ -43,7 +39,7 @@ pub trait RuntimeLinearMemory {
///
/// Returns `None` if memory can't be grown by the specified amount
/// of wasm pages.
fn grow(&self, delta: u32) -> Option<u32>;
fn grow(&mut self, delta: u32) -> Option<u32>;
/// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
fn vmmemory(&self) -> VMMemoryDefinition;
@@ -53,7 +49,7 @@ pub trait RuntimeLinearMemory {
#[derive(Debug)]
pub struct MmapMemory {
// The underlying allocation.
mmap: RefCell<WasmMmap>,
mmap: WasmMmap,
// The optional maximum size in wasm pages of this linear memory.
maximum: Option<u32>,
@@ -108,7 +104,7 @@ impl MmapMemory {
impl RuntimeLinearMemory for MmapMemory {
/// Returns the number of allocated wasm pages.
fn size(&self) -> u32 {
self.mmap.borrow().size
self.mmap.size
}
/// Returns the maximum number of pages the memory can grow to.
@@ -121,19 +117,18 @@ impl RuntimeLinearMemory for MmapMemory {
///
/// Returns `None` if memory can't be grown by the specified amount
/// of wasm pages.
fn grow(&self, delta: u32) -> Option<u32> {
fn grow(&mut self, delta: u32) -> Option<u32> {
// Optimization of memory.grow 0 calls.
let mut mmap = self.mmap.borrow_mut();
if delta == 0 {
return Some(mmap.size);
return Some(self.mmap.size);
}
let new_pages = match mmap.size.checked_add(delta) {
let new_pages = match self.mmap.size.checked_add(delta) {
Some(new_pages) => new_pages,
// Linear memory size overflow.
None => return None,
};
let prev_pages = mmap.size;
let prev_pages = self.mmap.size;
if let Some(maximum) = self.maximum {
if new_pages > maximum {
@@ -145,7 +140,7 @@ impl RuntimeLinearMemory for MmapMemory {
// Wasm linear memories are never allowed to grow beyond what is
// indexable. If the memory has no maximum, enforce the greatest
// limit here.
if new_pages >= WASM_MAX_PAGES {
if new_pages > WASM_MAX_PAGES {
// Linear memory size would exceed the index range.
return None;
}
@@ -154,7 +149,7 @@ impl RuntimeLinearMemory for MmapMemory {
let prev_bytes = usize::try_from(prev_pages).unwrap() * WASM_PAGE_SIZE as usize;
let new_bytes = usize::try_from(new_pages).unwrap() * WASM_PAGE_SIZE as usize;
if new_bytes > mmap.alloc.len() - self.offset_guard_size {
if new_bytes > self.mmap.alloc.len() - self.offset_guard_size {
// If the new size is within the declared maximum, but needs more memory than we
// have on hand, it's a dynamic heap and it can move.
let guard_bytes = self.offset_guard_size;
@@ -162,48 +157,59 @@ impl RuntimeLinearMemory for MmapMemory {
let mut new_mmap = Mmap::accessible_reserved(new_bytes, request_bytes).ok()?;
let copy_len = mmap.alloc.len() - self.offset_guard_size;
new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&mmap.alloc.as_slice()[..copy_len]);
let copy_len = self.mmap.alloc.len() - self.offset_guard_size;
new_mmap.as_mut_slice()[..copy_len]
.copy_from_slice(&self.mmap.alloc.as_slice()[..copy_len]);
mmap.alloc = new_mmap;
self.mmap.alloc = new_mmap;
} else if delta_bytes > 0 {
// Make the newly allocated pages accessible.
mmap.alloc.make_accessible(prev_bytes, delta_bytes).ok()?;
self.mmap
.alloc
.make_accessible(prev_bytes, delta_bytes)
.ok()?;
}
mmap.size = new_pages;
self.mmap.size = new_pages;
Some(prev_pages)
}
/// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
fn vmmemory(&self) -> VMMemoryDefinition {
let mmap = self.mmap.borrow();
VMMemoryDefinition {
base: mmap.alloc.as_mut_ptr(),
current_length: mmap.size as usize * WASM_PAGE_SIZE as usize,
base: self.mmap.alloc.as_mut_ptr(),
current_length: self.mmap.size as usize * WASM_PAGE_SIZE as usize,
}
}
}
enum MemoryStorage {
/// Representation of a runtime wasm linear memory.
pub enum Memory {
/// A "static" memory where the lifetime of the backing memory is managed
/// elsewhere. Currently used with the pooling allocator.
Static {
base: *mut u8,
size: Cell<u32>,
maximum: u32,
/// The host memory backing this wasm linear memory. The length of this
/// slice is the maximum size, in bytes, to which the memory can grow.
base: &'static mut [u8],
/// The current size, in wasm pages, of this memory.
size: u32,
/// A callback which makes portions of `base` accessible when the memory
/// is grown. Otherwise accesses to `base` are expected to fault.
make_accessible: fn(*mut u8, usize) -> Result<()>,
/// Stores the pages in the linear memory that have faulted as guard pages when using the `uffd` feature.
/// These pages need their protection level reset before the memory can grow.
#[cfg(all(feature = "uffd", target_os = "linux"))]
guard_page_faults: RefCell<Vec<(*mut u8, usize, fn(*mut u8, usize) -> Result<()>)>>,
guard_page_faults: Vec<(usize, usize, fn(*mut u8, usize) -> Result<()>)>,
},
Dynamic(Box<dyn RuntimeLinearMemory>),
}
/// Represents an instantiation of a WebAssembly memory.
pub struct Memory {
storage: MemoryStorage,
limiter: Option<Rc<dyn ResourceLimiter>>,
/// A "dynamic" memory whose data is managed at runtime and lifetime is tied
/// to this instance.
Dynamic(Box<dyn RuntimeLinearMemory>),
}
impl Memory {
@@ -211,40 +217,45 @@ impl Memory {
pub fn new_dynamic(
plan: &MemoryPlan,
creator: &dyn RuntimeMemoryCreator,
limiter: Option<&Rc<dyn ResourceLimiter>>,
limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<Self> {
Self::new(
plan,
MemoryStorage::Dynamic(creator.new_memory(plan)?),
limiter,
)
Self::limit_new(plan, limiter)?;
Ok(Memory::Dynamic(creator.new_memory(plan)?))
}
/// Create a new static (immovable) memory instance for the specified plan.
pub fn new_static(
plan: &MemoryPlan,
base: *mut u8,
maximum: u32,
base: &'static mut [u8],
make_accessible: fn(*mut u8, usize) -> Result<()>,
limiter: Option<&Rc<dyn ResourceLimiter>>,
limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<Self> {
let storage = MemoryStorage::Static {
base,
size: Cell::new(plan.memory.minimum),
maximum: min(plan.memory.maximum.unwrap_or(maximum), maximum),
make_accessible,
#[cfg(all(feature = "uffd", target_os = "linux"))]
guard_page_faults: RefCell::new(Vec::new()),
Self::limit_new(plan, limiter)?;
let base = match plan.memory.maximum {
Some(max) if (max as usize) < base.len() / (WASM_PAGE_SIZE as usize) => {
&mut base[..(max * WASM_PAGE_SIZE) as usize]
}
_ => base,
};
Self::new(plan, storage, limiter)
if plan.memory.minimum > 0 {
make_accessible(
base.as_mut_ptr(),
plan.memory.minimum as usize * WASM_PAGE_SIZE as usize,
)?;
}
Ok(Memory::Static {
base,
size: plan.memory.minimum,
make_accessible,
#[cfg(all(feature = "uffd", target_os = "linux"))]
guard_page_faults: Vec::new(),
})
}
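A worked example of the clamp above with made-up numbers (illustrative only); trimming the static slice to the declared maximum is what later lets `maximum()` be derived purely from `base.len()`:

#[test]
fn static_slot_clamped_to_declared_maximum() {
    // A 10-page static slot backing a memory declared with a 4-page maximum
    // exposes only 4 pages' worth of bytes.
    const PAGE: usize = 65_536; // WASM_PAGE_SIZE as usize
    let slot_bytes = 10 * PAGE;
    let declared_max_pages = 4;
    let usable_bytes = if declared_max_pages < slot_bytes / PAGE {
        declared_max_pages * PAGE
    } else {
        slot_bytes
    };
    assert_eq!(usable_bytes / PAGE, 4); // `maximum()` would report 4 pages
}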
fn new(
plan: &MemoryPlan,
storage: MemoryStorage,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<Self> {
fn limit_new(plan: &MemoryPlan, limiter: Option<&mut dyn ResourceLimiter>) -> Result<()> {
if let Some(limiter) = limiter {
if !limiter.memory_growing(0, plan.memory.minimum, plan.memory.maximum) {
bail!(
@@ -253,32 +264,14 @@ impl Memory {
);
}
}
if let MemoryStorage::Static {
base,
make_accessible,
..
} = &storage
{
if plan.memory.minimum > 0 {
make_accessible(
*base,
plan.memory.minimum as usize * WASM_PAGE_SIZE as usize,
)?;
}
}
Ok(Self {
storage,
limiter: limiter.cloned(),
})
Ok(())
}
/// Returns the number of allocated wasm pages.
pub fn size(&self) -> u32 {
match &self.storage {
MemoryStorage::Static { size, .. } => size.get(),
MemoryStorage::Dynamic(mem) => mem.size(),
match self {
Memory::Static { size, .. } => *size,
Memory::Dynamic(mem) => mem.size(),
}
}
@@ -289,15 +282,15 @@ impl Memory {
/// The runtime maximum may not be equal to the maximum from the linear memory's
/// Wasm type when it is being constrained by an instance allocator.
pub fn maximum(&self) -> Option<u32> {
match &self.storage {
MemoryStorage::Static { maximum, .. } => Some(*maximum),
MemoryStorage::Dynamic(mem) => mem.maximum(),
match self {
Memory::Static { base, .. } => Some((base.len() / (WASM_PAGE_SIZE as usize)) as u32),
Memory::Dynamic(mem) => mem.maximum(),
}
}
/// Returns whether or not the underlying storage of the memory is "static".
pub(crate) fn is_static(&self) -> bool {
if let MemoryStorage::Static { .. } = &self.storage {
if let Memory::Static { .. } = self {
true
} else {
false
@@ -317,57 +310,65 @@ impl Memory {
///
/// Generally, prefer using `InstanceHandle::memory_grow`, which encapsulates
/// this unsafety.
pub unsafe fn grow(&self, delta: u32) -> Option<u32> {
pub unsafe fn grow(
&mut self,
delta: u32,
limiter: Option<&mut dyn ResourceLimiter>,
) -> Option<u32> {
let old_size = self.size();
if delta == 0 {
return Some(old_size);
}
let new_size = old_size.checked_add(delta)?;
let maximum = self.maximum();
if let Some(limiter) = &self.limiter {
if !limiter.memory_growing(old_size, new_size, self.maximum()) {
if let Some(limiter) = limiter {
if !limiter.memory_growing(old_size, new_size, maximum) {
return None;
}
}
match &self.storage {
MemoryStorage::Static {
#[cfg(all(feature = "uffd", target_os = "linux"))]
{
if self.is_static() {
// Reset any faulted guard pages before growing the memory.
self.reset_guard_pages().ok()?;
}
}
match self {
Memory::Static {
base,
size,
maximum,
make_accessible,
..
} => {
// Reset any faulted guard pages before growing the memory.
#[cfg(all(feature = "uffd", target_os = "linux"))]
self.reset_guard_pages().ok()?;
if new_size > *maximum || new_size >= WASM_MAX_PAGES {
if new_size > maximum.unwrap_or(WASM_MAX_PAGES) {
return None;
}
let start = usize::try_from(old_size).unwrap() * WASM_PAGE_SIZE as usize;
let len = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
make_accessible(base.add(start), len).ok()?;
make_accessible(base.as_mut_ptr().add(start), len).ok()?;
size.set(new_size);
*size = new_size;
Some(old_size)
}
MemoryStorage::Dynamic(mem) => mem.grow(delta),
Memory::Dynamic(mem) => mem.grow(delta),
}
}
/// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
pub fn vmmemory(&self) -> VMMemoryDefinition {
match &self.storage {
MemoryStorage::Static { base, size, .. } => VMMemoryDefinition {
base: *base,
current_length: size.get() as usize * WASM_PAGE_SIZE as usize,
match self {
Memory::Static { base, size, .. } => VMMemoryDefinition {
base: base.as_ptr() as *mut _,
current_length: *size as usize * WASM_PAGE_SIZE as usize,
},
MemoryStorage::Dynamic(mem) => mem.vmmemory(),
Memory::Dynamic(mem) => mem.vmmemory(),
}
}
@@ -378,20 +379,18 @@ impl Memory {
/// This function will panic if called on a dynamic memory.
#[cfg(all(feature = "uffd", target_os = "linux"))]
pub(crate) fn record_guard_page_fault(
&self,
&mut self,
page_addr: *mut u8,
size: usize,
reset: fn(*mut u8, usize) -> Result<()>,
) {
match &self.storage {
MemoryStorage::Static {
match self {
Memory::Static {
guard_page_faults, ..
} => {
guard_page_faults
.borrow_mut()
.push((page_addr, size, reset));
guard_page_faults.push((page_addr as usize, size, reset));
}
MemoryStorage::Dynamic(_) => {
Memory::Dynamic(_) => {
unreachable!("dynamic memories should not have guard page faults")
}
}
@@ -403,17 +402,16 @@ impl Memory {
///
/// This function will panic if called on a dynamic memory.
#[cfg(all(feature = "uffd", target_os = "linux"))]
pub(crate) fn reset_guard_pages(&self) -> Result<()> {
match &self.storage {
MemoryStorage::Static {
pub(crate) fn reset_guard_pages(&mut self) -> Result<()> {
match self {
Memory::Static {
guard_page_faults, ..
} => {
let mut faults = guard_page_faults.borrow_mut();
for (addr, len, reset) in faults.drain(..) {
reset(addr, len)?;
for (addr, len, reset) in guard_page_faults.drain(..) {
reset(addr as *mut u8, len)?;
}
}
MemoryStorage::Dynamic(_) => {
Memory::Dynamic(_) => {
unreachable!("dynamic memories should not have guard page faults")
}
}
@@ -425,20 +423,12 @@ impl Memory {
// The default memory representation is an empty memory that cannot grow.
impl Default for Memory {
fn default() -> Self {
fn make_accessible(_ptr: *mut u8, _len: usize) -> Result<()> {
unreachable!()
}
Self {
storage: MemoryStorage::Static {
base: ptr::null_mut(),
size: Cell::new(0),
maximum: 0,
make_accessible,
#[cfg(all(feature = "uffd", target_os = "linux"))]
guard_page_faults: RefCell::new(Vec::new()),
},
limiter: None,
Memory::Static {
base: &mut [],
size: 0,
make_accessible: |_, _| unreachable!(),
#[cfg(all(feature = "uffd", target_os = "linux"))]
guard_page_faults: Vec::new(),
}
}
}

View File

@@ -5,12 +5,9 @@
use crate::vmcontext::{VMCallerCheckedAnyfunc, VMTableDefinition};
use crate::{ResourceLimiter, Trap, VMExternRef};
use anyhow::{bail, Result};
use std::cell::{Cell, RefCell};
use std::cmp::min;
use std::convert::{TryFrom, TryInto};
use std::ops::Range;
use std::ptr;
use std::rc::Rc;
use wasmtime_environ::wasm::TableElementType;
use wasmtime_environ::{ir, TablePlan};
@@ -25,6 +22,11 @@ pub enum TableElement {
ExternRef(Option<VMExternRef>),
}
// The usage of `*mut VMCallerCheckedAnyfunc` is safe w.r.t. thread safety; this
// just relies on the thread-safety of `VMExternRef` itself.
unsafe impl Send for TableElement where VMExternRef: Send {}
unsafe impl Sync for TableElement where VMExternRef: Sync {}
impl TableElement {
/// Consumes the given raw pointer into a table element.
///
@@ -33,13 +35,13 @@ impl TableElement {
/// This is unsafe as it will *not* clone any externref, leaving the reference count unchanged.
///
/// This should only be used if the raw pointer is no longer in use.
unsafe fn from_raw(ty: TableElementType, ptr: *mut u8) -> Self {
unsafe fn from_raw(ty: TableElementType, ptr: usize) -> Self {
match ty {
TableElementType::Func => Self::FuncRef(ptr as _),
TableElementType::Val(_) => Self::ExternRef(if ptr.is_null() {
TableElementType::Val(_) => Self::ExternRef(if ptr == 0 {
None
} else {
Some(VMExternRef::from_raw(ptr))
Some(VMExternRef::from_raw(ptr as *mut u8))
}),
}
}
@@ -49,13 +51,13 @@ impl TableElement {
/// # Safety
///
/// This is unsafe as it will clone any externref, incrementing the reference count.
unsafe fn clone_from_raw(ty: TableElementType, ptr: *mut u8) -> Self {
unsafe fn clone_from_raw(ty: TableElementType, ptr: usize) -> Self {
match ty {
TableElementType::Func => Self::FuncRef(ptr as _),
TableElementType::Val(_) => Self::ExternRef(if ptr.is_null() {
TableElementType::Val(_) => Self::ExternRef(if ptr == 0 {
None
} else {
Some(VMExternRef::clone_from_raw(ptr))
Some(VMExternRef::clone_from_raw(ptr as *mut u8))
}),
}
}
@@ -68,10 +70,10 @@ impl TableElement {
/// the reference count.
///
/// Use `from_raw` to properly drop any table elements stored as raw pointers.
unsafe fn into_raw(self) -> *mut u8 {
unsafe fn into_raw(self) -> usize {
match self {
Self::FuncRef(e) => e as _,
Self::ExternRef(e) => e.map_or(ptr::null_mut(), |e| e.into_raw()),
Self::ExternRef(e) => e.map_or(0, |e| e.into_raw() as usize),
}
}
}
@@ -94,71 +96,68 @@ impl From<VMExternRef> for TableElement {
}
}
enum TableStorage {
/// Represents an instance's table.
pub enum Table {
/// A "static" table where storage space is managed externally, currently
/// used with the pooling allocator.
Static {
data: *mut *mut u8,
size: Cell<u32>,
/// Where data for this table is stored. The length of this list is the
/// maximum size of the table.
data: &'static mut [usize],
/// The current size of the table.
size: u32,
/// The type of this table.
ty: TableElementType,
maximum: u32,
},
/// A "dynamic" table where table storage space is dynamically allocated via
/// `malloc` (aka Rust's `Vec`).
Dynamic {
elements: RefCell<Vec<*mut u8>>,
/// Dynamically managed storage space for this table. The length of this
/// vector is the current size of the table.
elements: Vec<usize>,
/// The type of this table.
ty: TableElementType,
/// Maximum size that `elements` can grow to.
maximum: Option<u32>,
},
}
/// Represents an instance's table.
pub struct Table {
storage: TableStorage,
limiter: Option<Rc<dyn ResourceLimiter>>,
}
impl Table {
/// Create a new dynamic (movable) table instance for the specified table plan.
pub fn new_dynamic(
plan: &TablePlan,
limiter: Option<&Rc<dyn ResourceLimiter>>,
limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<Self> {
let elements = RefCell::new(vec![ptr::null_mut(); plan.table.minimum as usize]);
Self::limit_new(plan, limiter)?;
let elements = vec![0; plan.table.minimum as usize];
let ty = plan.table.ty.clone();
let maximum = plan.table.maximum;
let storage = TableStorage::Dynamic {
Ok(Table::Dynamic {
elements,
ty,
maximum,
};
Self::new(plan, storage, limiter)
})
}
/// Create a new static (immovable) table instance for the specified table plan.
pub fn new_static(
plan: &TablePlan,
data: *mut *mut u8,
maximum: u32,
limiter: Option<&Rc<dyn ResourceLimiter>>,
data: &'static mut [usize],
limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<Self> {
let size = Cell::new(plan.table.minimum);
Self::limit_new(plan, limiter)?;
let size = plan.table.minimum;
let ty = plan.table.ty.clone();
let maximum = min(plan.table.maximum.unwrap_or(maximum), maximum);
let storage = TableStorage::Static {
data,
size,
ty,
maximum,
let data = match plan.table.maximum {
Some(max) if (max as usize) < data.len() => &mut data[..max as usize],
_ => data,
};
Self::new(plan, storage, limiter)
Ok(Table::Static { data, size, ty })
}
fn new(
plan: &TablePlan,
storage: TableStorage,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<Self> {
fn limit_new(plan: &TablePlan, limiter: Option<&mut dyn ResourceLimiter>) -> Result<()> {
if let Some(limiter) = limiter {
if !limiter.table_growing(0, plan.table.minimum, plan.table.maximum) {
bail!(
@@ -167,24 +166,20 @@ impl Table {
);
}
}
Ok(Self {
storage,
limiter: limiter.cloned(),
})
Ok(())
}
/// Returns the type of the elements in this table.
pub fn element_type(&self) -> TableElementType {
match &self.storage {
TableStorage::Static { ty, .. } => *ty,
TableStorage::Dynamic { ty, .. } => *ty,
match self {
Table::Static { ty, .. } => *ty,
Table::Dynamic { ty, .. } => *ty,
}
}
/// Returns whether or not the underlying storage of the table is "static".
pub(crate) fn is_static(&self) -> bool {
if let TableStorage::Static { .. } = &self.storage {
if let Table::Static { .. } = self {
true
} else {
false
@@ -193,9 +188,9 @@ impl Table {
/// Returns the number of allocated elements.
pub fn size(&self) -> u32 {
match &self.storage {
TableStorage::Static { size, .. } => size.get(),
TableStorage::Dynamic { elements, .. } => elements.borrow().len().try_into().unwrap(),
match self {
Table::Static { size, .. } => *size,
Table::Dynamic { elements, .. } => elements.len().try_into().unwrap(),
}
}
@@ -206,9 +201,9 @@ impl Table {
/// The runtime maximum may not be equal to the maximum from the table's Wasm type
/// when it is being constrained by an instance allocator.
pub fn maximum(&self) -> Option<u32> {
match &self.storage {
TableStorage::Static { maximum, .. } => Some(*maximum),
TableStorage::Dynamic { maximum, .. } => maximum.clone(),
match self {
Table::Static { data, .. } => Some(data.len() as u32),
Table::Dynamic { maximum, .. } => maximum.clone(),
}
}
@@ -216,32 +211,31 @@ impl Table {
///
/// Returns a trap error on out-of-bounds accesses.
pub fn init_funcs(
&self,
&mut self,
dst: u32,
items: impl ExactSizeIterator<Item = *mut VMCallerCheckedAnyfunc>,
) -> Result<(), Trap> {
assert!(self.element_type() == TableElementType::Func);
self.with_elements_mut(|elements| {
let elements = match elements
.get_mut(usize::try_from(dst).unwrap()..)
.and_then(|s| s.get_mut(..items.len()))
{
Some(elements) => elements,
None => return Err(Trap::wasm(ir::TrapCode::TableOutOfBounds)),
};
let elements = match self
.elements_mut()
.get_mut(usize::try_from(dst).unwrap()..)
.and_then(|s| s.get_mut(..items.len()))
{
Some(elements) => elements,
None => return Err(Trap::wasm(ir::TrapCode::TableOutOfBounds)),
};
for (item, slot) in items.zip(elements) {
*slot = item as *mut u8;
}
Ok(())
})
for (item, slot) in items.zip(elements) {
*slot = item as usize;
}
Ok(())
}
/// Fill `table[dst..dst + len]` with `val`.
///
/// Returns a trap error on out-of-bounds accesses.
pub fn fill(&self, dst: u32, val: TableElement, len: u32) -> Result<(), Trap> {
pub fn fill(&mut self, dst: u32, val: TableElement, len: u32) -> Result<(), Trap> {
let start = dst as usize;
let end = start
.checked_add(len as usize)
@@ -253,19 +247,16 @@ impl Table {
debug_assert!(self.type_matches(&val));
self.with_elements_mut(|elements| {
if let Some((last, elements)) = elements[start..end].split_last_mut() {
let ty = self.element_type();
for e in elements {
Self::set_raw(ty, e, val.clone());
}
Self::set_raw(self.element_type(), last, val);
let ty = self.element_type();
if let Some((last, elements)) = self.elements_mut()[start..end].split_last_mut() {
for e in elements {
Self::set_raw(ty, e, val.clone());
}
Ok(())
})
Self::set_raw(ty, last, val);
}
Ok(())
}
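The `split_last_mut` pattern used above, shown in isolation (a generic sketch, not wasmtime code): every slot but the last receives a clone, and the original value moves into the final slot, saving one clone per fill.

// Generic illustration of the fill strategy.
fn fill_with<T: Clone>(slots: &mut [T], val: T) {
    if let Some((last, rest)) = slots.split_last_mut() {
        for slot in rest {
            *slot = val.clone();
        }
        *last = val;
    }
}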
/// Grow table by the specified amount of elements.
@@ -284,11 +275,16 @@ impl Table {
///
/// Generally, prefer using `InstanceHandle::table_grow`, which encapsulates
/// this unsafety.
pub unsafe fn grow(&self, delta: u32, init_value: TableElement) -> Option<u32> {
pub unsafe fn grow(
&mut self,
delta: u32,
init_value: TableElement,
limiter: Option<&mut dyn ResourceLimiter>,
) -> Option<u32> {
let old_size = self.size();
let new_size = old_size.checked_add(delta)?;
if let Some(limiter) = &self.limiter {
if let Some(limiter) = limiter {
if !limiter.table_growing(old_size, new_size, self.maximum()) {
return None;
}
@@ -303,13 +299,15 @@ impl Table {
debug_assert!(self.type_matches(&init_value));
// First resize the storage and then fill with the init value
match &self.storage {
TableStorage::Static { size, .. } => {
size.set(new_size);
match self {
Table::Static { size, data, .. } => {
debug_assert!(data[*size as usize..new_size as usize]
.iter()
.all(|x| *x == 0));
*size = new_size;
}
TableStorage::Dynamic { elements, .. } => {
let mut elements = elements.borrow_mut();
elements.resize(new_size as usize, ptr::null_mut());
Table::Dynamic { elements, .. } => {
elements.resize(new_size as usize, 0);
}
}
@@ -323,11 +321,9 @@ impl Table {
///
/// Returns `None` if the index is out of bounds.
pub fn get(&self, index: u32) -> Option<TableElement> {
self.with_elements(|elements| {
elements
.get(index as usize)
.map(|p| unsafe { TableElement::clone_from_raw(self.element_type(), *p) })
})
self.elements()
.get(index as usize)
.map(|p| unsafe { TableElement::clone_from_raw(self.element_type(), *p) })
}
/// Set reference to the specified element.
@@ -336,16 +332,15 @@ impl Table {
///
/// Returns an error if `index` is out of bounds or if this table type does
/// not match the element type.
pub fn set(&self, index: u32, elem: TableElement) -> Result<(), ()> {
pub fn set(&mut self, index: u32, elem: TableElement) -> Result<(), ()> {
if !self.type_matches(&elem) {
return Err(());
}
self.with_elements_mut(|elements| {
let e = elements.get_mut(index as usize).ok_or(())?;
Self::set_raw(self.element_type(), e, elem);
Ok(())
})
let ty = self.element_type();
let e = self.elements_mut().get_mut(index as usize).ok_or(())?;
Self::set_raw(ty, e, elem);
Ok(())
}
/// Copy `len` elements from `src_table[src_index..]` into `dst_table[dst_index..]`.
@@ -354,9 +349,9 @@ impl Table {
///
/// Returns an error if the range is out of bounds of either the source or
/// destination tables.
pub fn copy(
dst_table: &Self,
src_table: &Self,
pub unsafe fn copy(
dst_table: *mut Self,
src_table: *mut Self,
dst_index: u32,
src_index: u32,
len: u32,
@@ -365,16 +360,16 @@ impl Table {
if src_index
.checked_add(len)
.map_or(true, |n| n > src_table.size())
.map_or(true, |n| n > (*src_table).size())
|| dst_index
.checked_add(len)
.map_or(true, |m| m > dst_table.size())
.map_or(true, |m| m > (*dst_table).size())
{
return Err(Trap::wasm(ir::TrapCode::TableOutOfBounds));
}
debug_assert!(
dst_table.element_type() == src_table.element_type(),
(*dst_table).element_type() == (*src_table).element_type(),
"table element type mismatch"
);
@@ -383,9 +378,9 @@ impl Table {
// Check if the tables are the same as we cannot mutably borrow and also borrow the same `RefCell`
if ptr::eq(dst_table, src_table) {
Self::copy_elements_within(dst_table, dst_range, src_range);
(*dst_table).copy_elements_within(dst_range, src_range);
} else {
Self::copy_elements(dst_table, src_table, dst_range, src_range);
Self::copy_elements(&mut *dst_table, &*src_table, dst_range, src_range);
}
Ok(())
@@ -393,18 +388,15 @@ impl Table {
/// Return a `VMTableDefinition` for exposing the table to compiled wasm code.
pub fn vmtable(&self) -> VMTableDefinition {
match &self.storage {
TableStorage::Static { data, size, .. } => VMTableDefinition {
base: *data as _,
current_elements: size.get(),
match self {
Table::Static { data, size, .. } => VMTableDefinition {
base: data.as_ptr() as *mut _,
current_elements: *size,
},
Table::Dynamic { elements, .. } => VMTableDefinition {
base: elements.as_ptr() as _,
current_elements: elements.len().try_into().unwrap(),
},
TableStorage::Dynamic { elements, .. } => {
let elements = elements.borrow();
VMTableDefinition {
base: elements.as_ptr() as _,
current_elements: elements.len().try_into().unwrap(),
}
}
}
}
@@ -416,37 +408,21 @@ impl Table {
}
}
fn with_elements<F, R>(&self, f: F) -> R
where
F: FnOnce(&[*mut u8]) -> R,
{
match &self.storage {
TableStorage::Static { data, size, .. } => unsafe {
f(std::slice::from_raw_parts(*data, size.get() as usize))
},
TableStorage::Dynamic { elements, .. } => {
let elements = elements.borrow();
f(elements.as_slice())
}
fn elements(&self) -> &[usize] {
match self {
Table::Static { data, size, .. } => &data[..*size as usize],
Table::Dynamic { elements, .. } => &elements[..],
}
}
fn with_elements_mut<F, R>(&self, f: F) -> R
where
F: FnOnce(&mut [*mut u8]) -> R,
{
match &self.storage {
TableStorage::Static { data, size, .. } => unsafe {
f(std::slice::from_raw_parts_mut(*data, size.get() as usize))
},
TableStorage::Dynamic { elements, .. } => {
let mut elements = elements.borrow_mut();
f(elements.as_mut_slice())
}
fn elements_mut(&mut self) -> &mut [usize] {
match self {
Table::Static { data, size, .. } => &mut data[..*size as usize],
Table::Dynamic { elements, .. } => &mut elements[..],
}
}
fn set_raw(ty: TableElementType, elem: &mut *mut u8, val: TableElement) {
fn set_raw(ty: TableElementType, elem: &mut usize, val: TableElement) {
unsafe {
let old = *elem;
*elem = val.into_raw();
@@ -457,7 +433,7 @@ impl Table {
}
fn copy_elements(
dst_table: &Self,
dst_table: &mut Self,
src_table: &Self,
dst_range: Range<usize>,
src_range: Range<usize>,
@@ -470,47 +446,43 @@ impl Table {
match ty {
TableElementType::Func => {
// `funcref` are `Copy`, so just do a memcpy
dst_table.with_elements_mut(|dst| {
src_table.with_elements(|src| dst[dst_range].copy_from_slice(&src[src_range]))
});
dst_table.elements_mut()[dst_range]
.copy_from_slice(&src_table.elements()[src_range]);
}
TableElementType::Val(_) => {
// We need to clone each `externref`
dst_table.with_elements_mut(|dst| {
src_table.with_elements(|src| {
for (s, d) in src_range.zip(dst_range) {
let elem = unsafe { TableElement::clone_from_raw(ty, src[s]) };
Self::set_raw(ty, &mut dst[d], elem);
}
})
});
let dst = dst_table.elements_mut();
let src = src_table.elements();
for (s, d) in src_range.zip(dst_range) {
let elem = unsafe { TableElement::clone_from_raw(ty, src[s]) };
Self::set_raw(ty, &mut dst[d], elem);
}
}
}
}
fn copy_elements_within(table: &Self, dst_range: Range<usize>, src_range: Range<usize>) {
let ty = table.element_type();
fn copy_elements_within(&mut self, dst_range: Range<usize>, src_range: Range<usize>) {
let ty = self.element_type();
let dst = self.elements_mut();
match ty {
TableElementType::Func => {
// `funcref` are `Copy`, so just do a memmove
table.with_elements_mut(|dst| dst.copy_within(src_range, dst_range.start));
dst.copy_within(src_range, dst_range.start);
}
TableElementType::Val(_) => {
// We need to clone each `externref` while handling overlapping ranges
table.with_elements_mut(|dst| {
if dst_range.start <= src_range.start {
for (s, d) in src_range.zip(dst_range) {
let elem = unsafe { TableElement::clone_from_raw(ty, dst[s]) };
Self::set_raw(ty, &mut dst[d], elem);
}
} else {
for (s, d) in src_range.rev().zip(dst_range.rev()) {
let elem = unsafe { TableElement::clone_from_raw(ty, dst[s]) };
Self::set_raw(ty, &mut dst[d], elem);
}
// We need to clone each `externref` while handling overlapping
// ranges
if dst_range.start <= src_range.start {
for (s, d) in src_range.zip(dst_range) {
let elem = unsafe { TableElement::clone_from_raw(ty, dst[s]) };
Self::set_raw(ty, &mut dst[d], elem);
}
});
} else {
for (s, d) in src_range.rev().zip(dst_range.rev()) {
let elem = unsafe { TableElement::clone_from_raw(ty, dst[s]) };
Self::set_raw(ty, &mut dst[d], elem);
}
}
}
}
}
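The direction check above is the classic overlapping-copy rule; a standalone illustration (unrelated to wasmtime's types) of why a plain forward walk would clobber source slots that haven't been copied yet:

#[test]
fn overlapping_copy_direction() {
    // A naive forward copy of indices 0..3 onto 1..4 overwrites its own source.
    let mut naive = [1, 2, 3, 4];
    for (s, d) in (0..3).zip(1..4) {
        naive[d] = naive[s];
    }
    assert_eq!(naive, [1, 1, 1, 1]);

    // A memmove-style copy (or walking backwards, as in the branch above) does
    // not.
    let mut correct = [1, 2, 3, 4];
    correct.copy_within(0..3, 1);
    assert_eq!(correct, [1, 1, 2, 3]);
}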
@@ -526,25 +498,19 @@ impl Drop for Table {
}
// Properly drop any table elements stored in the table
self.with_elements(|elements| {
for element in elements.iter() {
let _ = unsafe { TableElement::from_raw(ty, *element) };
}
});
for element in self.elements() {
drop(unsafe { TableElement::from_raw(ty, *element) });
}
}
}
// The default table representation is an empty funcref table that cannot grow.
impl Default for Table {
fn default() -> Self {
Self {
storage: TableStorage::Static {
data: std::ptr::null_mut(),
size: Cell::new(0),
ty: TableElementType::Func,
maximum: 0,
},
limiter: None,
Table::Static {
data: &mut [],
size: 0,
ty: TableElementType::Func,
}
}
}

View File

@@ -1,7 +1,7 @@
//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.
use crate::VMInterrupts;
use crate::{VMContext, VMInterrupts};
use backtrace::Backtrace;
use std::any::Any;
use std::cell::{Cell, UnsafeCell};
@@ -15,10 +15,12 @@ use wasmtime_environ::ir;
pub use self::tls::TlsRestore;
extern "C" {
#[allow(improper_ctypes)]
fn RegisterSetjmp(
jmp_buf: *mut *const u8,
callback: extern "C" fn(*mut u8),
callback: extern "C" fn(*mut u8, *mut VMContext),
payload: *mut u8,
callee: *mut VMContext,
) -> i32;
fn Unwind(jmp_buf: *const u8) -> !;
}
@@ -52,7 +54,7 @@ static mut IS_WASM_PC: fn(usize) -> bool = |_| false;
/// This function must not only be called globally once before entering
/// WebAssembly but it must also be called once-per-thread that enters
/// WebAssembly. Currently in wasmtime's integration this function is called on
/// creation of a `Store`.
/// creation of an `Engine`.
///
/// The `is_wasm_pc` argument is used when a trap happens to determine if a
/// program counter is the pc of an actual wasm trap or not. This is then used
@@ -165,76 +167,42 @@ impl Trap {
/// returning them as a `Result`.
///
/// Highly unsafe since `closure` won't have any dtors run.
pub unsafe fn catch_traps<F>(trap_info: &impl TrapInfo, mut closure: F) -> Result<(), Trap>
pub unsafe fn catch_traps<'a, F>(
vminterrupts: *mut VMInterrupts,
signal_handler: Option<*const SignalHandler<'static>>,
callee: *mut VMContext,
mut closure: F,
) -> Result<(), Trap>
where
F: FnMut(),
F: FnMut(*mut VMContext),
{
return CallThreadState::new(trap_info).with(|cx| {
return CallThreadState::new(signal_handler).with(vminterrupts, |cx| {
RegisterSetjmp(
cx.jmp_buf.as_ptr(),
call_closure::<F>,
&mut closure as *mut F as *mut u8,
callee,
)
});
extern "C" fn call_closure<F>(payload: *mut u8)
extern "C" fn call_closure<F>(payload: *mut u8, callee: *mut VMContext)
where
F: FnMut(),
F: FnMut(*mut VMContext),
{
unsafe { (*(payload as *mut F))() }
unsafe { (*(payload as *mut F))(callee) }
}
}
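As a sketch of how a caller might drive the new signature (the helper name and the entry-point type are assumptions for illustration, not part of this patch), the callee `VMContext` is now threaded through the setjmp trampoline and handed back to the closure rather than being captured:

// Hypothetical call-site helper.
unsafe fn enter_wasm(
    vminterrupts: *mut VMInterrupts,
    callee: *mut VMContext,
    entry: unsafe extern "C" fn(*mut VMContext),
) -> Result<(), Trap> {
    // The closure receives the callee vmctx from the trampoline and forwards
    // it to the compiled entry point; no custom signal handler is installed.
    catch_traps(vminterrupts, None, callee, |vmctx| entry(vmctx))
}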
/// Runs `func` with the last `trap_info` object registered by `catch_traps`.
///
/// Calls `func` with `None` if `catch_traps` wasn't previously called from this
/// stack frame.
pub fn with_last_info<R>(func: impl FnOnce(Option<&dyn Any>) -> R) -> R {
tls::with(|state| func(state.map(|s| s.trap_info.as_any())))
}
/// Invokes the contextually-defined context's out-of-gas function.
///
/// (basically delegates to `wasmtime::Store::out_of_gas`)
pub fn out_of_gas() {
tls::with(|state| state.unwrap().trap_info.out_of_gas())
}
/// Temporary state stored on the stack which is registered in the `tls` module
/// below for calls into wasm.
pub struct CallThreadState<'a> {
pub struct CallThreadState {
unwind: UnsafeCell<MaybeUninit<UnwindReason>>,
jmp_buf: Cell<*const u8>,
handling_trap: Cell<bool>,
trap_info: &'a (dyn TrapInfo + 'a),
signal_handler: Option<*const SignalHandler<'static>>,
prev: Cell<tls::Ptr>,
}
/// A package of functionality needed by `catch_traps` to figure out what to do
/// when handling a trap.
///
/// Note that this is an `unsafe` trait at least because it's being run in the
/// context of a synchronous signal handler, so it needs to be careful to not
/// access too much state in answering these queries.
pub unsafe trait TrapInfo {
/// Converts this object into an `Any` to dynamically check its type.
fn as_any(&self) -> &dyn Any;
/// Uses `call` to call a custom signal handler, if one is specified.
///
/// Returns `true` if `call` returns true, otherwise returns `false`.
fn custom_signal_handler(&self, call: &dyn Fn(&SignalHandler) -> bool) -> bool;
/// Callback invoked whenever WebAssembly has entirely consumed the fuel
/// that it was allotted.
///
/// This function may return, and it may also `raise_lib_trap`.
fn out_of_gas(&self);
/// Returns the VM interrupts to use for interrupting Wasm code.
fn interrupts(&self) -> &VMInterrupts;
}
enum UnwindReason {
Panic(Box<dyn Any + Send>),
UserTrap(Box<dyn Error + Send + Sync>),
@@ -242,19 +210,23 @@ enum UnwindReason {
JitTrap { backtrace: Backtrace, pc: usize },
}
impl<'a> CallThreadState<'a> {
impl CallThreadState {
#[inline]
fn new(trap_info: &'a (dyn TrapInfo + 'a)) -> CallThreadState<'a> {
fn new(signal_handler: Option<*const SignalHandler<'static>>) -> CallThreadState {
CallThreadState {
unwind: UnsafeCell::new(MaybeUninit::uninit()),
jmp_buf: Cell::new(ptr::null()),
handling_trap: Cell::new(false),
trap_info,
signal_handler,
prev: Cell::new(ptr::null()),
}
}
fn with(self, closure: impl FnOnce(&CallThreadState) -> i32) -> Result<(), Trap> {
fn with(
self,
interrupts: *mut VMInterrupts,
closure: impl FnOnce(&CallThreadState) -> i32,
) -> Result<(), Trap> {
let ret = tls::set(&self, || closure(&self))?;
if ret != 0 {
return Ok(());
@@ -263,9 +235,9 @@ impl<'a> CallThreadState<'a> {
UnwindReason::UserTrap(data) => Err(Trap::User(data)),
UnwindReason::LibTrap(trap) => Err(trap),
UnwindReason::JitTrap { backtrace, pc } => {
let interrupts = self.trap_info.interrupts();
let maybe_interrupted =
interrupts.stack_limit.load(SeqCst) == wasmtime_environ::INTERRUPTED;
let maybe_interrupted = unsafe {
(*interrupts).stack_limit.load(SeqCst) == wasmtime_environ::INTERRUPTED
};
Err(Trap::Jit {
pc,
backtrace,
@@ -322,8 +294,10 @@ impl<'a> CallThreadState<'a> {
// First up see if any instance registered has a custom trap handler,
// in which case run them all. If anything handles the trap then we
// return that the trap was handled.
if self.trap_info.custom_signal_handler(&call_handler) {
return 1 as *const _;
if let Some(handler) = self.signal_handler {
if unsafe { call_handler(&*handler) } {
return 1 as *const _;
}
}
// If this fault wasn't in wasm code, then it's not our problem
@@ -366,7 +340,6 @@ impl<T: Copy> Drop for ResetCell<'_, T> {
mod tls {
use super::CallThreadState;
use crate::Trap;
use std::mem;
use std::ptr;
pub use raw::Ptr;
@@ -388,7 +361,7 @@ mod tls {
use std::cell::Cell;
use std::ptr;
pub type Ptr = *const CallThreadState<'static>;
pub type Ptr = *const CallThreadState;
// The first entry here is the `Ptr` which is what's used as part of the
// public interface of this module. The second entry is a boolean which
@@ -460,10 +433,11 @@ mod tls {
/// Configures thread local state such that for the duration of the
/// execution of `closure` any call to `with` will yield `ptr`, unless this
/// is recursively called again.
pub fn set<R>(state: &CallThreadState<'_>, closure: impl FnOnce() -> R) -> Result<R, Trap> {
struct Reset<'a, 'b>(&'a CallThreadState<'b>);
#[inline]
pub fn set<R>(state: &CallThreadState, closure: impl FnOnce() -> R) -> Result<R, Trap> {
struct Reset<'a>(&'a CallThreadState);
impl Drop for Reset<'_, '_> {
impl Drop for Reset<'_> {
#[inline]
fn drop(&mut self) {
raw::replace(self.0.prev.replace(ptr::null()))
@@ -471,13 +445,7 @@ mod tls {
}
}
// Note that this extension of the lifetime to `'static` should be
// safe because we only ever access it below with an anonymous
// lifetime, meaning `'static` never leaks out of this module.
let ptr = unsafe {
mem::transmute::<*const CallThreadState<'_>, *const CallThreadState<'static>>(state)
};
let prev = raw::replace(ptr)?;
let prev = raw::replace(state)?;
state.prev.set(prev);
let _reset = Reset(state);
Ok(closure())
@@ -485,7 +453,7 @@ mod tls {
/// Returns the last pointer configured with `set` above. Panics if `set`
/// has not been previously called.
pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState<'_>>) -> R) -> R {
pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState>) -> R) -> R {
let p = raw::get();
unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
}
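// A minimal sketch of the scoped-TLS pattern implemented by `set`/`with`
// above: a raw pointer is stashed in a thread local for the duration of a
// closure and the previous value is restored by a drop guard, so nesting and
// unwinding both leave the thread local consistent. `ScopedState` and the
// `TLS` cell are illustrative stand-ins (the real module also threads a
// `Result` through for traps).

use std::cell::Cell;
use std::ptr;

struct ScopedState {
    value: u32,
}

thread_local!(static TLS: Cell<*const ScopedState> = Cell::new(ptr::null()));

fn set<R>(state: &ScopedState, closure: impl FnOnce() -> R) -> R {
    struct Reset(*const ScopedState);
    impl Drop for Reset {
        fn drop(&mut self) {
            // Restore the previous pointer even if `closure` panics.
            TLS.with(|tls| tls.set(self.0));
        }
    }
    let prev = TLS.with(|tls| tls.replace(state));
    let _reset = Reset(prev);
    closure()
}

fn with<R>(closure: impl FnOnce(Option<&ScopedState>) -> R) -> R {
    let p = TLS.with(|tls| tls.get());
    unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
}

fn main() {
    let outer = ScopedState { value: 1 };
    set(&outer, || {
        let inner = ScopedState { value: 2 };
        set(&inner, || with(|s| assert_eq!(s.unwrap().value, 2)));
        // The outer state is visible again once the inner scope ends.
        with(|s| assert_eq!(s.unwrap().value, 1));
    });
    with(|s| assert!(s.is_none()));
}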

View File

@@ -149,7 +149,7 @@ use mach_addons::*;
pub enum Void {}
/// For now this is basically unused; we no longer expose this for Wasmtime
/// on macOS.
pub type SignalHandler<'a> = dyn Fn(Void) -> bool + 'a;
pub type SignalHandler<'a> = dyn Fn(Void) -> bool + Send + Sync + 'a;
/// Process-global port that we use to route thread-level exceptions to.
static mut WASMTIME_PORT: mach_port_name_t = MACH_PORT_NULL;

View File

@@ -7,7 +7,7 @@ use std::ptr::{self, null_mut};
/// Function which may handle custom signals while processing traps.
pub type SignalHandler<'a> =
dyn Fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) -> bool + 'a;
dyn Fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) -> bool + Send + Sync + 'a;
static mut PREV_SIGSEGV: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
static mut PREV_SIGBUS: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
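// A minimal sketch of why the `+ Send + Sync` bounds being added to the
// `SignalHandler` aliases matter: a boxed handler stored inside a type that is
// itself shared across threads only counts as `Send`/`Sync` if those bounds
// are baked into the trait object. `FakeSignalHandler` and `StoreLike` below
// are illustrative stand-ins, not the real alias or `Store`.

type FakeSignalHandler<'a> = dyn Fn(i32) -> bool + Send + Sync + 'a;

struct StoreLike {
    // Without `Send + Sync` in the alias this field would make `StoreLike`
    // neither `Send` nor `Sync`.
    signal_handler: Option<Box<FakeSignalHandler<'static>>>,
}

fn assert_send_sync<T: Send + Sync>() {}

fn main() {
    let handler: Box<FakeSignalHandler<'static>> = Box::new(|signum| {
        println!("inspecting signal {}", signum);
        false // not handled; fall through to the default trap machinery
    });
    let store = StoreLike {
        signal_handler: Some(handler),
    };
    assert_send_sync::<StoreLike>();
    if let Some(handler) = &store.signal_handler {
        assert!(!handler(11));
    }
}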

View File

@@ -6,7 +6,8 @@ use winapi::um::winnt::*;
use winapi::vc::excpt::*;
/// Function which may handle custom signals while processing traps.
pub type SignalHandler<'a> = dyn Fn(winapi::um::winnt::PEXCEPTION_POINTERS) -> bool + 'a;
pub type SignalHandler<'a> =
dyn Fn(winapi::um::winnt::PEXCEPTION_POINTERS) -> bool + Send + Sync + 'a;
pub unsafe fn platform_init() {
// our trap handler needs to go first, so that we can recover from

View File

@@ -5,6 +5,7 @@ use crate::externref::VMExternRef;
use crate::instance::Instance;
use std::any::Any;
use std::cell::UnsafeCell;
use std::marker;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::u32;
@@ -21,6 +22,11 @@ pub struct VMFunctionImport {
pub vmctx: *mut VMContext,
}
// Declare that this type is send/sync; it's the responsibility of users of
// `VMFunctionImport` to uphold this guarantee.
unsafe impl Send for VMFunctionImport {}
unsafe impl Sync for VMFunctionImport {}
#[cfg(test)]
mod test_vmfunction_import {
use super::VMFunctionImport;
@@ -77,6 +83,11 @@ pub struct VMTableImport {
pub vmctx: *mut VMContext,
}
// Declare that this type is send/sync; it's the responsibility of users of
// `VMTableImport` to uphold this guarantee.
unsafe impl Send for VMTableImport {}
unsafe impl Sync for VMTableImport {}
#[cfg(test)]
mod test_vmtable_import {
use super::VMTableImport;
@@ -115,6 +126,11 @@ pub struct VMMemoryImport {
pub vmctx: *mut VMContext,
}
// Declare that this type is send/sync; it's the responsibility of users of
// `VMMemoryImport` to uphold this guarantee.
unsafe impl Send for VMMemoryImport {}
unsafe impl Sync for VMMemoryImport {}
#[cfg(test)]
mod test_vmmemory_import {
use super::VMMemoryImport;
@@ -150,6 +166,11 @@ pub struct VMGlobalImport {
pub from: *mut VMGlobalDefinition,
}
// Declare that this type is send/sync; it's the responsibility of users of
// `VMGlobalImport` to uphold this guarantee.
unsafe impl Send for VMGlobalImport {}
unsafe impl Sync for VMGlobalImport {}
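// A minimal sketch of the pattern repeated for the `VM*Import` types above: a
// struct containing raw pointers is automatically neither `Send` nor `Sync`,
// so the impls are written by hand and the burden of proof shifts to whoever
// constructs and shares the value. `SharedRaw` is an illustrative stand-in,
// not one of the real types.

use std::sync::atomic::{AtomicU32, Ordering};

struct SharedRaw {
    // A raw pointer opts the containing type out of `Send`/`Sync` by default.
    ptr: *const AtomicU32,
}

impl SharedRaw {
    fn bump(&self) {
        // Safety: the code constructing `SharedRaw` promised that `ptr` is
        // valid for the value's lifetime and points at an atomic, so
        // cross-thread access is fine.
        unsafe { (*self.ptr).fetch_add(1, Ordering::SeqCst) };
    }
}

// Safety: it's the responsibility of users of `SharedRaw` to ensure `ptr`
// outlives the value and is safe to touch from any thread.
unsafe impl Send for SharedRaw {}
unsafe impl Sync for SharedRaw {}

fn main() {
    let counter = AtomicU32::new(0);
    let shared = SharedRaw { ptr: &counter };
    std::thread::scope(|s| {
        s.spawn(|| shared.bump());
        s.spawn(|| shared.bump());
    });
    assert_eq!(counter.load(Ordering::SeqCst), 2);
}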
#[cfg(test)]
mod test_vmglobal_import {
use super::VMGlobalImport;
@@ -259,7 +280,7 @@ mod test_vmtable_definition {
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug, Copy, Clone)]
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
storage: [u8; 16],
@@ -524,6 +545,9 @@ pub struct VMCallerCheckedAnyfunc {
// If more elements are added here, remember to add offset_of tests below!
}
unsafe impl Send for VMCallerCheckedAnyfunc {}
unsafe impl Sync for VMCallerCheckedAnyfunc {}
#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
use super::VMCallerCheckedAnyfunc;
@@ -682,6 +706,16 @@ pub struct VMInterrupts {
pub fuel_consumed: UnsafeCell<i64>,
}
// The `VMInterrupts` type is a POD type with no destructor, and the only field
// accessed from other threads is `stack_limit`, so add these trait impls,
// which are otherwise not derived automatically because of the `UnsafeCell`
// around `fuel_consumed`.
//
// Note that users of `fuel_consumed` understand that the unsafety here
// encompasses ensuring that it's only mutated/accessed from one thread
// dynamically.
unsafe impl Send for VMInterrupts {}
unsafe impl Sync for VMInterrupts {}
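// A minimal sketch of the cross-thread usage these impls enable: an embedder
// thread flips the atomic `stack_limit` to a sentinel while the executing
// thread is the only one touching the `UnsafeCell` field. The `Shared` type
// and the `INTERRUPTED` sentinel value below are illustrative, not the real
// layout or constant.

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::sync::Arc;

const INTERRUPTED: usize = usize::MAX;

struct Shared {
    stack_limit: AtomicUsize,
    // Mirrors `fuel_consumed`: this field is what forces the manual impls, and
    // by contract it is only accessed from the one executing thread.
    fuel_consumed: UnsafeCell<i64>,
}

// Safety: `stack_limit` is atomic and `fuel_consumed` is single-threaded by
// contract, as described above.
unsafe impl Send for Shared {}
unsafe impl Sync for Shared {}

fn main() {
    let shared = Arc::new(Shared {
        stack_limit: AtomicUsize::new(0),
        fuel_consumed: UnsafeCell::new(0),
    });
    let interrupter = Arc::clone(&shared);
    let handle = std::thread::spawn(move || interrupter.stack_limit.store(INTERRUPTED, SeqCst));

    // The executing thread is the only one mutating `fuel_consumed`...
    unsafe { *shared.fuel_consumed.get() += 1 };
    // ...and it periodically observes `stack_limit` to notice interrupts.
    while shared.stack_limit.load(SeqCst) != INTERRUPTED {
        std::hint::spin_loop();
    }
    handle.join().unwrap();
    println!("interrupt observed; a real runtime would raise a trap here");
}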
impl VMInterrupts {
/// Flag that an interrupt should occur
pub fn interrupt(&self) {
@@ -728,7 +762,17 @@ mod test_vminterrupts {
/// TODO: We could move the globals into the `vmctx` allocation too.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {}
pub struct VMContext {
/// There's some more discussion about this within `wasmtime/src/lib.rs`, but
/// the idea is that we want to tell the compiler that this type contains
/// pointers which transitively refer back to it, to suppress some
/// optimizations that might otherwise assume no such self-reference exists.
///
/// The self-referential pointer we care about is the `*mut Store` pointer
/// early on in this context which, if you follow through enough levels of
/// nesting, can eventually refer back to this `VMContext`.
pub _marker: marker::PhantomPinned,
}
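// A minimal sketch of what `PhantomPinned` buys a self-referential type: it
// opts the type out of `Unpin`, so once a value is pinned, safe code can't
// move it and invalidate pointers that lead back into it (and, as the comment
// above alludes to, the compiler is also more conservative about aliasing
// assumptions for such types). `SelfRef` is an illustrative toy, not
// `VMContext` itself.

use std::marker::PhantomPinned;
use std::pin::Pin;
use std::ptr;

struct SelfRef {
    data: u32,
    // Points back into this very struct once `init` has run.
    me: *const SelfRef,
    _marker: PhantomPinned,
}

impl SelfRef {
    fn new(data: u32) -> SelfRef {
        SelfRef { data, me: ptr::null(), _marker: PhantomPinned }
    }

    fn init(self: Pin<&mut Self>) {
        // Safety: we only write a pointer back into the pinned location and
        // never move the value out of it.
        unsafe {
            let this: *mut SelfRef = self.get_unchecked_mut();
            (*this).me = this;
        }
    }

    fn data_via_self_pointer(self: Pin<&Self>) -> u32 {
        // Safety: `init` stored a pointer to the pinned (hence stable)
        // location of this value.
        unsafe { (*self.me).data }
    }
}

fn main() {
    let mut boxed = Box::pin(SelfRef::new(7));
    boxed.as_mut().init();
    assert_eq!(boxed.as_ref().data_via_self_pointer(), 7);
}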
impl VMContext {
/// Return a mutable reference to the associated `Instance`.
@@ -742,6 +786,11 @@ impl VMContext {
&*((self as *const Self as *mut u8).offset(-Instance::vmctx_offset()) as *const Instance)
}
#[inline]
pub(crate) unsafe fn instance_mut(&mut self) -> &mut Instance {
&mut *((self as *const Self as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance)
}
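// A minimal sketch of the "container-of" computation done by `instance` and
// `instance_mut` above: given a pointer to a field at a known offset inside a
// larger structure, step backwards by that offset to recover the container.
// `Outer`/`Ctx` and the hard-coded offset below are illustrative; the real
// code derives `vmctx_offset` from its own layout rules.

use std::mem::size_of;
use std::ptr::addr_of;

#[repr(C)]
struct Ctx {
    flag: u8,
}

#[repr(C)]
struct Outer {
    id: u64,
    ctx: Ctx,
}

unsafe fn outer_of(ctx: *const Ctx) -> *const Outer {
    // With `#[repr(C)]`, `ctx` sits immediately after the `u64`, so stepping
    // back by `size_of::<u64>()` lands on the start of `Outer`.
    (ctx as *const u8).offset(-(size_of::<u64>() as isize)) as *const Outer
}

fn main() {
    let outer = Outer { id: 42, ctx: Ctx { flag: 1 } };
    // Derive the field pointer from a pointer to the whole struct so that it
    // keeps provenance over the entire allocation, mirroring how a `VMContext`
    // pointer is handed out by the instance that contains it.
    let outer_ptr: *const Outer = &outer;
    let ctx_ptr = unsafe { addr_of!((*outer_ptr).ctx) };
    let recovered = unsafe { &*outer_of(ctx_ptr) };
    assert_eq!(recovered.id, 42);
    assert_eq!(recovered.ctx.flag, 1);
}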
/// Return a reference to the host state associated with this `Instance`.
///
/// # Safety