Implement RFC 11: Redesigning Wasmtime's APIs (#2897)

Implement Wasmtime's new API as designed by RFC 11. This is quite a large commit which has had lots of discussion externally, so for more information it's best to read the RFC thread and the PR thread.
This commit is contained in:
Alex Crichton
2021-06-03 09:10:53 -05:00
committed by GitHub
parent a5a28b1c5b
commit 7a1b7cdf92
233 changed files with 13349 additions and 11997 deletions

View File

@@ -1,4 +1,3 @@
use crate::externref::{ModuleInfoLookup, VMExternRefActivationsTable, EMPTY_MODULE_LOOKUP};
use crate::imports::Imports;
use crate::instance::{Instance, InstanceHandle, ResourceLimiter, RuntimeMemoryCreator};
use crate::memory::{DefaultMemoryCreator, Memory};
@@ -6,16 +5,15 @@ use crate::table::Table;
use crate::traphandlers::Trap;
use crate::vmcontext::{
VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport,
VMGlobalDefinition, VMGlobalImport, VMInterrupts, VMMemoryImport, VMSharedSignatureIndex,
VMTableImport,
VMGlobalDefinition, VMGlobalImport, VMMemoryImport, VMSharedSignatureIndex, VMTableImport,
};
use crate::Store;
use anyhow::Result;
use std::alloc;
use std::any::Any;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::marker;
use std::ptr::{self, NonNull};
use std::rc::Rc;
use std::slice;
use std::sync::Arc;
use thiserror::Error;
@@ -49,19 +47,23 @@ pub struct InstanceAllocationRequest<'a> {
pub shared_signatures: SharedSignatures<'a>,
/// The host state to associate with the instance.
pub host_state: Box<dyn Any>,
pub host_state: Box<dyn Any + Send + Sync>,
/// The pointer to the VM interrupts structure to use for the instance.
pub interrupts: *const VMInterrupts,
/// The pointer to the reference activations table to use for the instance.
pub externref_activations_table: *mut VMExternRefActivationsTable,
/// The pointer to the module info lookup to use for the instance.
pub module_info_lookup: Option<*const dyn ModuleInfoLookup>,
/// The resource limiter to use for the instance.
pub limiter: Option<&'a Rc<dyn ResourceLimiter>>,
/// A pointer to the "store" for this instance to be allocated. The store
/// correlates with the `Store` in wasmtime itself, and lots of contextual
/// information about the execution of wasm can be learned through the store.
///
/// Note that this is a raw pointer and has a static lifetime, both of which
/// are a bit of a lie. This is done purely so a store can learn about
/// itself when it gets called as a host function, and additionally so this
/// runtime can access internals as necessary (such as the
/// VMExternRefActivationsTable or the ResourceLimiter).
///
/// Note that this ends up being a self-pointer to the instance when stored.
/// The reason is that the instance itself is then stored within the store.
/// We use a number of `PhantomPinned` declarations to indicate this to the
/// compiler. More info on this in `wasmtime/src/store.rs`
pub store: Option<*mut dyn Store>,
}
/// A link error while instantiating a module.
@@ -141,7 +143,8 @@ pub unsafe trait InstanceAllocator: Send + Sync {
/// This method is only safe to call immediately after an instance has been allocated.
unsafe fn initialize(
&self,
handle: &InstanceHandle,
handle: &mut InstanceHandle,
module: &Module,
is_bulk_memory: bool,
) -> Result<(), InstantiationError>;
@@ -232,9 +235,12 @@ fn get_table_init_start(
}
}
fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
for init in &instance.module.table_initializers {
let table = instance.get_table(init.table_index);
fn check_table_init_bounds(
instance: &mut Instance,
module: &Module,
) -> Result<(), InstantiationError> {
for init in &module.table_initializers {
let table = unsafe { &*instance.get_table(init.table_index) };
let start = get_table_init_start(init, instance)?;
let start = usize::try_from(start).unwrap();
let end = start.checked_add(init.elements.len());
@@ -254,8 +260,8 @@ fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError
Ok(())
}
fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
for init in &instance.module.table_initializers {
fn initialize_tables(instance: &mut Instance, module: &Module) -> Result<(), InstantiationError> {
for init in &module.table_initializers {
instance
.table_init_segment(
init.table_index,
@@ -318,7 +324,7 @@ fn check_memory_init_bounds(
}
fn initialize_memories(
instance: &Instance,
instance: &mut Instance,
initializers: &[MemoryInitializer],
) -> Result<(), InstantiationError> {
for init in initializers {
@@ -336,8 +342,8 @@ fn initialize_memories(
Ok(())
}
fn check_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
check_table_init_bounds(instance)?;
fn check_init_bounds(instance: &mut Instance, module: &Module) -> Result<(), InstantiationError> {
check_table_init_bounds(instance, module)?;
match &instance.module.memory_initialization {
MemoryInitialization::Paged { out_of_bounds, .. } => {
@@ -356,7 +362,8 @@ fn check_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
}
fn initialize_instance(
instance: &Instance,
instance: &mut Instance,
module: &Module,
is_bulk_memory: bool,
) -> Result<(), InstantiationError> {
// If bulk memory is not enabled, bounds check the data and element segments before
@@ -364,14 +371,14 @@ fn initialize_instance(
// in-order and side effects are observed up to the point of an out-of-bounds
// initializer, so the early checking is not desired.
if !is_bulk_memory {
check_init_bounds(instance)?;
check_init_bounds(instance, module)?;
}
// Initialize the tables
initialize_tables(instance)?;
initialize_tables(instance, module)?;
// Initialize the memories
match &instance.module.memory_initialization {
match &module.memory_initialization {
MemoryInitialization::Paged { map, out_of_bounds } => {
for (index, pages) in map {
let memory = instance.memory(index);
@@ -404,12 +411,14 @@ fn initialize_instance(
Ok(())
}
unsafe fn initialize_vmcontext(instance: &Instance, req: InstanceAllocationRequest) {
let module = &instance.module;
unsafe fn initialize_vmcontext(instance: &mut Instance, req: InstanceAllocationRequest) {
if let Some(store) = req.store {
*instance.interrupts() = (*store).vminterrupts();
*instance.externref_activations_table() = (*store).externref_activations_table().0;
instance.set_store(store);
}
*instance.interrupts() = req.interrupts;
*instance.externref_activations_table() = req.externref_activations_table;
*instance.module_info_lookup() = req.module_info_lookup.unwrap_or(&EMPTY_MODULE_LOOKUP);
let module = &instance.module;
// Initialize shared signatures
let mut ptr = instance.signature_ids_ptr();
@@ -520,17 +529,24 @@ unsafe fn initialize_vmcontext_globals(instance: &Instance) {
let from = if let Some(def_x) = module.defined_global_index(x) {
instance.global(def_x)
} else {
*instance.imported_global(x).from
&*instance.imported_global(x).from
};
*to = from;
// Globals of type `externref` need to manage the reference
// count as values move between globals, everything else is just
// copy-able bits.
match global.wasm_ty {
WasmType::ExternRef => *(*to).as_externref_mut() = from.as_externref().clone(),
_ => ptr::copy_nonoverlapping(from, to, 1),
}
}
GlobalInit::RefFunc(f) => {
*(*to).as_anyfunc_mut() = instance.get_caller_checked_anyfunc(f).unwrap()
as *const VMCallerCheckedAnyfunc;
}
GlobalInit::RefNullConst => match global.wasm_ty {
WasmType::FuncRef => *(*to).as_anyfunc_mut() = ptr::null(),
WasmType::ExternRef => *(*to).as_externref_mut() = None,
// `VMGlobalDefinition::new()` already zeroed out the bits
WasmType::FuncRef => {}
WasmType::ExternRef => {}
ty => panic!("unsupported reference type for global: {:?}", ty),
},
GlobalInit::Import => panic!("locally-defined global initialized as import"),
@@ -545,6 +561,17 @@ pub struct OnDemandInstanceAllocator {
stack_size: usize,
}
// rustc is quite strict with the lifetimes when dealing with mutable borrows,
// so this is a little helper to reborrow an `Option<&mut T>` at a shorter
// lifetime without consuming the caller's `Option`.
fn borrow_limiter<'a>(
    limiter: &'a mut Option<&mut dyn ResourceLimiter>,
) -> Option<&'a mut dyn ResourceLimiter> {
    // `Option::as_deref_mut` performs exactly this reborrow: it yields the
    // inner `&mut dyn ResourceLimiter` at the shorter lifetime `'a`, leaving
    // the original `Option` usable for subsequent calls.
    limiter.as_deref_mut()
}
impl OnDemandInstanceAllocator {
/// Creates a new on-demand instance allocator.
pub fn new(mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>, stack_size: usize) -> Self {
@@ -556,13 +583,16 @@ impl OnDemandInstanceAllocator {
fn create_tables(
module: &Module,
limiter: Option<&Rc<dyn ResourceLimiter>>,
mut limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<PrimaryMap<DefinedTableIndex, Table>, InstantiationError> {
let num_imports = module.num_imported_tables;
let mut tables: PrimaryMap<DefinedTableIndex, _> =
PrimaryMap::with_capacity(module.table_plans.len() - num_imports);
for table in &module.table_plans.values().as_slice()[num_imports..] {
tables.push(Table::new_dynamic(table, limiter).map_err(InstantiationError::Resource)?);
tables.push(
Table::new_dynamic(table, borrow_limiter(&mut limiter))
.map_err(InstantiationError::Resource)?,
);
}
Ok(tables)
}
@@ -570,7 +600,7 @@ impl OnDemandInstanceAllocator {
fn create_memories(
&self,
module: &Module,
limiter: Option<&Rc<dyn ResourceLimiter>>,
mut limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<PrimaryMap<DefinedMemoryIndex, Memory>, InstantiationError> {
let creator = self
.mem_creator
@@ -581,7 +611,7 @@ impl OnDemandInstanceAllocator {
PrimaryMap::with_capacity(module.memory_plans.len() - num_imports);
for plan in &module.memory_plans.values().as_slice()[num_imports..] {
memories.push(
Memory::new_dynamic(plan, creator, limiter)
Memory::new_dynamic(plan, creator, borrow_limiter(&mut limiter))
.map_err(InstantiationError::Resource)?,
);
}
@@ -603,23 +633,24 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
&self,
mut req: InstanceAllocationRequest,
) -> Result<InstanceHandle, InstantiationError> {
let memories = self.create_memories(&req.module, req.limiter)?;
let tables = Self::create_tables(&req.module, req.limiter)?;
let mut limiter = req.store.and_then(|s| (*s).limiter());
let memories = self.create_memories(&req.module, borrow_limiter(&mut limiter))?;
let tables = Self::create_tables(&req.module, borrow_limiter(&mut limiter))?;
let host_state = std::mem::replace(&mut req.host_state, Box::new(()));
let handle = {
let mut handle = {
let instance = Instance {
module: req.module.clone(),
offsets: VMOffsets::new(std::mem::size_of::<*const u8>() as u8, &req.module),
memories,
tables,
dropped_elements: RefCell::new(EntitySet::with_capacity(
req.module.passive_elements.len(),
)),
dropped_data: RefCell::new(EntitySet::with_capacity(req.module.passive_data.len())),
dropped_elements: EntitySet::with_capacity(req.module.passive_elements.len()),
dropped_data: EntitySet::with_capacity(req.module.passive_data.len()),
host_state,
vmctx: VMContext {},
vmctx: VMContext {
_marker: marker::PhantomPinned,
},
};
let layout = instance.alloc_layout();
let instance_ptr = alloc::alloc(layout) as *mut Instance;
@@ -632,17 +663,18 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
}
};
initialize_vmcontext(handle.instance(), req);
initialize_vmcontext(handle.instance_mut(), req);
Ok(handle)
}
unsafe fn initialize(
&self,
handle: &InstanceHandle,
handle: &mut InstanceHandle,
module: &Module,
is_bulk_memory: bool,
) -> Result<(), InstantiationError> {
initialize_instance(handle.instance(), is_bulk_memory)
initialize_instance(handle.instance_mut(), module, is_bulk_memory)
}
unsafe fn deallocate(&self, handle: &InstanceHandle) {

View File

@@ -7,6 +7,7 @@
//! Using the pooling instance allocator can speed up module instantiation
//! when modules can be constrained based on configurable limits.
use super::borrow_limiter;
use super::{
initialize_instance, initialize_vmcontext, InstanceAllocationRequest, InstanceAllocator,
InstanceHandle, InstantiationError, ResourceLimiter,
@@ -14,11 +15,10 @@ use super::{
use crate::{instance::Instance, Memory, Mmap, Table, VMContext};
use anyhow::{anyhow, bail, Context, Result};
use rand::Rng;
use std::cell::RefCell;
use std::cmp::min;
use std::convert::TryFrom;
use std::marker;
use std::mem;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use wasmtime_environ::{
entity::{EntitySet, PrimaryMap},
@@ -290,12 +290,12 @@ impl Default for PoolingAllocationStrategy {
#[derive(Debug)]
struct InstancePool {
mapping: Mmap,
offsets: VMOffsets,
instance_size: usize,
max_instances: usize,
free_list: Mutex<Vec<usize>>,
memories: MemoryPool,
tables: TablePool,
empty_module: Arc<Module>,
}
impl InstancePool {
@@ -334,18 +334,17 @@ impl InstancePool {
let pool = Self {
mapping,
offsets,
instance_size,
max_instances,
free_list: Mutex::new((0..max_instances).collect()),
memories: MemoryPool::new(module_limits, instance_limits)?,
tables: TablePool::new(module_limits, instance_limits)?,
empty_module: Arc::new(Module::default()),
};
// Use a default module to initialize the instances to start
let module = Arc::new(Module::default());
for i in 0..instance_limits.count as usize {
pool.initialize(i, &module);
pool.initialize(module_limits, i);
}
Ok(pool)
@@ -356,7 +355,7 @@ impl InstancePool {
&mut *(self.mapping.as_mut_ptr().add(index * self.instance_size) as *mut Instance)
}
fn initialize(&self, index: usize, module: &Arc<Module>) {
fn initialize(&self, limits: &ModuleLimits, index: usize) {
unsafe {
let instance = self.instance(index);
@@ -364,14 +363,19 @@ impl InstancePool {
std::ptr::write(
instance as _,
Instance {
module: module.clone(),
offsets: self.offsets,
memories: PrimaryMap::with_capacity(self.offsets.num_defined_memories as usize),
tables: PrimaryMap::with_capacity(self.offsets.num_defined_tables as usize),
dropped_elements: RefCell::new(EntitySet::new()),
dropped_data: RefCell::new(EntitySet::new()),
module: self.empty_module.clone(),
offsets: VMOffsets::new(
std::mem::size_of::<*const u8>() as u8,
&self.empty_module,
),
memories: PrimaryMap::with_capacity(limits.memories as usize),
tables: PrimaryMap::with_capacity(limits.tables as usize),
dropped_elements: EntitySet::new(),
dropped_data: EntitySet::new(),
host_state: Box::new(()),
vmctx: VMContext {},
vmctx: VMContext {
_marker: marker::PhantomPinned,
},
},
);
}
@@ -391,18 +395,19 @@ impl InstancePool {
);
instance.host_state = std::mem::replace(&mut req.host_state, Box::new(()));
let mut limiter = req.store.and_then(|s| (*s).limiter());
Self::set_instance_memories(
instance,
self.memories.get(index),
self.memories.max_wasm_pages,
req.limiter,
borrow_limiter(&mut limiter),
)?;
Self::set_instance_tables(
instance,
self.tables.get(index),
self.tables.get(index).map(|x| x as *mut usize),
self.tables.max_elements,
req.limiter,
borrow_limiter(&mut limiter),
)?;
initialize_vmcontext(instance, req);
@@ -452,7 +457,7 @@ impl InstancePool {
// Decommit any linear memories that were used
for (memory, base) in instance.memories.values_mut().zip(self.memories.get(index)) {
let memory = mem::take(memory);
let mut memory = mem::take(memory);
debug_assert!(memory.is_static());
// Reset any faulted guard pages as the physical memory may be reused for another instance in the future
@@ -460,14 +465,15 @@ impl InstancePool {
memory
.reset_guard_pages()
.expect("failed to reset guard pages");
drop(&mut memory); // require mutable on all platforms, not just uffd
let size = (memory.size() * WASM_PAGE_SIZE) as usize;
let size = (memory.size() as usize) * (WASM_PAGE_SIZE as usize);
drop(memory);
decommit_memory_pages(base, size).expect("failed to decommit linear memory pages");
}
instance.memories.clear();
instance.dropped_data.borrow_mut().clear();
instance.dropped_data.clear();
// Decommit any tables that were used
for (table, base) in instance.tables.values_mut().zip(self.tables.get(index)) {
@@ -484,11 +490,22 @@ impl InstancePool {
}
instance.tables.clear();
instance.dropped_elements.borrow_mut().clear();
instance.dropped_elements.clear();
// Drop all `global` values which need a destructor, such as externref
// values which now need their reference count dropped.
instance.drop_globals();
// Drop any host state
instance.host_state = Box::new(());
// And finally reset the module/offsets back to their original. This
// should put everything back in a relatively pristine state for each
// fresh allocation later on.
instance.module = self.empty_module.clone();
instance.offsets =
VMOffsets::new(std::mem::size_of::<*const u8>() as u8, &self.empty_module);
self.free_list.lock().unwrap().push(index);
}
@@ -496,7 +513,7 @@ impl InstancePool {
instance: &mut Instance,
mut memories: impl Iterator<Item = *mut u8>,
max_pages: u32,
limiter: Option<&Rc<dyn ResourceLimiter>>,
mut limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<(), InstantiationError> {
let module = instance.module.as_ref();
@@ -505,30 +522,34 @@ impl InstancePool {
for plan in
(&module.memory_plans.values().as_slice()[module.num_imported_memories..]).iter()
{
let memory = unsafe {
std::slice::from_raw_parts_mut(
memories.next().unwrap(),
(max_pages as usize) * (WASM_PAGE_SIZE as usize),
)
};
instance.memories.push(
Memory::new_static(
plan,
memories.next().unwrap(),
max_pages,
memory,
commit_memory_pages,
limiter,
borrow_limiter(&mut limiter),
)
.map_err(InstantiationError::Resource)?,
);
}
let mut dropped_data = instance.dropped_data.borrow_mut();
debug_assert!(dropped_data.is_empty());
dropped_data.resize(module.passive_data.len());
debug_assert!(instance.dropped_data.is_empty());
instance.dropped_data.resize(module.passive_data.len());
Ok(())
}
fn set_instance_tables(
instance: &mut Instance,
mut tables: impl Iterator<Item = *mut u8>,
mut tables: impl Iterator<Item = *mut usize>,
max_elements: u32,
limiter: Option<&Rc<dyn ResourceLimiter>>,
mut limiter: Option<&mut dyn ResourceLimiter>,
) -> Result<(), InstantiationError> {
let module = instance.module.as_ref();
@@ -537,18 +558,23 @@ impl InstancePool {
for plan in (&module.table_plans.values().as_slice()[module.num_imported_tables..]).iter() {
let base = tables.next().unwrap();
commit_table_pages(base, max_elements as usize * mem::size_of::<*mut u8>())
.map_err(InstantiationError::Resource)?;
commit_table_pages(
base as *mut u8,
max_elements as usize * mem::size_of::<*mut u8>(),
)
.map_err(InstantiationError::Resource)?;
let table = unsafe { std::slice::from_raw_parts_mut(base, max_elements as usize) };
instance.tables.push(
Table::new_static(plan, base as _, max_elements, limiter)
Table::new_static(plan, table, borrow_limiter(&mut limiter))
.map_err(InstantiationError::Resource)?,
);
}
let mut dropped_elements = instance.dropped_elements.borrow_mut();
debug_assert!(dropped_elements.is_empty());
dropped_elements.resize(module.passive_elements.len());
debug_assert!(instance.dropped_elements.is_empty());
instance
.dropped_elements
.resize(module.passive_elements.len());
Ok(())
}
@@ -595,7 +621,7 @@ impl MemoryPool {
}
// The maximum module memory page count cannot exceed the memory reservation size
if (module_limits.memory_pages * WASM_PAGE_SIZE) as u64
if u64::from(module_limits.memory_pages) * u64::from(WASM_PAGE_SIZE)
> instance_limits.memory_reservation_size
{
bail!(
@@ -957,21 +983,22 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
unsafe fn initialize(
&self,
handle: &InstanceHandle,
handle: &mut InstanceHandle,
module: &Module,
is_bulk_memory: bool,
) -> Result<(), InstantiationError> {
let instance = handle.instance();
let instance = handle.instance_mut();
cfg_if::cfg_if! {
if #[cfg(all(feature = "uffd", target_os = "linux"))] {
match &instance.module.memory_initialization {
match &module.memory_initialization {
wasmtime_environ::MemoryInitialization::Paged{ out_of_bounds, .. } => {
if !is_bulk_memory {
super::check_init_bounds(instance)?;
super::check_init_bounds(instance, module)?;
}
// Initialize the tables
super::initialize_tables(instance)?;
super::initialize_tables(instance, module)?;
// Don't initialize the memory; the fault handler will back the pages when accessed
@@ -984,10 +1011,10 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
Ok(())
},
_ => initialize_instance(instance, is_bulk_memory)
_ => initialize_instance(instance, module, is_bulk_memory)
}
} else {
initialize_instance(instance, is_bulk_memory)
initialize_instance(instance, module, is_bulk_memory)
}
}
}
@@ -1355,19 +1382,6 @@ mod test {
let instances = InstancePool::new(&module_limits, &instance_limits)?;
assert_eq!(
instances.offsets.pointer_size,
std::mem::size_of::<*const u8>() as u8
);
assert_eq!(instances.offsets.num_signature_ids, 0);
assert_eq!(instances.offsets.num_imported_functions, 0);
assert_eq!(instances.offsets.num_imported_tables, 0);
assert_eq!(instances.offsets.num_imported_memories, 0);
assert_eq!(instances.offsets.num_imported_globals, 0);
assert_eq!(instances.offsets.num_defined_functions, 0);
assert_eq!(instances.offsets.num_defined_tables, 1);
assert_eq!(instances.offsets.num_defined_memories, 1);
assert_eq!(instances.offsets.num_defined_globals, 0);
// As of April 2021, the instance struct's size is largely below the size of a single page,
// so it's safe to assume it's been rounded to the size of a single memory page here.
assert_eq!(instances.instance_size, region::page::size());
@@ -1395,10 +1409,7 @@ mod test {
},
shared_signatures: VMSharedSignatureIndex::default().into(),
host_state: Box::new(()),
interrupts: std::ptr::null(),
externref_activations_table: std::ptr::null_mut(),
module_info_lookup: None,
limiter: None,
store: None,
},
)
.expect("allocation should succeed"),
@@ -1420,10 +1431,7 @@ mod test {
},
shared_signatures: VMSharedSignatureIndex::default().into(),
host_state: Box::new(()),
interrupts: std::ptr::null(),
externref_activations_table: std::ptr::null_mut(),
module_info_lookup: None,
limiter: None,
store: None,
},
) {
Err(InstantiationError::Limit(3)) => {}

View File

@@ -130,7 +130,7 @@ fn reset_guard_page(addr: *mut u8, len: usize) -> Result<()> {
}
/// Represents a location of a page fault within monitored regions of memory.
enum FaultLocation<'a> {
enum FaultLocation {
/// The address location is in a WebAssembly linear memory page.
/// The fault handler will copy the pages from initialization data if necessary.
MemoryPage {
@@ -139,7 +139,7 @@ enum FaultLocation<'a> {
/// The length of the page being accessed.
len: usize,
/// The instance related to the memory page that was accessed.
instance: &'a Instance,
instance: *mut Instance,
/// The index of the memory that was accessed.
memory_index: DefinedMemoryIndex,
/// The Wasm page index to initialize if the access was not a guard page.
@@ -194,9 +194,9 @@ impl FaultLocator {
///
/// If the assumption holds true, accessing the instance data from the handler thread
/// should, in theory, be safe.
unsafe fn get_instance(&self, index: usize) -> &Instance {
unsafe fn get_instance(&self, index: usize) -> *mut Instance {
debug_assert!(index < self.max_instances);
&*((self.instances_start + (index * self.instance_size)) as *const Instance)
(self.instances_start + (index * self.instance_size)) as *mut Instance
}
unsafe fn locate(&self, addr: usize) -> Option<FaultLocation> {
@@ -208,7 +208,7 @@ impl FaultLocator {
let page_index = (addr - memory_start) / WASM_PAGE_SIZE;
let instance = self.get_instance(index / self.max_memories);
let init_page_index = instance.memories.get(memory_index).and_then(|m| {
let init_page_index = (*instance).memories.get(memory_index).and_then(|m| {
if page_index < m.size() as usize {
Some(page_index)
} else {
@@ -310,13 +310,13 @@ unsafe fn handle_page_fault(
match page_index {
Some(page_index) => {
initialize_wasm_page(&uffd, instance, page_addr, memory_index, page_index)?;
initialize_wasm_page(&uffd, &*instance, page_addr, memory_index, page_index)?;
}
None => {
log::trace!("out of bounds memory access at {:p}", addr);
// Record the guard page fault so the page protection level can be reset later
instance.memories[memory_index].record_guard_page_fault(
(*instance).memories[memory_index].record_guard_page_fault(
page_addr,
len,
reset_guard_page,
@@ -436,7 +436,6 @@ mod test {
Imports, InstanceAllocationRequest, InstanceLimits, ModuleLimits,
PoolingAllocationStrategy, VMSharedSignatureIndex,
};
use std::ptr;
use std::sync::Arc;
use wasmtime_environ::{entity::PrimaryMap, wasm::Memory, MemoryPlan, MemoryStyle, Module};
@@ -521,10 +520,7 @@ mod test {
},
shared_signatures: VMSharedSignatureIndex::default().into(),
host_state: Box::new(()),
interrupts: ptr::null(),
externref_activations_table: ptr::null_mut(),
module_info_lookup: None,
limiter: None,
store: None,
},
)
.expect("instance should allocate"),