Refactor module instantiation in the runtime.

This commit refactors module instantiation in the runtime to allow for
different instance allocation strategy implementations.

It adds an `InstanceAllocator` trait with the current implementation put behind
the `OnDemandInstanceAllocator` struct.

The Wasmtime API has been updated to allow a `Config` to have an instance
allocation strategy set which will determine how instances get allocated.

This change is in preparation for an alternative *pooling* instance allocator
that can reserve all needed host process address space in advance.

This commit also makes changes to the `wasmtime_environ` crate to represent
compiled modules in a way that reduces copying at instantiation time.
This commit is contained in:
Peter Huene
2020-11-25 16:10:09 -08:00
parent 8854dec01d
commit b58afbf849
14 changed files with 829 additions and 686 deletions

View File

@@ -101,6 +101,8 @@ pub fn translate_module<'data>(
Payload::DataCountSection { count, range } => { Payload::DataCountSection { count, range } => {
validator.data_count_section(count, &range)?; validator.data_count_section(count, &range)?;
// NOTE: the count here is the total segment count, not the passive segment count
environ.reserve_passive_data(count)?; environ.reserve_passive_data(count)?;
} }

View File

@@ -20,7 +20,7 @@ pub mod isa {
} }
pub mod entity { pub mod entity {
pub use cranelift_entity::{packed_option, BoxedSlice, EntityRef, PrimaryMap}; pub use cranelift_entity::{packed_option, BoxedSlice, EntityRef, EntitySet, PrimaryMap};
} }
pub mod wasm { pub mod wasm {

View File

@@ -158,13 +158,19 @@ pub struct Module {
pub table_elements: Vec<TableElements>, pub table_elements: Vec<TableElements>,
/// WebAssembly passive elements. /// WebAssembly passive elements.
pub passive_elements: HashMap<ElemIndex, Box<[FuncIndex]>>, pub passive_elements: Vec<Box<[FuncIndex]>>,
/// The map from passive element index (element segment index space) to index in `passive_elements`.
pub passive_elements_map: HashMap<ElemIndex, usize>,
/// WebAssembly passive data segments. /// WebAssembly passive data segments.
#[serde(with = "passive_data_serde")] #[serde(with = "passive_data_serde")]
pub passive_data: HashMap<DataIndex, Arc<[u8]>>, pub passive_data: Vec<Arc<[u8]>>,
/// WebAssembly table initializers. /// The map from passive data index (data segment index space) to index in `passive_data`.
pub passive_data_map: HashMap<DataIndex, usize>,
/// WebAssembly function names.
pub func_names: HashMap<FuncIndex, String>, pub func_names: HashMap<FuncIndex, String>,
/// Types declared in the wasm module. /// Types declared in the wasm module.
@@ -272,7 +278,8 @@ impl Module {
/// Get the given passive element, if it exists. /// Get the given passive element, if it exists.
pub fn get_passive_element(&self, index: ElemIndex) -> Option<&[FuncIndex]> { pub fn get_passive_element(&self, index: ElemIndex) -> Option<&[FuncIndex]> {
self.passive_elements.get(&index).map(|es| &**es) let index = *self.passive_elements_map.get(&index)?;
Some(self.passive_elements[index].as_ref())
} }
/// Convert a `DefinedFuncIndex` into a `FuncIndex`. /// Convert a `DefinedFuncIndex` into a `FuncIndex`.
@@ -419,47 +426,45 @@ pub struct InstanceSignature {
} }
mod passive_data_serde { mod passive_data_serde {
use super::{Arc, DataIndex, HashMap}; use super::Arc;
use serde::{de::MapAccess, de::Visitor, ser::SerializeMap, Deserializer, Serializer}; use serde::{de::SeqAccess, de::Visitor, ser::SerializeSeq, Deserializer, Serializer};
use std::fmt; use std::fmt;
pub(super) fn serialize<S>( pub(super) fn serialize<S>(data: &Vec<Arc<[u8]>>, ser: S) -> Result<S::Ok, S::Error>
data: &HashMap<DataIndex, Arc<[u8]>>,
ser: S,
) -> Result<S::Ok, S::Error>
where where
S: Serializer, S: Serializer,
{ {
let mut map = ser.serialize_map(Some(data.len()))?; let mut seq = ser.serialize_seq(Some(data.len()))?;
for (k, v) in data { for v in data {
map.serialize_entry(k, v.as_ref())?; seq.serialize_element(v.as_ref())?;
} }
map.end() seq.end()
} }
struct PassiveDataVisitor; struct PassiveDataVisitor;
impl<'de> Visitor<'de> for PassiveDataVisitor { impl<'de> Visitor<'de> for PassiveDataVisitor {
type Value = HashMap<DataIndex, Arc<[u8]>>; type Value = Vec<Arc<[u8]>>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a passive_data map") formatter.write_str("a passive data sequence")
} }
fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
fn visit_seq<M>(self, mut access: M) -> Result<Self::Value, M::Error>
where where
M: MapAccess<'de>, M: SeqAccess<'de>,
{ {
let mut map = HashMap::with_capacity(access.size_hint().unwrap_or(0)); let mut data = Vec::with_capacity(access.size_hint().unwrap_or(0));
while let Some((key, value)) = access.next_entry::<_, Vec<u8>>()? { while let Some(value) = access.next_element::<Vec<u8>>()? {
map.insert(key, value.into()); data.push(value.into());
} }
Ok(map) Ok(data)
} }
} }
pub(super) fn deserialize<'de, D>(de: D) -> Result<HashMap<DataIndex, Arc<[u8]>>, D::Error> pub(super) fn deserialize<'de, D>(de: D) -> Result<Vec<Arc<[u8]>>, D::Error>
where where
D: Deserializer<'de>, D: Deserializer<'de>,
{ {
de.deserialize_map(PassiveDataVisitor) de.deserialize_seq(PassiveDataVisitor)
} }
} }

View File

@@ -710,11 +710,13 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
elem_index: ElemIndex, elem_index: ElemIndex,
segments: Box<[FuncIndex]>, segments: Box<[FuncIndex]>,
) -> WasmResult<()> { ) -> WasmResult<()> {
let index = self.result.module.passive_elements.len();
self.result.module.passive_elements.push(segments);
let old = self let old = self
.result .result
.module .module
.passive_elements .passive_elements_map
.insert(elem_index, segments); .insert(elem_index, index);
debug_assert!( debug_assert!(
old.is_none(), old.is_none(),
"should never get duplicate element indices, that would be a bug in `cranelift_wasm`'s \ "should never get duplicate element indices, that would be a bug in `cranelift_wasm`'s \
@@ -782,17 +784,21 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
Ok(()) Ok(())
} }
fn reserve_passive_data(&mut self, count: u32) -> WasmResult<()> { fn reserve_passive_data(&mut self, _count: u32) -> WasmResult<()> {
self.result.module.passive_data.reserve(count as usize); // Note: the count passed in here is the *total* segment count
// There is no way to reserve for just the passive segments as they are discovered when iterating the data section entries
// Given that the total segment count might be much larger than the passive count, do not reserve
Ok(()) Ok(())
} }
fn declare_passive_data(&mut self, data_index: DataIndex, data: &'data [u8]) -> WasmResult<()> { fn declare_passive_data(&mut self, data_index: DataIndex, data: &'data [u8]) -> WasmResult<()> {
let index = self.result.module.passive_data.len();
self.result.module.passive_data.push(Arc::from(data));
let old = self let old = self
.result .result
.module .module
.passive_data .passive_data_map
.insert(data_index, Arc::from(data)); .insert(data_index, index);
debug_assert!( debug_assert!(
old.is_none(), old.is_none(),
"a module can't have duplicate indices, this would be a cranelift-wasm bug" "a module can't have duplicate indices, this would be a cranelift-wasm bug"
@@ -1088,3 +1094,24 @@ pub struct DataInitializer<'data> {
/// The initialization data. /// The initialization data.
pub data: &'data [u8], pub data: &'data [u8],
} }
/// Similar to `DataInitializer`, but owns its own copy of the data rather
/// than holding a slice of the original module.
#[derive(Serialize, Deserialize)]
pub struct OwnedDataInitializer {
/// The location where the initialization is to be performed.
pub location: DataInitializerLocation,
/// The initialization data.
pub data: Box<[u8]>,
}
impl OwnedDataInitializer {
/// Creates a new owned data initializer from a borrowed data initializer.
pub fn new(borrowed: DataInitializer<'_>) -> Self {
Self {
location: borrowed.location.clone(),
data: borrowed.data.into(),
}
}
}

View File

@@ -11,7 +11,6 @@ use object::File as ObjectFile;
#[cfg(feature = "parallel-compilation")] #[cfg(feature = "parallel-compilation")]
use rayon::prelude::*; use rayon::prelude::*;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::any::Any;
use std::ops::Range; use std::ops::Range;
use std::sync::Arc; use std::sync::Arc;
use thiserror::Error; use thiserror::Error;
@@ -22,16 +21,11 @@ use wasmtime_environ::wasm::{
DefinedFuncIndex, InstanceTypeIndex, ModuleTypeIndex, SignatureIndex, WasmFuncType, DefinedFuncIndex, InstanceTypeIndex, ModuleTypeIndex, SignatureIndex, WasmFuncType,
}; };
use wasmtime_environ::{ use wasmtime_environ::{
CompileError, DataInitializer, DataInitializerLocation, DebugInfoData, FunctionAddressMap, CompileError, DebugInfoData, FunctionAddressMap, InstanceSignature, Module, ModuleEnvironment,
InstanceSignature, Module, ModuleEnvironment, ModuleSignature, ModuleTranslation, ModuleSignature, ModuleTranslation, OwnedDataInitializer, StackMapInformation, TrapInformation,
StackMapInformation, TrapInformation,
}; };
use wasmtime_profiling::ProfilingAgent; use wasmtime_profiling::ProfilingAgent;
use wasmtime_runtime::{ use wasmtime_runtime::{GdbJitImageRegistration, InstantiationError, VMFunctionBody, VMTrampoline};
GdbJitImageRegistration, Imports, InstanceHandle, InstantiationError, RuntimeMemoryCreator,
StackMapRegistry, VMExternRefActivationsTable, VMFunctionBody, VMInterrupts,
VMSharedSignatureIndex, VMTrampoline,
};
/// An error condition while setting up a wasm instance, be it validation, /// An error condition while setting up a wasm instance, be it validation,
/// compilation, or instantiation. /// compilation, or instantiation.
@@ -59,7 +53,8 @@ pub enum SetupError {
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
pub struct CompilationArtifacts { pub struct CompilationArtifacts {
/// Module metadata. /// Module metadata.
module: Module, #[serde(with = "arc_serde")]
module: Arc<Module>,
/// ELF image with functions code. /// ELF image with functions code.
obj: Box<[u8]>, obj: Box<[u8]>,
@@ -68,7 +63,8 @@ pub struct CompilationArtifacts {
unwind_info: Box<[ObjectUnwindInfo]>, unwind_info: Box<[ObjectUnwindInfo]>,
/// Data initializers. /// Data initializers.
data_initializers: Box<[OwnedDataInitializer]>, #[serde(with = "arc_slice_serde")]
data_initializers: Arc<[OwnedDataInitializer]>,
/// Descriptions of compiled functions /// Descriptions of compiled functions
funcs: PrimaryMap<DefinedFuncIndex, FunctionInfo>, funcs: PrimaryMap<DefinedFuncIndex, FunctionInfo>,
@@ -134,7 +130,7 @@ impl CompilationArtifacts {
.into_iter() .into_iter()
.map(OwnedDataInitializer::new) .map(OwnedDataInitializer::new)
.collect::<Vec<_>>() .collect::<Vec<_>>()
.into_boxed_slice(); .into();
let obj = obj.write().map_err(|_| { let obj = obj.write().map_err(|_| {
SetupError::Instantiate(InstantiationError::Resource( SetupError::Instantiate(InstantiationError::Resource(
@@ -143,7 +139,7 @@ impl CompilationArtifacts {
})?; })?;
Ok(CompilationArtifacts { Ok(CompilationArtifacts {
module, module: Arc::new(module),
obj: obj.into_boxed_slice(), obj: obj.into_boxed_slice(),
unwind_info: unwind_info.into_boxed_slice(), unwind_info: unwind_info.into_boxed_slice(),
data_initializers, data_initializers,
@@ -208,7 +204,6 @@ pub struct ModuleCode {
/// A compiled wasm module, ready to be instantiated. /// A compiled wasm module, ready to be instantiated.
pub struct CompiledModule { pub struct CompiledModule {
artifacts: CompilationArtifacts, artifacts: CompilationArtifacts,
module: Arc<Module>,
code: Arc<ModuleCode>, code: Arc<ModuleCode>,
finished_functions: FinishedFunctions, finished_functions: FinishedFunctions,
trampolines: PrimaryMap<SignatureIndex, VMTrampoline>, trampolines: PrimaryMap<SignatureIndex, VMTrampoline>,
@@ -267,7 +262,6 @@ impl CompiledModule {
let finished_functions = FinishedFunctions(finished_functions); let finished_functions = FinishedFunctions(finished_functions);
Ok(Arc::new(Self { Ok(Arc::new(Self {
module: Arc::new(artifacts.module.clone()),
artifacts, artifacts,
code: Arc::new(ModuleCode { code: Arc::new(ModuleCode {
code_memory, code_memory,
@@ -278,62 +272,24 @@ impl CompiledModule {
})) }))
} }
/// Create an `Instance` from this `CompiledModule`.
///
/// Note that if only one instance of this module is needed, it may be more
/// efficient to call the top-level `instantiate`, since that avoids copying
/// the data initializers.
///
/// # Unsafety
///
/// See `InstanceHandle::new`
pub unsafe fn instantiate(
&self,
imports: Imports<'_>,
lookup_shared_signature: &dyn Fn(SignatureIndex) -> VMSharedSignatureIndex,
mem_creator: Option<&dyn RuntimeMemoryCreator>,
interrupts: *const VMInterrupts,
host_state: Box<dyn Any>,
externref_activations_table: *mut VMExternRefActivationsTable,
stack_map_registry: *mut StackMapRegistry,
) -> Result<InstanceHandle, InstantiationError> {
InstanceHandle::new(
self.module.clone(),
&self.finished_functions.0,
imports,
mem_creator,
lookup_shared_signature,
host_state,
interrupts,
externref_activations_table,
stack_map_registry,
)
}
/// Extracts `CompilationArtifacts` from the compiled module. /// Extracts `CompilationArtifacts` from the compiled module.
pub fn compilation_artifacts(&self) -> &CompilationArtifacts { pub fn compilation_artifacts(&self) -> &CompilationArtifacts {
&self.artifacts &self.artifacts
} }
/// Returns data initializers to pass to `InstanceHandle::initialize` /// Returns the data initializers from the compiled module.
pub fn data_initializers(&self) -> Vec<DataInitializer<'_>> { pub fn data_initializers(&self) -> &Arc<[OwnedDataInitializer]> {
self.artifacts &self.artifacts.data_initializers
.data_initializers
.iter()
.map(|init| DataInitializer {
location: init.location.clone(),
data: &*init.data,
})
.collect()
} }
/// Return a reference-counting pointer to a module. /// Return a reference-counting pointer to a module.
pub fn module(&self) -> &Arc<Module> { pub fn module(&self) -> &Arc<Module> {
&self.module &self.artifacts.module
} }
/// Return a reference to a mutable module (if possible). /// Return a reference to a mutable module (if possible).
pub fn module_mut(&mut self) -> Option<&mut Module> { pub fn module_mut(&mut self) -> Option<&mut Module> {
Arc::get_mut(&mut self.module) Arc::get_mut(&mut self.artifacts.module)
} }
/// Returns the map of all finished JIT functions compiled for this module /// Returns the map of all finished JIT functions compiled for this module
@@ -470,26 +426,6 @@ impl SymbolizeContext {
} }
} }
/// Similar to `DataInitializer`, but owns its own copy of the data rather
/// than holding a slice of the original module.
#[derive(Clone, Serialize, Deserialize)]
pub struct OwnedDataInitializer {
/// The location where the initialization is to be performed.
location: DataInitializerLocation,
/// The initialization data.
data: Box<[u8]>,
}
impl OwnedDataInitializer {
fn new(borrowed: DataInitializer<'_>) -> Self {
Self {
location: borrowed.location.clone(),
data: borrowed.data.to_vec().into_boxed_slice(),
}
}
}
fn create_dbg_image( fn create_dbg_image(
obj: Vec<u8>, obj: Vec<u8>,
code_range: (*const u8, usize), code_range: (*const u8, usize),
@@ -586,3 +522,45 @@ impl From<DebugInfoData<'_>> for DebugInfo {
} }
} }
} }
mod arc_serde {
use super::Arc;
use serde::{de::Deserialize, ser::Serialize, Deserializer, Serializer};
pub(super) fn serialize<S, T>(arc: &Arc<T>, ser: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: Serialize,
{
(**arc).serialize(ser)
}
pub(super) fn deserialize<'de, D, T>(de: D) -> Result<Arc<T>, D::Error>
where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
Ok(Arc::new(T::deserialize(de)?))
}
}
mod arc_slice_serde {
use super::Arc;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub(super) fn serialize<S, T>(arc: &Arc<[T]>, ser: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: Serialize,
{
(**arc).serialize(ser)
}
pub(super) fn deserialize<'de, D, T>(de: D) -> Result<Arc<[T]>, D::Error>
where
D: Deserializer<'de>,
T: Deserialize<'de>,
{
Ok(Vec::<T>::deserialize(de)?.into())
}
}

View File

@@ -4,12 +4,11 @@
use crate::export::Export; use crate::export::Export;
use crate::externref::{StackMapRegistry, VMExternRefActivationsTable}; use crate::externref::{StackMapRegistry, VMExternRefActivationsTable};
use crate::imports::Imports; use crate::memory::{RuntimeLinearMemory, RuntimeMemoryCreator};
use crate::memory::{DefaultMemoryCreator, RuntimeLinearMemory, RuntimeMemoryCreator};
use crate::table::{Table, TableElement}; use crate::table::{Table, TableElement};
use crate::traphandlers::Trap; use crate::traphandlers::Trap;
use crate::vmcontext::{ use crate::vmcontext::{
VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport, VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionImport,
VMGlobalDefinition, VMGlobalImport, VMInterrupts, VMMemoryDefinition, VMMemoryImport, VMGlobalDefinition, VMGlobalImport, VMInterrupts, VMMemoryDefinition, VMMemoryImport,
VMSharedSignatureIndex, VMTableDefinition, VMTableImport, VMSharedSignatureIndex, VMTableDefinition, VMTableImport,
}; };
@@ -17,23 +16,24 @@ use crate::{ExportFunction, ExportGlobal, ExportMemory, ExportTable};
use indexmap::IndexMap; use indexmap::IndexMap;
use memoffset::offset_of; use memoffset::offset_of;
use more_asserts::assert_lt; use more_asserts::assert_lt;
use std::alloc::{self, Layout}; use std::alloc::Layout;
use std::any::Any; use std::any::Any;
use std::cell::RefCell; use std::cell::RefCell;
use std::collections::HashMap;
use std::convert::TryFrom; use std::convert::TryFrom;
use std::ptr::NonNull; use std::ptr::NonNull;
use std::rc::Rc; use std::rc::Rc;
use std::sync::Arc; use std::sync::Arc;
use std::{mem, ptr, slice}; use std::{mem, ptr, slice};
use thiserror::Error; use wasmtime_environ::entity::{packed_option::ReservedValue, BoxedSlice, EntityRef, EntitySet};
use wasmtime_environ::entity::{packed_option::ReservedValue, BoxedSlice, EntityRef, PrimaryMap};
use wasmtime_environ::wasm::{ use wasmtime_environ::wasm::{
DataIndex, DefinedFuncIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, ElemIndex, EntityIndex,
ElemIndex, EntityIndex, FuncIndex, GlobalIndex, GlobalInit, MemoryIndex, SignatureIndex, FuncIndex, GlobalIndex, MemoryIndex, TableElementType, TableIndex,
TableElementType, TableIndex, WasmType,
}; };
use wasmtime_environ::{ir, DataInitializer, Module, ModuleType, TableElements, VMOffsets}; use wasmtime_environ::{ir, Module, VMOffsets};
mod allocator;
pub use allocator::*;
/// Runtime representation of an instance value, which erases all `Instance` /// Runtime representation of an instance value, which erases all `Instance`
/// information since instances are just a collection of values. /// information since instances are just a collection of values.
@@ -56,14 +56,13 @@ pub(crate) struct Instance {
/// WebAssembly table data. /// WebAssembly table data.
tables: BoxedSlice<DefinedTableIndex, Table>, tables: BoxedSlice<DefinedTableIndex, Table>,
/// Passive elements in this instantiation. As `elem.drop`s happen, these /// Stores the dropped passive element segments in this instantiation by index.
/// entries get removed. A missing entry is considered equivalent to an /// If the index is present in the set, the segment has been dropped.
/// empty slice. dropped_elements: RefCell<EntitySet<ElemIndex>>,
passive_elements: RefCell<HashMap<ElemIndex, Box<[*mut VMCallerCheckedAnyfunc]>>>,
/// Passive data segments from our module. As `data.drop`s happen, entries /// Stores the dropped passive data segments in this instantiation by index.
/// get removed. A missing entry is considered equivalent to an empty slice. /// If the index is present in the set, the segment has been dropped.
passive_data: RefCell<HashMap<DataIndex, Arc<[u8]>>>, dropped_data: RefCell<EntitySet<DataIndex>>,
/// Hosts can store arbitrary per-instance information here. /// Hosts can store arbitrary per-instance information here.
host_state: Box<dyn Any>, host_state: Box<dyn Any>,
@@ -551,11 +550,21 @@ impl Instance {
// https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init
let table = self.get_table(table_index); let table = self.get_table(table_index);
let passive_elements = self.passive_elements.borrow(); let elem_index = self.module.passive_elements_map.get(&elem_index);
let elem = passive_elements let elem = match elem_index {
.get(&elem_index) Some(index) => {
.map(|e| &**e) if self
.unwrap_or_else(|| &[]); .dropped_elements
.borrow()
.contains(ElemIndex::new(*index))
{
&[]
} else {
self.module.passive_elements[*index].as_ref()
}
}
None => &[],
};
if src if src
.checked_add(len) .checked_add(len)
@@ -567,8 +576,14 @@ impl Instance {
// TODO(#983): investigate replacing this get/set loop with a `memcpy`. // TODO(#983): investigate replacing this get/set loop with a `memcpy`.
for (dst, src) in (dst..dst + len).zip(src..src + len) { for (dst, src) in (dst..dst + len).zip(src..src + len) {
let elem = self
.get_caller_checked_anyfunc(elem[src as usize])
.map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| {
f as *const VMCallerCheckedAnyfunc as *mut _
});
table table
.set(dst, TableElement::FuncRef(elem[src as usize])) .set(dst, TableElement::FuncRef(elem))
.expect("should never panic because we already did the bounds check above"); .expect("should never panic because we already did the bounds check above");
} }
@@ -579,10 +594,14 @@ impl Instance {
pub(crate) fn elem_drop(&self, elem_index: ElemIndex) { pub(crate) fn elem_drop(&self, elem_index: ElemIndex) {
// https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop
let mut passive_elements = self.passive_elements.borrow_mut(); if let Some(index) = self.module.passive_elements_map.get(&elem_index) {
passive_elements.remove(&elem_index); self.dropped_elements
// Note that we don't check that we actually removed an element because .borrow_mut()
// dropping a non-passive element is a no-op (not a trap). .insert(ElemIndex::new(*index));
}
// Note that we don't check that we actually removed a segment because
// dropping a non-passive segment is a no-op (not a trap).
} }
/// Do a `memory.copy` /// Do a `memory.copy`
@@ -701,10 +720,17 @@ impl Instance {
// https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init
let memory = self.get_memory(memory_index); let memory = self.get_memory(memory_index);
let passive_data = self.passive_data.borrow(); let data_index = self.module.passive_data_map.get(&data_index);
let data = passive_data let data = match data_index {
.get(&data_index) Some(index) => {
.map_or(&[][..], |data| &**data); if self.dropped_data.borrow().contains(DataIndex::new(*index)) {
&[]
} else {
self.module.passive_data[*index].as_ref()
}
}
None => &[],
};
if src if src
.checked_add(len) .checked_add(len)
@@ -729,8 +755,14 @@ impl Instance {
/// Drop the given data segment, truncating its length to zero. /// Drop the given data segment, truncating its length to zero.
pub(crate) fn data_drop(&self, data_index: DataIndex) { pub(crate) fn data_drop(&self, data_index: DataIndex) {
let mut passive_data = self.passive_data.borrow_mut(); if let Some(index) = self.module.passive_data_map.get(&data_index) {
passive_data.remove(&data_index); self.dropped_data
.borrow_mut()
.insert(DataIndex::new(*index));
}
// Note that we don't check that we actually removed a segment because
// dropping a non-passive segment is a no-op (not a trap).
} }
/// Get a table by index regardless of whether it is locally-defined or an /// Get a table by index regardless of whether it is locally-defined or an
@@ -780,197 +812,8 @@ pub struct InstanceHandle {
} }
impl InstanceHandle { impl InstanceHandle {
/// Create a new `InstanceHandle` pointing at a new `Instance`. pub(crate) unsafe fn new(instance: *mut Instance) -> Self {
/// Self { instance }
/// # Unsafety
///
/// This method is not necessarily inherently unsafe to call, but in general
/// the APIs of an `Instance` are quite unsafe and have not been really
/// audited for safety that much. As a result the unsafety here on this
/// method is a low-overhead way of saying "this is an extremely unsafe type
/// to work with".
///
/// Extreme care must be taken when working with `InstanceHandle` and it's
/// recommended to have relatively intimate knowledge of how it works
/// internally if you'd like to do so. If possible it's recommended to use
/// the `wasmtime` crate API rather than this type since that is vetted for
/// safety.
///
/// It is your responsibility to ensure that the given raw
/// `externref_activations_table` and `stack_map_registry` outlive this
/// instance.
pub unsafe fn new(
module: Arc<Module>,
finished_functions: &PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,
imports: Imports,
mem_creator: Option<&dyn RuntimeMemoryCreator>,
lookup_shared_signature: &dyn Fn(SignatureIndex) -> VMSharedSignatureIndex,
host_state: Box<dyn Any>,
interrupts: *const VMInterrupts,
externref_activations_table: *mut VMExternRefActivationsTable,
stack_map_registry: *mut StackMapRegistry,
) -> Result<Self, InstantiationError> {
debug_assert!(!externref_activations_table.is_null());
debug_assert!(!stack_map_registry.is_null());
let tables = create_tables(&module);
let memories = create_memories(&module, mem_creator.unwrap_or(&DefaultMemoryCreator {}))?;
let vmctx_tables = tables
.values()
.map(Table::vmtable)
.collect::<PrimaryMap<DefinedTableIndex, _>>()
.into_boxed_slice();
let vmctx_memories = memories
.values()
.map(|a| a.vmmemory())
.collect::<PrimaryMap<DefinedMemoryIndex, _>>()
.into_boxed_slice();
let vmctx_globals = create_globals(&module);
let offsets = VMOffsets::new(mem::size_of::<*const u8>() as u8, &module);
let passive_data = RefCell::new(module.passive_data.clone());
let handle = {
let instance = Instance {
module,
offsets,
memories,
tables,
passive_elements: Default::default(),
passive_data,
host_state,
vmctx: VMContext {},
};
let layout = instance.alloc_layout();
let instance_ptr = alloc::alloc(layout) as *mut Instance;
if instance_ptr.is_null() {
alloc::handle_alloc_error(layout);
}
ptr::write(instance_ptr, instance);
InstanceHandle {
instance: instance_ptr,
}
};
let instance = handle.instance();
let mut ptr = instance.signature_ids_ptr();
for sig in handle.module().types.values() {
*ptr = match sig {
ModuleType::Function(sig) => lookup_shared_signature(*sig),
_ => VMSharedSignatureIndex::new(u32::max_value()),
};
ptr = ptr.add(1);
}
debug_assert_eq!(imports.functions.len(), handle.module().num_imported_funcs);
ptr::copy(
imports.functions.as_ptr(),
instance.imported_functions_ptr() as *mut VMFunctionImport,
imports.functions.len(),
);
debug_assert_eq!(imports.tables.len(), handle.module().num_imported_tables);
ptr::copy(
imports.tables.as_ptr(),
instance.imported_tables_ptr() as *mut VMTableImport,
imports.tables.len(),
);
debug_assert_eq!(
imports.memories.len(),
handle.module().num_imported_memories
);
ptr::copy(
imports.memories.as_ptr(),
instance.imported_memories_ptr() as *mut VMMemoryImport,
imports.memories.len(),
);
debug_assert_eq!(imports.globals.len(), handle.module().num_imported_globals);
ptr::copy(
imports.globals.as_ptr(),
instance.imported_globals_ptr() as *mut VMGlobalImport,
imports.globals.len(),
);
ptr::copy(
vmctx_tables.values().as_slice().as_ptr(),
instance.tables_ptr() as *mut VMTableDefinition,
vmctx_tables.len(),
);
ptr::copy(
vmctx_memories.values().as_slice().as_ptr(),
instance.memories_ptr() as *mut VMMemoryDefinition,
vmctx_memories.len(),
);
ptr::copy(
vmctx_globals.values().as_slice().as_ptr(),
instance.globals_ptr() as *mut VMGlobalDefinition,
vmctx_globals.len(),
);
ptr::write(
instance.builtin_functions_ptr() as *mut VMBuiltinFunctionsArray,
VMBuiltinFunctionsArray::initialized(),
);
*instance.interrupts() = interrupts;
*instance.externref_activations_table() = externref_activations_table;
*instance.stack_map_registry() = stack_map_registry;
for (index, sig) in instance.module.functions.iter() {
let type_index = lookup_shared_signature(*sig);
let (func_ptr, vmctx) =
if let Some(def_index) = instance.module.defined_func_index(index) {
(
NonNull::new(finished_functions[def_index] as *mut _).unwrap(),
instance.vmctx_ptr(),
)
} else {
let import = instance.imported_function(index);
(import.body, import.vmctx)
};
ptr::write(
instance.anyfunc_ptr(index),
VMCallerCheckedAnyfunc {
func_ptr,
type_index,
vmctx,
},
);
}
// Perform infallible initialization in this constructor, while fallible
// initialization is deferred to the `initialize` method.
initialize_passive_elements(instance);
initialize_globals(instance);
Ok(handle)
}
/// Finishes the instantiation process started by `Instance::new`.
///
/// Only safe to call immediately after instantiation.
pub unsafe fn initialize(
&self,
is_bulk_memory: bool,
data_initializers: &[DataInitializer<'_>],
) -> Result<(), InstantiationError> {
// Check initializer bounds before initializing anything. Only do this
// when bulk memory is disabled, since the bulk memory proposal changes
// instantiation such that the intermediate results of failed
// initializations are visible.
if !is_bulk_memory {
check_table_init_bounds(self.instance())?;
check_memory_init_bounds(self.instance(), data_initializers)?;
}
// Apply fallible initializers. Note that this can "leak" state even if
// it fails.
initialize_tables(self.instance())?;
initialize_memories(self.instance(), data_initializers)?;
Ok(())
} }
/// Create a new `InstanceHandle` pointing at the instance /// Create a new `InstanceHandle` pointing at the instance
@@ -1126,305 +969,4 @@ impl InstanceHandle {
instance: self.instance, instance: self.instance,
} }
} }
/// Deallocates memory associated with this instance.
///
/// Note that this is unsafe because there might be other handles to this
/// `InstanceHandle` elsewhere, and there's nothing preventing usage of
/// this handle after this function is called.
pub unsafe fn dealloc(&self) {
let instance = self.instance();
let layout = instance.alloc_layout();
ptr::drop_in_place(self.instance);
alloc::dealloc(self.instance.cast(), layout);
}
}
fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
for init in &instance.module().table_elements {
let start = get_table_init_start(init, instance);
let table = instance.get_table(init.table_index);
let size = usize::try_from(table.size()).unwrap();
if size < start + init.elements.len() {
return Err(InstantiationError::Link(LinkError(
"table out of bounds: elements segment does not fit".to_owned(),
)));
}
}
Ok(())
}
/// Compute the offset for a memory data initializer.
fn get_memory_init_start(init: &DataInitializer<'_>, instance: &Instance) -> usize {
let mut start = init.location.offset;
if let Some(base) = init.location.base {
let val = unsafe {
if let Some(def_index) = instance.module.defined_global_index(base) {
*instance.global(def_index).as_u32()
} else {
*(*instance.imported_global(base).from).as_u32()
}
};
start += usize::try_from(val).unwrap();
}
start
}
/// Return a byte-slice view of a memory's data.
///
/// # Safety
///
/// Dereferences the raw `vmctx` and `from` pointers of an imported memory;
/// callers must ensure those pointers are valid for the duration of the call.
/// The returned slice aliases the memory's live data and is only valid while
/// the memory is neither grown nor dropped.
unsafe fn get_memory_slice<'instance>(
    init: &DataInitializer<'_>,
    instance: &'instance Instance,
) -> &'instance mut [u8] {
    let memory = if let Some(defined_memory_index) = instance
        .module
        .defined_memory_index(init.location.memory_index)
    {
        // The target memory is defined by this instance.
        instance.memory(defined_memory_index)
    } else {
        // The target memory is imported: hop through the import's vmctx to
        // the owning instance and look the memory up by its defined index
        // there.
        let import = instance.imported_memory(init.location.memory_index);
        let foreign_instance = (&mut *(import).vmctx).instance();
        let foreign_memory = &mut *(import).from;
        let foreign_index = foreign_instance.memory_index(foreign_memory);
        foreign_instance.memory(foreign_index)
    };
    slice::from_raw_parts_mut(memory.base, memory.current_length)
}
/// Verifies that every data segment fits entirely within its target memory,
/// returning a link error otherwise.
fn check_memory_init_bounds(
    instance: &Instance,
    data_initializers: &[DataInitializer<'_>],
) -> Result<(), InstantiationError> {
    for init in data_initializers {
        let start = get_memory_init_start(init, instance);
        unsafe {
            let mem_slice = get_memory_slice(init, instance);
            // Compute the end offset with checked arithmetic so an oversized
            // `start` cannot overflow (mirroring the `checked_add` used by
            // `initialize_memories`); overflow is reported as the same
            // out-of-bounds link error.
            if init
                .data
                .len()
                .checked_add(start)
                .and_then(|end| mem_slice.get_mut(start..end))
                .is_none()
            {
                return Err(InstantiationError::Link(LinkError(
                    "memory out of bounds: data segment does not fit".into(),
                )));
            }
        }
    }
    Ok(())
}
/// Allocate memory for just the tables of the current module.
///
/// Only locally-defined tables get storage here; imported tables are owned
/// by their exporting instance.
fn create_tables(module: &Module) -> BoxedSlice<DefinedTableIndex, Table> {
    let imported = module.num_imported_tables;
    let defined = module.table_plans.len() - imported;
    let mut tables: PrimaryMap<DefinedTableIndex, _> = PrimaryMap::with_capacity(defined);
    for plan in module.table_plans.values().as_slice()[imported..].iter() {
        tables.push(Table::new(plan));
    }
    tables.into_boxed_slice()
}
/// Compute the starting offset for a table element initializer.
///
/// The segment's constant offset is added to the value of its base global
/// (when one is present), which may live in this instance or be imported.
fn get_table_init_start(init: &TableElements, instance: &Instance) -> usize {
    let base = match init.base {
        Some(global) => {
            let val = unsafe {
                match instance.module.defined_global_index(global) {
                    Some(def_index) => *instance.global(def_index).as_u32(),
                    None => *(*instance.imported_global(global).from).as_u32(),
                }
            };
            usize::try_from(val).unwrap()
        }
        None => 0,
    };
    init.offset + base
}
/// Initialize the table memory from the provided initializers.
///
/// Each active element segment is bounds-checked against its target table
/// before any of its entries are written; an out-of-bounds segment produces
/// a `TableOutOfBounds` trap. Note that earlier segments may already have
/// been applied when a later one traps.
fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
    for init in &instance.module().table_elements {
        let start = get_table_init_start(init, instance);
        let table = instance.get_table(init.table_index);
        // `checked_add` guards against overflow of `start + len`; overflow is
        // treated the same as exceeding the table size.
        if start
            .checked_add(init.elements.len())
            .map_or(true, |end| end > table.size() as usize)
        {
            return Err(InstantiationError::Trap(Trap::wasm(
                ir::TrapCode::TableOutOfBounds,
            )));
        }
        for (i, func_idx) in init.elements.iter().enumerate() {
            let item = match table.element_type() {
                // Resolve the function index to this instance's anyfunc; a
                // missing anyfunc (the reserved "null" index) becomes a null
                // pointer entry.
                TableElementType::Func => instance
                    .get_caller_checked_anyfunc(*func_idx)
                    .map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| {
                        f as *const VMCallerCheckedAnyfunc as *mut VMCallerCheckedAnyfunc
                    })
                    .into(),
                // Non-function tables only admit null externref entries from
                // element segments, so the index must be the reserved value.
                TableElementType::Val(_) => {
                    assert!(*func_idx == FuncIndex::reserved_value());
                    TableElement::ExternRef(None)
                }
            };
            table.set(u32::try_from(start + i).unwrap(), item).unwrap();
        }
    }
    Ok(())
}
/// Initialize the `Instance::passive_elements` map by resolving the
/// `Module::passive_elements`'s `FuncIndex`s into `VMCallerCheckedAnyfunc`s for
/// this instance.
fn initialize_passive_elements(instance: &Instance) {
    let mut passive_elements = instance.passive_elements.borrow_mut();
    debug_assert!(
        passive_elements.is_empty(),
        "should only be called once, at initialization time"
    );

    // Empty segments are skipped: they behave the same as dropped segments,
    // so there is no need to materialize them in the map.
    passive_elements.extend(
        instance
            .module
            .passive_elements
            .iter()
            .filter(|(_, segments)| !segments.is_empty())
            .map(|(idx, segments)| {
                (
                    *idx,
                    segments
                        .iter()
                        .map(|s| {
                            // Unresolvable entries become null pointers rather
                            // than being rejected here.
                            instance.get_caller_checked_anyfunc(*s).map_or(
                                ptr::null_mut(),
                                |f: &VMCallerCheckedAnyfunc| {
                                    f as *const VMCallerCheckedAnyfunc as *mut _
                                },
                            )
                        })
                        .collect(),
                )
            }),
    );
}
/// Allocate memory for just the memories of the current module.
///
/// Only locally-defined memories are created here (via `mem_creator`);
/// imported memories are owned by their exporting instance. Creation
/// failures surface as `InstantiationError::Resource`.
fn create_memories(
    module: &Module,
    mem_creator: &dyn RuntimeMemoryCreator,
) -> Result<BoxedSlice<DefinedMemoryIndex, Box<dyn RuntimeLinearMemory>>, InstantiationError> {
    let imported = module.num_imported_memories;
    let defined = module.memory_plans.len() - imported;
    let mut memories: PrimaryMap<DefinedMemoryIndex, _> = PrimaryMap::with_capacity(defined);
    for plan in module.memory_plans.values().as_slice()[imported..].iter() {
        let memory = mem_creator
            .new_memory(plan)
            .map_err(InstantiationError::Resource)?;
        memories.push(memory);
    }
    Ok(memories.into_boxed_slice())
}
/// Initialize the memory contents from the provided data initializers.
///
/// Each active data segment is bounds-checked against its target memory
/// before being copied in; an out-of-bounds segment produces a
/// `HeapOutOfBounds` trap. Note that earlier segments may already have been
/// copied when a later one traps.
fn initialize_memories(
    instance: &Instance,
    data_initializers: &[DataInitializer<'_>],
) -> Result<(), InstantiationError> {
    for init in data_initializers {
        let memory = instance.get_memory(init.location.memory_index);

        let start = get_memory_init_start(init, instance);
        // `checked_add` guards against overflow of `start + len`; overflow is
        // treated the same as exceeding the memory's current length.
        if start
            .checked_add(init.data.len())
            .map_or(true, |end| end > memory.current_length)
        {
            return Err(InstantiationError::Trap(Trap::wasm(
                ir::TrapCode::HeapOutOfBounds,
            )));
        }

        unsafe {
            // Safe: the bounds check above guarantees `start..end` lies
            // within the memory's current length.
            let mem_slice = get_memory_slice(init, instance);
            let end = start + init.data.len();
            let to_init = &mut mem_slice[start..end];
            to_init.copy_from_slice(init.data);
        }
    }
    Ok(())
}
/// Allocate zero-initialized storage for the globals defined by the current
/// module (imported globals get no storage here); initializer expressions are
/// applied separately.
fn create_globals(module: &Module) -> BoxedSlice<DefinedGlobalIndex, VMGlobalDefinition> {
    let imported = module.num_imported_globals;
    let defined = module.globals.len() - imported;
    let mut vmctx_globals = PrimaryMap::with_capacity(defined);
    for _ in 0..defined {
        vmctx_globals.push(VMGlobalDefinition::new());
    }
    vmctx_globals.into_boxed_slice()
}
/// Evaluate each defined global's initializer expression and write the result
/// into the instance's global storage.
fn initialize_globals(instance: &Instance) {
    let module = instance.module();
    let num_imports = module.num_imported_globals;
    // Skip imported globals: only locally-defined globals have storage (and
    // initializers) in this instance.
    for (index, global) in module.globals.iter().skip(num_imports) {
        let def_index = module.defined_global_index(index).unwrap();
        unsafe {
            let to = instance.global_ptr(def_index);
            match global.initializer {
                GlobalInit::I32Const(x) => *(*to).as_i32_mut() = x,
                GlobalInit::I64Const(x) => *(*to).as_i64_mut() = x,
                GlobalInit::F32Const(x) => *(*to).as_f32_bits_mut() = x,
                GlobalInit::F64Const(x) => *(*to).as_f64_bits_mut() = x,
                GlobalInit::V128Const(x) => *(*to).as_u128_bits_mut() = x.0,
                // Copy the current value of another (defined or imported)
                // global.
                GlobalInit::GetGlobal(x) => {
                    let from = if let Some(def_x) = module.defined_global_index(x) {
                        instance.global(def_x)
                    } else {
                        *instance.imported_global(x).from
                    };
                    *to = from;
                }
                // `ref.func` initializer: store a pointer to this instance's
                // anyfunc for the referenced function.
                GlobalInit::RefFunc(f) => {
                    *(*to).as_anyfunc_mut() = instance.get_caller_checked_anyfunc(f).unwrap()
                        as *const VMCallerCheckedAnyfunc;
                }
                // `ref.null` initializer: representation depends on the
                // reference type.
                GlobalInit::RefNullConst => match global.wasm_ty {
                    WasmType::FuncRef => *(*to).as_anyfunc_mut() = ptr::null(),
                    WasmType::ExternRef => *(*to).as_externref_mut() = None,
                    ty => panic!("unsupported reference type for global: {:?}", ty),
                },
                GlobalInit::Import => panic!("locally-defined global initialized as import"),
            }
        }
    }
}
/// A link error while instantiating a module.
///
/// Reported when an active element or data segment does not fit within its
/// target table or memory during pre-instantiation bounds checking.
#[derive(Error, Debug)]
#[error("Link error: {0}")]
pub struct LinkError(pub String);
/// An error while instantiating a module.
#[derive(Error, Debug)]
pub enum InstantiationError {
/// Insufficient resources available for execution.
#[error("Insufficient resources: {0}")]
Resource(String),
/// A wasm link error occured.
#[error("Failed to link module")]
Link(#[from] LinkError),
/// A trap ocurred during instantiation, after linking.
#[error("Trap occurred during instantiation")]
Trap(Trap),
} }

View File

@@ -0,0 +1,536 @@
use crate::externref::{StackMapRegistry, VMExternRefActivationsTable};
use crate::imports::Imports;
use crate::instance::{Instance, InstanceHandle, RuntimeMemoryCreator};
use crate::memory::{DefaultMemoryCreator, RuntimeLinearMemory};
use crate::table::{Table, TableElement};
use crate::traphandlers::Trap;
use crate::vmcontext::{
VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport,
VMGlobalDefinition, VMGlobalImport, VMInterrupts, VMMemoryDefinition, VMMemoryImport,
VMSharedSignatureIndex, VMTableDefinition, VMTableImport,
};
use std::alloc;
use std::any::Any;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::ptr::{self, NonNull};
use std::slice;
use std::sync::Arc;
use thiserror::Error;
use wasmtime_environ::entity::{
packed_option::ReservedValue, BoxedSlice, EntityRef, EntitySet, PrimaryMap,
};
use wasmtime_environ::wasm::{
DefinedFuncIndex, DefinedMemoryIndex, DefinedTableIndex, FuncIndex, GlobalInit, SignatureIndex,
TableElementType, WasmType,
};
use wasmtime_environ::{ir, Module, ModuleType, OwnedDataInitializer, TableElements, VMOffsets};
/// Represents a request for a new runtime instance.
///
/// The raw pointers carried here are written into the instance's `VMContext`
/// at allocation time, so they must remain valid for as long as the returned
/// instance is alive (see `InstanceAllocator::allocate`).
pub struct InstanceAllocationRequest<'a> {
    /// The module being instantiated.
    pub module: Arc<Module>,

    /// The finished (JIT) functions for the module.
    pub finished_functions: &'a PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,

    /// The imports to use for the instantiation.
    pub imports: Imports<'a>,

    /// A callback for looking up shared signature indexes.
    pub lookup_shared_signature: &'a dyn Fn(SignatureIndex) -> VMSharedSignatureIndex,

    /// The host state to associate with the instance.
    pub host_state: Box<dyn Any>,

    /// The pointer to the VM interrupts structure to use for the instance.
    pub interrupts: *const VMInterrupts,

    /// The pointer to the reference activations table to use for the instance.
    /// Must be non-null (checked by the on-demand allocator).
    pub externref_activations_table: *mut VMExternRefActivationsTable,

    /// The pointer to the stack map registry to use for the instance.
    /// Must be non-null (checked by the on-demand allocator).
    pub stack_map_registry: *mut StackMapRegistry,
}
/// A link error while instantiating a module.
#[derive(Error, Debug)]
#[error("Link error: {0}")]
pub struct LinkError(pub String);
/// An error while instantiating a module.
#[derive(Error, Debug)]
pub enum InstantiationError {
    /// Insufficient resources available for execution.
    #[error("Insufficient resources: {0}")]
    Resource(String),

    /// A wasm link error occurred.
    #[error("Failed to link module")]
    Link(#[from] LinkError),

    /// A trap occurred during instantiation, after linking.
    #[error("Trap occurred during instantiation")]
    Trap(Trap),
}
/// Represents a runtime instance allocator.
///
/// Implementations decide how the storage backing an `Instance` (and its
/// tables and memories) is obtained and released.
///
/// # Safety
///
/// This trait is unsafe as it requires knowledge of Wasmtime's runtime internals to implement correctly.
pub unsafe trait InstanceAllocator: Send + Sync {
    /// Allocates an instance for the given allocation request.
    ///
    /// # Safety
    ///
    /// This method is not inherently unsafe, but care must be made to ensure
    /// pointers passed in the allocation request outlive the returned instance.
    unsafe fn allocate(
        &self,
        req: InstanceAllocationRequest,
    ) -> Result<InstanceHandle, InstantiationError>;

    /// Finishes the instantiation process started by an instance allocator.
    ///
    /// This applies the module's element and data segment initializers;
    /// `is_bulk_memory` controls whether bounds are checked up-front or
    /// failures may leave partial initialization visible.
    ///
    /// # Safety
    ///
    /// This method is only safe to call immediately after an instance has been allocated.
    unsafe fn initialize(
        &self,
        handle: &InstanceHandle,
        is_bulk_memory: bool,
        data_initializers: &Arc<[OwnedDataInitializer]>,
    ) -> Result<(), InstantiationError>;

    /// Deallocates a previously allocated instance.
    ///
    /// # Safety
    ///
    /// This function is unsafe because there are no guarantees that the given handle
    /// is the only owner of the underlying instance to deallocate.
    ///
    /// Use extreme care when deallocating an instance so that there are no dangling instance pointers.
    unsafe fn deallocate(&self, handle: &InstanceHandle);
}
/// Populates an instance's `VMContext` after allocation: shared signatures,
/// builtin functions, imports, defined-function anyfuncs, table/memory
/// definitions, and defined globals.
///
/// # Safety
///
/// The instance's vmctx storage must have been allocated with the layout
/// this function writes to, and every raw pointer argument must be valid;
/// the pointers are stored in the vmctx and dereferenced later.
unsafe fn initialize_vmcontext(
    instance: &Instance,
    functions: &[VMFunctionImport],
    tables: &[VMTableImport],
    memories: &[VMMemoryImport],
    globals: &[VMGlobalImport],
    finished_functions: &PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,
    lookup_shared_signature: &dyn Fn(SignatureIndex) -> VMSharedSignatureIndex,
    interrupts: *const VMInterrupts,
    externref_activations_table: *mut VMExternRefActivationsTable,
    stack_map_registry: *mut StackMapRegistry,
    get_mem_def: impl Fn(DefinedMemoryIndex) -> VMMemoryDefinition,
    get_table_def: impl Fn(DefinedTableIndex) -> VMTableDefinition,
) {
    let module = &instance.module;

    *instance.interrupts() = interrupts;
    *instance.externref_activations_table() = externref_activations_table;
    *instance.stack_map_registry() = stack_map_registry;

    // Initialize shared signatures
    // Non-function types get a sentinel (max) index since they have no
    // shared signature.
    let mut ptr = instance.signature_ids_ptr();
    for sig in module.types.values() {
        *ptr = match sig {
            ModuleType::Function(sig) => lookup_shared_signature(*sig),
            _ => VMSharedSignatureIndex::new(u32::max_value()),
        };
        ptr = ptr.add(1);
    }

    // Initialize the built-in functions
    ptr::write(
        instance.builtin_functions_ptr() as *mut VMBuiltinFunctionsArray,
        VMBuiltinFunctionsArray::initialized(),
    );

    // Initialize the imports
    // Each import slice is copied verbatim into the vmctx; the debug asserts
    // confirm the caller supplied exactly one entry per declared import.
    debug_assert_eq!(functions.len(), module.num_imported_funcs);
    ptr::copy(
        functions.as_ptr(),
        instance.imported_functions_ptr() as *mut VMFunctionImport,
        functions.len(),
    );
    debug_assert_eq!(tables.len(), module.num_imported_tables);
    ptr::copy(
        tables.as_ptr(),
        instance.imported_tables_ptr() as *mut VMTableImport,
        tables.len(),
    );
    debug_assert_eq!(memories.len(), module.num_imported_memories);
    ptr::copy(
        memories.as_ptr(),
        instance.imported_memories_ptr() as *mut VMMemoryImport,
        memories.len(),
    );
    debug_assert_eq!(globals.len(), module.num_imported_globals);
    ptr::copy(
        globals.as_ptr(),
        instance.imported_globals_ptr() as *mut VMGlobalImport,
        globals.len(),
    );

    // Initialize the defined functions
    // Every function (imported or defined) gets an anyfunc entry; defined
    // functions point at their JIT body and this instance's vmctx, imported
    // ones reuse the import's body/vmctx pair.
    for (index, sig) in instance.module.functions.iter() {
        let type_index = lookup_shared_signature(*sig);

        let (func_ptr, vmctx) = if let Some(def_index) = instance.module.defined_func_index(index) {
            (
                NonNull::new(finished_functions[def_index] as *mut _).unwrap(),
                instance.vmctx_ptr(),
            )
        } else {
            let import = instance.imported_function(index);
            (import.body, import.vmctx)
        };

        ptr::write(
            instance.anyfunc_ptr(index),
            VMCallerCheckedAnyfunc {
                func_ptr,
                type_index,
                vmctx,
            },
        );
    }

    // Initialize the defined tables
    let mut ptr = instance.tables_ptr();
    for i in 0..module.table_plans.len() - module.num_imported_tables {
        ptr::write(ptr, get_table_def(DefinedTableIndex::new(i)));
        ptr = ptr.add(1);
    }

    // Initialize the defined memories
    let mut ptr = instance.memories_ptr();
    for i in 0..module.memory_plans.len() - module.num_imported_memories {
        ptr::write(ptr, get_mem_def(DefinedMemoryIndex::new(i)));
        ptr = ptr.add(1);
    }

    // Initialize the defined globals
    initialize_vmcontext_globals(instance);
}
/// Writes each defined global's initial value directly into the vmctx's
/// global storage, first zero-initializing the slot with `ptr::write`.
///
/// # Safety
///
/// The vmctx global storage must be allocated and writable; slots may be
/// uninitialized memory before this runs (hence the `ptr::write`).
unsafe fn initialize_vmcontext_globals(instance: &Instance) {
    let module = &instance.module;
    let num_imports = module.num_imported_globals;
    for (index, global) in module.globals.iter().skip(num_imports) {
        let def_index = module.defined_global_index(index).unwrap();
        let to = instance.global_ptr(def_index);

        // Initialize the global before writing to it
        ptr::write(to, VMGlobalDefinition::new());

        match global.initializer {
            GlobalInit::I32Const(x) => *(*to).as_i32_mut() = x,
            GlobalInit::I64Const(x) => *(*to).as_i64_mut() = x,
            GlobalInit::F32Const(x) => *(*to).as_f32_bits_mut() = x,
            GlobalInit::F64Const(x) => *(*to).as_f64_bits_mut() = x,
            GlobalInit::V128Const(x) => *(*to).as_u128_bits_mut() = x.0,
            // Copy the current value of another (defined or imported) global.
            GlobalInit::GetGlobal(x) => {
                let from = if let Some(def_x) = module.defined_global_index(x) {
                    instance.global(def_x)
                } else {
                    *instance.imported_global(x).from
                };
                *to = from;
            }
            // `ref.func`: store a pointer to this instance's anyfunc for the
            // referenced function.
            GlobalInit::RefFunc(f) => {
                *(*to).as_anyfunc_mut() = instance.get_caller_checked_anyfunc(f).unwrap()
                    as *const VMCallerCheckedAnyfunc;
            }
            // `ref.null`: representation depends on the reference type.
            GlobalInit::RefNullConst => match global.wasm_ty {
                WasmType::FuncRef => *(*to).as_anyfunc_mut() = ptr::null(),
                WasmType::ExternRef => *(*to).as_externref_mut() = None,
                ty => panic!("unsupported reference type for global: {:?}", ty),
            },
            GlobalInit::Import => panic!("locally-defined global initialized as import"),
        }
    }
}
/// Represents the on-demand instance allocator.
///
/// Resources are allocated from the host (heap allocation plus per-module
/// table/memory creation) at instantiation time and freed when the instance
/// is deallocated.
#[derive(Clone)]
pub struct OnDemandInstanceAllocator {
    // Optional override for how linear memories are created; `None` falls
    // back to `DefaultMemoryCreator`.
    mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
}
impl OnDemandInstanceAllocator {
    /// Creates a new on-demand instance allocator.
    ///
    /// `mem_creator` optionally overrides how linear memories are created;
    /// when `None`, the runtime's `DefaultMemoryCreator` is used.
    pub fn new(mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>) -> Self {
        Self { mem_creator }
    }

    /// Allocates one `Table` for each table defined (not imported) by the
    /// module.
    fn create_tables(module: &Module) -> BoxedSlice<DefinedTableIndex, Table> {
        let num_imports = module.num_imported_tables;
        let mut tables: PrimaryMap<DefinedTableIndex, _> =
            PrimaryMap::with_capacity(module.table_plans.len() - num_imports);
        for table in &module.table_plans.values().as_slice()[num_imports..] {
            tables.push(Table::new(table));
        }
        tables.into_boxed_slice()
    }

    /// Allocates one linear memory for each memory defined (not imported) by
    /// the module, using the configured memory creator.
    fn create_memories(
        &self,
        module: &Module,
    ) -> Result<BoxedSlice<DefinedMemoryIndex, Box<dyn RuntimeLinearMemory>>, InstantiationError>
    {
        let creator = self
            .mem_creator
            .as_deref()
            .unwrap_or_else(|| &DefaultMemoryCreator);
        let num_imports = module.num_imported_memories;
        let mut memories: PrimaryMap<DefinedMemoryIndex, _> =
            PrimaryMap::with_capacity(module.memory_plans.len() - num_imports);
        for plan in &module.memory_plans.values().as_slice()[num_imports..] {
            memories.push(
                creator
                    .new_memory(plan)
                    .map_err(InstantiationError::Resource)?,
            );
        }
        Ok(memories.into_boxed_slice())
    }

    /// Verifies that every active element segment fits within its target
    /// table, returning a link error otherwise.
    fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
        for init in &instance.module.table_elements {
            let start = Self::get_table_init_start(init, instance);
            let table = instance.get_table(init.table_index);
            let size = usize::try_from(table.size()).unwrap();
            if size < start + init.elements.len() {
                return Err(InstantiationError::Link(LinkError(
                    "table out of bounds: elements segment does not fit".to_owned(),
                )));
            }
        }

        Ok(())
    }

    /// Computes a data segment's start offset: constant offset plus the value
    /// of its base global, if any.
    fn get_memory_init_start(init: &OwnedDataInitializer, instance: &Instance) -> usize {
        let mut start = init.location.offset;

        if let Some(base) = init.location.base {
            let val = unsafe {
                if let Some(def_index) = instance.module.defined_global_index(base) {
                    *instance.global(def_index).as_u32()
                } else {
                    *(*instance.imported_global(base).from).as_u32()
                }
            };
            start += usize::try_from(val).unwrap();
        }

        start
    }

    /// Returns a mutable byte-slice view of the segment's target memory.
    ///
    /// # Safety
    ///
    /// Dereferences raw import pointers; the returned slice aliases the live
    /// memory and is only valid while the memory is neither grown nor dropped.
    unsafe fn get_memory_slice<'instance>(
        init: &OwnedDataInitializer,
        instance: &'instance Instance,
    ) -> &'instance mut [u8] {
        let memory = if let Some(defined_memory_index) = instance
            .module
            .defined_memory_index(init.location.memory_index)
        {
            instance.memory(defined_memory_index)
        } else {
            // Imported memory: resolve it through the owning instance.
            let import = instance.imported_memory(init.location.memory_index);
            let foreign_instance = (&mut *(import).vmctx).instance();
            let foreign_memory = &mut *(import).from;
            let foreign_index = foreign_instance.memory_index(foreign_memory);
            foreign_instance.memory(foreign_index)
        };
        slice::from_raw_parts_mut(memory.base, memory.current_length)
    }

    /// Verifies that every data segment fits within its target memory,
    /// returning a link error otherwise.
    fn check_memory_init_bounds(
        instance: &Instance,
        data_initializers: &[OwnedDataInitializer],
    ) -> Result<(), InstantiationError> {
        for init in data_initializers {
            let start = Self::get_memory_init_start(init, instance);
            unsafe {
                let mem_slice = Self::get_memory_slice(init, instance);
                // Use checked arithmetic so an oversized `start` cannot
                // overflow the end offset (mirrors the `checked_add` used by
                // `initialize_memories`); overflow reports the same error.
                if init
                    .data
                    .len()
                    .checked_add(start)
                    .and_then(|end| mem_slice.get_mut(start..end))
                    .is_none()
                {
                    return Err(InstantiationError::Link(LinkError(
                        "memory out of bounds: data segment does not fit".into(),
                    )));
                }
            }
        }

        Ok(())
    }

    /// Computes an element segment's start offset: constant offset plus the
    /// value of its base global, if any.
    fn get_table_init_start(init: &TableElements, instance: &Instance) -> usize {
        let mut start = init.offset;

        if let Some(base) = init.base {
            let val = unsafe {
                if let Some(def_index) = instance.module.defined_global_index(base) {
                    *instance.global(def_index).as_u32()
                } else {
                    *(*instance.imported_global(base).from).as_u32()
                }
            };
            start += usize::try_from(val).unwrap();
        }

        start
    }

    /// Initializes table contents from the module's active element segments;
    /// an out-of-bounds segment produces a `TableOutOfBounds` trap.
    fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
        for init in &instance.module.table_elements {
            let start = Self::get_table_init_start(init, instance);
            let table = instance.get_table(init.table_index);

            // Checked arithmetic guards against overflow of `start + len`.
            if start
                .checked_add(init.elements.len())
                .map_or(true, |end| end > table.size() as usize)
            {
                return Err(InstantiationError::Trap(Trap::wasm(
                    ir::TrapCode::TableOutOfBounds,
                )));
            }

            for (i, func_idx) in init.elements.iter().enumerate() {
                let item = match table.element_type() {
                    // Resolve to this instance's anyfunc; a missing anyfunc
                    // (reserved "null" index) becomes a null pointer entry.
                    TableElementType::Func => instance
                        .get_caller_checked_anyfunc(*func_idx)
                        .map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| {
                            f as *const VMCallerCheckedAnyfunc as *mut VMCallerCheckedAnyfunc
                        })
                        .into(),
                    // Non-function tables only admit null externref entries
                    // from element segments.
                    TableElementType::Val(_) => {
                        assert!(*func_idx == FuncIndex::reserved_value());
                        TableElement::ExternRef(None)
                    }
                };
                table.set(u32::try_from(start + i).unwrap(), item).unwrap();
            }
        }

        Ok(())
    }

    /// Initialize the memory contents from the provided initializers; an
    /// out-of-bounds segment produces a `HeapOutOfBounds` trap.
    fn initialize_memories(
        instance: &Instance,
        data_initializers: &[OwnedDataInitializer],
    ) -> Result<(), InstantiationError> {
        for init in data_initializers {
            let memory = instance.get_memory(init.location.memory_index);

            let start = Self::get_memory_init_start(init, instance);
            // Checked arithmetic guards against overflow of `start + len`.
            if start
                .checked_add(init.data.len())
                .map_or(true, |end| end > memory.current_length)
            {
                return Err(InstantiationError::Trap(Trap::wasm(
                    ir::TrapCode::HeapOutOfBounds,
                )));
            }

            unsafe {
                // Safe: the bounds check above guarantees `start..end` lies
                // within the memory's current length.
                let mem_slice = Self::get_memory_slice(init, instance);
                let end = start + init.data.len();
                let to_init = &mut mem_slice[start..end];
                to_init.copy_from_slice(&init.data);
            }
        }

        Ok(())
    }
}
unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
    /// Heap-allocates an `Instance` with its tables and memories, then
    /// populates its `VMContext` from the request.
    unsafe fn allocate(
        &self,
        req: InstanceAllocationRequest,
    ) -> Result<InstanceHandle, InstantiationError> {
        debug_assert!(!req.externref_activations_table.is_null());
        debug_assert!(!req.stack_map_registry.is_null());

        let memories = self.create_memories(&req.module)?;
        let tables = Self::create_tables(&req.module);

        let handle = {
            let instance = Instance {
                module: req.module.clone(),
                offsets: VMOffsets::new(std::mem::size_of::<*const u8>() as u8, &req.module),
                memories,
                tables,
                // One drop-tracking bit per passive element/data segment.
                dropped_elements: RefCell::new(EntitySet::with_capacity(
                    req.module.passive_elements.len(),
                )),
                dropped_data: RefCell::new(EntitySet::with_capacity(req.module.passive_data.len())),
                host_state: req.host_state,
                vmctx: VMContext {},
            };
            // Allocate the variable-size backing storage (instance +
            // trailing vmctx data) and move the instance into it.
            let layout = instance.alloc_layout();
            let instance_ptr = alloc::alloc(layout) as *mut Instance;
            if instance_ptr.is_null() {
                alloc::handle_alloc_error(layout);
            }
            ptr::write(instance_ptr, instance);
            InstanceHandle::new(instance_ptr)
        };

        let instance = handle.instance();
        initialize_vmcontext(
            instance,
            req.imports.functions,
            req.imports.tables,
            req.imports.memories,
            req.imports.globals,
            req.finished_functions,
            req.lookup_shared_signature,
            req.interrupts,
            req.externref_activations_table,
            req.stack_map_registry,
            &|index| instance.memories[index].vmmemory(),
            &|index| instance.tables[index].vmtable(),
        );

        Ok(handle)
    }

    /// Applies table/memory initializers to a freshly-allocated instance.
    unsafe fn initialize(
        &self,
        handle: &InstanceHandle,
        is_bulk_memory: bool,
        data_initializers: &Arc<[OwnedDataInitializer]>,
    ) -> Result<(), InstantiationError> {
        // Check initializer bounds before initializing anything. Only do this
        // when bulk memory is disabled, since the bulk memory proposal changes
        // instantiation such that the intermediate results of failed
        // initializations are visible.
        if !is_bulk_memory {
            Self::check_table_init_bounds(handle.instance())?;
            Self::check_memory_init_bounds(handle.instance(), data_initializers.as_ref())?;
        }

        // Apply fallible initializers. Note that this can "leak" state even if
        // it fails.
        Self::initialize_tables(handle.instance())?;
        Self::initialize_memories(handle.instance(), data_initializers.as_ref())?;

        Ok(())
    }

    /// Drops the instance in place and frees its backing allocation with the
    /// same layout it was allocated with.
    unsafe fn deallocate(&self, handle: &InstanceHandle) {
        let instance = handle.instance();
        let layout = instance.alloc_layout();
        ptr::drop_in_place(instance as *const Instance as *mut Instance);
        alloc::dealloc(instance as *const Instance as *mut _, layout);
    }
}

View File

@@ -37,7 +37,10 @@ pub mod libcalls;
pub use crate::export::*; pub use crate::export::*;
pub use crate::externref::*; pub use crate::externref::*;
pub use crate::imports::Imports; pub use crate::imports::Imports;
pub use crate::instance::{InstanceHandle, InstantiationError, LinkError, RuntimeInstance}; pub use crate::instance::{
InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstantiationError, LinkError,
OnDemandInstanceAllocator, RuntimeInstance,
};
pub use crate::jit_int::GdbJitImageRegistration; pub use crate::jit_int::GdbJitImageRegistration;
pub use crate::memory::{RuntimeLinearMemory, RuntimeMemoryCreator}; pub use crate::memory::{RuntimeLinearMemory, RuntimeMemoryCreator};
pub use crate::mmap::Mmap; pub use crate::mmap::Mmap;

View File

@@ -750,7 +750,7 @@ impl VMContext {
} }
} }
/// /// Trampoline function pointer type.
pub type VMTrampoline = unsafe extern "C" fn( pub type VMTrampoline = unsafe extern "C" fn(
*mut VMContext, // callee vmctx *mut VMContext, // callee vmctx
*mut VMContext, // caller vmctx *mut VMContext, // caller vmctx

View File

@@ -14,6 +14,25 @@ use wasmtime_environ::settings::{self, Configurable, SetError};
use wasmtime_environ::{isa, isa::TargetIsa, Tunables}; use wasmtime_environ::{isa, isa::TargetIsa, Tunables};
use wasmtime_jit::{native, CompilationStrategy, Compiler}; use wasmtime_jit::{native, CompilationStrategy, Compiler};
use wasmtime_profiling::{JitDumpAgent, NullProfilerAgent, ProfilingAgent, VTuneAgent}; use wasmtime_profiling::{JitDumpAgent, NullProfilerAgent, ProfilingAgent, VTuneAgent};
use wasmtime_runtime::{InstanceAllocator, OnDemandInstanceAllocator};
/// Represents the module instance allocation strategy to use.
#[derive(Clone)]
pub enum InstanceAllocationStrategy {
/// The on-demand instance allocation strategy.
///
/// Resources related to a module instance are allocated at instantiation time and
/// immediately deallocated when the `Store` referencing the instance is dropped.
///
/// This is the default allocation strategy for Wasmtime.
OnDemand,
}
impl Default for InstanceAllocationStrategy {
fn default() -> Self {
Self::OnDemand
}
}
/// Global configuration options used to create an [`Engine`](crate::Engine) /// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior. /// and customize its behavior.
@@ -29,7 +48,10 @@ pub struct Config {
#[cfg(feature = "cache")] #[cfg(feature = "cache")]
pub(crate) cache_config: CacheConfig, pub(crate) cache_config: CacheConfig,
pub(crate) profiler: Arc<dyn ProfilingAgent>, pub(crate) profiler: Arc<dyn ProfilingAgent>,
pub(crate) memory_creator: Option<MemoryCreatorProxy>, pub(crate) instance_allocator: Option<Arc<dyn InstanceAllocator>>,
// The default instance allocator is used for instantiating host objects
// and for module instatiation when `instance_allocator` is None
pub(crate) default_instance_allocator: OnDemandInstanceAllocator,
pub(crate) max_wasm_stack: usize, pub(crate) max_wasm_stack: usize,
pub(crate) features: WasmFeatures, pub(crate) features: WasmFeatures,
pub(crate) wasm_backtrace_details_env_used: bool, pub(crate) wasm_backtrace_details_env_used: bool,
@@ -73,7 +95,8 @@ impl Config {
#[cfg(feature = "cache")] #[cfg(feature = "cache")]
cache_config: CacheConfig::new_cache_disabled(), cache_config: CacheConfig::new_cache_disabled(),
profiler: Arc::new(NullProfilerAgent), profiler: Arc::new(NullProfilerAgent),
memory_creator: None, instance_allocator: None,
default_instance_allocator: OnDemandInstanceAllocator::new(None),
max_wasm_stack: 1 << 20, max_wasm_stack: 1 << 20,
wasm_backtrace_details_env_used: false, wasm_backtrace_details_env_used: false,
features: WasmFeatures { features: WasmFeatures {
@@ -504,9 +527,24 @@ impl Config {
Ok(self) Ok(self)
} }
/// Sets a custom memory creator /// Sets a custom memory creator.
///
/// Custom memory creators are used when creating host `Memory` objects or when
/// creating instance linear memories for the on-demand instance allocation strategy.
pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self { pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
self.memory_creator = Some(MemoryCreatorProxy { mem_creator }); self.default_instance_allocator =
OnDemandInstanceAllocator::new(Some(Arc::new(MemoryCreatorProxy(mem_creator))));
self
}
/// Sets the instance allocation strategy to use.
pub fn with_instance_allocation_strategy(
&mut self,
strategy: InstanceAllocationStrategy,
) -> &mut Self {
self.instance_allocator = match strategy {
InstanceAllocationStrategy::OnDemand => None,
};
self self
} }
@@ -728,6 +766,12 @@ impl Config {
let isa = self.target_isa(); let isa = self.target_isa();
Compiler::new(isa, self.strategy, self.tunables.clone(), self.features) Compiler::new(isa, self.strategy, self.tunables.clone(), self.features)
} }
pub(crate) fn instance_allocator(&self) -> &dyn InstanceAllocator {
self.instance_allocator
.as_deref()
.unwrap_or(&self.default_instance_allocator)
}
} }
fn round_up_to_pages(val: u64) -> u64 { fn round_up_to_pages(val: u64) -> u64 {

View File

@@ -12,9 +12,9 @@ use wasmtime_environ::wasm::{
}; };
use wasmtime_environ::Initializer; use wasmtime_environ::Initializer;
use wasmtime_runtime::{ use wasmtime_runtime::{
Imports, InstantiationError, RuntimeInstance, StackMapRegistry, VMContext, Imports, InstanceAllocationRequest, InstantiationError, RuntimeInstance, StackMapRegistry,
VMExternRefActivationsTable, VMFunctionBody, VMFunctionImport, VMGlobalImport, VMMemoryImport, VMContext, VMExternRefActivationsTable, VMFunctionBody, VMFunctionImport, VMGlobalImport,
VMTableImport, VMMemoryImport, VMTableImport,
}; };
/// An instantiated WebAssembly module. /// An instantiated WebAssembly module.
@@ -492,18 +492,26 @@ impl<'a> Instantiator<'a> {
// compiled JIT code within the `Store`. // compiled JIT code within the `Store`.
self.store.register_module(&self.cur.module); self.store.register_module(&self.cur.module);
let config = self.store.engine().config();
unsafe { unsafe {
let instance = compiled_module.instantiate( let config = self.store.engine().config();
self.cur.build(),
&self.store.lookup_shared_signature(self.cur.module.types()), let allocator = config.instance_allocator();
config.memory_creator.as_ref().map(|a| a as _),
self.store.interrupts(), let instance = allocator.allocate(InstanceAllocationRequest {
Box::new(()), module: compiled_module.module().clone(),
self.store.externref_activations_table() as *const VMExternRefActivationsTable finished_functions: compiled_module.finished_functions(),
imports: self.cur.build(),
lookup_shared_signature: &self
.store
.lookup_shared_signature(self.cur.module.types()),
host_state: Box::new(()),
interrupts: self.store.interrupts(),
externref_activations_table: self.store.externref_activations_table()
as *const VMExternRefActivationsTable
as *mut _, as *mut _,
self.store.stack_map_registry() as *const StackMapRegistry as *mut _, stack_map_registry: self.store.stack_map_registry() as *const StackMapRegistry
)?; as *mut _,
})?;
// After we've created the `InstanceHandle` we still need to run // After we've created the `InstanceHandle` we still need to run
// initialization to set up data/elements/etc. We do this after adding // initialization to set up data/elements/etc. We do this after adding
@@ -513,8 +521,9 @@ impl<'a> Instantiator<'a> {
// tables. This means that from this point on, regardless of whether // tables. This means that from this point on, regardless of whether
// initialization is successful, we need to keep the instance alive. // initialization is successful, we need to keep the instance alive.
let instance = self.store.add_instance(instance); let instance = self.store.add_instance(instance);
instance allocator
.initialize( .initialize(
&instance.handle,
config.features.bulk_memory, config.features.bulk_memory,
&compiled_module.data_initializers(), &compiled_module.data_initializers(),
) )

View File

@@ -18,8 +18,8 @@ use std::task::{Context, Poll};
use wasmtime_environ::wasm; use wasmtime_environ::wasm;
use wasmtime_jit::{CompiledModule, ModuleCode, TypeTables}; use wasmtime_jit::{CompiledModule, ModuleCode, TypeTables};
use wasmtime_runtime::{ use wasmtime_runtime::{
InstanceHandle, RuntimeMemoryCreator, SignalHandler, StackMapRegistry, TrapInfo, VMContext, InstanceHandle, SignalHandler, StackMapRegistry, TrapInfo, VMContext, VMExternRef,
VMExternRef, VMExternRefActivationsTable, VMInterrupts, VMSharedSignatureIndex, VMExternRefActivationsTable, VMInterrupts, VMSharedSignatureIndex,
}; };
/// A `Store` is a collection of WebAssembly instances and host-defined items. /// A `Store` is a collection of WebAssembly instances and host-defined items.
@@ -254,15 +254,6 @@ impl Store {
&self.inner.engine &self.inner.engine
} }
/// Returns an optional reference to a ['RuntimeMemoryCreator']
pub(crate) fn memory_creator(&self) -> Option<&dyn RuntimeMemoryCreator> {
self.engine()
.config()
.memory_creator
.as_ref()
.map(|x| x as _)
}
pub(crate) fn signatures(&self) -> &RefCell<SignatureRegistry> { pub(crate) fn signatures(&self) -> &RefCell<SignatureRegistry> {
&self.inner.signatures &self.inner.signatures
} }
@@ -969,9 +960,10 @@ impl fmt::Debug for Store {
impl Drop for StoreInner { impl Drop for StoreInner {
fn drop(&mut self) { fn drop(&mut self) {
for instance in self.instances.get_mut().iter() { let allocator = self.engine.config().instance_allocator();
for instance in self.instances.borrow().iter() {
unsafe { unsafe {
instance.dealloc(); allocator.deallocate(instance);
} }
} }
} }

View File

@@ -9,15 +9,15 @@ use wasmtime_environ::entity::PrimaryMap;
use wasmtime_environ::wasm::DefinedFuncIndex; use wasmtime_environ::wasm::DefinedFuncIndex;
use wasmtime_environ::Module; use wasmtime_environ::Module;
use wasmtime_runtime::{ use wasmtime_runtime::{
Imports, InstanceHandle, StackMapRegistry, VMExternRefActivationsTable, VMFunctionBody, Imports, InstanceAllocationRequest, InstanceAllocator, StackMapRegistry,
VMFunctionImport, VMSharedSignatureIndex, VMExternRefActivationsTable, VMFunctionBody, VMFunctionImport, VMSharedSignatureIndex,
}; };
pub(crate) fn create_handle( pub(crate) fn create_handle(
module: Module, module: Module,
store: &Store, store: &Store,
finished_functions: PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>, finished_functions: PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,
state: Box<dyn Any>, host_state: Box<dyn Any>,
func_imports: &[VMFunctionImport], func_imports: &[VMFunctionImport],
shared_signature_id: Option<VMSharedSignatureIndex>, shared_signature_id: Option<VMSharedSignatureIndex>,
) -> Result<StoreInstanceHandle> { ) -> Result<StoreInstanceHandle> {
@@ -26,17 +26,24 @@ pub(crate) fn create_handle(
let module = Arc::new(module); let module = Arc::new(module);
unsafe { unsafe {
let handle = InstanceHandle::new( // Use the default allocator when creating handles associated with host objects
module, let handle = store
&finished_functions, .engine()
imports, .config()
store.memory_creator(), .default_instance_allocator
&|_| shared_signature_id.unwrap(), .allocate(InstanceAllocationRequest {
state, module: module.clone(),
store.interrupts(), finished_functions: &finished_functions,
store.externref_activations_table() as *const VMExternRefActivationsTable as *mut _, imports,
store.stack_map_registry() as *const StackMapRegistry as *mut _, lookup_shared_signature: &|_| shared_signature_id.unwrap(),
)?; host_state,
interrupts: store.interrupts(),
externref_activations_table: store.externref_activations_table()
as *const VMExternRefActivationsTable
as *mut _,
stack_map_registry: store.stack_map_registry() as *const StackMapRegistry as *mut _,
})?;
Ok(store.add_instance(handle)) Ok(store.add_instance(handle))
} }
} }

View File

@@ -54,9 +54,7 @@ impl RuntimeLinearMemory for LinearMemoryProxy {
} }
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct MemoryCreatorProxy { pub(crate) struct MemoryCreatorProxy(pub Arc<dyn MemoryCreator>);
pub(crate) mem_creator: Arc<dyn MemoryCreator>,
}
impl RuntimeMemoryCreator for MemoryCreatorProxy { impl RuntimeMemoryCreator for MemoryCreatorProxy {
fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>, String> { fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>, String> {
@@ -65,7 +63,7 @@ impl RuntimeMemoryCreator for MemoryCreatorProxy {
MemoryStyle::Static { bound } => Some(bound as u64 * WASM_PAGE_SIZE as u64), MemoryStyle::Static { bound } => Some(bound as u64 * WASM_PAGE_SIZE as u64),
MemoryStyle::Dynamic => None, MemoryStyle::Dynamic => None,
}; };
self.mem_creator self.0
.new_memory(ty, reserved_size_in_bytes, plan.offset_guard_size) .new_memory(ty, reserved_size_in_bytes, plan.offset_guard_size)
.map(|mem| Box::new(LinearMemoryProxy { mem }) as Box<dyn RuntimeLinearMemory>) .map(|mem| Box::new(LinearMemoryProxy { mem }) as Box<dyn RuntimeLinearMemory>)
} }