Tidy up some internals of instance allocation (#5346)

* Simplify the `ModuleRuntimeInfo` trait slightly

Fold two functions into one, as they're only ever called from one
location anyway; a sketch of the fold follows.
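
Judging from the hunk further down that replaces
`runtime_info.image_base() + runtime_info.function_loc(def_index).start`
with a single `runtime_info.function(def_index)` call, the fold looks
roughly like the sketch below; the types here are hypothetical stand-ins,
not the real wasmtime-runtime definitions.

```rust
use std::ptr::NonNull;

// Hypothetical stand-ins for the real runtime types.
pub struct VMFunctionBody(pub u8);
pub struct DefinedFuncIndex(pub usize);

pub trait ModuleRuntimeInfo {
    // Before: two methods that every caller had to combine by hand,
    //   fn image_base(&self) -> usize;
    //   fn function_loc(&self, index: DefinedFuncIndex) -> FunctionLoc;
    // computing `image_base + loc.start` at the single call site.

    /// After: one method returning the function body pointer directly,
    /// since the two pieces were only ever combined in one place.
    fn function(&self, index: DefinedFuncIndex) -> NonNull<VMFunctionBody>;
}
```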

* Remove `ModuleRuntimeInfo::signature`

This is redundant: the signature-index mapping array is already stored
within the `VMContext`, so that can be consulted rather than exposed
through a separate trait function. This required altering `Global`
creation slightly to work correctly in this situation; a sketch of the
array lookup follows.
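
The replacement lookup is visible in the diff below: rather than calling a
trait method, `construct_anyfunc` now reads the shared signature index out
of the signature-ids array whose base pointer the `VMContext` already
holds. A self-contained sketch of that indexing, with simplified stand-in
types:

```rust
/// Simplified stand-ins for the runtime's index types.
#[derive(Copy, Clone)]
pub struct SignatureIndex(pub usize);
#[derive(Copy, Clone)]
pub struct VMSharedSignatureIndex(pub u32);

/// The `VMContext` stores a pointer to a per-module array mapping each
/// module-local `SignatureIndex` to its process-wide shared index, so
/// the lookup is a plain array access instead of a trait call.
///
/// Safety: `signature_ids` must point to an array with more than `sig.0`
/// initialized elements.
pub unsafe fn shared_signature(
    signature_ids: *const VMSharedSignatureIndex,
    sig: SignatureIndex,
) -> VMSharedSignatureIndex {
    *signature_ids.add(sig.0)
}
```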

* Remove a now-dead constant

* Share `VMOffsets` across instances

This commit moves the computation of `VMOffsets` from per-instance to
per-module. The `VMOffsets` structure is also quite large, so this shaves
112 bytes off each instance; not a huge impact on its own, but it should
help lower the cost of instantiating small modules. A sketch of the new
layout follows.
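
As a rough picture of the layout change (a sketch with stand-in types, not
the exact Wasmtime code): the offsets now live once behind the module's
runtime info, and each instance reaches them through an accessor instead
of carrying an inline copy.

```rust
use std::sync::Arc;

/// Stand-in for `VMOffsets<HostPtr>`: precomputed field offsets for the
/// `VMContext` layout, identical for every instance of a module.
pub struct VMOffsets {}

pub trait ModuleRuntimeInfo {
    /// Computed once per module; instances borrow it by reference.
    fn offsets(&self) -> &VMOffsets;
}

pub struct Instance {
    // Before: an inline `offsets: VMOffsets<HostPtr>` field here
    // duplicated the ~112-byte structure in every instance.
    runtime_info: Arc<dyn ModuleRuntimeInfo>,
}

impl Instance {
    fn offsets(&self) -> &VMOffsets {
        // After: consult the shared, per-module copy.
        self.runtime_info.offsets()
    }
}
```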

* Remove `InstanceAllocator::adjust_tunables`

This is no longer needed by the pooling allocator.

* Fix compile warning

* Fix a VTune warning

* Fix pooling tests

* Fix another test warning
Author: Alex Crichton
Date: 2022-12-01 16:22:08 -06:00
Committed by: GitHub
Parent: ed6769084b
Commit: 03715dda9d
14 changed files with 142 additions and 176 deletions

crates/runtime/src/instance.rs

@@ -13,7 +13,7 @@ use crate::vmcontext::{
 };
 use crate::{
     ExportFunction, ExportGlobal, ExportMemory, ExportTable, Imports, ModuleRuntimeInfo, Store,
-    VMFunctionBody,
+    VMFunctionBody, VMSharedSignatureIndex,
 };
 use anyhow::Error;
 use memoffset::offset_of;
@@ -61,9 +61,6 @@ pub(crate) struct Instance {
     /// functions, lazy initialization state, etc.
     runtime_info: Arc<dyn ModuleRuntimeInfo>,
 
-    /// Offsets in the `vmctx` region, precomputed from the `module` above.
-    offsets: VMOffsets<HostPtr>,
-
     /// WebAssembly linear memory data.
     ///
     /// This is where all runtime information about defined linear memories in
@@ -105,13 +102,12 @@ impl Instance {
     unsafe fn new_at(
         ptr: *mut Instance,
         alloc_size: usize,
-        offsets: VMOffsets<HostPtr>,
         req: InstanceAllocationRequest,
         memories: PrimaryMap<DefinedMemoryIndex, Memory>,
         tables: PrimaryMap<DefinedTableIndex, Table>,
     ) {
         // The allocation must be *at least* the size required of `Instance`.
-        assert!(alloc_size >= Self::alloc_layout(&offsets).size());
+        assert!(alloc_size >= Self::alloc_layout(req.runtime_info.offsets()).size());
 
         let module = req.runtime_info.module();
         let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
@@ -121,7 +117,6 @@ impl Instance {
             ptr,
             Instance {
                 runtime_info: req.runtime_info.clone(),
-                offsets,
                 memories,
                 tables,
                 dropped_elements,
@@ -133,7 +128,7 @@ impl Instance {
             },
         );
 
-        (*ptr).initialize_vmctx(module, req.store, req.imports);
+        (*ptr).initialize_vmctx(module, req.runtime_info.offsets(), req.store, req.imports);
     }
 
     /// Helper function to access various locations offset from our `*mut
@@ -148,24 +143,28 @@ impl Instance {
         self.runtime_info.module()
     }
 
+    fn offsets(&self) -> &VMOffsets<HostPtr> {
+        self.runtime_info.offsets()
+    }
+
     /// Return the indexed `VMFunctionImport`.
     fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
-        unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmfunction_import(index)) }
+        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
     }
 
     /// Return the indexed `VMTableImport`.
     fn imported_table(&self, index: TableIndex) -> &VMTableImport {
-        unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmtable_import(index)) }
+        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
     }
 
     /// Return the indexed `VMMemoryImport`.
     fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
-        unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmmemory_import(index)) }
+        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
     }
 
     /// Return the indexed `VMGlobalImport`.
     fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
-        unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmglobal_import(index)) }
+        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
     }
/// Return the indexed `VMTableDefinition`.
@@ -183,7 +182,7 @@ impl Instance {
     /// Return the indexed `VMTableDefinition`.
     fn table_ptr(&self, index: DefinedTableIndex) -> *mut VMTableDefinition {
-        unsafe { self.vmctx_plus_offset(self.offsets.vmctx_vmtable_definition(index)) }
+        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtable_definition(index)) }
     }
 
     /// Get a locally defined or imported memory.
@@ -221,7 +220,7 @@ impl Instance {
     /// Return the indexed `VMMemoryDefinition`.
     fn memory_ptr(&self, index: DefinedMemoryIndex) -> *mut VMMemoryDefinition {
-        unsafe { *self.vmctx_plus_offset(self.offsets.vmctx_vmmemory_pointer(index)) }
+        unsafe { *self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_pointer(index)) }
     }
 
     /// Return the indexed `VMGlobalDefinition`.
@@ -231,7 +230,7 @@ impl Instance {
     /// Return the indexed `VMGlobalDefinition`.
     fn global_ptr(&self, index: DefinedGlobalIndex) -> *mut VMGlobalDefinition {
-        unsafe { self.vmctx_plus_offset(self.offsets.vmctx_vmglobal_definition(index)) }
+        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_definition(index)) }
     }
 
     /// Get a raw pointer to the global at the given index regardless of whether it
@@ -251,17 +250,17 @@ impl Instance {
     /// Return a pointer to the interrupts structure
     pub fn runtime_limits(&self) -> *mut *const VMRuntimeLimits {
-        unsafe { self.vmctx_plus_offset(self.offsets.vmctx_runtime_limits()) }
+        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_runtime_limits()) }
     }
 
     /// Return a pointer to the global epoch counter used by this instance.
     pub fn epoch_ptr(&self) -> *mut *const AtomicU64 {
-        unsafe { self.vmctx_plus_offset(self.offsets.vmctx_epoch_ptr()) }
+        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_epoch_ptr()) }
     }
 
     /// Return a pointer to the `VMExternRefActivationsTable`.
     pub fn externref_activations_table(&self) -> *mut *mut VMExternRefActivationsTable {
-        unsafe { self.vmctx_plus_offset(self.offsets.vmctx_externref_activations_table()) }
+        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_externref_activations_table()) }
    }
 
     /// Gets a pointer to this instance's `Store` which was originally
@@ -276,14 +275,15 @@ impl Instance {
     /// store).
     #[inline]
     pub fn store(&self) -> *mut dyn Store {
-        let ptr = unsafe { *self.vmctx_plus_offset::<*mut dyn Store>(self.offsets.vmctx_store()) };
+        let ptr =
+            unsafe { *self.vmctx_plus_offset::<*mut dyn Store>(self.offsets().vmctx_store()) };
         assert!(!ptr.is_null());
         ptr
     }
 
     pub unsafe fn set_store(&mut self, store: Option<*mut dyn Store>) {
         if let Some(store) = store {
-            *self.vmctx_plus_offset(self.offsets.vmctx_store()) = store;
+            *self.vmctx_plus_offset(self.offsets().vmctx_store()) = store;
             *self.runtime_limits() = (*store).vmruntime_limits();
             *self.epoch_ptr() = (*store).epoch_ptr();
             *self.externref_activations_table() = (*store).externref_activations_table().0;
@@ -292,7 +292,7 @@ impl Instance {
                 mem::size_of::<*mut dyn Store>(),
                 mem::size_of::<[*mut (); 2]>()
             );
-            *self.vmctx_plus_offset::<[*mut (); 2]>(self.offsets.vmctx_store()) =
+            *self.vmctx_plus_offset::<[*mut (); 2]>(self.offsets().vmctx_store()) =
                 [ptr::null_mut(), ptr::null_mut()];
             *self.runtime_limits() = ptr::null_mut();
@@ -302,7 +302,7 @@ impl Instance {
     }
 
     pub(crate) unsafe fn set_callee(&mut self, callee: Option<NonNull<VMFunctionBody>>) {
-        *self.vmctx_plus_offset(self.offsets.vmctx_callee()) =
+        *self.vmctx_plus_offset(self.offsets().vmctx_callee()) =
             callee.map_or(ptr::null_mut(), |c| c.as_ptr());
     }
@@ -499,13 +499,15 @@ impl Instance {
         sig: SignatureIndex,
         into: *mut VMCallerCheckedAnyfunc,
     ) {
-        let type_index = self.runtime_info.signature(sig);
+        let type_index = unsafe {
+            let base: *const VMSharedSignatureIndex =
+                *self.vmctx_plus_offset(self.offsets().vmctx_signature_ids_array());
+            *base.add(sig.index())
+        };
 
         let (func_ptr, vmctx) = if let Some(def_index) = self.module().defined_func_index(index) {
             (
-                (self.runtime_info.image_base()
-                    + self.runtime_info.function_loc(def_index).start as usize)
-                    as *mut _,
+                self.runtime_info.function(def_index),
                 VMOpaqueContext::from_vmcontext(self.vmctx_ptr()),
             )
         } else {
@@ -569,7 +571,7 @@ impl Instance {
                 let sig = func.signature;
                 let anyfunc: *mut VMCallerCheckedAnyfunc = self
                     .vmctx_plus_offset::<VMCallerCheckedAnyfunc>(
-                        self.offsets.vmctx_anyfunc(func.anyfunc),
+                        self.offsets().vmctx_anyfunc(func.anyfunc),
                     );
                 self.construct_anyfunc(index, sig, anyfunc);
@@ -898,44 +900,49 @@ impl Instance {
     /// The `VMContext` memory is assumed to be uninitialized; any field
     /// that we need in a certain state will be explicitly written by this
     /// function.
-    unsafe fn initialize_vmctx(&mut self, module: &Module, store: StorePtr, imports: Imports) {
+    unsafe fn initialize_vmctx(
+        &mut self,
+        module: &Module,
+        offsets: &VMOffsets<HostPtr>,
+        store: StorePtr,
+        imports: Imports,
+    ) {
         assert!(std::ptr::eq(module, self.module().as_ref()));
-        *self.vmctx_plus_offset(self.offsets.vmctx_magic()) = VMCONTEXT_MAGIC;
+        *self.vmctx_plus_offset(offsets.vmctx_magic()) = VMCONTEXT_MAGIC;
         self.set_callee(None);
         self.set_store(store.as_raw());
 
         // Initialize shared signatures
         let signatures = self.runtime_info.signature_ids();
-        *self.vmctx_plus_offset(self.offsets.vmctx_signature_ids_array()) = signatures.as_ptr();
+        *self.vmctx_plus_offset(offsets.vmctx_signature_ids_array()) = signatures.as_ptr();
 
         // Initialize the built-in functions
-        *self.vmctx_plus_offset(self.offsets.vmctx_builtin_functions()) =
-            &VMBuiltinFunctionsArray::INIT;
+        *self.vmctx_plus_offset(offsets.vmctx_builtin_functions()) = &VMBuiltinFunctionsArray::INIT;
 
         // Initialize the imports
         debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
         ptr::copy_nonoverlapping(
             imports.functions.as_ptr(),
-            self.vmctx_plus_offset(self.offsets.vmctx_imported_functions_begin()),
+            self.vmctx_plus_offset(offsets.vmctx_imported_functions_begin()),
             imports.functions.len(),
         );
         debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
         ptr::copy_nonoverlapping(
             imports.tables.as_ptr(),
-            self.vmctx_plus_offset(self.offsets.vmctx_imported_tables_begin()),
+            self.vmctx_plus_offset(offsets.vmctx_imported_tables_begin()),
             imports.tables.len(),
         );
         debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
         ptr::copy_nonoverlapping(
             imports.memories.as_ptr(),
-            self.vmctx_plus_offset(self.offsets.vmctx_imported_memories_begin()),
+            self.vmctx_plus_offset(offsets.vmctx_imported_memories_begin()),
             imports.memories.len(),
         );
         debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
         ptr::copy_nonoverlapping(
             imports.globals.as_ptr(),
-            self.vmctx_plus_offset(self.offsets.vmctx_imported_globals_begin()),
+            self.vmctx_plus_offset(offsets.vmctx_imported_globals_begin()),
             imports.globals.len(),
         );
@@ -946,7 +953,7 @@ impl Instance {
         // any state now.
 
         // Initialize the defined tables
-        let mut ptr = self.vmctx_plus_offset(self.offsets.vmctx_tables_begin());
+        let mut ptr = self.vmctx_plus_offset(offsets.vmctx_tables_begin());
         for i in 0..module.table_plans.len() - module.num_imported_tables {
             ptr::write(ptr, self.tables[DefinedTableIndex::new(i)].vmtable());
             ptr = ptr.add(1);
@@ -957,8 +964,8 @@ impl Instance {
         // time. Entries in `defined_memories` hold a pointer to a definition
         // (all memories) whereas the `owned_memories` hold the actual
         // definitions of memories owned (not shared) in the module.
-        let mut ptr = self.vmctx_plus_offset(self.offsets.vmctx_memories_begin());
-        let mut owned_ptr = self.vmctx_plus_offset(self.offsets.vmctx_owned_memories_begin());
+        let mut ptr = self.vmctx_plus_offset(offsets.vmctx_memories_begin());
+        let mut owned_ptr = self.vmctx_plus_offset(offsets.vmctx_owned_memories_begin());
         for i in 0..module.memory_plans.len() - module.num_imported_memories {
             let defined_memory_index = DefinedMemoryIndex::new(i);
             let memory_index = module.memory_index(defined_memory_index);