Tidy up some internals of instance allocation (#5346)

* Simplify the `ModuleRuntimeInfo` trait slightly

Fold two functions into one as they're only called from one location
anyway.
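The two functions in question are `image_base` and `function_loc`; their single caller only ever combined them into a raw code pointer, so the trait now exposes that pointer directly. A condensed before/after sketch (just the methods touched; `DefinedFuncIndex` and `FunctionLoc` are the `wasmtime_environ` types used in the diff, and `VMFunctionBody` is a stand-in for the runtime crate's type):

```rust
use wasmtime_environ::{DefinedFuncIndex, FunctionLoc};

/// Stand-in for the runtime crate's opaque compiled-code type.
#[repr(C)]
pub struct VMFunctionBody(u8);

/// Before: two accessors, combined by hand at the call site.
pub trait RuntimeInfoBefore {
    /// The base address of where JIT functions are located.
    fn image_base(&self) -> usize;
    /// Descriptors about each compiled function, such as the offset
    /// from `image_base`.
    fn function_loc(&self, index: DefinedFuncIndex) -> &FunctionLoc;
}

/// After: the one thing the caller actually wanted.
pub trait RuntimeInfoAfter {
    /// Returns the address, in memory, that the function `index` resides at.
    fn function(&self, index: DefinedFuncIndex) -> *mut VMFunctionBody;
}
```

The lone call site in `construct_anyfunc` accordingly changes from `(info.image_base() + info.function_loc(def_index).start as usize) as *mut _` to `info.function(def_index)`, as the first file's diff shows.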

* Remove `ModuleRuntimeInfo::signature`

This method is redundant: the `SignatureIndex`-to-`VMSharedSignatureIndex`
array is already stored within the `VMContext`, so it can be consulted
directly rather than going through a separate trait function. This required
altering `Global` creation slightly to keep it working in this situation.
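Concretely, `construct_anyfunc` stops calling the trait and instead indexes the array whose pointer `initialize_vmctx` already writes into each `VMContext`. In sketch form, mirroring the hunk below:

```rust
// Before: a dedicated trait method performed the SignatureIndex ->
// VMSharedSignatureIndex mapping.
let type_index = self.runtime_info.signature(sig);

// After: read it straight out of the VMContext. This offset names a
// `*const VMSharedSignatureIndex` array written during `initialize_vmctx`.
let type_index = unsafe {
    let base: *const VMSharedSignatureIndex =
        *self.vmctx_plus_offset(self.offsets().vmctx_signature_ids_array());
    *base.add(sig.index())
};
```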

* Remove a now-dead constant

* Share `VMOffsets` across instances

This commit moves the computation of `VMOffsets` from per-instance to
per-module. The `VMOffsets` structure is also quite large, so this shaves
112 bytes off of each instance, which isn't a huge impact but should help
lower the cost of instantiating small modules.
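A minimal sketch of the resulting shape (struct and trait pared down to the members visible in this diff): the instance drops its embedded copy and borrows the offsets from the module's shared runtime-info handle.

```rust
use std::sync::Arc;
use wasmtime_environ::{HostPtr, VMOffsets};

/// Pared-down stand-in for the runtime-info trait; the real trait has more
/// methods, but this is the accessor the commit adds.
trait ModuleRuntimeInfo {
    /// Offset information for the current host, computed once per module.
    fn offsets(&self) -> &VMOffsets<HostPtr>;
}

/// Before, `Instance` also carried an inline `offsets: VMOffsets<HostPtr>`
/// field (the 112 bytes measured above); now it only holds the shared handle.
struct Instance {
    runtime_info: Arc<dyn ModuleRuntimeInfo>,
}

impl Instance {
    fn offsets(&self) -> &VMOffsets<HostPtr> {
        self.runtime_info.offsets()
    }
}
```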

* Remove `InstanceAllocator::adjust_tunables`

This is no longer needed by the pooling allocator.

* Fix compile warning

* Fix a VTune warning

* Fix pooling tests

* Fix another test warning
Author: Alex Crichton
Date: 2022-12-01 16:22:08 -06:00
Committed by: GitHub
Parent: ed6769084b
Commit: 03715dda9d

14 changed files with 142 additions and 176 deletions


@@ -13,7 +13,7 @@ use crate::vmcontext::{
};
use crate::{
ExportFunction, ExportGlobal, ExportMemory, ExportTable, Imports, ModuleRuntimeInfo, Store,
VMFunctionBody,
VMFunctionBody, VMSharedSignatureIndex,
};
use anyhow::Error;
use memoffset::offset_of;
@@ -61,9 +61,6 @@ pub(crate) struct Instance {
/// functions, lazy initialization state, etc.
runtime_info: Arc<dyn ModuleRuntimeInfo>,
/// Offsets in the `vmctx` region, precomputed from the `module` above.
offsets: VMOffsets<HostPtr>,
/// WebAssembly linear memory data.
///
/// This is where all runtime information about defined linear memories in
@@ -105,13 +102,12 @@ impl Instance {
unsafe fn new_at(
ptr: *mut Instance,
alloc_size: usize,
offsets: VMOffsets<HostPtr>,
req: InstanceAllocationRequest,
memories: PrimaryMap<DefinedMemoryIndex, Memory>,
tables: PrimaryMap<DefinedTableIndex, Table>,
) {
// The allocation must be *at least* the size required of `Instance`.
assert!(alloc_size >= Self::alloc_layout(&offsets).size());
assert!(alloc_size >= Self::alloc_layout(req.runtime_info.offsets()).size());
let module = req.runtime_info.module();
let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
@@ -121,7 +117,6 @@ impl Instance {
ptr,
Instance {
runtime_info: req.runtime_info.clone(),
offsets,
memories,
tables,
dropped_elements,
@@ -133,7 +128,7 @@ impl Instance {
},
);
(*ptr).initialize_vmctx(module, req.store, req.imports);
(*ptr).initialize_vmctx(module, req.runtime_info.offsets(), req.store, req.imports);
}
/// Helper function to access various locations offset from our `*mut
@@ -148,24 +143,28 @@ impl Instance {
self.runtime_info.module()
}
fn offsets(&self) -> &VMOffsets<HostPtr> {
self.runtime_info.offsets()
}
/// Return the indexed `VMFunctionImport`.
fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmfunction_import(index)) }
unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
}
/// Return the indexed `VMTableImport`.
fn imported_table(&self, index: TableIndex) -> &VMTableImport {
unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmtable_import(index)) }
unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
}
/// Return the indexed `VMMemoryImport`.
fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmmemory_import(index)) }
unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
}
/// Return the indexed `VMGlobalImport`.
fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmglobal_import(index)) }
unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
}
/// Return the indexed `VMTableDefinition`.
@@ -183,7 +182,7 @@ impl Instance {
/// Return the indexed `VMTableDefinition`.
fn table_ptr(&self, index: DefinedTableIndex) -> *mut VMTableDefinition {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_vmtable_definition(index)) }
unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtable_definition(index)) }
}
/// Get a locally defined or imported memory.
@@ -221,7 +220,7 @@ impl Instance {
/// Return the indexed `VMMemoryDefinition`.
fn memory_ptr(&self, index: DefinedMemoryIndex) -> *mut VMMemoryDefinition {
unsafe { *self.vmctx_plus_offset(self.offsets.vmctx_vmmemory_pointer(index)) }
unsafe { *self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_pointer(index)) }
}
/// Return the indexed `VMGlobalDefinition`.
@@ -231,7 +230,7 @@ impl Instance {
/// Return the indexed `VMGlobalDefinition`.
fn global_ptr(&self, index: DefinedGlobalIndex) -> *mut VMGlobalDefinition {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_vmglobal_definition(index)) }
unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_definition(index)) }
}
/// Get a raw pointer to the global at the given index regardless whether it
@@ -251,17 +250,17 @@ impl Instance {
/// Return a pointer to the interrupts structure
pub fn runtime_limits(&self) -> *mut *const VMRuntimeLimits {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_runtime_limits()) }
unsafe { self.vmctx_plus_offset(self.offsets().vmctx_runtime_limits()) }
}
/// Return a pointer to the global epoch counter used by this instance.
pub fn epoch_ptr(&self) -> *mut *const AtomicU64 {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_epoch_ptr()) }
unsafe { self.vmctx_plus_offset(self.offsets().vmctx_epoch_ptr()) }
}
/// Return a pointer to the `VMExternRefActivationsTable`.
pub fn externref_activations_table(&self) -> *mut *mut VMExternRefActivationsTable {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_externref_activations_table()) }
unsafe { self.vmctx_plus_offset(self.offsets().vmctx_externref_activations_table()) }
}
/// Gets a pointer to this instance's `Store` which was originally
@@ -276,14 +275,15 @@ impl Instance {
/// store).
#[inline]
pub fn store(&self) -> *mut dyn Store {
let ptr = unsafe { *self.vmctx_plus_offset::<*mut dyn Store>(self.offsets.vmctx_store()) };
let ptr =
unsafe { *self.vmctx_plus_offset::<*mut dyn Store>(self.offsets().vmctx_store()) };
assert!(!ptr.is_null());
ptr
}
pub unsafe fn set_store(&mut self, store: Option<*mut dyn Store>) {
if let Some(store) = store {
*self.vmctx_plus_offset(self.offsets.vmctx_store()) = store;
*self.vmctx_plus_offset(self.offsets().vmctx_store()) = store;
*self.runtime_limits() = (*store).vmruntime_limits();
*self.epoch_ptr() = (*store).epoch_ptr();
*self.externref_activations_table() = (*store).externref_activations_table().0;
@@ -292,7 +292,7 @@ impl Instance {
mem::size_of::<*mut dyn Store>(),
mem::size_of::<[*mut (); 2]>()
);
*self.vmctx_plus_offset::<[*mut (); 2]>(self.offsets.vmctx_store()) =
*self.vmctx_plus_offset::<[*mut (); 2]>(self.offsets().vmctx_store()) =
[ptr::null_mut(), ptr::null_mut()];
*self.runtime_limits() = ptr::null_mut();
@@ -302,7 +302,7 @@ impl Instance {
}
pub(crate) unsafe fn set_callee(&mut self, callee: Option<NonNull<VMFunctionBody>>) {
*self.vmctx_plus_offset(self.offsets.vmctx_callee()) =
*self.vmctx_plus_offset(self.offsets().vmctx_callee()) =
callee.map_or(ptr::null_mut(), |c| c.as_ptr());
}
@@ -499,13 +499,15 @@ impl Instance {
sig: SignatureIndex,
into: *mut VMCallerCheckedAnyfunc,
) {
let type_index = self.runtime_info.signature(sig);
let type_index = unsafe {
let base: *const VMSharedSignatureIndex =
*self.vmctx_plus_offset(self.offsets().vmctx_signature_ids_array());
*base.add(sig.index())
};
let (func_ptr, vmctx) = if let Some(def_index) = self.module().defined_func_index(index) {
(
(self.runtime_info.image_base()
+ self.runtime_info.function_loc(def_index).start as usize)
as *mut _,
self.runtime_info.function(def_index),
VMOpaqueContext::from_vmcontext(self.vmctx_ptr()),
)
} else {
@@ -569,7 +571,7 @@ impl Instance {
let sig = func.signature;
let anyfunc: *mut VMCallerCheckedAnyfunc = self
.vmctx_plus_offset::<VMCallerCheckedAnyfunc>(
self.offsets.vmctx_anyfunc(func.anyfunc),
self.offsets().vmctx_anyfunc(func.anyfunc),
);
self.construct_anyfunc(index, sig, anyfunc);
@@ -898,44 +900,49 @@ impl Instance {
/// The `VMContext` memory is assumed to be uninitialized; any field
/// that we need in a certain state will be explicitly written by this
/// function.
unsafe fn initialize_vmctx(&mut self, module: &Module, store: StorePtr, imports: Imports) {
unsafe fn initialize_vmctx(
&mut self,
module: &Module,
offsets: &VMOffsets<HostPtr>,
store: StorePtr,
imports: Imports,
) {
assert!(std::ptr::eq(module, self.module().as_ref()));
*self.vmctx_plus_offset(self.offsets.vmctx_magic()) = VMCONTEXT_MAGIC;
*self.vmctx_plus_offset(offsets.vmctx_magic()) = VMCONTEXT_MAGIC;
self.set_callee(None);
self.set_store(store.as_raw());
// Initialize shared signatures
let signatures = self.runtime_info.signature_ids();
*self.vmctx_plus_offset(self.offsets.vmctx_signature_ids_array()) = signatures.as_ptr();
*self.vmctx_plus_offset(offsets.vmctx_signature_ids_array()) = signatures.as_ptr();
// Initialize the built-in functions
*self.vmctx_plus_offset(self.offsets.vmctx_builtin_functions()) =
&VMBuiltinFunctionsArray::INIT;
*self.vmctx_plus_offset(offsets.vmctx_builtin_functions()) = &VMBuiltinFunctionsArray::INIT;
// Initialize the imports
debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
ptr::copy_nonoverlapping(
imports.functions.as_ptr(),
self.vmctx_plus_offset(self.offsets.vmctx_imported_functions_begin()),
self.vmctx_plus_offset(offsets.vmctx_imported_functions_begin()),
imports.functions.len(),
);
debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
ptr::copy_nonoverlapping(
imports.tables.as_ptr(),
self.vmctx_plus_offset(self.offsets.vmctx_imported_tables_begin()),
self.vmctx_plus_offset(offsets.vmctx_imported_tables_begin()),
imports.tables.len(),
);
debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
ptr::copy_nonoverlapping(
imports.memories.as_ptr(),
self.vmctx_plus_offset(self.offsets.vmctx_imported_memories_begin()),
self.vmctx_plus_offset(offsets.vmctx_imported_memories_begin()),
imports.memories.len(),
);
debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
ptr::copy_nonoverlapping(
imports.globals.as_ptr(),
self.vmctx_plus_offset(self.offsets.vmctx_imported_globals_begin()),
self.vmctx_plus_offset(offsets.vmctx_imported_globals_begin()),
imports.globals.len(),
);
@@ -946,7 +953,7 @@ impl Instance {
// any state now.
// Initialize the defined tables
let mut ptr = self.vmctx_plus_offset(self.offsets.vmctx_tables_begin());
let mut ptr = self.vmctx_plus_offset(offsets.vmctx_tables_begin());
for i in 0..module.table_plans.len() - module.num_imported_tables {
ptr::write(ptr, self.tables[DefinedTableIndex::new(i)].vmtable());
ptr = ptr.add(1);
@@ -957,8 +964,8 @@ impl Instance {
// time. Entries in `defined_memories` hold a pointer to a definition
// (all memories) whereas the `owned_memories` hold the actual
// definitions of memories owned (not shared) in the module.
let mut ptr = self.vmctx_plus_offset(self.offsets.vmctx_memories_begin());
let mut owned_ptr = self.vmctx_plus_offset(self.offsets.vmctx_owned_memories_begin());
let mut ptr = self.vmctx_plus_offset(offsets.vmctx_memories_begin());
let mut owned_ptr = self.vmctx_plus_offset(offsets.vmctx_owned_memories_begin());
for i in 0..module.memory_plans.len() - module.num_imported_memories {
let defined_memory_index = DefinedMemoryIndex::new(i);
let memory_index = module.memory_index(defined_memory_index);
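Taken together, the hunks above leave instance construction pulling offsets from the shared runtime info at every step; a condensed sketch (bodies elided, types as named in the hunks):

```rust
// No `offsets` parameter any more: the layout check, the Instance value,
// and the vmctx initialization all borrow from `req.runtime_info`.
unsafe fn new_at(
    ptr: *mut Instance,
    alloc_size: usize,
    req: InstanceAllocationRequest,
    memories: PrimaryMap<DefinedMemoryIndex, Memory>,
    tables: PrimaryMap<DefinedTableIndex, Table>,
) {
    assert!(alloc_size >= Instance::alloc_layout(req.runtime_info.offsets()).size());
    // ... ptr::write the Instance (which no longer stores offsets) ...
    // (*ptr).initialize_vmctx(module, req.runtime_info.offsets(), req.store, req.imports);
}
```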


@@ -92,18 +92,11 @@ impl StorePtr {
/// This trait is unsafe as it requires knowledge of Wasmtime's runtime internals to implement correctly.
pub unsafe trait InstanceAllocator: Send + Sync {
/// Validates that a module is supported by the allocator.
fn validate(&self, module: &Module) -> Result<()> {
drop(module);
fn validate(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
drop((module, offsets));
Ok(())
}
/// Adjusts the tunables prior to creation of any JIT compiler.
///
/// This method allows the instance allocator control over tunables passed to a `wasmtime_jit::Compiler`.
fn adjust_tunables(&self, tunables: &mut wasmtime_environ::Tunables) {
drop(tunables);
}
/// Allocates an instance for the given allocation request.
///
/// # Safety
@@ -464,11 +457,9 @@ pub unsafe fn allocate_single_memory_instance(
let mut memories = PrimaryMap::default();
memories.push(memory);
let tables = PrimaryMap::default();
let module = req.runtime_info.module();
let offsets = VMOffsets::new(HostPtr, module);
let layout = Instance::alloc_layout(&offsets);
let layout = Instance::alloc_layout(req.runtime_info.offsets());
let instance = alloc::alloc(layout) as *mut Instance;
Instance::new_at(instance, layout.size(), offsets, req, memories, tables);
Instance::new_at(instance, layout.size(), req, memories, tables);
Ok(InstanceHandle { instance })
}
@@ -476,7 +467,7 @@ pub unsafe fn allocate_single_memory_instance(
///
/// See [`InstanceAllocator::deallocate()`] for more details.
pub unsafe fn deallocate(handle: &InstanceHandle) {
let layout = Instance::alloc_layout(&handle.instance().offsets);
let layout = Instance::alloc_layout(handle.instance().offsets());
ptr::drop_in_place(handle.instance);
alloc::dealloc(handle.instance.cast(), layout);
}
@@ -485,12 +476,10 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
unsafe fn allocate(&self, mut req: InstanceAllocationRequest) -> Result<InstanceHandle> {
let memories = self.create_memories(&mut req.store, &req.runtime_info)?;
let tables = Self::create_tables(&mut req.store, &req.runtime_info)?;
let module = req.runtime_info.module();
let offsets = VMOffsets::new(HostPtr, module);
let layout = Instance::alloc_layout(&offsets);
let layout = Instance::alloc_layout(req.runtime_info.offsets());
let instance_ptr = alloc::alloc(layout) as *mut Instance;
Instance::new_at(instance_ptr, layout.size(), offsets, req, memories, tables);
Instance::new_at(instance_ptr, layout.size(), req, memories, tables);
Ok(InstanceHandle {
instance: instance_ptr,


@@ -169,7 +169,7 @@ impl InstancePool {
// If this fails then it's a configuration error at the `Engine` level
// from when this pooling allocator was created and that needs updating
// if this is to succeed.
let offsets = self.validate_instance_size(module)?;
self.validate_instance_size(req.runtime_info.offsets())?;
let mut memories =
PrimaryMap::with_capacity(module.memory_plans.len() - module.num_imported_memories);
@@ -192,14 +192,7 @@ impl InstancePool {
let instance_ptr = self.instance(instance_index) as _;
Instance::new_at(
instance_ptr,
self.instance_size,
offsets,
req,
memories,
tables,
);
Instance::new_at(instance_ptr, self.instance_size, req, memories, tables);
Ok(InstanceHandle {
instance: instance_ptr,
@@ -485,11 +478,10 @@ impl InstancePool {
Ok(())
}
fn validate_instance_size(&self, module: &Module) -> Result<VMOffsets<HostPtr>> {
let offsets = VMOffsets::new(HostPtr, module);
let layout = Instance::alloc_layout(&offsets);
fn validate_instance_size(&self, offsets: &VMOffsets<HostPtr>) -> Result<()> {
let layout = Instance::alloc_layout(offsets);
if layout.size() <= self.instance_size {
return Ok(offsets);
return Ok(());
}
// If this `module` exceeds the allocation size allotted to it then an
@@ -1078,17 +1070,10 @@ impl PoolingInstanceAllocator {
}
unsafe impl InstanceAllocator for PoolingInstanceAllocator {
fn validate(&self, module: &Module) -> Result<()> {
fn validate(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
self.instances.validate_memory_plans(module)?;
self.instances.validate_table_plans(module)?;
// Note that this check is not 100% accurate for cross-compiled systems
// where the pointer size may change since this check is often performed
// at compile time instead of runtime. Given that Wasmtime is almost
// always on a 64-bit platform though this is generally ok, and
// otherwise this check also happens during instantiation to
// double-check at that point.
self.instances.validate_instance_size(module)?;
self.instances.validate_instance_size(offsets)?;
Ok(())
}
@@ -1145,26 +1130,22 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
#[cfg(test)]
mod test {
use super::*;
use crate::{CompiledModuleId, Imports, MemoryImage, StorePtr, VMSharedSignatureIndex};
use crate::{
CompiledModuleId, Imports, MemoryImage, StorePtr, VMFunctionBody, VMSharedSignatureIndex,
};
use std::sync::Arc;
use wasmtime_environ::{DefinedFuncIndex, DefinedMemoryIndex, FunctionLoc, SignatureIndex};
use wasmtime_environ::{DefinedFuncIndex, DefinedMemoryIndex};
pub(crate) fn empty_runtime_info(
module: Arc<wasmtime_environ::Module>,
) -> Arc<dyn ModuleRuntimeInfo> {
struct RuntimeInfo(Arc<wasmtime_environ::Module>);
struct RuntimeInfo(Arc<wasmtime_environ::Module>, VMOffsets<HostPtr>);
impl ModuleRuntimeInfo for RuntimeInfo {
fn module(&self) -> &Arc<wasmtime_environ::Module> {
&self.0
}
fn image_base(&self) -> usize {
0
}
fn function_loc(&self, _: DefinedFuncIndex) -> &FunctionLoc {
unimplemented!()
}
fn signature(&self, _: SignatureIndex) -> VMSharedSignatureIndex {
fn function(&self, _: DefinedFuncIndex) -> *mut VMFunctionBody {
unimplemented!()
}
fn memory_image(
@@ -1183,9 +1164,13 @@ mod test {
fn signature_ids(&self) -> &[VMSharedSignatureIndex] {
&[]
}
fn offsets(&self) -> &VMOffsets<HostPtr> {
&self.1
}
}
Arc::new(RuntimeInfo(module))
let offsets = VMOffsets::new(HostPtr, &module);
Arc::new(RuntimeInfo(module, offsets))
}
#[cfg(target_pointer_width = "64")]


@@ -23,10 +23,7 @@
use anyhow::Error;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc;
use wasmtime_environ::DefinedFuncIndex;
use wasmtime_environ::DefinedMemoryIndex;
use wasmtime_environ::FunctionLoc;
use wasmtime_environ::SignatureIndex;
use wasmtime_environ::{DefinedFuncIndex, DefinedMemoryIndex, HostPtr, VMOffsets};
#[macro_use]
mod trampolines;
@@ -172,15 +169,8 @@ pub trait ModuleRuntimeInfo: Send + Sync + 'static {
/// The underlying Module.
fn module(&self) -> &Arc<wasmtime_environ::Module>;
/// The signatures.
fn signature(&self, index: SignatureIndex) -> VMSharedSignatureIndex;
/// The base address of where JIT functions are located.
fn image_base(&self) -> usize;
/// Descriptors about each compiled function, such as the offset from
/// `image_base`.
fn function_loc(&self, func_index: DefinedFuncIndex) -> &FunctionLoc;
/// Returns the address, in memory, that the function `index` resides at.
fn function(&self, index: DefinedFuncIndex) -> *mut VMFunctionBody;
/// Returns the `MemoryImage` structure used for copy-on-write
/// initialization of the memory, if it's applicable.
@@ -198,6 +188,9 @@ pub trait ModuleRuntimeInfo: Send + Sync + 'static {
/// Returns an array, indexed by `SignatureIndex` of all
/// `VMSharedSignatureIndex` entries corresponding to the `SignatureIndex`.
fn signature_ids(&self) -> &[VMSharedSignatureIndex];
/// Offset information for the current host.
fn offsets(&self) -> &VMOffsets<HostPtr>;
}
/// Returns the host OS page size, in bytes.