Tidy up some internals of instance allocation (#5346)

* Simplify the `ModuleRuntimeInfo` trait slightly

Fold two functions into one as they're only called from one location
anyway.

* Remove ModuleRuntimeInfo::signature

This is redundant, as the array mapping is already stored within the
`VMContext`, so that can be consulted rather than having a separate trait
function for it. This required altering the `Global` creation slightly
to work correctly in this situation.

* Remove a now-dead constant

* Shared `VMOffsets` across instances

This commit moves the computation of `VMOffsets` from per-instance
to per-module. The `VMOffsets` structure is also quite large,
so this shaves off 112 bytes per instance, which isn't a huge impact but
should help lower the cost of instantiating small modules.

* Remove `InstanceAllocator::adjust_tunables`

This is no longer needed by the pooling allocator.

* Fix compile warning

* Fix a vtune warning

* Fix pooling tests

* Fix another test warning
This commit is contained in:
Alex Crichton
2022-12-01 16:22:08 -06:00
committed by GitHub
parent ed6769084b
commit 03715dda9d
14 changed files with 142 additions and 176 deletions

View File

@@ -31,11 +31,6 @@ use cranelift_entity::packed_option::ReservedValue;
use std::convert::TryFrom; use std::convert::TryFrom;
use wasmtime_types::OwnedMemoryIndex; use wasmtime_types::OwnedMemoryIndex;
/// Sentinel value indicating that wasm has been interrupted.
// Note that this has a bit of an odd definition. See the `insert_stack_check`
// function in `cranelift/codegen/src/isa/x86/abi.rs` for more information
pub const INTERRUPTED: usize = usize::max_value() - 32 * 1024;
#[cfg(target_pointer_width = "32")] #[cfg(target_pointer_width = "32")]
fn cast_to_u32(sz: usize) -> u32 { fn cast_to_u32(sz: usize) -> u32 {
u32::try_from(sz).unwrap() u32::try_from(sz).unwrap()

View File

@@ -20,8 +20,7 @@ use wasmtime_environ::{
PrimaryMap, SignatureIndex, StackMapInformation, Tunables, WasmFunctionInfo, PrimaryMap, SignatureIndex, StackMapInformation, Tunables, WasmFunctionInfo,
}; };
use wasmtime_runtime::{ use wasmtime_runtime::{
CompiledModuleId, CompiledModuleIdAllocator, GdbJitImageRegistration, MmapVec, VMFunctionBody, CompiledModuleId, CompiledModuleIdAllocator, GdbJitImageRegistration, MmapVec, VMTrampoline,
VMTrampoline,
}; };
/// Secondary in-memory results of compilation. /// Secondary in-memory results of compilation.
@@ -482,19 +481,22 @@ impl CompiledModule {
Arc::get_mut(&mut self.module) Arc::get_mut(&mut self.module)
} }
/// Returns the map of all finished JIT functions compiled for this module /// Returns an iterator over all functions defined within this module with
/// their index and their body in memory.
#[inline] #[inline]
pub fn finished_functions( pub fn finished_functions(
&self, &self,
) -> impl ExactSizeIterator<Item = (DefinedFuncIndex, *const [VMFunctionBody])> + '_ { ) -> impl ExactSizeIterator<Item = (DefinedFuncIndex, &[u8])> + '_ {
let text = self.text(); self.funcs
self.funcs.iter().map(move |(i, (_, loc))| { .iter()
let func = &text[loc.start as usize..][..loc.length as usize]; .map(move |(i, _)| (i, self.finished_function(i)))
( }
i,
std::ptr::slice_from_raw_parts(func.as_ptr().cast::<VMFunctionBody>(), func.len()), /// Returns the body of the function that `index` points to.
) #[inline]
}) pub fn finished_function(&self, index: DefinedFuncIndex) -> &[u8] {
let (_, loc) = &self.funcs[index];
&self.text()[loc.start as usize..][..loc.length as usize]
} }
/// Returns the per-signature trampolines for this module. /// Returns the per-signature trampolines for this module.
@@ -517,9 +519,7 @@ impl CompiledModule {
/// ///
/// The iterator returned iterates over the span of the compiled function in /// The iterator returned iterates over the span of the compiled function in
/// memory with the stack maps associated with those bytes. /// memory with the stack maps associated with those bytes.
pub fn stack_maps( pub fn stack_maps(&self) -> impl Iterator<Item = (&[u8], &[StackMapInformation])> {
&self,
) -> impl Iterator<Item = (*const [VMFunctionBody], &[StackMapInformation])> {
self.finished_functions() self.finished_functions()
.map(|(_, f)| f) .map(|(_, f)| f)
.zip(self.funcs.values().map(|f| &f.0.stack_maps[..])) .zip(self.funcs.values().map(|f| &f.0.stack_maps[..]))

View File

@@ -83,7 +83,8 @@ impl State {
let tid = pid; // ThreadId does appear to track underlying thread. Using PID. let tid = pid; // ThreadId does appear to track underlying thread. Using PID.
for (idx, func) in module.finished_functions() { for (idx, func) in module.finished_functions() {
let (addr, len) = unsafe { ((*func).as_ptr().cast::<u8>(), (*func).len()) }; let addr = func.as_ptr();
let len = func.len();
if let Some(img) = &dbg_image { if let Some(img) = &dbg_image {
if let Err(err) = self.dump_from_debug_image(img, "wasm", addr, len, pid, tid) { if let Err(err) = self.dump_from_debug_image(img, "wasm", addr, len, pid, tid) {
println!( println!(

View File

@@ -93,7 +93,8 @@ impl State {
.unwrap_or_else(|| format!("wasm_module_{}", global_module_id)); .unwrap_or_else(|| format!("wasm_module_{}", global_module_id));
for (idx, func) in module.finished_functions() { for (idx, func) in module.finished_functions() {
let (addr, len) = unsafe { ((*func).as_ptr().cast::<u8>(), (*func).len()) }; let addr = func.as_ptr();
let len = func.len();
let method_name = super::debug_name(module, idx); let method_name = super::debug_name(module, idx);
log::trace!( log::trace!(
"new function {:?}::{:?} @ {:?}\n", "new function {:?}::{:?} @ {:?}\n",

View File

@@ -13,7 +13,7 @@ use crate::vmcontext::{
}; };
use crate::{ use crate::{
ExportFunction, ExportGlobal, ExportMemory, ExportTable, Imports, ModuleRuntimeInfo, Store, ExportFunction, ExportGlobal, ExportMemory, ExportTable, Imports, ModuleRuntimeInfo, Store,
VMFunctionBody, VMFunctionBody, VMSharedSignatureIndex,
}; };
use anyhow::Error; use anyhow::Error;
use memoffset::offset_of; use memoffset::offset_of;
@@ -61,9 +61,6 @@ pub(crate) struct Instance {
/// functions, lazy initialization state, etc. /// functions, lazy initialization state, etc.
runtime_info: Arc<dyn ModuleRuntimeInfo>, runtime_info: Arc<dyn ModuleRuntimeInfo>,
/// Offsets in the `vmctx` region, precomputed from the `module` above.
offsets: VMOffsets<HostPtr>,
/// WebAssembly linear memory data. /// WebAssembly linear memory data.
/// ///
/// This is where all runtime information about defined linear memories in /// This is where all runtime information about defined linear memories in
@@ -105,13 +102,12 @@ impl Instance {
unsafe fn new_at( unsafe fn new_at(
ptr: *mut Instance, ptr: *mut Instance,
alloc_size: usize, alloc_size: usize,
offsets: VMOffsets<HostPtr>,
req: InstanceAllocationRequest, req: InstanceAllocationRequest,
memories: PrimaryMap<DefinedMemoryIndex, Memory>, memories: PrimaryMap<DefinedMemoryIndex, Memory>,
tables: PrimaryMap<DefinedTableIndex, Table>, tables: PrimaryMap<DefinedTableIndex, Table>,
) { ) {
// The allocation must be *at least* the size required of `Instance`. // The allocation must be *at least* the size required of `Instance`.
assert!(alloc_size >= Self::alloc_layout(&offsets).size()); assert!(alloc_size >= Self::alloc_layout(req.runtime_info.offsets()).size());
let module = req.runtime_info.module(); let module = req.runtime_info.module();
let dropped_elements = EntitySet::with_capacity(module.passive_elements.len()); let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
@@ -121,7 +117,6 @@ impl Instance {
ptr, ptr,
Instance { Instance {
runtime_info: req.runtime_info.clone(), runtime_info: req.runtime_info.clone(),
offsets,
memories, memories,
tables, tables,
dropped_elements, dropped_elements,
@@ -133,7 +128,7 @@ impl Instance {
}, },
); );
(*ptr).initialize_vmctx(module, req.store, req.imports); (*ptr).initialize_vmctx(module, req.runtime_info.offsets(), req.store, req.imports);
} }
/// Helper function to access various locations offset from our `*mut /// Helper function to access various locations offset from our `*mut
@@ -148,24 +143,28 @@ impl Instance {
self.runtime_info.module() self.runtime_info.module()
} }
fn offsets(&self) -> &VMOffsets<HostPtr> {
self.runtime_info.offsets()
}
/// Return the indexed `VMFunctionImport`. /// Return the indexed `VMFunctionImport`.
fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport { fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmfunction_import(index)) } unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
} }
/// Return the index `VMTableImport`. /// Return the index `VMTableImport`.
fn imported_table(&self, index: TableIndex) -> &VMTableImport { fn imported_table(&self, index: TableIndex) -> &VMTableImport {
unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmtable_import(index)) } unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
} }
/// Return the indexed `VMMemoryImport`. /// Return the indexed `VMMemoryImport`.
fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport { fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmmemory_import(index)) } unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
} }
/// Return the indexed `VMGlobalImport`. /// Return the indexed `VMGlobalImport`.
fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport { fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
unsafe { &*self.vmctx_plus_offset(self.offsets.vmctx_vmglobal_import(index)) } unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
} }
/// Return the indexed `VMTableDefinition`. /// Return the indexed `VMTableDefinition`.
@@ -183,7 +182,7 @@ impl Instance {
/// Return the indexed `VMTableDefinition`. /// Return the indexed `VMTableDefinition`.
fn table_ptr(&self, index: DefinedTableIndex) -> *mut VMTableDefinition { fn table_ptr(&self, index: DefinedTableIndex) -> *mut VMTableDefinition {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_vmtable_definition(index)) } unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtable_definition(index)) }
} }
/// Get a locally defined or imported memory. /// Get a locally defined or imported memory.
@@ -221,7 +220,7 @@ impl Instance {
/// Return the indexed `VMMemoryDefinition`. /// Return the indexed `VMMemoryDefinition`.
fn memory_ptr(&self, index: DefinedMemoryIndex) -> *mut VMMemoryDefinition { fn memory_ptr(&self, index: DefinedMemoryIndex) -> *mut VMMemoryDefinition {
unsafe { *self.vmctx_plus_offset(self.offsets.vmctx_vmmemory_pointer(index)) } unsafe { *self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_pointer(index)) }
} }
/// Return the indexed `VMGlobalDefinition`. /// Return the indexed `VMGlobalDefinition`.
@@ -231,7 +230,7 @@ impl Instance {
/// Return the indexed `VMGlobalDefinition`. /// Return the indexed `VMGlobalDefinition`.
fn global_ptr(&self, index: DefinedGlobalIndex) -> *mut VMGlobalDefinition { fn global_ptr(&self, index: DefinedGlobalIndex) -> *mut VMGlobalDefinition {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_vmglobal_definition(index)) } unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_definition(index)) }
} }
/// Get a raw pointer to the global at the given index regardless whether it /// Get a raw pointer to the global at the given index regardless whether it
@@ -251,17 +250,17 @@ impl Instance {
/// Return a pointer to the interrupts structure /// Return a pointer to the interrupts structure
pub fn runtime_limits(&self) -> *mut *const VMRuntimeLimits { pub fn runtime_limits(&self) -> *mut *const VMRuntimeLimits {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_runtime_limits()) } unsafe { self.vmctx_plus_offset(self.offsets().vmctx_runtime_limits()) }
} }
/// Return a pointer to the global epoch counter used by this instance. /// Return a pointer to the global epoch counter used by this instance.
pub fn epoch_ptr(&self) -> *mut *const AtomicU64 { pub fn epoch_ptr(&self) -> *mut *const AtomicU64 {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_epoch_ptr()) } unsafe { self.vmctx_plus_offset(self.offsets().vmctx_epoch_ptr()) }
} }
/// Return a pointer to the `VMExternRefActivationsTable`. /// Return a pointer to the `VMExternRefActivationsTable`.
pub fn externref_activations_table(&self) -> *mut *mut VMExternRefActivationsTable { pub fn externref_activations_table(&self) -> *mut *mut VMExternRefActivationsTable {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_externref_activations_table()) } unsafe { self.vmctx_plus_offset(self.offsets().vmctx_externref_activations_table()) }
} }
/// Gets a pointer to this instance's `Store` which was originally /// Gets a pointer to this instance's `Store` which was originally
@@ -276,14 +275,15 @@ impl Instance {
/// store). /// store).
#[inline] #[inline]
pub fn store(&self) -> *mut dyn Store { pub fn store(&self) -> *mut dyn Store {
let ptr = unsafe { *self.vmctx_plus_offset::<*mut dyn Store>(self.offsets.vmctx_store()) }; let ptr =
unsafe { *self.vmctx_plus_offset::<*mut dyn Store>(self.offsets().vmctx_store()) };
assert!(!ptr.is_null()); assert!(!ptr.is_null());
ptr ptr
} }
pub unsafe fn set_store(&mut self, store: Option<*mut dyn Store>) { pub unsafe fn set_store(&mut self, store: Option<*mut dyn Store>) {
if let Some(store) = store { if let Some(store) = store {
*self.vmctx_plus_offset(self.offsets.vmctx_store()) = store; *self.vmctx_plus_offset(self.offsets().vmctx_store()) = store;
*self.runtime_limits() = (*store).vmruntime_limits(); *self.runtime_limits() = (*store).vmruntime_limits();
*self.epoch_ptr() = (*store).epoch_ptr(); *self.epoch_ptr() = (*store).epoch_ptr();
*self.externref_activations_table() = (*store).externref_activations_table().0; *self.externref_activations_table() = (*store).externref_activations_table().0;
@@ -292,7 +292,7 @@ impl Instance {
mem::size_of::<*mut dyn Store>(), mem::size_of::<*mut dyn Store>(),
mem::size_of::<[*mut (); 2]>() mem::size_of::<[*mut (); 2]>()
); );
*self.vmctx_plus_offset::<[*mut (); 2]>(self.offsets.vmctx_store()) = *self.vmctx_plus_offset::<[*mut (); 2]>(self.offsets().vmctx_store()) =
[ptr::null_mut(), ptr::null_mut()]; [ptr::null_mut(), ptr::null_mut()];
*self.runtime_limits() = ptr::null_mut(); *self.runtime_limits() = ptr::null_mut();
@@ -302,7 +302,7 @@ impl Instance {
} }
pub(crate) unsafe fn set_callee(&mut self, callee: Option<NonNull<VMFunctionBody>>) { pub(crate) unsafe fn set_callee(&mut self, callee: Option<NonNull<VMFunctionBody>>) {
*self.vmctx_plus_offset(self.offsets.vmctx_callee()) = *self.vmctx_plus_offset(self.offsets().vmctx_callee()) =
callee.map_or(ptr::null_mut(), |c| c.as_ptr()); callee.map_or(ptr::null_mut(), |c| c.as_ptr());
} }
@@ -499,13 +499,15 @@ impl Instance {
sig: SignatureIndex, sig: SignatureIndex,
into: *mut VMCallerCheckedAnyfunc, into: *mut VMCallerCheckedAnyfunc,
) { ) {
let type_index = self.runtime_info.signature(sig); let type_index = unsafe {
let base: *const VMSharedSignatureIndex =
*self.vmctx_plus_offset(self.offsets().vmctx_signature_ids_array());
*base.add(sig.index())
};
let (func_ptr, vmctx) = if let Some(def_index) = self.module().defined_func_index(index) { let (func_ptr, vmctx) = if let Some(def_index) = self.module().defined_func_index(index) {
( (
(self.runtime_info.image_base() self.runtime_info.function(def_index),
+ self.runtime_info.function_loc(def_index).start as usize)
as *mut _,
VMOpaqueContext::from_vmcontext(self.vmctx_ptr()), VMOpaqueContext::from_vmcontext(self.vmctx_ptr()),
) )
} else { } else {
@@ -569,7 +571,7 @@ impl Instance {
let sig = func.signature; let sig = func.signature;
let anyfunc: *mut VMCallerCheckedAnyfunc = self let anyfunc: *mut VMCallerCheckedAnyfunc = self
.vmctx_plus_offset::<VMCallerCheckedAnyfunc>( .vmctx_plus_offset::<VMCallerCheckedAnyfunc>(
self.offsets.vmctx_anyfunc(func.anyfunc), self.offsets().vmctx_anyfunc(func.anyfunc),
); );
self.construct_anyfunc(index, sig, anyfunc); self.construct_anyfunc(index, sig, anyfunc);
@@ -898,44 +900,49 @@ impl Instance {
/// The `VMContext` memory is assumed to be uninitialized; any field /// The `VMContext` memory is assumed to be uninitialized; any field
/// that we need in a certain state will be explicitly written by this /// that we need in a certain state will be explicitly written by this
/// function. /// function.
unsafe fn initialize_vmctx(&mut self, module: &Module, store: StorePtr, imports: Imports) { unsafe fn initialize_vmctx(
&mut self,
module: &Module,
offsets: &VMOffsets<HostPtr>,
store: StorePtr,
imports: Imports,
) {
assert!(std::ptr::eq(module, self.module().as_ref())); assert!(std::ptr::eq(module, self.module().as_ref()));
*self.vmctx_plus_offset(self.offsets.vmctx_magic()) = VMCONTEXT_MAGIC; *self.vmctx_plus_offset(offsets.vmctx_magic()) = VMCONTEXT_MAGIC;
self.set_callee(None); self.set_callee(None);
self.set_store(store.as_raw()); self.set_store(store.as_raw());
// Initialize shared signatures // Initialize shared signatures
let signatures = self.runtime_info.signature_ids(); let signatures = self.runtime_info.signature_ids();
*self.vmctx_plus_offset(self.offsets.vmctx_signature_ids_array()) = signatures.as_ptr(); *self.vmctx_plus_offset(offsets.vmctx_signature_ids_array()) = signatures.as_ptr();
// Initialize the built-in functions // Initialize the built-in functions
*self.vmctx_plus_offset(self.offsets.vmctx_builtin_functions()) = *self.vmctx_plus_offset(offsets.vmctx_builtin_functions()) = &VMBuiltinFunctionsArray::INIT;
&VMBuiltinFunctionsArray::INIT;
// Initialize the imports // Initialize the imports
debug_assert_eq!(imports.functions.len(), module.num_imported_funcs); debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
ptr::copy_nonoverlapping( ptr::copy_nonoverlapping(
imports.functions.as_ptr(), imports.functions.as_ptr(),
self.vmctx_plus_offset(self.offsets.vmctx_imported_functions_begin()), self.vmctx_plus_offset(offsets.vmctx_imported_functions_begin()),
imports.functions.len(), imports.functions.len(),
); );
debug_assert_eq!(imports.tables.len(), module.num_imported_tables); debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
ptr::copy_nonoverlapping( ptr::copy_nonoverlapping(
imports.tables.as_ptr(), imports.tables.as_ptr(),
self.vmctx_plus_offset(self.offsets.vmctx_imported_tables_begin()), self.vmctx_plus_offset(offsets.vmctx_imported_tables_begin()),
imports.tables.len(), imports.tables.len(),
); );
debug_assert_eq!(imports.memories.len(), module.num_imported_memories); debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
ptr::copy_nonoverlapping( ptr::copy_nonoverlapping(
imports.memories.as_ptr(), imports.memories.as_ptr(),
self.vmctx_plus_offset(self.offsets.vmctx_imported_memories_begin()), self.vmctx_plus_offset(offsets.vmctx_imported_memories_begin()),
imports.memories.len(), imports.memories.len(),
); );
debug_assert_eq!(imports.globals.len(), module.num_imported_globals); debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
ptr::copy_nonoverlapping( ptr::copy_nonoverlapping(
imports.globals.as_ptr(), imports.globals.as_ptr(),
self.vmctx_plus_offset(self.offsets.vmctx_imported_globals_begin()), self.vmctx_plus_offset(offsets.vmctx_imported_globals_begin()),
imports.globals.len(), imports.globals.len(),
); );
@@ -946,7 +953,7 @@ impl Instance {
// any state now. // any state now.
// Initialize the defined tables // Initialize the defined tables
let mut ptr = self.vmctx_plus_offset(self.offsets.vmctx_tables_begin()); let mut ptr = self.vmctx_plus_offset(offsets.vmctx_tables_begin());
for i in 0..module.table_plans.len() - module.num_imported_tables { for i in 0..module.table_plans.len() - module.num_imported_tables {
ptr::write(ptr, self.tables[DefinedTableIndex::new(i)].vmtable()); ptr::write(ptr, self.tables[DefinedTableIndex::new(i)].vmtable());
ptr = ptr.add(1); ptr = ptr.add(1);
@@ -957,8 +964,8 @@ impl Instance {
// time. Entries in `defined_memories` hold a pointer to a definition // time. Entries in `defined_memories` hold a pointer to a definition
// (all memories) whereas the `owned_memories` hold the actual // (all memories) whereas the `owned_memories` hold the actual
// definitions of memories owned (not shared) in the module. // definitions of memories owned (not shared) in the module.
let mut ptr = self.vmctx_plus_offset(self.offsets.vmctx_memories_begin()); let mut ptr = self.vmctx_plus_offset(offsets.vmctx_memories_begin());
let mut owned_ptr = self.vmctx_plus_offset(self.offsets.vmctx_owned_memories_begin()); let mut owned_ptr = self.vmctx_plus_offset(offsets.vmctx_owned_memories_begin());
for i in 0..module.memory_plans.len() - module.num_imported_memories { for i in 0..module.memory_plans.len() - module.num_imported_memories {
let defined_memory_index = DefinedMemoryIndex::new(i); let defined_memory_index = DefinedMemoryIndex::new(i);
let memory_index = module.memory_index(defined_memory_index); let memory_index = module.memory_index(defined_memory_index);

View File

@@ -92,18 +92,11 @@ impl StorePtr {
/// This trait is unsafe as it requires knowledge of Wasmtime's runtime internals to implement correctly. /// This trait is unsafe as it requires knowledge of Wasmtime's runtime internals to implement correctly.
pub unsafe trait InstanceAllocator: Send + Sync { pub unsafe trait InstanceAllocator: Send + Sync {
/// Validates that a module is supported by the allocator. /// Validates that a module is supported by the allocator.
fn validate(&self, module: &Module) -> Result<()> { fn validate(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
drop(module); drop((module, offsets));
Ok(()) Ok(())
} }
/// Adjusts the tunables prior to creation of any JIT compiler.
///
/// This method allows the instance allocator control over tunables passed to a `wasmtime_jit::Compiler`.
fn adjust_tunables(&self, tunables: &mut wasmtime_environ::Tunables) {
drop(tunables);
}
/// Allocates an instance for the given allocation request. /// Allocates an instance for the given allocation request.
/// ///
/// # Safety /// # Safety
@@ -464,11 +457,9 @@ pub unsafe fn allocate_single_memory_instance(
let mut memories = PrimaryMap::default(); let mut memories = PrimaryMap::default();
memories.push(memory); memories.push(memory);
let tables = PrimaryMap::default(); let tables = PrimaryMap::default();
let module = req.runtime_info.module(); let layout = Instance::alloc_layout(req.runtime_info.offsets());
let offsets = VMOffsets::new(HostPtr, module);
let layout = Instance::alloc_layout(&offsets);
let instance = alloc::alloc(layout) as *mut Instance; let instance = alloc::alloc(layout) as *mut Instance;
Instance::new_at(instance, layout.size(), offsets, req, memories, tables); Instance::new_at(instance, layout.size(), req, memories, tables);
Ok(InstanceHandle { instance }) Ok(InstanceHandle { instance })
} }
@@ -476,7 +467,7 @@ pub unsafe fn allocate_single_memory_instance(
/// ///
/// See [`InstanceAllocator::deallocate()`] for more details. /// See [`InstanceAllocator::deallocate()`] for more details.
pub unsafe fn deallocate(handle: &InstanceHandle) { pub unsafe fn deallocate(handle: &InstanceHandle) {
let layout = Instance::alloc_layout(&handle.instance().offsets); let layout = Instance::alloc_layout(handle.instance().offsets());
ptr::drop_in_place(handle.instance); ptr::drop_in_place(handle.instance);
alloc::dealloc(handle.instance.cast(), layout); alloc::dealloc(handle.instance.cast(), layout);
} }
@@ -485,12 +476,10 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
unsafe fn allocate(&self, mut req: InstanceAllocationRequest) -> Result<InstanceHandle> { unsafe fn allocate(&self, mut req: InstanceAllocationRequest) -> Result<InstanceHandle> {
let memories = self.create_memories(&mut req.store, &req.runtime_info)?; let memories = self.create_memories(&mut req.store, &req.runtime_info)?;
let tables = Self::create_tables(&mut req.store, &req.runtime_info)?; let tables = Self::create_tables(&mut req.store, &req.runtime_info)?;
let module = req.runtime_info.module(); let layout = Instance::alloc_layout(req.runtime_info.offsets());
let offsets = VMOffsets::new(HostPtr, module);
let layout = Instance::alloc_layout(&offsets);
let instance_ptr = alloc::alloc(layout) as *mut Instance; let instance_ptr = alloc::alloc(layout) as *mut Instance;
Instance::new_at(instance_ptr, layout.size(), offsets, req, memories, tables); Instance::new_at(instance_ptr, layout.size(), req, memories, tables);
Ok(InstanceHandle { Ok(InstanceHandle {
instance: instance_ptr, instance: instance_ptr,

View File

@@ -169,7 +169,7 @@ impl InstancePool {
// If this fails then it's a configuration error at the `Engine` level // If this fails then it's a configuration error at the `Engine` level
// from when this pooling allocator was created and that needs updating // from when this pooling allocator was created and that needs updating
// if this is to succeed. // if this is to succeed.
let offsets = self.validate_instance_size(module)?; self.validate_instance_size(req.runtime_info.offsets())?;
let mut memories = let mut memories =
PrimaryMap::with_capacity(module.memory_plans.len() - module.num_imported_memories); PrimaryMap::with_capacity(module.memory_plans.len() - module.num_imported_memories);
@@ -192,14 +192,7 @@ impl InstancePool {
let instance_ptr = self.instance(instance_index) as _; let instance_ptr = self.instance(instance_index) as _;
Instance::new_at( Instance::new_at(instance_ptr, self.instance_size, req, memories, tables);
instance_ptr,
self.instance_size,
offsets,
req,
memories,
tables,
);
Ok(InstanceHandle { Ok(InstanceHandle {
instance: instance_ptr, instance: instance_ptr,
@@ -485,11 +478,10 @@ impl InstancePool {
Ok(()) Ok(())
} }
fn validate_instance_size(&self, module: &Module) -> Result<VMOffsets<HostPtr>> { fn validate_instance_size(&self, offsets: &VMOffsets<HostPtr>) -> Result<()> {
let offsets = VMOffsets::new(HostPtr, module); let layout = Instance::alloc_layout(offsets);
let layout = Instance::alloc_layout(&offsets);
if layout.size() <= self.instance_size { if layout.size() <= self.instance_size {
return Ok(offsets); return Ok(());
} }
// If this `module` exceeds the allocation size allotted to it then an // If this `module` exceeds the allocation size allotted to it then an
@@ -1078,17 +1070,10 @@ impl PoolingInstanceAllocator {
} }
unsafe impl InstanceAllocator for PoolingInstanceAllocator { unsafe impl InstanceAllocator for PoolingInstanceAllocator {
fn validate(&self, module: &Module) -> Result<()> { fn validate(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
self.instances.validate_memory_plans(module)?; self.instances.validate_memory_plans(module)?;
self.instances.validate_table_plans(module)?; self.instances.validate_table_plans(module)?;
self.instances.validate_instance_size(offsets)?;
// Note that this check is not 100% accurate for cross-compiled systems
// where the pointer size may change since this check is often performed
// at compile time instead of runtime. Given that Wasmtime is almost
// always on a 64-bit platform though this is generally ok, and
// otherwise this check also happens during instantiation to
// double-check at that point.
self.instances.validate_instance_size(module)?;
Ok(()) Ok(())
} }
@@ -1145,26 +1130,22 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
use crate::{CompiledModuleId, Imports, MemoryImage, StorePtr, VMSharedSignatureIndex}; use crate::{
CompiledModuleId, Imports, MemoryImage, StorePtr, VMFunctionBody, VMSharedSignatureIndex,
};
use std::sync::Arc; use std::sync::Arc;
use wasmtime_environ::{DefinedFuncIndex, DefinedMemoryIndex, FunctionLoc, SignatureIndex}; use wasmtime_environ::{DefinedFuncIndex, DefinedMemoryIndex};
pub(crate) fn empty_runtime_info( pub(crate) fn empty_runtime_info(
module: Arc<wasmtime_environ::Module>, module: Arc<wasmtime_environ::Module>,
) -> Arc<dyn ModuleRuntimeInfo> { ) -> Arc<dyn ModuleRuntimeInfo> {
struct RuntimeInfo(Arc<wasmtime_environ::Module>); struct RuntimeInfo(Arc<wasmtime_environ::Module>, VMOffsets<HostPtr>);
impl ModuleRuntimeInfo for RuntimeInfo { impl ModuleRuntimeInfo for RuntimeInfo {
fn module(&self) -> &Arc<wasmtime_environ::Module> { fn module(&self) -> &Arc<wasmtime_environ::Module> {
&self.0 &self.0
} }
fn image_base(&self) -> usize { fn function(&self, _: DefinedFuncIndex) -> *mut VMFunctionBody {
0
}
fn function_loc(&self, _: DefinedFuncIndex) -> &FunctionLoc {
unimplemented!()
}
fn signature(&self, _: SignatureIndex) -> VMSharedSignatureIndex {
unimplemented!() unimplemented!()
} }
fn memory_image( fn memory_image(
@@ -1183,9 +1164,13 @@ mod test {
fn signature_ids(&self) -> &[VMSharedSignatureIndex] { fn signature_ids(&self) -> &[VMSharedSignatureIndex] {
&[] &[]
} }
fn offsets(&self) -> &VMOffsets<HostPtr> {
&self.1
}
} }
Arc::new(RuntimeInfo(module)) let offsets = VMOffsets::new(HostPtr, &module);
Arc::new(RuntimeInfo(module, offsets))
} }
#[cfg(target_pointer_width = "64")] #[cfg(target_pointer_width = "64")]

View File

@@ -23,10 +23,7 @@
use anyhow::Error; use anyhow::Error;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc; use std::sync::Arc;
use wasmtime_environ::DefinedFuncIndex; use wasmtime_environ::{DefinedFuncIndex, DefinedMemoryIndex, HostPtr, VMOffsets};
use wasmtime_environ::DefinedMemoryIndex;
use wasmtime_environ::FunctionLoc;
use wasmtime_environ::SignatureIndex;
#[macro_use] #[macro_use]
mod trampolines; mod trampolines;
@@ -172,15 +169,8 @@ pub trait ModuleRuntimeInfo: Send + Sync + 'static {
/// The underlying Module. /// The underlying Module.
fn module(&self) -> &Arc<wasmtime_environ::Module>; fn module(&self) -> &Arc<wasmtime_environ::Module>;
/// The signatures. /// Returns the address, in memory, that the function `index` resides at.
fn signature(&self, index: SignatureIndex) -> VMSharedSignatureIndex; fn function(&self, index: DefinedFuncIndex) -> *mut VMFunctionBody;
/// The base address of where JIT functions are located.
fn image_base(&self) -> usize;
/// Descriptors about each compiled function, such as the offset from
/// `image_base`.
fn function_loc(&self, func_index: DefinedFuncIndex) -> &FunctionLoc;
/// Returns the `MemoryImage` structure used for copy-on-write /// Returns the `MemoryImage` structure used for copy-on-write
/// initialization of the memory, if it's applicable. /// initialization of the memory, if it's applicable.
@@ -198,6 +188,9 @@ pub trait ModuleRuntimeInfo: Send + Sync + 'static {
/// Returns an array, indexed by `SignatureIndex` of all /// Returns an array, indexed by `SignatureIndex` of all
/// `VMSharedSignatureIndex` entries corresponding to the `SignatureIndex`. /// `VMSharedSignatureIndex` entries corresponding to the `SignatureIndex`.
fn signature_ids(&self) -> &[VMSharedSignatureIndex]; fn signature_ids(&self) -> &[VMSharedSignatureIndex];
/// Offset information for the current host.
fn offsets(&self) -> &VMOffsets<HostPtr>;
} }
/// Returns the host OS page size, in bytes. /// Returns the host OS page size, in bytes.

View File

@@ -86,9 +86,9 @@ impl Engine {
#[cfg(compiler)] #[cfg(compiler)]
let compiler = config.build_compiler()?; let compiler = config.build_compiler()?;
drop(&mut config); // silence warnings without `cfg(compiler)`
let allocator = config.build_allocator()?; let allocator = config.build_allocator()?;
allocator.adjust_tunables(&mut config.tunables);
let profiler = config.build_profiler()?; let profiler = config.build_profiler()?;
Ok(Engine { Ok(Engine {

View File

@@ -14,12 +14,13 @@ use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use wasmparser::{Parser, ValidPayload, Validator}; use wasmparser::{Parser, ValidPayload, Validator};
use wasmtime_environ::{ use wasmtime_environ::{
DefinedFuncIndex, DefinedMemoryIndex, FunctionLoc, ModuleEnvironment, ModuleTranslation, DefinedFuncIndex, DefinedMemoryIndex, HostPtr, ModuleEnvironment, ModuleTranslation,
ModuleTypes, ObjectKind, PrimaryMap, SignatureIndex, WasmFunctionInfo, ModuleTypes, ObjectKind, PrimaryMap, VMOffsets, WasmFunctionInfo,
}; };
use wasmtime_jit::{CodeMemory, CompiledModule, CompiledModuleInfo}; use wasmtime_jit::{CodeMemory, CompiledModule, CompiledModuleInfo};
use wasmtime_runtime::{ use wasmtime_runtime::{
CompiledModuleId, MemoryImage, MmapVec, ModuleMemoryImages, VMSharedSignatureIndex, CompiledModuleId, MemoryImage, MmapVec, ModuleMemoryImages, VMFunctionBody,
VMSharedSignatureIndex,
}; };
mod registry; mod registry;
@@ -123,6 +124,9 @@ struct ModuleInner {
/// Flag indicating whether this module can be serialized or not. /// Flag indicating whether this module can be serialized or not.
serializable: bool, serializable: bool,
/// Runtime offset information for `VMContext`.
offsets: VMOffsets<HostPtr>,
} }
impl Module { impl Module {
@@ -664,7 +668,8 @@ impl Module {
)?; )?;
// Validate the module can be used with the current allocator // Validate the module can be used with the current allocator
engine.allocator().validate(module.module())?; let offsets = VMOffsets::new(HostPtr, module.module());
engine.allocator().validate(module.module(), &offsets)?;
Ok(Self { Ok(Self {
inner: Arc::new(ModuleInner { inner: Arc::new(ModuleInner {
@@ -673,6 +678,7 @@ impl Module {
memory_images: OnceCell::new(), memory_images: OnceCell::new(),
module, module,
serializable, serializable,
offsets,
}), }),
}) })
} }
@@ -1098,16 +1104,12 @@ impl wasmtime_runtime::ModuleRuntimeInfo for ModuleInner {
self.module.module() self.module.module()
} }
fn signature(&self, index: SignatureIndex) -> VMSharedSignatureIndex { fn function(&self, index: DefinedFuncIndex) -> *mut VMFunctionBody {
self.code.signatures().as_module_map()[index] self.module
} .finished_function(index)
.as_ptr()
fn image_base(&self) -> usize { .cast::<VMFunctionBody>()
self.module.text().as_ptr() as usize .cast_mut()
}
fn function_loc(&self, index: DefinedFuncIndex) -> &FunctionLoc {
self.module.func_loc(index)
} }
fn memory_image(&self, memory: DefinedMemoryIndex) -> Result<Option<&Arc<MemoryImage>>> { fn memory_image(&self, memory: DefinedMemoryIndex) -> Result<Option<&Arc<MemoryImage>>> {
@@ -1126,6 +1128,10 @@ impl wasmtime_runtime::ModuleRuntimeInfo for ModuleInner {
fn signature_ids(&self) -> &[VMSharedSignatureIndex] { fn signature_ids(&self) -> &[VMSharedSignatureIndex] {
self.code.signatures().as_module_map().values().as_slice() self.code.signatures().as_module_map().values().as_slice()
} }
fn offsets(&self) -> &VMOffsets<HostPtr> {
&self.offsets
}
} }
impl wasmtime_runtime::ModuleInfo for ModuleInner { impl wasmtime_runtime::ModuleInfo for ModuleInner {
@@ -1160,26 +1166,22 @@ impl wasmtime_runtime::ModuleInfo for ModuleInner {
/// default-callee instance). /// default-callee instance).
pub(crate) struct BareModuleInfo { pub(crate) struct BareModuleInfo {
module: Arc<wasmtime_environ::Module>, module: Arc<wasmtime_environ::Module>,
image_base: usize, one_signature: Option<VMSharedSignatureIndex>,
one_signature: Option<(SignatureIndex, VMSharedSignatureIndex)>, offsets: VMOffsets<HostPtr>,
} }
impl BareModuleInfo { impl BareModuleInfo {
pub(crate) fn empty(module: Arc<wasmtime_environ::Module>) -> Self { pub(crate) fn empty(module: Arc<wasmtime_environ::Module>) -> Self {
BareModuleInfo { BareModuleInfo::maybe_imported_func(module, None)
module,
image_base: 0,
one_signature: None,
}
} }
pub(crate) fn maybe_imported_func( pub(crate) fn maybe_imported_func(
module: Arc<wasmtime_environ::Module>, module: Arc<wasmtime_environ::Module>,
one_signature: Option<(SignatureIndex, VMSharedSignatureIndex)>, one_signature: Option<VMSharedSignatureIndex>,
) -> Self { ) -> Self {
BareModuleInfo { BareModuleInfo {
offsets: VMOffsets::new(HostPtr, &module),
module, module,
image_base: 0,
one_signature, one_signature,
} }
} }
@@ -1194,19 +1196,7 @@ impl wasmtime_runtime::ModuleRuntimeInfo for BareModuleInfo {
&self.module &self.module
} }
fn signature(&self, index: SignatureIndex) -> VMSharedSignatureIndex { fn function(&self, _index: DefinedFuncIndex) -> *mut VMFunctionBody {
let (signature_id, signature) = self
.one_signature
.expect("Signature for one function should be present if queried");
assert_eq!(index, signature_id);
signature
}
fn image_base(&self) -> usize {
self.image_base
}
fn function_loc(&self, _index: DefinedFuncIndex) -> &FunctionLoc {
unreachable!() unreachable!()
} }
@@ -1224,10 +1214,14 @@ impl wasmtime_runtime::ModuleRuntimeInfo for BareModuleInfo {
fn signature_ids(&self) -> &[VMSharedSignatureIndex] { fn signature_ids(&self) -> &[VMSharedSignatureIndex] {
match &self.one_signature { match &self.one_signature {
Some((_, id)) => std::slice::from_ref(id), Some(id) => std::slice::from_ref(id),
None => &[], None => &[],
} }
} }
fn offsets(&self) -> &VMOffsets<HostPtr> {
&self.offsets
}
} }
/// Helper method to construct a `ModuleMemoryImages` for an associated /// Helper method to construct a `ModuleMemoryImages` for an associated

View File

@@ -161,7 +161,7 @@ impl LoadedCode {
// functions. // functions.
None => return, None => return,
}; };
let start = unsafe { (*func).as_ptr() as usize }; let start = func.as_ptr() as usize;
match self.modules.entry(start) { match self.modules.entry(start) {
// This module is already present, and it should be the same as // This module is already present, and it should be the same as
@@ -279,9 +279,9 @@ fn test_frame_info() -> Result<(), anyhow::Error> {
Instance::new(&mut store, &module, &[])?; Instance::new(&mut store, &module, &[])?;
for (i, alloc) in module.compiled_module().finished_functions() { for (i, alloc) in module.compiled_module().finished_functions() {
let (start, end) = unsafe { let (start, end) = {
let ptr = (*alloc).as_ptr(); let ptr = alloc.as_ptr();
let len = (*alloc).len(); let len = alloc.len();
(ptr as usize, ptr as usize + len) (ptr as usize, ptr as usize + len)
}; };
for pc in start..end { for pc in start..end {

View File

@@ -17,7 +17,7 @@ use crate::{GlobalType, MemoryType, TableType, Val};
use anyhow::Result; use anyhow::Result;
use std::any::Any; use std::any::Any;
use std::sync::Arc; use std::sync::Arc;
use wasmtime_environ::{GlobalIndex, MemoryIndex, Module, SignatureIndex, TableIndex}; use wasmtime_environ::{GlobalIndex, MemoryIndex, Module, TableIndex};
use wasmtime_runtime::{ use wasmtime_runtime::{
Imports, InstanceAllocationRequest, InstanceAllocator, OnDemandInstanceAllocator, SharedMemory, Imports, InstanceAllocationRequest, InstanceAllocator, OnDemandInstanceAllocator, SharedMemory,
StorePtr, VMFunctionImport, VMSharedSignatureIndex, StorePtr, VMFunctionImport, VMSharedSignatureIndex,
@@ -28,7 +28,7 @@ fn create_handle(
store: &mut StoreOpaque, store: &mut StoreOpaque,
host_state: Box<dyn Any + Send + Sync>, host_state: Box<dyn Any + Send + Sync>,
func_imports: &[VMFunctionImport], func_imports: &[VMFunctionImport],
one_signature: Option<(SignatureIndex, VMSharedSignatureIndex)>, one_signature: Option<VMSharedSignatureIndex>,
) -> Result<InstanceId> { ) -> Result<InstanceId> {
let mut imports = Imports::default(); let mut imports = Imports::default();
imports.functions = func_imports; imports.functions = func_imports;

View File

@@ -39,8 +39,8 @@ pub fn create_global(store: &mut StoreOpaque, gt: &GlobalType, val: Val) -> Resu
// our global with a `ref.func` to grab that imported function. // our global with a `ref.func` to grab that imported function.
let f = f.caller_checked_anyfunc(store); let f = f.caller_checked_anyfunc(store);
let f = unsafe { f.as_ref() }; let f = unsafe { f.as_ref() };
let sig_id = SignatureIndex::from_u32(u32::max_value() - 1); let sig_id = SignatureIndex::from_u32(0);
one_signature = Some((sig_id, f.type_index)); one_signature = Some(f.type_index);
module.types.push(ModuleType::Function(sig_id)); module.types.push(ModuleType::Function(sig_id));
let func_index = module.push_escaped_function(sig_id, AnyfuncIndex::from_u32(0)); let func_index = module.push_escaped_function(sig_id, AnyfuncIndex::from_u32(0));
module.num_imported_funcs = 1; module.num_imported_funcs = 1;

View File

@@ -646,10 +646,11 @@ fn instance_too_large() -> Result<()> {
let engine = Engine::new(&config)?; let engine = Engine::new(&config)?;
let expected = "\ let expected = "\
instance allocation for this module requires 336 bytes which exceeds the \ instance allocation for this module requires 224 bytes which exceeds the \
configured maximum of 16 bytes; breakdown of allocation requirement: configured maximum of 16 bytes; breakdown of allocation requirement:
* 76.19% - 256 bytes - instance state management * 64.29% - 144 bytes - instance state management
* 7.14% - 16 bytes - jit store state
"; ";
match Module::new(&engine, "(module)") { match Module::new(&engine, "(module)") {
Ok(_) => panic!("should have failed to compile"), Ok(_) => panic!("should have failed to compile"),
@@ -663,11 +664,11 @@ configured maximum of 16 bytes; breakdown of allocation requirement:
lots_of_globals.push_str(")"); lots_of_globals.push_str(")");
let expected = "\ let expected = "\
instance allocation for this module requires 1936 bytes which exceeds the \ instance allocation for this module requires 1824 bytes which exceeds the \
configured maximum of 16 bytes; breakdown of allocation requirement: configured maximum of 16 bytes; breakdown of allocation requirement:
* 13.22% - 256 bytes - instance state management * 7.89% - 144 bytes - instance state management
* 82.64% - 1600 bytes - defined globals * 87.72% - 1600 bytes - defined globals
"; ";
match Module::new(&engine, &lots_of_globals) { match Module::new(&engine, &lots_of_globals) {
Ok(_) => panic!("should have failed to compile"), Ok(_) => panic!("should have failed to compile"),