Always allocate Instance memory with malloc (#5656)

This commit removes the pooling of `Instance` allocations from the
pooling instance allocator. This means that the allocation of `Instance`
(and `VMContext`) memory, now always happens through the system `malloc`
and `free` instead of optionally being part of the pooling instance
allocator. Along the way this refactors the `InstanceAllocator` trait so
the pooling and on-demand allocators can share more structure with this
new property of the implementation.

The main rationale for this commit is to reduce the RSS of long-lived
programs which allocate instances with the pooling instance allocator
and aren't using the "next available" allocation strategy. In this
situation the memory for an instance is never decommitted until the end
of the program, meaning that eventually all instance slots will become
occupied and resident. This has the effect of Wasmtime slowly eating
more and more memory over time as each slot gets an instance allocated.
By switching to the system allocator this should reduce the current RSS
workload from O(used slots) to O(active slots), which is more in line
with expectations.
This commit is contained in:
Alex Crichton
2023-02-01 13:37:45 -06:00
committed by GitHub
parent 8ffbb9cfd7
commit 91b8a2c527
12 changed files with 571 additions and 646 deletions

View File

@@ -16,8 +16,9 @@ use crate::{
VMFunctionBody, VMSharedSignatureIndex,
};
use anyhow::Error;
use anyhow::Result;
use memoffset::offset_of;
use std::alloc::Layout;
use std::alloc::{self, Layout};
use std::any::Any;
use std::convert::TryFrom;
use std::hash::Hash;
@@ -87,6 +88,13 @@ pub(crate) struct Instance {
/// allocation, but some host-defined objects will store their state here.
host_state: Box<dyn Any + Send + Sync>,
/// Index of this instance within its `InstanceAllocator` trait
/// implementation.
///
/// This is always 0 for the on-demand instance allocator and it's the
/// index of the slot in the pooling allocator.
index: usize,
/// Additional context used by compiled wasm code. This field is last, and
/// represents a dynamically-sized array that extends beyond the nominal
/// end of the struct (similar to a flexible array member).
@@ -99,15 +107,19 @@ impl Instance {
///
/// It is assumed the memory was properly aligned and the
/// allocation was `alloc_size` in bytes.
unsafe fn new_at(
ptr: *mut Instance,
alloc_size: usize,
unsafe fn new(
req: InstanceAllocationRequest,
index: usize,
memories: PrimaryMap<DefinedMemoryIndex, Memory>,
tables: PrimaryMap<DefinedTableIndex, Table>,
) {
) -> InstanceHandle {
// The allocation must be *at least* the size required of `Instance`.
assert!(alloc_size >= Self::alloc_layout(req.runtime_info.offsets()).size());
let layout = Self::alloc_layout(req.runtime_info.offsets());
let ptr = alloc::alloc(layout);
if ptr.is_null() {
alloc::handle_alloc_error(layout);
}
let ptr = ptr.cast::<Instance>();
let module = req.runtime_info.module();
let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
@@ -117,6 +129,7 @@ impl Instance {
ptr,
Instance {
runtime_info: req.runtime_info.clone(),
index,
memories,
tables,
dropped_elements,
@@ -129,6 +142,7 @@ impl Instance {
);
(*ptr).initialize_vmctx(module, req.runtime_info.offsets(), req.store, req.imports);
InstanceHandle { instance: ptr }
}
/// Helper function to access various locations offset from our `*mut
@@ -1207,4 +1221,14 @@ impl InstanceHandle {
instance: self.instance,
}
}
/// Performs post-initialization of an instance after its handle has been
/// created and registered with a store.
///
/// Failure of this function means that the instance still must persist
/// within the store since failure may indicate partial failure, or some
/// state could be referenced by other instances.
pub fn initialize(&mut self, module: &Module, is_bulk_memory: bool) -> Result<()> {
allocator::initialize_instance(self.instance_mut(), module, is_bulk_memory)
}
}

View File

@@ -87,42 +87,98 @@ impl StorePtr {
/// # Safety
///
/// This trait is unsafe as it requires knowledge of Wasmtime's runtime internals to implement correctly.
pub unsafe trait InstanceAllocator: Send + Sync {
pub unsafe trait InstanceAllocator {
/// Validates that a module is supported by the allocator.
fn validate(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
drop((module, offsets));
Ok(())
}
/// Allocates an instance for the given allocation request.
/// Allocates a fresh `InstanceHandle` for the `req` given.
///
/// # Safety
/// This will allocate memories and tables internally from this allocator
/// and weave that altogether into a final and complete `InstanceHandle`
/// ready to be registered with a store.
///
/// This method is not inherently unsafe, but care must be made to ensure
/// pointers passed in the allocation request outlive the returned instance.
unsafe fn allocate(&self, req: InstanceAllocationRequest) -> Result<InstanceHandle>;
/// Note that the returned instance must still have `.initialize(..)` called
/// on it to complete the instantiation process.
fn allocate(&self, mut req: InstanceAllocationRequest) -> Result<InstanceHandle> {
let index = self.allocate_index(&req)?;
let module = req.runtime_info.module();
let mut memories =
PrimaryMap::with_capacity(module.memory_plans.len() - module.num_imported_memories);
let mut tables =
PrimaryMap::with_capacity(module.table_plans.len() - module.num_imported_tables);
/// Finishes the instantiation process started by an instance allocator.
let result = self
.allocate_memories(index, &mut req, &mut memories)
.and_then(|()| self.allocate_tables(index, &mut req, &mut tables));
if let Err(e) = result {
self.deallocate_memories(index, &mut memories);
self.deallocate_tables(index, &mut tables);
self.deallocate_index(index);
return Err(e);
}
unsafe { Ok(Instance::new(req, index, memories, tables)) }
}
/// Deallocates the provided instance.
///
/// # Safety
/// This will null-out the pointer within `handle` and otherwise reclaim
/// resources such as tables, memories, and the instance memory itself.
fn deallocate(&self, handle: &mut InstanceHandle) {
let index = handle.instance().index;
self.deallocate_memories(index, &mut handle.instance_mut().memories);
self.deallocate_tables(index, &mut handle.instance_mut().tables);
unsafe {
let layout = Instance::alloc_layout(handle.instance().offsets());
ptr::drop_in_place(handle.instance);
alloc::dealloc(handle.instance.cast(), layout);
handle.instance = std::ptr::null_mut();
}
self.deallocate_index(index);
}
/// Optionally allocates an allocator-defined index for the `req` provided.
///
/// This method is only safe to call immediately after an instance has been allocated.
unsafe fn initialize(
/// The return value here, if successful, is passed to the various methods
/// below for memory/table allocation/deallocation.
fn allocate_index(&self, req: &InstanceAllocationRequest) -> Result<usize>;
/// Deallocates indices allocated by `allocate_index`.
fn deallocate_index(&self, index: usize);
/// Attempts to allocate all defined linear memories for a module.
///
/// Pushes all memories for `req` onto the `mems` storage provided which is
/// already appropriately allocated to contain all memories.
///
/// Note that this is allowed to fail. Failure can additionally happen after
/// some memories have already been successfully allocated. All memories
/// pushed onto `mems` are guaranteed to one day make their way to
/// `deallocate_memories`.
fn allocate_memories(
&self,
handle: &mut InstanceHandle,
module: &Module,
is_bulk_memory: bool,
index: usize,
req: &mut InstanceAllocationRequest,
mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
) -> Result<()>;
/// Deallocates a previously allocated instance.
///
/// # Safety
///
/// This function is unsafe because there are no guarantees that the given handle
/// is the only owner of the underlying instance to deallocate.
///
/// Use extreme care when deallocating an instance so that there are no dangling instance pointers.
unsafe fn deallocate(&self, handle: &InstanceHandle);
/// Deallocates all memories provided, optionally reclaiming resources for
/// the pooling allocator for example.
fn deallocate_memories(&self, index: usize, mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>);
/// Same as `allocate_memories`, but for tables.
fn allocate_tables(
&self,
index: usize,
req: &mut InstanceAllocationRequest,
tables: &mut PrimaryMap<DefinedTableIndex, Table>,
) -> Result<()>;
/// Same as `deallocate_memories`, but for tables.
fn deallocate_tables(&self, index: usize, tables: &mut PrimaryMap<DefinedTableIndex, Table>);
/// Allocates a fiber stack for calling async functions on.
#[cfg(feature = "async")]
@@ -338,7 +394,7 @@ fn check_init_bounds(instance: &mut Instance, module: &Module) -> Result<()> {
Ok(())
}
fn initialize_instance(
pub(super) fn initialize_instance(
instance: &mut Instance,
module: &Module,
is_bulk_memory: bool,
@@ -378,57 +434,6 @@ impl OnDemandInstanceAllocator {
stack_size,
}
}
fn create_tables(
store: &mut StorePtr,
runtime_info: &Arc<dyn ModuleRuntimeInfo>,
) -> Result<PrimaryMap<DefinedTableIndex, Table>> {
let module = runtime_info.module();
let num_imports = module.num_imported_tables;
let mut tables: PrimaryMap<DefinedTableIndex, _> =
PrimaryMap::with_capacity(module.table_plans.len() - num_imports);
for (_, table) in module.table_plans.iter().skip(num_imports) {
tables.push(Table::new_dynamic(table, unsafe {
store
.get()
.expect("if module has table plans, store is not empty")
})?);
}
Ok(tables)
}
fn create_memories(
&self,
store: &mut StorePtr,
runtime_info: &Arc<dyn ModuleRuntimeInfo>,
) -> Result<PrimaryMap<DefinedMemoryIndex, Memory>> {
let module = runtime_info.module();
let creator = self
.mem_creator
.as_deref()
.unwrap_or_else(|| &DefaultMemoryCreator);
let num_imports = module.num_imported_memories;
let mut memories: PrimaryMap<DefinedMemoryIndex, _> =
PrimaryMap::with_capacity(module.memory_plans.len() - num_imports);
for (memory_idx, plan) in module.memory_plans.iter().skip(num_imports) {
let defined_memory_idx = module
.defined_memory_index(memory_idx)
.expect("Skipped imports, should never be None");
let image = runtime_info.memory_image(defined_memory_idx)?;
memories.push(Memory::new_dynamic(
plan,
creator,
unsafe {
store
.get()
.expect("if module has memory plans, store is not empty")
},
image,
)?);
}
Ok(memories)
}
}
impl Default for OnDemandInstanceAllocator {
@@ -441,59 +446,75 @@ impl Default for OnDemandInstanceAllocator {
}
}
/// Allocate an instance containing a single memory.
///
/// In order to import a [`Memory`] into a WebAssembly instance, Wasmtime
/// requires that memory to exist in its own instance. Here we bring to life
/// such a "Frankenstein" instance with the only purpose of exporting a
/// [`Memory`].
///
/// # Safety
///
/// Pointers carried inside `req` must outlive the returned handle, and the
/// handle must eventually be released with `deallocate` below so that the
/// raw allocation made here is freed — TODO confirm against callers.
pub unsafe fn allocate_single_memory_instance(
    req: InstanceAllocationRequest,
    memory: Memory,
) -> Result<InstanceHandle> {
    // This instance defines exactly one memory (the one provided) and no
    // tables.
    let mut memories = PrimaryMap::default();
    memories.push(memory);
    let tables = PrimaryMap::default();

    // Allocate raw storage sized and aligned for `Instance` + `VMContext`,
    // then initialize it in place.
    //
    // NOTE(review): the result of `alloc::alloc` is not checked for null
    // here; calling `alloc::handle_alloc_error` on failure would be safer.
    let layout = Instance::alloc_layout(req.runtime_info.offsets());
    let instance = alloc::alloc(layout) as *mut Instance;
    Instance::new_at(instance, layout.size(), req, memories, tables);
    Ok(InstanceHandle { instance })
}
/// Internal implementation of [`InstanceHandle`] deallocation.
///
/// See [`InstanceAllocator::deallocate()`] for more details.
///
/// # Safety
///
/// `handle.instance` must point to a live `Instance` allocated with the
/// layout returned by `Instance::alloc_layout`; the pointer is dangling
/// once this function returns.
pub unsafe fn deallocate(handle: &InstanceHandle) {
    // Recompute the allocation-time layout so `dealloc` receives matching
    // size and alignment, run the destructor, then free the storage.
    let layout = Instance::alloc_layout(handle.instance().offsets());
    ptr::drop_in_place(handle.instance);
    alloc::dealloc(handle.instance.cast(), layout);
}
unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
unsafe fn allocate(&self, mut req: InstanceAllocationRequest) -> Result<InstanceHandle> {
let memories = self.create_memories(&mut req.store, &req.runtime_info)?;
let tables = Self::create_tables(&mut req.store, &req.runtime_info)?;
let layout = Instance::alloc_layout(req.runtime_info.offsets());
let instance_ptr = alloc::alloc(layout) as *mut Instance;
Instance::new_at(instance_ptr, layout.size(), req, memories, tables);
Ok(InstanceHandle {
instance: instance_ptr,
})
fn allocate_index(&self, _req: &InstanceAllocationRequest) -> Result<usize> {
Ok(0)
}
unsafe fn initialize(
fn deallocate_index(&self, index: usize) {
assert_eq!(index, 0);
}
fn allocate_memories(
&self,
handle: &mut InstanceHandle,
module: &Module,
is_bulk_memory: bool,
_index: usize,
req: &mut InstanceAllocationRequest,
memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
) -> Result<()> {
initialize_instance(handle.instance_mut(), module, is_bulk_memory)
let module = req.runtime_info.module();
let creator = self
.mem_creator
.as_deref()
.unwrap_or_else(|| &DefaultMemoryCreator);
let num_imports = module.num_imported_memories;
for (memory_idx, plan) in module.memory_plans.iter().skip(num_imports) {
let defined_memory_idx = module
.defined_memory_index(memory_idx)
.expect("Skipped imports, should never be None");
let image = req.runtime_info.memory_image(defined_memory_idx)?;
memories.push(Memory::new_dynamic(
plan,
creator,
unsafe {
req.store
.get()
.expect("if module has memory plans, store is not empty")
},
image,
)?);
}
Ok(())
}
unsafe fn deallocate(&self, handle: &InstanceHandle) {
deallocate(handle)
fn deallocate_memories(
&self,
_index: usize,
_mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
) {
// normal destructors do cleanup here
}
fn allocate_tables(
&self,
_index: usize,
req: &mut InstanceAllocationRequest,
tables: &mut PrimaryMap<DefinedTableIndex, Table>,
) -> Result<()> {
let module = req.runtime_info.module();
let num_imports = module.num_imported_tables;
for (_, table) in module.table_plans.iter().skip(num_imports) {
tables.push(Table::new_dynamic(table, unsafe {
req.store
.get()
.expect("if module has table plans, store is not empty")
})?);
}
Ok(())
}
fn deallocate_tables(&self, _index: usize, _tables: &mut PrimaryMap<DefinedTableIndex, Table>) {
// normal destructors do cleanup here
}
#[cfg(feature = "async")]

View File

@@ -7,9 +7,9 @@
//! Using the pooling instance allocator can speed up module instantiation
//! when modules can be constrained based on configurable limits.
use super::{initialize_instance, InstanceAllocationRequest, InstanceAllocator, InstanceHandle};
use super::{InstanceAllocationRequest, InstanceAllocator};
use crate::{instance::Instance, Memory, Mmap, Table};
use crate::{CompiledModuleId, MemoryImageSlot, ModuleRuntimeInfo, Store};
use crate::{CompiledModuleId, MemoryImageSlot};
use anyhow::{anyhow, bail, Context, Result};
use libc::c_void;
use std::convert::TryFrom;
@@ -83,456 +83,6 @@ impl Default for InstanceLimits {
}
}
/// Represents a pool of maximal `Instance` structures.
///
/// Each index in the pool provides enough space for a maximal `Instance`
/// structure depending on the limits used to create the pool.
///
/// The pool maintains a free list for fast instance allocation.
#[derive(Debug)]
struct InstancePool {
mapping: Mmap,
instance_size: usize,
max_instances: usize,
index_allocator: IndexAllocator,
memories: MemoryPool,
tables: TablePool,
linear_memory_keep_resident: usize,
table_keep_resident: usize,
}
impl InstancePool {
fn new(config: &PoolingInstanceAllocatorConfig, tunables: &Tunables) -> Result<Self> {
let page_size = crate::page_size();
let instance_size = round_up_to_pow2(config.limits.size, mem::align_of::<Instance>());
let max_instances = config.limits.count as usize;
let allocation_size = round_up_to_pow2(
instance_size
.checked_mul(max_instances)
.ok_or_else(|| anyhow!("total size of instance data exceeds addressable memory"))?,
page_size,
);
let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
.context("failed to create instance pool mapping")?;
let pool = Self {
mapping,
instance_size,
max_instances,
index_allocator: IndexAllocator::new(config.limits.count, config.max_unused_warm_slots),
memories: MemoryPool::new(&config.limits, tunables)?,
tables: TablePool::new(&config.limits)?,
linear_memory_keep_resident: config.linear_memory_keep_resident,
table_keep_resident: config.table_keep_resident,
};
Ok(pool)
}
unsafe fn instance(&self, index: usize) -> &mut Instance {
assert!(index < self.max_instances);
&mut *(self.mapping.as_mut_ptr().add(index * self.instance_size) as *mut Instance)
}
unsafe fn initialize_instance(
&self,
instance_index: usize,
req: InstanceAllocationRequest,
) -> Result<InstanceHandle> {
let module = req.runtime_info.module();
// Before doing anything else ensure that our instance slot is actually
// big enough to hold the `Instance` and `VMContext` for this instance.
// If this fails then it's a configuration error at the `Engine` level
// from when this pooling allocator was created and that needs updating
// if this is to succeed.
self.validate_instance_size(req.runtime_info.offsets())?;
let mut memories =
PrimaryMap::with_capacity(module.memory_plans.len() - module.num_imported_memories);
let mut tables =
PrimaryMap::with_capacity(module.table_plans.len() - module.num_imported_tables);
// If we fail to allocate the instance's resources, deallocate
// what was successfully allocated and return before initializing the instance
if let Err(e) = self.allocate_instance_resources(
instance_index,
req.runtime_info.as_ref(),
req.store.as_raw(),
&mut memories,
&mut tables,
) {
self.deallocate_memories(instance_index, &mut memories);
self.deallocate_tables(instance_index, &mut tables);
return Err(e);
}
let instance_ptr = self.instance(instance_index) as _;
Instance::new_at(instance_ptr, self.instance_size, req, memories, tables);
Ok(InstanceHandle {
instance: instance_ptr,
})
}
fn allocate(&self, req: InstanceAllocationRequest) -> Result<InstanceHandle> {
let id = self
.index_allocator
.alloc(req.runtime_info.unique_id())
.ok_or_else(|| {
anyhow!(
"maximum concurrent instance limit of {} reached",
self.max_instances
)
})?;
match unsafe { self.initialize_instance(id.index(), req) } {
Ok(handle) => Ok(handle),
Err(e) => {
// If we failed to initialize the instance, there's no need to drop
// it as it was never "allocated", but we still need to free the
// instance's slot.
self.index_allocator.free(id);
Err(e)
}
}
}
fn deallocate(&self, handle: &InstanceHandle) {
let addr = handle.instance as usize;
let base = self.mapping.as_ptr() as usize;
assert!(addr >= base && addr < base + self.mapping.len());
assert!((addr - base) % self.instance_size == 0);
let index = (addr - base) / self.instance_size;
assert!(index < self.max_instances);
let instance = unsafe { &mut *handle.instance };
// Deallocate any resources used by the instance
self.deallocate_memories(index, &mut instance.memories);
self.deallocate_tables(index, &mut instance.tables);
// We've now done all of the pooling-allocator-specific
// teardown, so we can drop the Instance and let destructors
// take care of any other fields (host state, globals, etc.).
unsafe {
std::ptr::drop_in_place(instance as *mut _);
}
// The instance is now uninitialized memory and cannot be
// touched again until we write a fresh Instance in-place with
// std::ptr::write in allocate() above.
self.index_allocator.free(SlotId(index as u32));
}
fn allocate_instance_resources(
&self,
instance_index: usize,
runtime_info: &dyn ModuleRuntimeInfo,
store: Option<*mut dyn Store>,
memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
tables: &mut PrimaryMap<DefinedTableIndex, Table>,
) -> Result<()> {
self.allocate_memories(instance_index, runtime_info, store, memories)?;
self.allocate_tables(instance_index, runtime_info, store, tables)?;
Ok(())
}
fn allocate_memories(
&self,
instance_index: usize,
runtime_info: &dyn ModuleRuntimeInfo,
store: Option<*mut dyn Store>,
memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
) -> Result<()> {
let module = runtime_info.module();
self.validate_memory_plans(module)?;
for (memory_index, plan) in module
.memory_plans
.iter()
.skip(module.num_imported_memories)
{
let defined_index = module
.defined_memory_index(memory_index)
.expect("should be a defined memory since we skipped imported ones");
// Double-check that the runtime requirements of the memory are
// satisfied by the configuration of this pooling allocator. This
// should be returned as an error through `validate_memory_plans`
// but double-check here to be sure.
match plan.style {
MemoryStyle::Static { bound } => {
let bound = bound * u64::from(WASM_PAGE_SIZE);
assert!(bound <= (self.memories.memory_size as u64));
}
MemoryStyle::Dynamic { .. } => {}
}
let memory = unsafe {
std::slice::from_raw_parts_mut(
self.memories.get_base(instance_index, defined_index),
self.memories.max_accessible,
)
};
let mut slot = self
.memories
.take_memory_image_slot(instance_index, defined_index);
let image = runtime_info.memory_image(defined_index)?;
let initial_size = plan.memory.minimum * WASM_PAGE_SIZE as u64;
// If instantiation fails, we can propagate the error
// upward and drop the slot. This will cause the Drop
// handler to attempt to map the range with PROT_NONE
// memory, to reserve the space while releasing any
// stale mappings. The next use of this slot will then
// create a new slot that will try to map over
// this, returning errors as well if the mapping
// errors persist. The unmap-on-drop is best effort;
// if it fails, then we can still soundly continue
// using the rest of the pool and allowing the rest of
// the process to continue, because we never perform a
// mmap that would leave an open space for someone
// else to come in and map something.
slot.instantiate(initial_size as usize, image, &plan.style)?;
memories.push(Memory::new_static(plan, memory, slot, unsafe {
&mut *store.unwrap()
})?);
}
Ok(())
}
fn deallocate_memories(
&self,
instance_index: usize,
memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
) {
// Decommit any linear memories that were used.
let memories = mem::take(memories);
for (def_mem_idx, memory) in memories {
let mut image = memory.unwrap_static_image();
// Reset the image slot. If there is any error clearing the
// image, just drop it here, and let the drop handler for the
// slot unmap in a way that retains the address space
// reservation.
if image
.clear_and_remain_ready(self.linear_memory_keep_resident)
.is_ok()
{
self.memories
.return_memory_image_slot(instance_index, def_mem_idx, image);
}
}
}
fn allocate_tables(
&self,
instance_index: usize,
runtime_info: &dyn ModuleRuntimeInfo,
store: Option<*mut dyn Store>,
tables: &mut PrimaryMap<DefinedTableIndex, Table>,
) -> Result<()> {
let module = runtime_info.module();
self.validate_table_plans(module)?;
let mut bases = self.tables.get(instance_index);
for (_, plan) in module.table_plans.iter().skip(module.num_imported_tables) {
let base = bases.next().unwrap() as _;
commit_table_pages(
base as *mut u8,
self.tables.max_elements as usize * mem::size_of::<*mut u8>(),
)?;
tables.push(Table::new_static(
plan,
unsafe { std::slice::from_raw_parts_mut(base, self.tables.max_elements as usize) },
unsafe { &mut *store.unwrap() },
)?);
}
Ok(())
}
fn deallocate_tables(
&self,
instance_index: usize,
tables: &mut PrimaryMap<DefinedTableIndex, Table>,
) {
// Decommit any tables that were used
for (table, base) in tables.values_mut().zip(self.tables.get(instance_index)) {
let table = mem::take(table);
assert!(table.is_static());
let size = round_up_to_pow2(
table.size() as usize * mem::size_of::<*mut u8>(),
self.tables.page_size,
);
drop(table);
self.reset_table_pages_to_zero(base, size)
.expect("failed to decommit table pages");
}
}
fn reset_table_pages_to_zero(&self, base: *mut u8, size: usize) -> Result<()> {
let size_to_memset = size.min(self.table_keep_resident);
unsafe {
std::ptr::write_bytes(base, 0, size_to_memset);
decommit_table_pages(base.add(size_to_memset), size - size_to_memset)?;
}
Ok(())
}
fn validate_table_plans(&self, module: &Module) -> Result<()> {
let tables = module.table_plans.len() - module.num_imported_tables;
if tables > self.tables.max_tables {
bail!(
"defined tables count of {} exceeds the limit of {}",
tables,
self.tables.max_tables,
);
}
for (i, plan) in module.table_plans.iter().skip(module.num_imported_tables) {
if plan.table.minimum > self.tables.max_elements {
bail!(
"table index {} has a minimum element size of {} which exceeds the limit of {}",
i.as_u32(),
plan.table.minimum,
self.tables.max_elements,
);
}
}
Ok(())
}
fn validate_memory_plans(&self, module: &Module) -> Result<()> {
let memories = module.memory_plans.len() - module.num_imported_memories;
if memories > self.memories.max_memories {
bail!(
"defined memories count of {} exceeds the limit of {}",
memories,
self.memories.max_memories,
);
}
for (i, plan) in module
.memory_plans
.iter()
.skip(module.num_imported_memories)
{
match plan.style {
MemoryStyle::Static { bound } => {
if (self.memories.memory_size as u64) < bound {
bail!(
"memory size allocated per-memory is too small to \
satisfy static bound of {bound:#x} pages"
);
}
}
MemoryStyle::Dynamic { .. } => {}
}
let max = self.memories.max_accessible / (WASM_PAGE_SIZE as usize);
if plan.memory.minimum > (max as u64) {
bail!(
"memory index {} has a minimum page size of {} which exceeds the limit of {}",
i.as_u32(),
plan.memory.minimum,
max,
);
}
}
Ok(())
}
fn validate_instance_size(&self, offsets: &VMOffsets<HostPtr>) -> Result<()> {
let layout = Instance::alloc_layout(offsets);
if layout.size() <= self.instance_size {
return Ok(());
}
// If this `module` exceeds the allocation size allotted to it then an
// error will be reported here. The error of "required N bytes but
// cannot allocate that" is pretty opaque, however, because it's not
// clear what the breakdown of the N bytes are and what to optimize
// next. To help provide a better error message here some fancy-ish
// logic is done here to report the breakdown of the byte request into
// the largest portions and where it's coming from.
let mut message = format!(
"instance allocation for this module \
requires {} bytes which exceeds the configured maximum \
of {} bytes; breakdown of allocation requirement:\n\n",
layout.size(),
self.instance_size,
);
let mut remaining = layout.size();
let mut push = |name: &str, bytes: usize| {
assert!(remaining >= bytes);
remaining -= bytes;
// If the `name` region is more than 5% of the allocation request
// then report it here, otherwise ignore it. We have less than 20
// fields so we're guaranteed that something should be reported, and
// otherwise it's not particularly interesting to learn about 5
// different fields that are all 8 or 0 bytes. Only try to report
// the "major" sources of bytes here.
if bytes > layout.size() / 20 {
message.push_str(&format!(
" * {:.02}% - {} bytes - {}\n",
((bytes as f32) / (layout.size() as f32)) * 100.0,
bytes,
name,
));
}
};
// The `Instance` itself requires some size allocated to it.
push("instance state management", mem::size_of::<Instance>());
// Afterwards the `VMContext`'s regions are why we're requesting bytes,
// so ask it for descriptions on each region's byte size.
for (desc, size) in offsets.region_sizes() {
push(desc, size as usize);
}
// double-check we accounted for all the bytes
assert_eq!(remaining, 0);
bail!("{}", message)
}
/// Purges all pool state associated with `module`, clearing its cached
/// memory images and releasing any slots still affine to it.
fn purge_module(&self, module: CompiledModuleId) {
    // Purging everything related to `module` primarily means clearing out
    // all of its memory images present in the virtual address space. Go
    // through the index allocator for slots affine to `module` and reset
    // them, freeing up the index when we're done.
    //
    // Note that this is only called when the specified `module` won't be
    // allocated further (the module is being dropped) so this shouldn't hit
    // any sort of infinite loop since this should be the final operation
    // working with `module`.
    while let Some(index) = self.index_allocator.alloc_affine_and_clear_affinity(module) {
        self.memories.clear_images(index.index());
        self.index_allocator.free(index);
    }
}
}
/// Represents a pool of WebAssembly linear memories.
///
/// A linear memory is divided into accessible pages and guard pages.
@@ -1018,7 +568,14 @@ impl Default for PoolingInstanceAllocatorConfig {
/// Note: the resource pools are manually dropped so that the fault handler terminates correctly.
#[derive(Debug)]
pub struct PoolingInstanceAllocator {
instances: InstancePool,
instance_size: usize,
max_instances: usize,
index_allocator: IndexAllocator,
memories: MemoryPool,
tables: TablePool,
linear_memory_keep_resident: usize,
table_keep_resident: usize,
#[cfg(all(feature = "async", unix))]
stacks: StackPool,
#[cfg(all(feature = "async", windows))]
@@ -1032,43 +589,304 @@ impl PoolingInstanceAllocator {
bail!("the instance count limit cannot be zero");
}
let instances = InstancePool::new(config, tunables)?;
let max_instances = config.limits.count as usize;
Ok(Self {
instances: instances,
instance_size: round_up_to_pow2(config.limits.size, mem::align_of::<Instance>()),
max_instances,
index_allocator: IndexAllocator::new(config.limits.count, config.max_unused_warm_slots),
memories: MemoryPool::new(&config.limits, tunables)?,
tables: TablePool::new(&config.limits)?,
linear_memory_keep_resident: config.linear_memory_keep_resident,
table_keep_resident: config.table_keep_resident,
#[cfg(all(feature = "async", unix))]
stacks: StackPool::new(config)?,
#[cfg(all(feature = "async", windows))]
stack_size: config.stack_size,
})
}
/// Resets a table's memory so its slot can be reused: the first
/// `table_keep_resident` bytes are zeroed in place (remaining resident)
/// and everything past that point is decommitted back to the OS.
fn reset_table_pages_to_zero(&self, base: *mut u8, size: usize) -> Result<()> {
    // Only memset up to the configured keep-resident budget; the rest is
    // cheaper to decommit than to zero by hand.
    let size_to_memset = size.min(self.table_keep_resident);
    unsafe {
        // SAFETY: assumes `base..base + size` is a valid, writable mapping
        // owned by this pool's table storage — TODO confirm at call sites.
        std::ptr::write_bytes(base, 0, size_to_memset);
        decommit_table_pages(base.add(size_to_memset), size - size_to_memset)?;
    }
    Ok(())
}
/// Validates that `module`'s defined tables fit within this pool's
/// configured limits (`max_tables` per instance and `max_elements` per
/// table), returning a descriptive error otherwise.
fn validate_table_plans(&self, module: &Module) -> Result<()> {
    // Imported tables don't consume pool slots; only defined ones do.
    let tables = module.table_plans.len() - module.num_imported_tables;
    if tables > self.tables.max_tables {
        bail!(
            "defined tables count of {} exceeds the limit of {}",
            tables,
            self.tables.max_tables,
        );
    }
    for (i, plan) in module.table_plans.iter().skip(module.num_imported_tables) {
        // Each table's minimum size must fit in a statically-sized slot.
        if plan.table.minimum > self.tables.max_elements {
            bail!(
                "table index {} has a minimum element size of {} which exceeds the limit of {}",
                i.as_u32(),
                plan.table.minimum,
                self.tables.max_elements,
            );
        }
    }
    Ok(())
}
/// Validates that `module`'s defined memories fit within this pool's
/// configured limits (memory count, static reservation bound, and minimum
/// page count), returning a descriptive error otherwise.
fn validate_memory_plans(&self, module: &Module) -> Result<()> {
    // Imported memories don't consume pool slots; only defined ones do.
    let memories = module.memory_plans.len() - module.num_imported_memories;
    if memories > self.memories.max_memories {
        bail!(
            "defined memories count of {} exceeds the limit of {}",
            memories,
            self.memories.max_memories,
        );
    }
    for (i, plan) in module
        .memory_plans
        .iter()
        .skip(module.num_imported_memories)
    {
        match plan.style {
            MemoryStyle::Static { bound } => {
                // A static memory's reserved bound must fit within the
                // per-memory region carved out by the pool.
                if (self.memories.memory_size as u64) < bound {
                    bail!(
                        "memory size allocated per-memory is too small to \
                         satisfy static bound of {bound:#x} pages"
                    );
                }
            }
            MemoryStyle::Dynamic { .. } => {}
        }
        // The memory's minimum size must also fit in the accessible
        // portion of a pool slot.
        let max = self.memories.max_accessible / (WASM_PAGE_SIZE as usize);
        if plan.memory.minimum > (max as u64) {
            bail!(
                "memory index {} has a minimum page size of {} which exceeds the limit of {}",
                i.as_u32(),
                plan.memory.minimum,
                max,
            );
        }
    }
    Ok(())
}
/// Validates that the combined `Instance` + `VMContext` allocation for a
/// module fits within the per-slot `instance_size` this pool was
/// configured with, producing a detailed byte-breakdown error if not.
fn validate_instance_size(&self, offsets: &VMOffsets<HostPtr>) -> Result<()> {
    let layout = Instance::alloc_layout(offsets);
    // Fast path: the module fits, nothing to report.
    if layout.size() <= self.instance_size {
        return Ok(());
    }

    // If this `module` exceeds the allocation size allotted to it then an
    // error will be reported here. The error of "required N bytes but
    // cannot allocate that" is pretty opaque, however, because it's not
    // clear what the breakdown of the N bytes are and what to optimize
    // next. To help provide a better error message here some fancy-ish
    // logic is done here to report the breakdown of the byte request into
    // the largest portions and where it's coming from.
    let mut message = format!(
        "instance allocation for this module \
         requires {} bytes which exceeds the configured maximum \
         of {} bytes; breakdown of allocation requirement:\n\n",
        layout.size(),
        self.instance_size,
    );
    let mut remaining = layout.size();
    let mut push = |name: &str, bytes: usize| {
        assert!(remaining >= bytes);
        remaining -= bytes;

        // If the `name` region is more than 5% of the allocation request
        // then report it here, otherwise ignore it. We have less than 20
        // fields so we're guaranteed that something should be reported, and
        // otherwise it's not particularly interesting to learn about 5
        // different fields that are all 8 or 0 bytes. Only try to report
        // the "major" sources of bytes here.
        if bytes > layout.size() / 20 {
            message.push_str(&format!(
                " * {:.02}% - {} bytes - {}\n",
                ((bytes as f32) / (layout.size() as f32)) * 100.0,
                bytes,
                name,
            ));
        }
    };

    // The `Instance` itself requires some size allocated to it.
    push("instance state management", mem::size_of::<Instance>());

    // Afterwards the `VMContext`'s regions are why we're requesting bytes,
    // so ask it for descriptions on each region's byte size.
    for (desc, size) in offsets.region_sizes() {
        push(desc, size as usize);
    }

    // double-check we accounted for all the bytes
    assert_eq!(remaining, 0);
    bail!("{}", message)
}
}
unsafe impl InstanceAllocator for PoolingInstanceAllocator {
/// Validates that `module` can be allocated by this pooling allocator:
/// its memories, tables, and total instance size must all fit within the
/// configured pool limits.
//
// NOTE(review): this span contained leftover removed-side diff lines
// (`self.instances.validate_*`) interleaved with the new body; the stale
// lines referencing the deleted `instances` pool are dropped here.
fn validate(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
    self.validate_memory_plans(module)?;
    self.validate_table_plans(module)?;
    self.validate_instance_size(offsets)?;
    Ok(())
}
unsafe fn allocate(&self, req: InstanceAllocationRequest) -> Result<InstanceHandle> {
self.instances.allocate(req)
fn allocate_index(&self, req: &InstanceAllocationRequest) -> Result<usize> {
self.index_allocator
.alloc(req.runtime_info.unique_id())
.map(|id| id.index())
.ok_or_else(|| {
anyhow!(
"maximum concurrent instance limit of {} reached",
self.max_instances
)
})
}
unsafe fn initialize(
fn deallocate_index(&self, index: usize) {
self.index_allocator.free(SlotId(index as u32));
}
/// Allocates all defined linear memories for the instance in slot `index`
/// out of the pool's static memory reservations, pushing each into
/// `memories`.
//
// NOTE(review): this span contained leftover removed-side diff lines from
// the deleted `initialize` method (`handle`/`module`/`is_bulk_memory`
// parameters and its two-line body) interleaved with this method; the
// stale lines are dropped here.
fn allocate_memories(
    &self,
    index: usize,
    req: &mut InstanceAllocationRequest,
    memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
) -> Result<()> {
    let module = req.runtime_info.module();
    self.validate_memory_plans(module)?;

    for (memory_index, plan) in module
        .memory_plans
        .iter()
        .skip(module.num_imported_memories)
    {
        let defined_index = module
            .defined_memory_index(memory_index)
            .expect("should be a defined memory since we skipped imported ones");

        // Double-check that the runtime requirements of the memory are
        // satisfied by the configuration of this pooling allocator. This
        // should be returned as an error through `validate_memory_plans`
        // but double-check here to be sure.
        match plan.style {
            MemoryStyle::Static { bound } => {
                let bound = bound * u64::from(WASM_PAGE_SIZE);
                assert!(bound <= (self.memories.memory_size as u64));
            }
            MemoryStyle::Dynamic { .. } => {}
        }

        // SAFETY-relevant: the base/length pair comes from the pool's own
        // reservation for this (slot, memory) pair, sized at
        // `max_accessible` bytes.
        let memory = unsafe {
            std::slice::from_raw_parts_mut(
                self.memories.get_base(index, defined_index),
                self.memories.max_accessible,
            )
        };

        let mut slot = self.memories.take_memory_image_slot(index, defined_index);
        let image = req.runtime_info.memory_image(defined_index)?;
        let initial_size = plan.memory.minimum * WASM_PAGE_SIZE as u64;

        // If instantiation fails, we can propagate the error
        // upward and drop the slot. This will cause the Drop
        // handler to attempt to map the range with PROT_NONE
        // memory, to reserve the space while releasing any
        // stale mappings. The next use of this slot will then
        // create a new slot that will try to map over
        // this, returning errors as well if the mapping
        // errors persist. The unmap-on-drop is best effort;
        // if it fails, then we can still soundly continue
        // using the rest of the pool and allowing the rest of
        // the process to continue, because we never perform a
        // mmap that would leave an open space for someone
        // else to come in and map something.
        slot.instantiate(initial_size as usize, image, &plan.style)?;

        memories.push(Memory::new_static(plan, memory, slot, unsafe {
            &mut *req.store.get().unwrap()
        })?);
    }
    Ok(())
}
unsafe fn deallocate(&self, handle: &InstanceHandle) {
self.instances.deallocate(handle);
fn deallocate_memories(&self, index: usize, mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>) {
// Decommit any linear memories that were used.
for (def_mem_idx, memory) in mem::take(mems) {
let mut image = memory.unwrap_static_image();
// Reset the image slot. If there is any error clearing the
// image, just drop it here, and let the drop handler for the
// slot unmap in a way that retains the address space
// reservation.
if image
.clear_and_remain_ready(self.linear_memory_keep_resident)
.is_ok()
{
self.memories
.return_memory_image_slot(index, def_mem_idx, image);
}
}
}
/// Allocates all defined tables for the instance in slot `index` out of
/// the pool's static table reservations, pushing each into `tables`.
fn allocate_tables(
    &self,
    index: usize,
    req: &mut InstanceAllocationRequest,
    tables: &mut PrimaryMap<DefinedTableIndex, Table>,
) -> Result<()> {
    let module = req.runtime_info.module();
    // Re-validate here so a module that bypassed `validate` still can't
    // exceed the pool's table limits.
    self.validate_table_plans(module)?;
    // One base pointer per table slot reserved for this instance index.
    let mut bases = self.tables.get(index);
    for (_, plan) in module.table_plans.iter().skip(module.num_imported_tables) {
        let base = bases.next().unwrap() as _;
        // Commit the full per-table reservation (`max_elements` pointers)
        // before handing the region to `Table`.
        commit_table_pages(
            base as *mut u8,
            self.tables.max_elements as usize * mem::size_of::<*mut u8>(),
        )?;
        // SAFETY note (review): relies on `base` pointing at a committed
        // region of at least `max_elements` pointer-sized entries, and on
        // `req.store` being set by the caller — confirm at call sites.
        tables.push(Table::new_static(
            plan,
            unsafe { std::slice::from_raw_parts_mut(base, self.tables.max_elements as usize) },
            unsafe { &mut *req.store.get().unwrap() },
        )?);
    }
    Ok(())
}
/// Returns the tables of the instance in slot `index` back to the pool,
/// zeroing/decommitting the pages each table actually used.
fn deallocate_tables(&self, index: usize, tables: &mut PrimaryMap<DefinedTableIndex, Table>) {
    // Decommit any tables that were used
    for (table, base) in tables.values_mut().zip(self.tables.get(index)) {
        // Take ownership of the table so it can be dropped before its
        // backing pages are reset below.
        let table = mem::take(table);
        // Pool-allocated tables are always static; a dynamic table here
        // would indicate a bookkeeping bug.
        assert!(table.is_static());
        // Only the pages covering the table's current element count need
        // resetting, rounded up to page granularity.
        let size = round_up_to_pow2(
            table.size() as usize * mem::size_of::<*mut u8>(),
            self.tables.page_size,
        );
        drop(table);
        self.reset_table_pages_to_zero(base, size)
            .expect("failed to decommit table pages");
    }
}
#[cfg(all(feature = "async", unix))]
@@ -1098,7 +916,19 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
}
/// Removes all pool state affine to `module`, clearing its cached memory
/// images from the virtual address space.
//
// NOTE(review): this span contained a leftover removed-side diff line
// (`self.instances.purge_module(module);`) ahead of the new body; the
// stale line is dropped here.
fn purge_module(&self, module: CompiledModuleId) {
    // Purging everything related to `module` primarily means clearing out
    // all of its memory images present in the virtual address space. Go
    // through the index allocator for slots affine to `module` and reset
    // them, freeing up the index when we're done.
    //
    // Note that this is only called when the specified `module` won't be
    // allocated further (the module is being dropped) so this shouldn't hit
    // any sort of infinite loop since this should be the final operation
    // working with `module`.
    while let Some(index) = self.index_allocator.alloc_affine_and_clear_affinity(module) {
        self.memories.clear_images(index.index());
        self.index_allocator.free(index);
    }
}
}
@@ -1106,7 +936,8 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
mod test {
use super::*;
use crate::{
CompiledModuleId, Imports, MemoryImage, StorePtr, VMFunctionBody, VMSharedSignatureIndex,
CompiledModuleId, Imports, MemoryImage, ModuleRuntimeInfo, StorePtr, VMFunctionBody,
VMSharedSignatureIndex,
};
use std::sync::Arc;
use wasmtime_environ::{DefinedFuncIndex, DefinedMemoryIndex};
@@ -1163,7 +994,7 @@ mod test {
..Default::default()
};
let instances = InstancePool::new(
let instances = PoolingInstanceAllocator::new(
&config,
&Tunables {
static_memory_bound: 1,
@@ -1214,8 +1045,8 @@ mod test {
_ => panic!("unexpected error"),
};
for handle in handles.drain(..) {
instances.deallocate(&handle);
for mut handle in handles.drain(..) {
instances.deallocate(&mut handle);
}
assert_eq!(
@@ -1426,7 +1257,7 @@ mod test {
},
)
.unwrap();
assert_eq!(pool.instances.memories.memory_size, 2 * 65536);
assert_eq!(pool.memories.memory_size, 2 * 65536);
}
#[cfg(all(unix, target_pointer_width = "64", feature = "async"))]

View File

@@ -51,8 +51,8 @@ pub use crate::export::*;
pub use crate::externref::*;
pub use crate::imports::Imports;
pub use crate::instance::{
allocate_single_memory_instance, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
OnDemandInstanceAllocator, StorePtr,
InstanceAllocationRequest, InstanceAllocator, InstanceHandle, OnDemandInstanceAllocator,
StorePtr,
};
#[cfg(feature = "pooling-allocator")]
pub use crate::instance::{

View File

@@ -1460,7 +1460,7 @@ impl Config {
Ok(())
}
pub(crate) fn build_allocator(&self) -> Result<Box<dyn InstanceAllocator>> {
pub(crate) fn build_allocator(&self) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
#[cfg(feature = "async")]
let stack_size = self.async_stack_size;

View File

@@ -49,7 +49,7 @@ struct EngineInner {
config: Config,
#[cfg(compiler)]
compiler: Box<dyn wasmtime_environ::Compiler>,
allocator: Box<dyn InstanceAllocator>,
allocator: Box<dyn InstanceAllocator + Send + Sync>,
profiler: Box<dyn ProfilingAgent>,
signatures: SignatureRegistry,
epoch: AtomicU64,

View File

@@ -317,8 +317,7 @@ impl Instance {
// items from this instance into other instances should be ok when
// those items are loaded and run we'll have all the metadata to
// look at them.
store.engine().allocator().initialize(
&mut instance_handle,
instance_handle.initialize(
compiled_module.module(),
store.engine().config().features.bulk_memory,
)?;

View File

@@ -885,9 +885,7 @@ impl SharedMemory {
/// Construct a single-memory instance to provide a way to import
/// [`SharedMemory`] into other modules.
pub(crate) fn vmimport(&self, store: &mut StoreOpaque) -> wasmtime_runtime::VMMemoryImport {
let runtime_shared_memory = self.clone().0;
let export_memory =
generate_memory_export(store, &self.ty(), Some(runtime_shared_memory)).unwrap();
let export_memory = generate_memory_export(store, &self.ty(), Some(&self.0)).unwrap();
VMMemoryImport {
from: export_memory.definition,
vmctx: export_memory.vmctx,

View File

@@ -454,7 +454,7 @@ impl<T> Store<T> {
// single "default callee" for the entire `Store`. This is then used as
// part of `Func::call` to guarantee that the `callee: *mut VMContext`
// is never null.
let default_callee = unsafe {
let default_callee = {
let module = Arc::new(wasmtime_environ::Module::default());
let shim = BareModuleInfo::empty(module).into_traitobj();
OnDemandInstanceAllocator::default()
@@ -2020,14 +2020,14 @@ impl Drop for StoreOpaque {
unsafe {
let allocator = self.engine.allocator();
let ondemand = OnDemandInstanceAllocator::default();
for instance in self.instances.iter() {
for instance in self.instances.iter_mut() {
if instance.ondemand {
ondemand.deallocate(&instance.handle);
ondemand.deallocate(&mut instance.handle);
} else {
allocator.deallocate(&instance.handle);
allocator.deallocate(&mut instance.handle);
}
}
ondemand.deallocate(&self.default_caller);
ondemand.deallocate(&mut self.default_caller);
// See documentation for these fields on `StoreOpaque` for why they
// must be dropped in this order.

View File

@@ -68,7 +68,7 @@ pub fn generate_global_export(
pub fn generate_memory_export(
store: &mut StoreOpaque,
m: &MemoryType,
preallocation: Option<SharedMemory>,
preallocation: Option<&SharedMemory>,
) -> Result<wasmtime_runtime::ExportMemory> {
let instance = create_memory(store, m, preallocation)?;
Ok(store

View File

@@ -5,11 +5,14 @@ use crate::MemoryType;
use anyhow::{anyhow, Result};
use std::convert::TryFrom;
use std::sync::Arc;
use wasmtime_environ::{EntityIndex, MemoryPlan, MemoryStyle, Module, WASM_PAGE_SIZE};
use wasmtime_environ::{
DefinedMemoryIndex, DefinedTableIndex, EntityIndex, MemoryPlan, MemoryStyle, Module,
PrimaryMap, WASM_PAGE_SIZE,
};
use wasmtime_runtime::{
allocate_single_memory_instance, DefaultMemoryCreator, Imports, InstanceAllocationRequest,
Memory, MemoryImage, RuntimeLinearMemory, RuntimeMemoryCreator, SharedMemory, StorePtr,
VMMemoryDefinition,
CompiledModuleId, Imports, InstanceAllocationRequest, InstanceAllocator, Memory, MemoryImage,
OnDemandInstanceAllocator, RuntimeLinearMemory, RuntimeMemoryCreator, SharedMemory, StorePtr,
Table, VMMemoryDefinition,
};
/// Create a "frankenstein" instance with a single memory.
@@ -20,7 +23,7 @@ use wasmtime_runtime::{
pub fn create_memory(
store: &mut StoreOpaque,
memory_ty: &MemoryType,
preallocation: Option<SharedMemory>,
preallocation: Option<&SharedMemory>,
) -> Result<InstanceId> {
let mut module = Module::new();
@@ -33,25 +36,6 @@ pub fn create_memory(
);
let memory_id = module.memory_plans.push(plan.clone());
let memory = match &preallocation {
// If we are passing in a pre-allocated shared memory, we can clone its
// `Arc`. We know that a preallocated memory *must* be shared--it could
// be used by several instances.
Some(shared_memory) => shared_memory.clone().as_memory(),
// If we do not have a pre-allocated memory, then we create it here and
// associate it with the "frankenstein" instance, which now owns it.
None => {
let creator = &DefaultMemoryCreator;
let store = unsafe {
store
.traitobj()
.as_mut()
.expect("the store pointer cannot be null here")
};
Memory::new_dynamic(&plan, creator, store, None)?
}
};
// Since we have only associated a single memory with the "frankenstein"
// instance, it will be exported at index 0.
debug_assert_eq!(memory_id.as_u32(), 0);
@@ -74,7 +58,11 @@ pub fn create_memory(
};
unsafe {
let handle = allocate_single_memory_instance(request, memory)?;
let handle = SingleMemoryInstance {
preallocation,
ondemand: OnDemandInstanceAllocator::default(),
}
.allocate(request)?;
let instance_id = store.add_instance(handle.clone(), true);
Ok(instance_id)
}
@@ -143,3 +131,67 @@ impl RuntimeMemoryCreator for MemoryCreatorProxy {
.map_err(|e| anyhow!(e))
}
}
/// Helper allocator for building a "frankenstein" instance that exposes a
/// single memory, optionally backed by a preallocated [`SharedMemory`].
///
/// Everything except memory allocation is delegated to the on-demand
/// allocator.
struct SingleMemoryInstance<'a> {
    // When `Some`, the shared memory to export instead of allocating a
    // fresh one; `None` falls back to on-demand allocation.
    preallocation: Option<&'a SharedMemory>,
    // Delegate used for indices, tables, and non-preallocated memories.
    ondemand: OnDemandInstanceAllocator,
}

unsafe impl InstanceAllocator for SingleMemoryInstance<'_> {
    /// Delegates slot-index allocation to the on-demand allocator.
    fn allocate_index(&self, req: &InstanceAllocationRequest) -> Result<usize> {
        self.ondemand.allocate_index(req)
    }

    /// Delegates slot-index deallocation to the on-demand allocator.
    fn deallocate_index(&self, index: usize) {
        self.ondemand.deallocate_index(index)
    }

    /// Supplies the instance's single memory: either a clone of the
    /// preallocated shared memory or a freshly allocated one.
    fn allocate_memories(
        &self,
        index: usize,
        req: &mut InstanceAllocationRequest,
        mem: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
    ) -> Result<()> {
        // This allocator is only ever used with a synthetic module that
        // declares exactly one memory.
        assert_eq!(req.runtime_info.module().memory_plans.len(), 1);
        match self.preallocation {
            Some(shared_memory) => {
                // A preallocated memory must be shared, so cloning here
                // is a cheap handle clone rather than a copy.
                mem.push(shared_memory.clone().as_memory());
            }
            None => {
                self.ondemand.allocate_memories(index, req, mem)?;
            }
        }
        Ok(())
    }

    /// Delegates memory deallocation to the on-demand allocator.
    fn deallocate_memories(&self, index: usize, mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>) {
        self.ondemand.deallocate_memories(index, mems)
    }

    /// Delegates table allocation to the on-demand allocator (the
    /// synthetic module defines no tables, so this is trivially empty).
    fn allocate_tables(
        &self,
        index: usize,
        req: &mut InstanceAllocationRequest,
        tables: &mut PrimaryMap<DefinedTableIndex, Table>,
    ) -> Result<()> {
        self.ondemand.allocate_tables(index, req, tables)
    }

    /// Delegates table deallocation to the on-demand allocator.
    fn deallocate_tables(&self, index: usize, tables: &mut PrimaryMap<DefinedTableIndex, Table>) {
        self.ondemand.deallocate_tables(index, tables)
    }

    // Fiber stacks are never requested through this single-use allocator.
    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack> {
        unreachable!()
    }

    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, _stack: &wasmtime_fiber::FiberStack) {
        unreachable!()
    }

    // Module purging is likewise never routed through this allocator.
    fn purge_module(&self, _: CompiledModuleId) {
        unreachable!()
    }
}

View File

@@ -646,11 +646,11 @@ fn instance_too_large() -> Result<()> {
let engine = Engine::new(&config)?;
let expected = "\
instance allocation for this module requires 224 bytes which exceeds the \
instance allocation for this module requires 240 bytes which exceeds the \
configured maximum of 16 bytes; breakdown of allocation requirement:
* 64.29% - 144 bytes - instance state management
* 7.14% - 16 bytes - jit store state
* 66.67% - 160 bytes - instance state management
* 6.67% - 16 bytes - jit store state
";
match Module::new(&engine, "(module)") {
Ok(_) => panic!("should have failed to compile"),
@@ -664,11 +664,11 @@ configured maximum of 16 bytes; breakdown of allocation requirement:
lots_of_globals.push_str(")");
let expected = "\
instance allocation for this module requires 1824 bytes which exceeds the \
instance allocation for this module requires 1840 bytes which exceeds the \
configured maximum of 16 bytes; breakdown of allocation requirement:
* 7.89% - 144 bytes - instance state management
* 87.72% - 1600 bytes - defined globals
* 8.70% - 160 bytes - instance state management
* 86.96% - 1600 bytes - defined globals
";
match Module::new(&engine, &lots_of_globals) {
Ok(_) => panic!("should have failed to compile"),