Always allocate Instance memory with malloc (#5656)
This commit removes the pooling of `Instance` allocations from the pooling instance allocator. Allocation of `Instance` (and `VMContext`) memory now always happens through the system `malloc` and `free` rather than optionally being part of the pooling instance allocator. Along the way the `InstanceAllocator` trait is refactored so that the pooling and on-demand allocators can share more structure under this new property of the implementation.

The main rationale for this commit is to reduce the RSS of long-lived programs which allocate instances with the pooling instance allocator and aren't using the "next available" allocation strategy. In that situation the memory for an instance slot is never decommitted until the end of the program, meaning that eventually all instance slots become occupied and resident, and Wasmtime slowly consumes more and more memory over time as each slot gets an instance allocated. By switching to the system allocator this should reduce the RSS from O(used slots) to O(active slots), which is more in line with expectations.
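The RSS argument is easier to see with a small standalone model. The sketch below is illustrative only — `SlotPool` and `MallocInstance` are invented names, not Wasmtime types: a pool that hands out fixed slots keeps every slot it has ever touched committed until the pool itself is dropped, while malloc-backed instances return their memory as soon as they are dropped, so residency tracks live instances rather than historically used slots.

```rust
// Hypothetical, simplified model of the two schemes; not Wasmtime code.

/// Slot-based scheme: memory for every slot that has ever been used stays
/// committed until the whole pool is dropped (O(used slots) residency).
struct SlotPool {
    slots: Vec<Option<Vec<u8>>>, // one fixed slot per potential instance
}

impl SlotPool {
    fn new(capacity: usize) -> Self {
        SlotPool { slots: vec![None; capacity] }
    }

    /// Claims a slot, committing its backing memory on first use.
    fn allocate(&mut self, index: usize, size: usize) -> &mut [u8] {
        let slot = self.slots[index].get_or_insert_with(|| vec![0; size]);
        &mut slot[..]
    }

    /// Marks the slot free but, like a pool that never decommits slot
    /// memory, does not release the allocation.
    fn free(&mut self, _index: usize) {}
}

/// malloc-backed scheme: each instance owns its own heap allocation, so
/// dropping the instance returns the memory immediately (O(active instances)).
struct MallocInstance {
    _memory: Box<[u8]>,
}

impl MallocInstance {
    fn new(size: usize) -> Self {
        MallocInstance { _memory: vec![0; size].into_boxed_slice() }
    }
}

fn main() {
    // With the pool, touching many different slots over time leaves them all
    // resident, even though at most one instance is ever live at once.
    let mut pool = SlotPool::new(1000);
    for i in 0..1000 {
        pool.allocate(i, 4096);
        pool.free(i);
    }

    // With malloc, the same workload never holds more than one allocation.
    for _ in 0..1000 {
        let _instance = MallocInstance::new(4096);
        // dropped (freed) at the end of each iteration
    }
}
```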
@@ -1460,7 +1460,7 @@ impl Config {
         Ok(())
     }
 
-    pub(crate) fn build_allocator(&self) -> Result<Box<dyn InstanceAllocator>> {
+    pub(crate) fn build_allocator(&self) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
         #[cfg(feature = "async")]
         let stack_size = self.async_stack_size;
 
@@ -49,7 +49,7 @@ struct EngineInner {
     config: Config,
     #[cfg(compiler)]
     compiler: Box<dyn wasmtime_environ::Compiler>,
-    allocator: Box<dyn InstanceAllocator>,
+    allocator: Box<dyn InstanceAllocator + Send + Sync>,
     profiler: Box<dyn ProfilingAgent>,
     signatures: SignatureRegistry,
     epoch: AtomicU64,
@@ -317,8 +317,7 @@ impl Instance {
         // items from this instance into other instances should be ok when
         // those items are loaded and run we'll have all the metadata to
         // look at them.
-        store.engine().allocator().initialize(
-            &mut instance_handle,
+        instance_handle.initialize(
             compiled_module.module(),
             store.engine().config().features.bulk_memory,
         )?;
@@ -885,9 +885,7 @@ impl SharedMemory {
     /// Construct a single-memory instance to provide a way to import
     /// [`SharedMemory`] into other modules.
     pub(crate) fn vmimport(&self, store: &mut StoreOpaque) -> wasmtime_runtime::VMMemoryImport {
-        let runtime_shared_memory = self.clone().0;
-        let export_memory =
-            generate_memory_export(store, &self.ty(), Some(runtime_shared_memory)).unwrap();
+        let export_memory = generate_memory_export(store, &self.ty(), Some(&self.0)).unwrap();
         VMMemoryImport {
             from: export_memory.definition,
             vmctx: export_memory.vmctx,
@@ -454,7 +454,7 @@ impl<T> Store<T> {
         // single "default callee" for the entire `Store`. This is then used as
         // part of `Func::call` to guarantee that the `callee: *mut VMContext`
         // is never null.
-        let default_callee = unsafe {
+        let default_callee = {
             let module = Arc::new(wasmtime_environ::Module::default());
             let shim = BareModuleInfo::empty(module).into_traitobj();
             OnDemandInstanceAllocator::default()
@@ -2020,14 +2020,14 @@ impl Drop for StoreOpaque {
         unsafe {
             let allocator = self.engine.allocator();
             let ondemand = OnDemandInstanceAllocator::default();
-            for instance in self.instances.iter() {
+            for instance in self.instances.iter_mut() {
                 if instance.ondemand {
-                    ondemand.deallocate(&instance.handle);
+                    ondemand.deallocate(&mut instance.handle);
                 } else {
-                    allocator.deallocate(&instance.handle);
+                    allocator.deallocate(&mut instance.handle);
                 }
             }
-            ondemand.deallocate(&self.default_caller);
+            ondemand.deallocate(&mut self.default_caller);
 
             // See documentation for these fields on `StoreOpaque` for why they
             // must be dropped in this order.
@@ -68,7 +68,7 @@ pub fn generate_global_export(
 pub fn generate_memory_export(
     store: &mut StoreOpaque,
     m: &MemoryType,
-    preallocation: Option<SharedMemory>,
+    preallocation: Option<&SharedMemory>,
 ) -> Result<wasmtime_runtime::ExportMemory> {
     let instance = create_memory(store, m, preallocation)?;
     Ok(store
@@ -5,11 +5,14 @@ use crate::MemoryType;
 use anyhow::{anyhow, Result};
 use std::convert::TryFrom;
 use std::sync::Arc;
-use wasmtime_environ::{EntityIndex, MemoryPlan, MemoryStyle, Module, WASM_PAGE_SIZE};
+use wasmtime_environ::{
+    DefinedMemoryIndex, DefinedTableIndex, EntityIndex, MemoryPlan, MemoryStyle, Module,
+    PrimaryMap, WASM_PAGE_SIZE,
+};
 use wasmtime_runtime::{
-    allocate_single_memory_instance, DefaultMemoryCreator, Imports, InstanceAllocationRequest,
-    Memory, MemoryImage, RuntimeLinearMemory, RuntimeMemoryCreator, SharedMemory, StorePtr,
-    VMMemoryDefinition,
+    CompiledModuleId, Imports, InstanceAllocationRequest, InstanceAllocator, Memory, MemoryImage,
+    OnDemandInstanceAllocator, RuntimeLinearMemory, RuntimeMemoryCreator, SharedMemory, StorePtr,
+    Table, VMMemoryDefinition,
 };
 
 /// Create a "frankenstein" instance with a single memory.
@@ -20,7 +23,7 @@ use wasmtime_runtime::{
 pub fn create_memory(
     store: &mut StoreOpaque,
     memory_ty: &MemoryType,
-    preallocation: Option<SharedMemory>,
+    preallocation: Option<&SharedMemory>,
 ) -> Result<InstanceId> {
     let mut module = Module::new();
 
@@ -33,25 +36,6 @@ pub fn create_memory(
     );
     let memory_id = module.memory_plans.push(plan.clone());
 
-    let memory = match &preallocation {
-        // If we are passing in a pre-allocated shared memory, we can clone its
-        // `Arc`. We know that a preallocated memory *must* be shared--it could
-        // be used by several instances.
-        Some(shared_memory) => shared_memory.clone().as_memory(),
-        // If we do not have a pre-allocated memory, then we create it here and
-        // associate it with the "frankenstein" instance, which now owns it.
-        None => {
-            let creator = &DefaultMemoryCreator;
-            let store = unsafe {
-                store
-                    .traitobj()
-                    .as_mut()
-                    .expect("the store pointer cannot be null here")
-            };
-            Memory::new_dynamic(&plan, creator, store, None)?
-        }
-    };
-
     // Since we have only associated a single memory with the "frankenstein"
     // instance, it will be exported at index 0.
     debug_assert_eq!(memory_id.as_u32(), 0);
@@ -74,7 +58,11 @@ pub fn create_memory(
     };
 
     unsafe {
-        let handle = allocate_single_memory_instance(request, memory)?;
+        let handle = SingleMemoryInstance {
+            preallocation,
+            ondemand: OnDemandInstanceAllocator::default(),
+        }
+        .allocate(request)?;
         let instance_id = store.add_instance(handle.clone(), true);
         Ok(instance_id)
     }
@@ -143,3 +131,67 @@ impl RuntimeMemoryCreator for MemoryCreatorProxy {
             .map_err(|e| anyhow!(e))
     }
 }
+
+struct SingleMemoryInstance<'a> {
+    preallocation: Option<&'a SharedMemory>,
+    ondemand: OnDemandInstanceAllocator,
+}
+
+unsafe impl InstanceAllocator for SingleMemoryInstance<'_> {
+    fn allocate_index(&self, req: &InstanceAllocationRequest) -> Result<usize> {
+        self.ondemand.allocate_index(req)
+    }
+
+    fn deallocate_index(&self, index: usize) {
+        self.ondemand.deallocate_index(index)
+    }
+
+    fn allocate_memories(
+        &self,
+        index: usize,
+        req: &mut InstanceAllocationRequest,
+        mem: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
+    ) -> Result<()> {
+        assert_eq!(req.runtime_info.module().memory_plans.len(), 1);
+        match self.preallocation {
+            Some(shared_memory) => {
+                mem.push(shared_memory.clone().as_memory());
+            }
+            None => {
+                self.ondemand.allocate_memories(index, req, mem)?;
+            }
+        }
+        Ok(())
+    }
+
+    fn deallocate_memories(&self, index: usize, mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>) {
+        self.ondemand.deallocate_memories(index, mems)
+    }
+
+    fn allocate_tables(
+        &self,
+        index: usize,
+        req: &mut InstanceAllocationRequest,
+        tables: &mut PrimaryMap<DefinedTableIndex, Table>,
+    ) -> Result<()> {
+        self.ondemand.allocate_tables(index, req, tables)
+    }
+
+    fn deallocate_tables(&self, index: usize, tables: &mut PrimaryMap<DefinedTableIndex, Table>) {
+        self.ondemand.deallocate_tables(index, tables)
+    }
+
+    #[cfg(feature = "async")]
+    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack> {
+        unreachable!()
+    }
+
+    #[cfg(feature = "async")]
+    unsafe fn deallocate_fiber_stack(&self, _stack: &wasmtime_fiber::FiberStack) {
+        unreachable!()
+    }
+
+    fn purge_module(&self, _: CompiledModuleId) {
+        unreachable!()
+    }
+}
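A note on the hunk above: in place of the removed `allocate_single_memory_instance` entry point, the single-memory "frankenstein" instance is now built by implementing `InstanceAllocator` directly and delegating every step to an inner `OnDemandInstanceAllocator`, overriding only memory allocation to inject the preallocated shared memory. Below is a minimal, self-contained sketch of that wrapper pattern; the `Allocator` trait and types are invented for illustration and are not Wasmtime's actual trait.

```rust
// Illustrative only: a wrapper that forwards to an inner allocator and
// overrides a single step. Names are hypothetical, not Wasmtime APIs.
trait Allocator {
    fn allocate_memory(&self, size: usize) -> Vec<u8>;
    fn allocate_table(&self, len: usize) -> Vec<usize>;
}

struct OnDemand;

impl Allocator for OnDemand {
    fn allocate_memory(&self, size: usize) -> Vec<u8> {
        vec![0; size]
    }
    fn allocate_table(&self, len: usize) -> Vec<usize> {
        vec![0; len]
    }
}

/// Overrides memory allocation to reuse an existing buffer, delegating
/// everything else to the inner on-demand allocator.
struct SingleMemory<'a> {
    preallocated: Option<&'a [u8]>,
    inner: OnDemand,
}

impl Allocator for SingleMemory<'_> {
    fn allocate_memory(&self, size: usize) -> Vec<u8> {
        match self.preallocated {
            Some(buf) => buf.to_vec(), // reuse the shared, preexisting memory
            None => self.inner.allocate_memory(size),
        }
    }
    fn allocate_table(&self, len: usize) -> Vec<usize> {
        self.inner.allocate_table(len) // pure delegation
    }
}

fn main() {
    let shared = vec![1u8, 2, 3];
    let alloc = SingleMemory { preallocated: Some(&shared), inner: OnDemand };
    assert_eq!(alloc.allocate_memory(0), shared);
    assert_eq!(alloc.allocate_table(2).len(), 2);
}
```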