Implement the pooling instance allocator.
This commit implements the pooling instance allocator. The allocation strategy can be set with `Config::with_allocation_strategy`. The pooling strategy uses the pooling instance allocator to preallocate a contiguous region of memory for instantiating modules that adhere to configured limits. The intent of the pooling instance allocator is to reserve, ahead of time, as much of the host address space as instantiating modules will need, and to reuse committed memory pages wherever possible.
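As a quick orientation (not part of the change itself), here is a minimal sketch of how the pooling strategy can be enabled through `Config`, based on the API introduced in the diff below. It assumes the limit types and `PoolingAllocationStrategy` provide `Default` implementations and are re-exported from the `wasmtime` crate root, which this diff only partially shows:

use anyhow::Result;
use wasmtime::{
    Config, InstanceAllocationStrategy, InstanceLimits, ModuleLimits, PoolingAllocationStrategy,
};

fn configure_pooling() -> Result<Config> {
    let mut config = Config::new();
    // Stack sizes must be configured before the allocation strategy is set
    // (see the `max_wasm_stack` / `async_stack_size` changes below).
    config.max_wasm_stack(1 << 20)?;
    // `with_allocation_strategy` returns a `Result` because building the pool
    // reserves host address space up front and may fail.
    config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
        // Assumed `Default` impls; real code would tune these limits.
        strategy: PoolingAllocationStrategy::default(),
        module_limits: ModuleLimits::default(),
        instance_limits: InstanceLimits::default(),
    })?;
    // An `Engine` built from this `Config` instantiates modules out of the
    // preallocated pool instead of allocating memory on demand.
    Ok(config)
}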
@@ -50,6 +50,7 @@ fn align(offset: u32, width: u32) -> u32 {

 /// This class computes offsets to fields within `VMContext` and other
 /// related structs that JIT code accesses directly.
+#[derive(Debug, Clone, Copy)]
 pub struct VMOffsets {
     /// The size in bytes of a pointer on the target.
     pub pointer_size: u8,
@@ -24,10 +24,14 @@ cfg-if = "1.0"
 backtrace = "0.3.55"
 lazy_static = "1.3.0"
 psm = "0.1.11"
+rand = "0.7.3"

 [target.'cfg(target_os = "windows")'.dependencies]
 winapi = { version = "0.3.7", features = ["winbase", "memoryapi", "errhandlingapi"] }

+[target.'cfg(target_os = "linux")'.dependencies]
+userfaultfd = { version = "0.3.0", optional = true }
+
 [build-dependencies]
 cc = "1.0"
@@ -26,6 +26,12 @@ use wasmtime_environ::{
     ir, Module, ModuleTranslation, ModuleType, OwnedDataInitializer, TableElements, VMOffsets,
 };

+mod pooling;
+
+pub use self::pooling::{
+    InstanceLimits, ModuleLimits, PoolingAllocationStrategy, PoolingInstanceAllocator,
+};
+
 /// Represents a request for a new runtime instance.
 pub struct InstanceAllocationRequest<'a> {
     /// The module being instantiated.
@@ -72,11 +78,18 @@ pub enum InstantiationError {
     /// A trap ocurred during instantiation, after linking.
     #[error("Trap occurred during instantiation")]
     Trap(Trap),
+
+    /// A limit on how many instances are supported has been reached.
+    #[error("Limit of {0} concurrent instances has been reached")]
+    Limit(u32),
 }

 /// An error while creating a fiber stack.
 #[derive(Error, Debug)]
 pub enum FiberStackError {
     /// Insufficient resources available for the request.
     #[error("Insufficient resources: {0}")]
     Resource(String),
+    /// An error for when the allocator doesn't support custom fiber stacks.
+    #[error("Custom fiber stacks are not supported by the allocator")]
+    NotSupported,
@@ -218,7 +231,7 @@ unsafe fn initialize_vmcontext(
         globals.len(),
     );

-    // Initialize the defined functions
+    // Initialize the functions
     for (index, sig) in instance.module.functions.iter() {
         let type_index = lookup_shared_signature(*sig);
crates/runtime/src/instance/allocator/pooling.rs (new file, 1666 lines; diff suppressed because it is too large)
crates/runtime/src/instance/allocator/pooling/linux.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
+use crate::Mmap;
+
+pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
+    region::protect(addr, len, region::Protection::READ_WRITE).is_ok()
+}
+
+pub unsafe fn decommit(addr: *mut u8, len: usize) {
+    region::protect(addr, len, region::Protection::NONE).unwrap();
+
+    // On Linux, this is enough to cause the kernel to initialize the pages to 0 on next access
+    assert_eq!(
+        libc::madvise(addr as _, len, libc::MADV_DONTNEED),
+        0,
+        "madvise failed to mark pages as missing: {}",
+        std::io::Error::last_os_error()
+    );
+}
+
+pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap, String> {
+    Mmap::accessible_reserved(accessible_size, mapping_size)
+        .map_err(|e| format!("failed to allocate pool memory: {}", e))
+}
crates/runtime/src/instance/allocator/pooling/unix.rs (new file, 26 lines)
@@ -0,0 +1,26 @@
+use crate::Mmap;
+
+pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
+    region::protect(addr, len, region::Protection::READ_WRITE).is_ok()
+}
+
+pub unsafe fn decommit(addr: *mut u8, len: usize) {
+    assert_eq!(
+        libc::mmap(
+            addr as _,
+            len,
+            libc::PROT_NONE,
+            libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_FIXED,
+            -1,
+            0,
+        ) as *mut u8,
+        addr,
+        "mmap failed to remap pages: {}",
+        std::io::Error::last_os_error()
+    );
+}
+
+pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap, String> {
+    Mmap::accessible_reserved(accessible_size, mapping_size)
+        .map_err(|e| format!("failed to allocate pool memory: {}", e))
+}
crates/runtime/src/instance/allocator/pooling/windows.rs (new file, 21 lines)
@@ -0,0 +1,21 @@
+use crate::Mmap;
+use winapi::um::memoryapi::{VirtualAlloc, VirtualFree};
+use winapi::um::winnt::{MEM_COMMIT, MEM_DECOMMIT, PAGE_READWRITE};
+
+pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
+    // This doesn't use the `region` crate because the memory needs to be committed
+    !VirtualAlloc(addr as _, len, MEM_COMMIT, PAGE_READWRITE).is_null()
+}
+
+pub unsafe fn decommit(addr: *mut u8, len: usize) {
+    assert!(
+        VirtualFree(addr as _, len, MEM_DECOMMIT) != 0,
+        "failed to decommit memory pages: {}",
+        std::io::Error::last_os_error()
+    );
+}
+
+pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap, String> {
+    Mmap::accessible_reserved(accessible_size, mapping_size)
+        .map_err(|e| format!("failed to allocate pool memory: {}", e))
+}
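All three platform backends above implement the same lifecycle: reserve a large mapping up front (`create_memory_map`), commit pages when an instance needs them (`make_accessible`), and return them to an uncommitted state when the instance is freed (`decommit`) so the reservation can be reused. What follows is a standalone, Unix-only sketch of that pattern using raw `libc` calls; it is illustrative only, needs the `libc` crate, hard-codes a 4 KiB page size, and bypasses the crate's `Mmap` type that the real code goes through:

fn main() {
    unsafe {
        let page = 4096usize; // assumed page size, for brevity
        let reserve_len = 16 * page;

        // Reserve address space without committing it, as `create_memory_map`
        // does for the mostly-inaccessible part of a pool mapping.
        let base = libc::mmap(
            std::ptr::null_mut(),
            reserve_len,
            libc::PROT_NONE,
            libc::MAP_PRIVATE | libc::MAP_ANON,
            -1,
            0,
        );
        assert_ne!(base, libc::MAP_FAILED);

        // Commit one page for use, which is what `make_accessible` does.
        assert_eq!(
            libc::mprotect(base, page, libc::PROT_READ | libc::PROT_WRITE),
            0
        );
        *(base as *mut u8) = 42;

        // Decommit it again: remap the range as PROT_NONE at the same address,
        // dropping the backing pages so the next user sees zeroed memory. This
        // mirrors `decommit` in unix.rs above.
        let remapped = libc::mmap(
            base,
            page,
            libc::PROT_NONE,
            libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_FIXED,
            -1,
            0,
        );
        assert_eq!(remapped, base);

        assert_eq!(libc::munmap(base, reserve_len), 0);
    }
}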
@@ -38,8 +38,9 @@ pub use crate::export::*;
 pub use crate::externref::*;
 pub use crate::imports::Imports;
 pub use crate::instance::{
-    FiberStackError, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
-    InstantiationError, LinkError, OnDemandInstanceAllocator, RuntimeInstance,
+    FiberStackError, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstanceLimits,
+    InstantiationError, LinkError, ModuleLimits, OnDemandInstanceAllocator,
+    PoolingAllocationStrategy, PoolingInstanceAllocator, RuntimeInstance,
 };
 pub use crate::jit_int::GdbJitImageRegistration;
 pub use crate::memory::{Memory, RuntimeLinearMemory, RuntimeMemoryCreator};
@@ -164,7 +164,7 @@ impl RuntimeLinearMemory for MmapMemory {

     /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
     fn vmmemory(&self) -> VMMemoryDefinition {
-        let mut mmap = self.mmap.borrow_mut();
+        let mmap = self.mmap.borrow();
         VMMemoryDefinition {
             base: mmap.alloc.as_mut_ptr(),
             current_length: mmap.size as usize * WASM_PAGE_SIZE as usize,
@@ -177,7 +177,7 @@ enum MemoryStorage {
         base: *mut u8,
         size: Cell<u32>,
         maximum: u32,
-        make_accessible: Option<fn(*mut u8, usize) -> bool>,
+        make_accessible: unsafe fn(*mut u8, usize) -> bool,
     },
     Dynamic(Box<dyn RuntimeLinearMemory>),
 }
@@ -203,13 +203,13 @@ impl Memory {
         plan: &MemoryPlan,
         base: *mut u8,
         maximum: u32,
-        make_accessible: Option<fn(*mut u8, usize) -> bool>,
+        make_accessible: unsafe fn(*mut u8, usize) -> bool,
     ) -> Result<Self, String> {
         if plan.memory.minimum > 0 {
-            if let Some(make_accessible) = &make_accessible {
-                if !make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize) {
-                    return Err("memory cannot be made accessible".into());
-                }
-            }
+            if unsafe {
+                !make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize)
+            } {
+                return Err("memory cannot be made accessible".into());
+            }
         }
@@ -258,10 +258,8 @@ impl Memory {
                 let start = usize::try_from(old_size).unwrap() * WASM_PAGE_SIZE as usize;
                 let len = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;

-                if let Some(make_accessible) = make_accessible {
-                    if !make_accessible(unsafe { base.add(start) }, len) {
-                        return None;
-                    }
-                }
+                if unsafe { !make_accessible(base.add(start), len) } {
+                    return None;
+                }

                 size.set(new_size);
@@ -234,7 +234,7 @@ impl Mmap {
     }

     /// Return the allocated memory as a mutable pointer to u8.
-    pub fn as_mut_ptr(&mut self) -> *mut u8 {
+    pub fn as_mut_ptr(&self) -> *mut u8 {
         self.ptr as *mut u8
     }
@@ -247,6 +247,11 @@ impl Mmap {
     pub fn is_empty(&self) -> bool {
         self.len() == 0
     }
+
+    #[allow(dead_code)]
+    pub(crate) unsafe fn from_raw(ptr: usize, len: usize) -> Self {
+        Self { ptr, len }
+    }
 }

 impl Drop for Mmap {
@@ -66,6 +66,20 @@ enum TableElements {
     ExternRefs(Vec<Option<VMExternRef>>),
 }

+// Ideally this should be static assertion that table elements are pointer-sized
+#[inline(always)]
+pub(crate) fn max_table_element_size() -> usize {
+    debug_assert_eq!(
+        std::mem::size_of::<*mut VMCallerCheckedAnyfunc>(),
+        std::mem::size_of::<*const ()>()
+    );
+    debug_assert_eq!(
+        std::mem::size_of::<Option<VMExternRef>>(),
+        std::mem::size_of::<*const ()>()
+    );
+    std::mem::size_of::<*const ()>()
+}
+
 #[derive(Debug)]
 enum TableStorage {
     Static {
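The "ideally this should be static assertion" note in the hunk above can be expressed as a compile-time check on current Rust with `const` assertions. A self-contained sketch follows; `AnyfuncPtr` and `ExternRefSlot` are stand-ins for the crate-internal `*mut VMCallerCheckedAnyfunc` and `Option<VMExternRef>` types:

use std::mem::size_of;
use std::ptr::NonNull;

// Stand-ins for `*mut VMCallerCheckedAnyfunc` and `Option<VMExternRef>`.
type AnyfuncPtr = *mut u8;
type ExternRefSlot = Option<NonNull<u8>>;

// These fail the build (instead of panicking only in debug builds) if either
// table element representation is not pointer-sized.
const _: () = assert!(size_of::<AnyfuncPtr>() == size_of::<*const ()>());
const _: () = assert!(size_of::<ExternRefSlot>() == size_of::<*const ()>());

fn main() {
    println!("max table element size: {}", size_of::<*const ()>());
}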
@@ -14,7 +14,10 @@ use wasmtime_environ::settings::{self, Configurable, SetError};
 use wasmtime_environ::{isa, isa::TargetIsa, Tunables};
 use wasmtime_jit::{native, CompilationStrategy, Compiler};
 use wasmtime_profiling::{JitDumpAgent, NullProfilerAgent, ProfilingAgent, VTuneAgent};
-use wasmtime_runtime::{InstanceAllocator, OnDemandInstanceAllocator};
+use wasmtime_runtime::{InstanceAllocator, OnDemandInstanceAllocator, PoolingInstanceAllocator};
+
+// Re-export the limit structures for the pooling allocator
+pub use wasmtime_runtime::{InstanceLimits, ModuleLimits, PoolingAllocationStrategy};

 /// Represents the module instance allocation strategy to use.
 #[derive(Clone)]
@@ -26,6 +29,19 @@ pub enum InstanceAllocationStrategy {
     ///
     /// This is the default allocation strategy for Wasmtime.
     OnDemand,
+    /// The pooling instance allocation strategy.
+    ///
+    /// A pool of resources is created in advance and module instantiation reuses resources
+    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
+    /// is dropped.
+    Pooling {
+        /// The allocation strategy to use.
+        strategy: PoolingAllocationStrategy,
+        /// The module limits to use.
+        module_limits: ModuleLimits,
+        /// The instance limits to use.
+        instance_limits: InstanceLimits,
+    },
 }

 impl Default for InstanceAllocationStrategy {
@@ -205,6 +221,9 @@ impl Config {
     /// on stack overflow, a host function that overflows the stack will
     /// abort the process.
     ///
+    /// `max_wasm_stack` must be set prior to setting an instance allocation
+    /// strategy.
+    ///
     /// By default this option is 1 MiB.
     pub fn max_wasm_stack(&mut self, size: usize) -> Result<&mut Self> {
         #[cfg(feature = "async")]
@@ -216,6 +235,12 @@ impl Config {
             bail!("wasm stack size cannot be zero");
         }

+        if self.instance_allocator.is_some() {
+            bail!(
+                "wasm stack size cannot be modified after setting an instance allocation strategy"
+            );
+        }
+
         self.max_wasm_stack = size;
         Ok(self)
     }
@@ -230,12 +255,20 @@ impl Config {
     /// close to one another; doing so may cause host functions to overflow the
     /// stack and abort the process.
     ///
+    /// `async_stack_size` must be set prior to setting an instance allocation
+    /// strategy.
+    ///
     /// By default this option is 2 MiB.
     #[cfg(feature = "async")]
     pub fn async_stack_size(&mut self, size: usize) -> Result<&mut Self> {
         if size < self.max_wasm_stack {
             bail!("async stack size cannot be less than the maximum wasm stack size");
         }
+        if self.instance_allocator.is_some() {
+            bail!(
+                "async stack size cannot be modified after setting an instance allocation strategy"
+            );
+        }
         self.async_stack_size = size;
         Ok(self)
     }
@@ -577,14 +610,35 @@
     }

     /// Sets the instance allocation strategy to use.
-    pub fn with_instance_allocation_strategy(
+    pub fn with_allocation_strategy(
         &mut self,
         strategy: InstanceAllocationStrategy,
-    ) -> &mut Self {
+    ) -> Result<&mut Self> {
         self.instance_allocator = match strategy {
             InstanceAllocationStrategy::OnDemand => None,
+            InstanceAllocationStrategy::Pooling {
+                strategy,
+                module_limits,
+                instance_limits,
+            } => {
+                #[cfg(feature = "async")]
+                let stack_size = self.async_stack_size;
+
+                #[cfg(not(feature = "async"))]
+                let stack_size = 0;
+
+                Some(Arc::new(
+                    PoolingInstanceAllocator::new(
+                        strategy,
+                        module_limits,
+                        instance_limits,
+                        stack_size,
+                    )
+                    .map_err(|e| anyhow::anyhow!(e))?,
+                ))
+            }
         };
-        self
+        Ok(self)
     }

     /// Configures the maximum size, in bytes, where a linear memory is