Implement the pooling instance allocator.

This commit implements the pooling instance allocator.

The allocation strategy can be set with `Config::with_allocation_strategy`.

The pooling strategy uses the pooling instance allocator to preallocate a
contiguous region of memory for instantiating modules that adhere to various
limits.

The intention of the pooling instance allocator is to reserve, ahead of time, as much
of the host address space as is needed for instantiating modules, and to reuse
committed memory pages wherever possible.
Author: Peter Huene
Date:   2020-12-08 16:00:48 -08:00
Parent: 16ca5e16d9
Commit: e71ccbf9bc

16 changed files with 2374 additions and 20 deletions
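As an illustration of the `Config::with_allocation_strategy` entry point mentioned in the message above, here is a hypothetical usage sketch. The `InstanceAllocationStrategy::Pooling` variant shape and the `Default`-constructed limit values are assumptions about the public API, not taken from this diff; only the strategy and limit types themselves appear in the exports below.

```rust
// Hypothetical usage sketch only: the enum variant and Default-constructed
// limits are assumptions about the API shape, not verbatim from this commit.
use wasmtime::{Config, InstanceAllocationStrategy};

fn pooling_config() -> Config {
    let mut config = Config::new();
    // Opt into the pooling allocator; the module/instance limits bound what the
    // preallocated pool must be able to hold, so it can be reserved up front.
    config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
        strategy: Default::default(),
        module_limits: Default::default(),
        instance_limits: Default::default(),
    });
    config
}
```

The limits are what let the allocator compute a worst-case size per instance slot and reserve the whole pool as one contiguous mapping, as described in the commit message.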


@@ -24,10 +24,14 @@ cfg-if = "1.0"
backtrace = "0.3.55"
lazy_static = "1.3.0"
psm = "0.1.11"
rand = "0.7.3"
[target.'cfg(target_os = "windows")'.dependencies]
winapi = { version = "0.3.7", features = ["winbase", "memoryapi", "errhandlingapi"] }
[target.'cfg(target_os = "linux")'.dependencies]
userfaultfd = { version = "0.3.0", optional = true }
[build-dependencies]
cc = "1.0"


@@ -26,6 +26,12 @@ use wasmtime_environ::{
ir, Module, ModuleTranslation, ModuleType, OwnedDataInitializer, TableElements, VMOffsets,
};
mod pooling;
pub use self::pooling::{
InstanceLimits, ModuleLimits, PoolingAllocationStrategy, PoolingInstanceAllocator,
};
/// Represents a request for a new runtime instance.
pub struct InstanceAllocationRequest<'a> {
/// The module being instantiated.
@@ -72,11 +78,18 @@ pub enum InstantiationError {
/// A trap occurred during instantiation, after linking.
#[error("Trap occurred during instantiation")]
Trap(Trap),
/// A limit on how many instances are supported has been reached.
#[error("Limit of {0} concurrent instances has been reached")]
Limit(u32),
}
/// An error while creating a fiber stack.
#[derive(Error, Debug)]
pub enum FiberStackError {
/// Insufficient resources available for the request.
#[error("Insufficient resources: {0}")]
Resource(String),
/// An error for when the allocator doesn't support custom fiber stacks.
#[error("Custom fiber stacks are not supported by the allocator")]
NotSupported,
@@ -218,7 +231,7 @@ unsafe fn initialize_vmcontext(
globals.len(),
);
// Initialize the defined functions
// Initialize the functions
for (index, sig) in instance.module.functions.iter() {
let type_index = lookup_shared_signature(*sig);

(File diff suppressed because it is too large.)


@@ -0,0 +1,22 @@
use crate::Mmap;
pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
region::protect(addr, len, region::Protection::READ_WRITE).is_ok()
}
pub unsafe fn decommit(addr: *mut u8, len: usize) {
region::protect(addr, len, region::Protection::NONE).unwrap();
// On Linux, this is enough to cause the kernel to initialize the pages to 0 on next access
assert_eq!(
libc::madvise(addr as _, len, libc::MADV_DONTNEED),
0,
"madvise failed to mark pages as missing: {}",
std::io::Error::last_os_error()
);
}
pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap, String> {
Mmap::accessible_reserved(accessible_size, mapping_size)
.map_err(|e| format!("failed to allocate pool memory: {}", e))
}
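
To make the intent of the `make_accessible`/`decommit` helpers above concrete, here is a minimal standalone sketch of the Linux reserve → commit → decommit cycle, written against libc directly rather than the `Mmap`/`region` wrappers (the function name `lifecycle_demo` is illustrative only):

```rust
use std::ptr;

unsafe fn lifecycle_demo(len: usize) {
    // Reserve address space without committing it (PROT_NONE, anonymous mapping).
    let base = libc::mmap(
        ptr::null_mut(),
        len,
        libc::PROT_NONE,
        libc::MAP_PRIVATE | libc::MAP_ANON,
        -1,
        0,
    );
    assert_ne!(base, libc::MAP_FAILED);

    // "make_accessible": commit the pages by granting read/write access.
    assert_eq!(libc::mprotect(base, len, libc::PROT_READ | libc::PROT_WRITE), 0);
    *(base as *mut u8) = 42;

    // "decommit": drop access and tell the kernel the contents are disposable,
    // so the next access sees zeroed pages while the reservation is kept.
    assert_eq!(libc::mprotect(base, len, libc::PROT_NONE), 0);
    assert_eq!(libc::madvise(base, len, libc::MADV_DONTNEED), 0);

    libc::munmap(base, len);
}
```

Because `MADV_DONTNEED` only discards the page contents, the address range stays reserved and can be made accessible again for the next instance without a fresh `mmap`.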


@@ -0,0 +1,26 @@
use crate::Mmap;
pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
region::protect(addr, len, region::Protection::READ_WRITE).is_ok()
}
pub unsafe fn decommit(addr: *mut u8, len: usize) {
assert_eq!(
libc::mmap(
addr as _,
len,
libc::PROT_NONE,
libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_FIXED,
-1,
0,
) as *mut u8,
addr,
"mmap failed to remap pages: {}",
std::io::Error::last_os_error()
);
}
pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap, String> {
Mmap::accessible_reserved(accessible_size, mapping_size)
.map_err(|e| format!("failed to allocate pool memory: {}", e))
}


@@ -0,0 +1,21 @@
use crate::Mmap;
use winapi::um::memoryapi::{VirtualAlloc, VirtualFree};
use winapi::um::winnt::{MEM_COMMIT, MEM_DECOMMIT, PAGE_READWRITE};
pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
// This doesn't use the `region` crate because the memory needs to be committed
!VirtualAlloc(addr as _, len, MEM_COMMIT, PAGE_READWRITE).is_null()
}
pub unsafe fn decommit(addr: *mut u8, len: usize) {
assert!(
VirtualFree(addr as _, len, MEM_DECOMMIT) != 0,
"failed to decommit memory pages: {}",
std::io::Error::last_os_error()
);
}
pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap, String> {
Mmap::accessible_reserved(accessible_size, mapping_size)
.map_err(|e| format!("failed to allocate pool memory: {}", e))
}
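
For comparison, a hedged sketch of the same lifecycle on Windows, illustrating the comment above about why the `region` crate is not used: reserved address space is unusable until it is explicitly committed. `lifecycle_demo` and the reserve/release steps are illustrative additions, not part of the diff.

```rust
use std::ptr;
use winapi::um::memoryapi::{VirtualAlloc, VirtualFree};
use winapi::um::winnt::{
    MEM_COMMIT, MEM_DECOMMIT, MEM_RELEASE, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE,
};

unsafe fn lifecycle_demo(len: usize) {
    // Reserve address space only; no backing pages are committed yet.
    let base = VirtualAlloc(ptr::null_mut(), len, MEM_RESERVE, PAGE_NOACCESS);
    assert!(!base.is_null());

    // "make_accessible": commit the pages so they can be read and written.
    assert!(!VirtualAlloc(base, len, MEM_COMMIT, PAGE_READWRITE).is_null());
    *(base as *mut u8) = 42;

    // "decommit": contents are discarded but the reservation is kept for reuse.
    assert!(VirtualFree(base, len, MEM_DECOMMIT) != 0);

    // Release the reservation entirely (size must be 0 with MEM_RELEASE).
    assert!(VirtualFree(base, 0, MEM_RELEASE) != 0);
}
```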


@@ -38,8 +38,9 @@ pub use crate::export::*;
pub use crate::externref::*;
pub use crate::imports::Imports;
pub use crate::instance::{
FiberStackError, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
InstantiationError, LinkError, OnDemandInstanceAllocator, RuntimeInstance,
FiberStackError, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstanceLimits,
InstantiationError, LinkError, ModuleLimits, OnDemandInstanceAllocator,
PoolingAllocationStrategy, PoolingInstanceAllocator, RuntimeInstance,
};
pub use crate::jit_int::GdbJitImageRegistration;
pub use crate::memory::{Memory, RuntimeLinearMemory, RuntimeMemoryCreator};


@@ -164,7 +164,7 @@ impl RuntimeLinearMemory for MmapMemory {
/// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
fn vmmemory(&self) -> VMMemoryDefinition {
let mut mmap = self.mmap.borrow_mut();
let mmap = self.mmap.borrow();
VMMemoryDefinition {
base: mmap.alloc.as_mut_ptr(),
current_length: mmap.size as usize * WASM_PAGE_SIZE as usize,
@@ -177,7 +177,7 @@ enum MemoryStorage {
base: *mut u8,
size: Cell<u32>,
maximum: u32,
make_accessible: Option<fn(*mut u8, usize) -> bool>,
make_accessible: unsafe fn(*mut u8, usize) -> bool,
},
Dynamic(Box<dyn RuntimeLinearMemory>),
}
@@ -203,13 +203,13 @@ impl Memory {
plan: &MemoryPlan,
base: *mut u8,
maximum: u32,
make_accessible: Option<fn(*mut u8, usize) -> bool>,
make_accessible: unsafe fn(*mut u8, usize) -> bool,
) -> Result<Self, String> {
if plan.memory.minimum > 0 {
if let Some(make_accessible) = &make_accessible {
if !make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize) {
return Err("memory cannot be made accessible".into());
}
if unsafe {
!make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize)
} {
return Err("memory cannot be made accessible".into());
}
}
@@ -258,10 +258,8 @@ impl Memory {
let start = usize::try_from(old_size).unwrap() * WASM_PAGE_SIZE as usize;
let len = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
if let Some(make_accessible) = make_accessible {
if !make_accessible(unsafe { base.add(start) }, len) {
return None;
}
if unsafe { !make_accessible(base.add(start), len) } {
return None;
}
size.set(new_size);
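
A simplified sketch of the pattern these hunks switch to: `make_accessible` is now a required `unsafe fn`, and growing a static memory commits only the newly added page range. The `mprotect`-based callback and the function names below are illustrative, not from the diff:

```rust
const WASM_PAGE_SIZE: usize = 64 * 1024;

// Example callback with the new signature; a real implementation would be one
// of the platform-specific helpers shown earlier (region::protect, VirtualAlloc).
unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
    libc::mprotect(addr as _, len, libc::PROT_READ | libc::PROT_WRITE) == 0
}

// Commit only the pages beyond the old size when a static memory grows.
unsafe fn grow_static(base: *mut u8, old_pages: usize, delta_pages: usize) -> bool {
    let start = old_pages * WASM_PAGE_SIZE;
    let len = delta_pages * WASM_PAGE_SIZE;
    make_accessible(base.add(start), len)
}
```

Making the callback non-optional presumably reflects that pooled static memories always start out merely reserved, so some commit function is always required.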


@@ -234,7 +234,7 @@ impl Mmap {
}
/// Return the allocated memory as a mutable pointer to u8.
pub fn as_mut_ptr(&mut self) -> *mut u8 {
pub fn as_mut_ptr(&self) -> *mut u8 {
self.ptr as *mut u8
}
@@ -247,6 +247,11 @@ impl Mmap {
pub fn is_empty(&self) -> bool {
self.len() == 0
}
#[allow(dead_code)]
pub(crate) unsafe fn from_raw(ptr: usize, len: usize) -> Self {
Self { ptr, len }
}
}
impl Drop for Mmap {


@@ -66,6 +66,20 @@ enum TableElements {
ExternRefs(Vec<Option<VMExternRef>>),
}
// Ideally this should be a static assertion that table elements are pointer-sized
#[inline(always)]
pub(crate) fn max_table_element_size() -> usize {
debug_assert_eq!(
std::mem::size_of::<*mut VMCallerCheckedAnyfunc>(),
std::mem::size_of::<*const ()>()
);
debug_assert_eq!(
std::mem::size_of::<Option<VMExternRef>>(),
std::mem::size_of::<*const ()>()
);
std::mem::size_of::<*const ()>()
}
#[derive(Debug)]
enum TableStorage {
Static {
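
The comment at the top of this hunk asks for a static assertion; below is a sketch of what that could look like with const evaluation. The type aliases are stand-ins for `*mut VMCallerCheckedAnyfunc` and `Option<VMExternRef>`, and const `assert!` requires Rust 1.57 or later, newer than this commit.

```rust
use std::mem::size_of;

// Stand-ins for the runtime's two table element types.
type AnyfuncPtr = *mut ();
type ExternRefSlot = Option<std::ptr::NonNull<()>>;

// Fails the build (rather than a debug-only runtime check) if either table
// element type is not pointer-sized.
const _: () = assert!(size_of::<AnyfuncPtr>() == size_of::<*const ()>());
const _: () = assert!(size_of::<ExternRefSlot>() == size_of::<*const ()>());
```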