Implement fiber stack allocation for instance allocators.

This commit adds fiber stack allocation to the `InstanceAllocator` trait.

The on-demand instance allocator doesn't support custom stacks, so the
implementation falls back to the stack allocation provided by `wasmtime-fiber`
for the fiber stacks.

In the future, the pooling instance allocator will return custom stacks for use
on Linux and macOS.

On Windows, the native fiber implementation will always be used.
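
As a rough illustration of the intended flow (not part of this commit), the fallback described above could look like the sketch below: ask the instance allocator for a stack and, if it reports `FiberStackError::NotSupported`, let `wasmtime-fiber` allocate one itself. The `new_fiber` helper, the unit type parameters, and the `anyhow`-based error handling are hypothetical; the real async integration in the `wasmtime` crate is more involved.

use anyhow::Result;
use wasmtime_fiber::{Fiber, Suspend};
use wasmtime_runtime::{FiberStackError, InstanceAllocator};

// Hypothetical helper showing the intended fallback: prefer a stack handed
// out by the instance allocator, otherwise let `wasmtime-fiber` allocate one.
fn new_fiber<'a>(
    allocator: &impl InstanceAllocator,
    stack_size: usize,
    func: impl FnOnce((), &Suspend<(), (), ()>) + 'a,
) -> Result<Fiber<'a, (), (), ()>> {
    match allocator.allocate_fiber_stack() {
        // The allocator created the stack (with a guard page), satisfying
        // `new_with_stack`'s safety contract. The caller must later hand the
        // stack back via `deallocate_fiber_stack`; the fiber will not free it.
        Ok(top_of_stack) => Ok(unsafe { Fiber::new_with_stack(top_of_stack, func)? }),
        // The on-demand allocator takes this path: fall back to the
        // allocation built into `wasmtime-fiber`.
        Err(FiberStackError::NotSupported) => Ok(Fiber::new(stack_size, func)?),
        // Any other error (e.g. a fiber limit) is reported to the caller.
        Err(e) => Err(e.into()),
    }
}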
Peter Huene
2021-02-04 13:05:01 -08:00
parent 3bb145f65c
commit 16ca5e16d9
7 changed files with 161 additions and 37 deletions

View File

@@ -61,8 +61,8 @@ pub extern "C" fn wasmtime_config_consume_fuel_set(c: &mut wasm_config_t, enable
 }
 #[no_mangle]
-pub extern "C" fn wasmtime_config_max_wasm_stack_set(c: &mut wasm_config_t, size: usize) {
-    c.config.max_wasm_stack(size);
+pub extern "C" fn wasmtime_config_max_wasm_stack_set(c: &mut wasm_config_t, size: usize) -> bool {
+    c.config.max_wasm_stack(size).is_ok()
 }
 #[no_mangle]

View File

@@ -51,6 +51,27 @@ impl<'a, Resume, Yield, Return> Fiber<'a, Resume, Yield, Return> {
         })
     }
+    /// Creates a new fiber with existing stack space that will execute `func`.
+    ///
+    /// This function returns a `Fiber` which, when resumed, will execute `func`
+    /// to completion. When desired the `func` can suspend itself via
+    /// `Fiber::suspend`.
+    ///
+    /// # Safety
+    ///
+    /// The caller must properly allocate the stack space with a guard page and
+    /// make the pages accessible for correct behavior.
+    pub unsafe fn new_with_stack(
+        top_of_stack: *mut u8,
+        func: impl FnOnce(Resume, &Suspend<Resume, Yield, Return>) -> Return + 'a,
+    ) -> io::Result<Fiber<'a, Resume, Yield, Return>> {
+        Ok(Fiber {
+            inner: imp::Fiber::new_with_stack(top_of_stack, func),
+            done: Cell::new(false),
+            _phantom: PhantomData,
+        })
+    }
     /// Resumes execution of this fiber.
     ///
     /// This function will transfer execution to the fiber and resume from where
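
The safety requirement above leaves stack allocation entirely to the caller. A minimal Unix-only sketch of satisfying it, paralleling the `unix.rs` changes later in this commit, is shown below; the `allocate_guarded_stack` name is made up and the error handling is simplified (a failed `mprotect` leaks the mapping).

use std::io;
use std::ptr;

// Reserve `stack_size` plus one extra page, keep the lowest page inaccessible
// as a guard page, and hand the *top* of the region to the fiber.
unsafe fn allocate_guarded_stack(stack_size: usize) -> io::Result<*mut u8> {
    let page_size = libc::sysconf(libc::_SC_PAGESIZE) as usize;
    // Round the requested size up to a whole number of pages.
    let stack_size = (stack_size + page_size - 1) & !(page_size - 1);
    let alloc_len = stack_size + page_size;

    // Reserve the whole region with no access rights...
    let base = libc::mmap(
        ptr::null_mut(),
        alloc_len,
        libc::PROT_NONE,
        libc::MAP_PRIVATE | libc::MAP_ANON,
        -1,
        0,
    );
    if base == libc::MAP_FAILED {
        return Err(io::Error::last_os_error());
    }

    // ...then make everything above the lowest (guard) page read/write.
    if libc::mprotect(
        base.cast::<u8>().add(page_size).cast(),
        stack_size,
        libc::PROT_READ | libc::PROT_WRITE,
    ) != 0
    {
        return Err(io::Error::last_os_error());
    }

    // The fiber implementation expects a pointer one past the highest byte.
    Ok(base.cast::<u8>().add(alloc_len))
}

The returned pointer is what `new_with_stack` expects as `top_of_stack`; the stack grows downward from it toward the guard page.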

View File

@@ -35,10 +35,10 @@ use std::io;
 use std::ptr;
 pub struct Fiber {
-    // Description of the mmap region we own. This should be abstracted
-    // eventually so we aren't personally mmap-ing this region.
-    mmap: *mut libc::c_void,
-    mmap_len: usize,
+    // The top of the stack; for stacks allocated by the fiber implementation itself,
+    // the base address of the allocation will be `top_of_stack.sub(alloc_len.unwrap())`
+    top_of_stack: *mut u8,
+    alloc_len: Option<usize>,
 }
 pub struct Suspend {
@@ -66,21 +66,40 @@ where
 }
 impl Fiber {
-    pub fn new<F, A, B, C>(stack_size: usize, func: F) -> io::Result<Fiber>
+    pub fn new<F, A, B, C>(stack_size: usize, func: F) -> io::Result<Self>
     where
         F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
     {
-        let fiber = Fiber::alloc_with_stack(stack_size)?;
-        unsafe {
-            // Initialize the top of the stack to be resumed from
-            let top_of_stack = fiber.top_of_stack();
-            let data = Box::into_raw(Box::new(func)).cast();
-            wasmtime_fiber_init(top_of_stack, fiber_start::<F, A, B, C>, data);
+        let fiber = Self::alloc_with_stack(stack_size)?;
+        fiber.init(func);
         Ok(fiber)
     }
+    pub fn new_with_stack<F, A, B, C>(top_of_stack: *mut u8, func: F) -> Self
+    where
+        F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
+    {
+        let fiber = Self {
+            top_of_stack,
+            alloc_len: None,
+        };
+        fiber.init(func);
+        fiber
+    }
+    fn init<F, A, B, C>(&self, func: F)
+    where
+        F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
+    {
+        unsafe {
+            let data = Box::into_raw(Box::new(func)).cast();
+            wasmtime_fiber_init(self.top_of_stack, fiber_start::<F, A, B, C>, data);
+        }
     }
-    fn alloc_with_stack(stack_size: usize) -> io::Result<Fiber> {
+    fn alloc_with_stack(stack_size: usize) -> io::Result<Self> {
         unsafe {
             // Round up our stack size request to the nearest multiple of the
             // page size.
@@ -104,7 +123,10 @@ impl Fiber {
             if mmap == libc::MAP_FAILED {
                 return Err(io::Error::last_os_error());
             }
-            let ret = Fiber { mmap, mmap_len };
+            let ret = Self {
+                top_of_stack: mmap.cast::<u8>().add(mmap_len),
+                alloc_len: Some(mmap_len),
+            };
             let res = libc::mprotect(
                 mmap.cast::<u8>().add(page_size).cast(),
                 stack_size,
@@ -124,29 +146,26 @@ impl Fiber {
             // stack, otherwise known as our reserved slot for this information.
             //
             // In the diagram above this is updating address 0xAff8
-            let top_of_stack = self.top_of_stack();
-            let addr = top_of_stack.cast::<usize>().offset(-1);
+            let addr = self.top_of_stack.cast::<usize>().offset(-1);
             addr.write(result as *const _ as usize);
-            wasmtime_fiber_switch(top_of_stack);
+            wasmtime_fiber_switch(self.top_of_stack);
             // null this out to help catch use-after-free
             addr.write(0);
         }
     }
-    unsafe fn top_of_stack(&self) -> *mut u8 {
-        self.mmap.cast::<u8>().add(self.mmap_len)
-    }
 }
 impl Drop for Fiber {
     fn drop(&mut self) {
         unsafe {
-            let ret = libc::munmap(self.mmap, self.mmap_len);
-            debug_assert!(ret == 0);
+            if let Some(alloc_len) = self.alloc_len {
+                let ret = libc::munmap(self.top_of_stack.sub(alloc_len) as _, alloc_len);
+                debug_assert!(ret == 0);
+            }
         }
     }
 }
 impl Suspend {

View File

@@ -40,7 +40,7 @@ where
 }
 impl Fiber {
-    pub fn new<F, A, B, C>(stack_size: usize, func: F) -> io::Result<Fiber>
+    pub fn new<F, A, B, C>(stack_size: usize, func: F) -> io::Result<Self>
     where
         F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
     {
@@ -61,11 +61,19 @@ impl Fiber {
                 drop(Box::from_raw(state.initial_closure.get().cast::<F>()));
                 Err(io::Error::last_os_error())
             } else {
-                Ok(Fiber { fiber, state })
+                Ok(Self { fiber, state })
             }
         }
     }
+    pub fn new_with_stack<F, A, B, C>(_top_of_stack: *mut u8, _func: F) -> Self
+    where
+        F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
+    {
+        // Windows fibers have no support for custom stacks
+        unimplemented!()
+    }
     pub(crate) fn resume<A, B, C>(&self, result: &Cell<RunResult<A, B, C>>) {
         unsafe {
             let is_fiber = IsThreadAFiber() != 0;

View File

@@ -74,6 +74,17 @@ pub enum InstantiationError {
     Trap(Trap),
 }
+/// An error while creating a fiber stack.
+#[derive(Error, Debug)]
+pub enum FiberStackError {
+    /// An error for when the allocator doesn't support custom fiber stacks.
+    #[error("Custom fiber stacks are not supported by the allocator")]
+    NotSupported,
+    /// A limit on how many fibers are supported has been reached.
+    #[error("Limit of {0} concurrent fibers has been reached")]
+    Limit(u32),
+}
 /// Represents a runtime instance allocator.
 ///
 /// # Safety
@@ -127,6 +138,22 @@ pub unsafe trait InstanceAllocator: Send + Sync {
     ///
     /// Use extreme care when deallocating an instance so that there are no dangling instance pointers.
     unsafe fn deallocate(&self, handle: &InstanceHandle);
+    /// Allocates a fiber stack for calling async functions on.
+    ///
+    /// Returns the top of the fiber stack if successfully allocated.
+    fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError>;
+    /// Deallocates a fiber stack that was previously allocated.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because there are no guarantees that the given stack
+    /// is no longer in use.
+    ///
+    /// Additionally, passing a stack pointer that was not returned from `allocate_fiber_stack`
+    /// will lead to undefined behavior.
+    unsafe fn deallocate_fiber_stack(&self, stack: *mut u8);
 }
 unsafe fn initialize_vmcontext(
@@ -544,4 +571,14 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
         ptr::drop_in_place(instance as *const Instance as *mut Instance);
         alloc::dealloc(instance as *const Instance as *mut _, layout);
     }
+    fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError> {
+        // The on-demand allocator does not support allocating fiber stacks
+        Err(FiberStackError::NotSupported)
+    }
+    unsafe fn deallocate_fiber_stack(&self, _stack: *mut u8) {
+        // This should never be called as `allocate_fiber_stack` never returns success
+        unreachable!()
+    }
 }
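
Because `deallocate_fiber_stack` is unsafe and must only ever see pointers returned by `allocate_fiber_stack`, callers inside the runtime could pair the two methods with a small RAII guard. The `FiberStackGuard` type below is purely illustrative and not part of this commit.

use wasmtime_runtime::{FiberStackError, InstanceAllocator};

// Hypothetical guard: ties a stack returned by `allocate_fiber_stack` back to
// the allocator that produced it, so it is always returned through
// `deallocate_fiber_stack` and never freed by any other means.
struct FiberStackGuard<'a, A: InstanceAllocator> {
    allocator: &'a A,
    top_of_stack: *mut u8,
}

impl<'a, A: InstanceAllocator> FiberStackGuard<'a, A> {
    fn new(allocator: &'a A) -> Result<Self, FiberStackError> {
        let top_of_stack = allocator.allocate_fiber_stack()?;
        Ok(Self { allocator, top_of_stack })
    }

    fn top_of_stack(&self) -> *mut u8 {
        self.top_of_stack
    }
}

impl<'a, A: InstanceAllocator> Drop for FiberStackGuard<'a, A> {
    fn drop(&mut self) {
        // Safety: the pointer came from this allocator's `allocate_fiber_stack`,
        // and dropping the guard is the caller's assertion that the fiber
        // running on this stack has completed.
        unsafe { self.allocator.deallocate_fiber_stack(self.top_of_stack) };
    }
}

A `Fiber::new_with_stack(guard.top_of_stack(), ...)` call would then run on the guarded stack for as long as the guard is alive; the type cannot itself verify that the fiber has finished before the guard is dropped.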

View File

@@ -38,8 +38,8 @@ pub use crate::export::*;
 pub use crate::externref::*;
 pub use crate::imports::Imports;
 pub use crate::instance::{
-    InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstantiationError, LinkError,
-    OnDemandInstanceAllocator, RuntimeInstance,
+    FiberStackError, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
+    InstantiationError, LinkError, OnDemandInstanceAllocator, RuntimeInstance,
 };
 pub use crate::jit_int::GdbJitImageRegistration;
 pub use crate::memory::{Memory, RuntimeLinearMemory, RuntimeMemoryCreator};

View File

@@ -58,6 +58,8 @@ pub struct Config {
     pub(crate) max_instances: usize,
     pub(crate) max_tables: usize,
     pub(crate) max_memories: usize,
+    #[cfg(feature = "async")]
+    pub(crate) async_stack_size: usize,
 }
 impl Config {
@@ -108,6 +110,8 @@ impl Config {
             max_instances: 10_000,
             max_tables: 10_000,
             max_memories: 10_000,
+            #[cfg(feature = "async")]
+            async_stack_size: 2 << 20,
         };
         ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);
         return ret;
@@ -182,23 +186,58 @@ impl Config {
         self
     }
-    /// Configures the maximum amount of native stack space available to
+    /// Configures the maximum amount of stack space available for
     /// executing WebAssembly code.
     ///
-    /// WebAssembly code currently executes on the native call stack for its own
-    /// call frames. WebAssembly, however, also has well-defined semantics on
-    /// stack overflow. This is intended to be a knob which can help configure
-    /// how much native stack space a wasm module is allowed to consume. Note
-    /// that the number here is not super-precise, but rather wasm will take at
-    /// most "pretty close to this much" stack space.
+    /// WebAssembly has well-defined semantics on stack overflow. This is
+    /// intended to be a knob which can help configure how much stack space
+    /// wasm execution is allowed to consume. Note that the number here is not
+    /// super-precise, but rather wasm will take at most "pretty close to this
+    /// much" stack space.
     ///
     /// If a wasm call (or series of nested wasm calls) take more stack space
     /// than the `size` specified then a stack overflow trap will be raised.
     ///
-    /// By default this option is 1 MB.
-    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
+    /// When the `async` feature is enabled, this value cannot exceed the
+    /// `async_stack_size` option. Be careful not to set this value too close
+    /// to `async_stack_size` as doing so may limit how much stack space
+    /// is available for host functions. Unlike wasm functions that trap
+    /// on stack overflow, a host function that overflows the stack will
+    /// abort the process.
+    ///
+    /// By default this option is 1 MiB.
+    pub fn max_wasm_stack(&mut self, size: usize) -> Result<&mut Self> {
+        #[cfg(feature = "async")]
+        if size > self.async_stack_size {
+            bail!("wasm stack size cannot exceed the async stack size");
+        }
+        if size == 0 {
+            bail!("wasm stack size cannot be zero");
+        }
         self.max_wasm_stack = size;
-        self
+        Ok(self)
+    }
+    /// Configures the size of the stacks used for asynchronous execution.
+    ///
+    /// This setting configures the size of the stacks that are allocated for
+    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
+    ///
+    /// The amount of stack space guaranteed for host functions is
+    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
+    /// close to one another; doing so may cause host functions to overflow the
+    /// stack and abort the process.
+    ///
+    /// By default this option is 2 MiB.
+    #[cfg(feature = "async")]
+    pub fn async_stack_size(&mut self, size: usize) -> Result<&mut Self> {
+        if size < self.max_wasm_stack {
+            bail!("async stack size cannot be less than the maximum wasm stack size");
+        }
+        self.async_stack_size = size;
+        Ok(self)
     }
     /// Configures whether the WebAssembly threads proposal will be enabled for