Split out fiber stacks from fibers.

This commit splits a `FiberStack` out of `Fiber`, allowing the instance
allocator trait to return a `FiberStack` rather than a raw stack pointer. This
keeps stack creation almost entirely within `wasmtime_fiber`, while also
letting the on-demand instance allocator make use of it.

The instance allocators no longer have to return a "not supported" error to
indicate that the store should allocate its own fiber stack.

This also includes cleanup in the instance allocators to scope fiber stacks to
the runtime's new `async` feature.
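
As a rough illustration of the resulting API shape (a sketch based on the
signatures in this diff, not code taken from the commit itself):

    use wasmtime_fiber::{Fiber, FiberStack};

    fn example() -> std::io::Result<()> {
        // Stack creation is now a separate, fallible step...
        let stack = FiberStack::new(1024 * 1024)?;

        // ...and the fiber takes ownership of the stack it runs on.
        let fiber = Fiber::<(), (), ()>::new(stack, |_resume, suspend| {
            suspend.suspend(()); // yield back to the resumer once
        })?;

        assert!(fiber.resume(()).is_err()); // Err(..) means the fiber yielded
        assert!(fiber.resume(()).is_ok()); // Ok(..) means the fiber returned
        Ok(())
    }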

Closes #2708.
Author: Peter Huene
Date:   2021-03-18 17:09:36 -07:00
Parent: 59dfe4b9f4
Commit: f8f51afac1
20 changed files with 343 additions and 292 deletions

Cargo.lock (generated)
View File

@@ -3484,6 +3484,7 @@ dependencies = [
  "thiserror",
  "userfaultfd",
  "wasmtime-environ",
+ "wasmtime-fiber",
  "winapi",
 ]

View File

@@ -68,7 +68,7 @@ FUNCTION(wasmtime_fiber_init):
     // And then we specify the stack pointer resumption should begin at. Our
     // `wasmtime_fiber_switch` function consumes 6 registers plus a return
-    // pointer, and the top 16 bytes aree resereved, so that's:
+    // pointer, and the top 16 bytes are reserved, so that's:
     //
     //     (6 + 1) * 8 + 16 = 0x48
     lea -0x48(%rdi), %rax

View File

@@ -14,7 +14,38 @@ mod unix;
 #[cfg(unix)]
 use unix as imp;
 
+/// Represents an execution stack to use for a fiber.
+#[derive(Debug)]
+pub struct FiberStack(imp::FiberStack);
+
+impl FiberStack {
+    /// Creates a new fiber stack of the given size.
+    pub fn new(size: usize) -> io::Result<Self> {
+        Ok(Self(imp::FiberStack::new(size)?))
+    }
+
+    /// Creates a new fiber stack with the given pointer to the top of the stack.
+    ///
+    /// # Safety
+    ///
+    /// This is unsafe because there is no validation of the given stack pointer.
+    ///
+    /// The caller must properly allocate the stack space with a guard page and
+    /// make the pages accessible for correct behavior.
+    pub unsafe fn from_top_ptr(top: *mut u8) -> io::Result<Self> {
+        Ok(Self(imp::FiberStack::from_top_ptr(top)?))
+    }
+
+    /// Gets the top of the stack.
+    ///
+    /// Returns `None` if the platform does not support getting the top of the stack.
+    pub fn top(&self) -> Option<*mut u8> {
+        self.0.top()
+    }
+}
+
 pub struct Fiber<'a, Resume, Yield, Return> {
+    stack: FiberStack,
     inner: imp::Fiber,
     done: Cell<bool>,
     _phantom: PhantomData<&'a (Resume, Yield, Return)>,
@@ -34,39 +65,20 @@ enum RunResult<Resume, Yield, Return> {
 }
 
 impl<'a, Resume, Yield, Return> Fiber<'a, Resume, Yield, Return> {
-    /// Creates a new fiber which will execute `func` on a new native stack of
-    /// size `stack_size`.
+    /// Creates a new fiber which will execute `func` on the given stack.
     ///
     /// This function returns a `Fiber` which, when resumed, will execute `func`
     /// to completion. When desired the `func` can suspend itself via
     /// `Fiber::suspend`.
     pub fn new(
-        stack_size: usize,
+        stack: FiberStack,
        func: impl FnOnce(Resume, &Suspend<Resume, Yield, Return>) -> Return + 'a,
-    ) -> io::Result<Fiber<'a, Resume, Yield, Return>> {
-        Ok(Fiber {
-            inner: imp::Fiber::new(stack_size, func)?,
-            done: Cell::new(false),
-            _phantom: PhantomData,
-        })
-    }
-
-    /// Creates a new fiber with existing stack space that will execute `func`.
-    ///
-    /// This function returns a `Fiber` which, when resumed, will execute `func`
-    /// to completion. When desired the `func` can suspend itself via
-    /// `Fiber::suspend`.
-    ///
-    /// # Safety
-    ///
-    /// The caller must properly allocate the stack space with a guard page and
-    /// make the pages accessible for correct behavior.
-    pub unsafe fn new_with_stack(
-        top_of_stack: *mut u8,
-        func: impl FnOnce(Resume, &Suspend<Resume, Yield, Return>) -> Return + 'a,
-    ) -> io::Result<Fiber<'a, Resume, Yield, Return>> {
-        Ok(Fiber {
-            inner: imp::Fiber::new_with_stack(top_of_stack, func)?,
+    ) -> io::Result<Self> {
+        let inner = imp::Fiber::new(&stack.0, func)?;
+
+        Ok(Self {
+            stack,
+            inner,
             done: Cell::new(false),
             _phantom: PhantomData,
         })
@@ -90,7 +102,7 @@ impl<'a, Resume, Yield, Return> Fiber<'a, Resume, Yield, Return> {
     pub fn resume(&self, val: Resume) -> Result<Return, Yield> {
         assert!(!self.done.replace(true), "cannot resume a finished fiber");
         let result = Cell::new(RunResult::Resuming(val));
-        self.inner.resume(&result);
+        self.inner.resume(&self.stack.0, &result);
         match result.into_inner() {
             RunResult::Resuming(_) | RunResult::Executing => unreachable!(),
             RunResult::Yield(y) => {
@@ -106,6 +118,11 @@ impl<'a, Resume, Yield, Return> Fiber<'a, Resume, Yield, Return> {
     pub fn done(&self) -> bool {
         self.done.get()
     }
+
+    /// Gets the stack associated with this fiber.
+    pub fn stack(&self) -> &FiberStack {
+        &self.stack
+    }
 }
 
 impl<Resume, Yield, Return> Suspend<Resume, Yield, Return> {
@@ -148,18 +165,18 @@ impl<A, B, C> Drop for Fiber<'_, A, B, C> {
 #[cfg(test)]
 mod tests {
-    use super::Fiber;
+    use super::{Fiber, FiberStack};
     use std::cell::Cell;
     use std::panic::{self, AssertUnwindSafe};
     use std::rc::Rc;
 
     #[test]
     fn small_stacks() {
-        Fiber::<(), (), ()>::new(0, |_, _| {})
+        Fiber::<(), (), ()>::new(FiberStack::new(0).unwrap(), |_, _| {})
             .unwrap()
             .resume(())
             .unwrap();
-        Fiber::<(), (), ()>::new(1, |_, _| {})
+        Fiber::<(), (), ()>::new(FiberStack::new(1).unwrap(), |_, _| {})
             .unwrap()
             .resume(())
             .unwrap();
@@ -169,7 +186,7 @@ mod tests {
     fn smoke() {
         let hit = Rc::new(Cell::new(false));
         let hit2 = hit.clone();
-        let fiber = Fiber::<(), (), ()>::new(1024 * 1024, move |_, _| {
+        let fiber = Fiber::<(), (), ()>::new(FiberStack::new(1024 * 1024).unwrap(), move |_, _| {
             hit2.set(true);
         })
         .unwrap();
@@ -182,7 +199,7 @@
     fn suspend_and_resume() {
         let hit = Rc::new(Cell::new(false));
         let hit2 = hit.clone();
-        let fiber = Fiber::<(), (), ()>::new(1024 * 1024, move |_, s| {
+        let fiber = Fiber::<(), (), ()>::new(FiberStack::new(1024 * 1024).unwrap(), move |_, s| {
             s.suspend(());
             hit2.set(true);
             s.suspend(());
@@ -219,7 +236,8 @@
     }
 
     fn run_test() {
-        let fiber = Fiber::<(), (), ()>::new(1024 * 1024, move |(), s| {
+        let fiber =
+            Fiber::<(), (), ()>::new(FiberStack::new(1024 * 1024).unwrap(), move |(), s| {
                 assert_contains_host();
                 s.suspend(());
                 assert_contains_host();
@@ -239,7 +257,8 @@
     fn panics_propagated() {
         let a = Rc::new(Cell::new(false));
         let b = SetOnDrop(a.clone());
-        let fiber = Fiber::<(), (), ()>::new(1024 * 1024, move |(), _s| {
+        let fiber =
+            Fiber::<(), (), ()>::new(FiberStack::new(1024 * 1024).unwrap(), move |(), _s| {
                 drop(&b);
                 panic!();
             })
@@ -258,7 +277,7 @@
     #[test]
     fn suspend_and_resume_values() {
-        let fiber = Fiber::new(1024 * 1024, move |first, s| {
+        let fiber = Fiber::new(FiberStack::new(1024 * 1024).unwrap(), move |first, s| {
             assert_eq!(first, 2.0);
             assert_eq!(s.suspend(4), 3.0);
             "hello".to_string()

View File

@@ -34,17 +34,81 @@ use std::cell::Cell;
 use std::io;
 use std::ptr;
 
-pub struct Fiber {
+#[derive(Debug)]
+pub struct FiberStack {
     // The top of the stack; for stacks allocated by the fiber implementation itself,
-    // the base address of the allocation will be `top_of_stack.sub(alloc_len.unwrap())`
-    top_of_stack: *mut u8,
-    alloc_len: Option<usize>,
-}
-
-pub struct Suspend {
-    top_of_stack: *mut u8,
+    // the base address of the allocation will be `top.sub(len.unwrap())`
+    top: *mut u8,
+    // The length of the stack; `None` when the stack was not created by this implementation.
+    len: Option<usize>,
+}
+
+impl FiberStack {
+    pub fn new(size: usize) -> io::Result<Self> {
+        unsafe {
+            // Round up our stack size request to the nearest multiple of the
+            // page size.
+            let page_size = libc::sysconf(libc::_SC_PAGESIZE) as usize;
+            let size = if size == 0 {
+                page_size
+            } else {
+                (size + (page_size - 1)) & (!(page_size - 1))
+            };
+
+            // Add in one page for a guard page and then ask for some memory.
+            let mmap_len = size + page_size;
+            let mmap = libc::mmap(
+                ptr::null_mut(),
+                mmap_len,
+                libc::PROT_NONE,
+                libc::MAP_ANON | libc::MAP_PRIVATE,
+                -1,
+                0,
+            );
+            if mmap == libc::MAP_FAILED {
+                return Err(io::Error::last_os_error());
+            }
+
+            if libc::mprotect(
+                mmap.cast::<u8>().add(page_size).cast(),
+                size,
+                libc::PROT_READ | libc::PROT_WRITE,
+            ) != 0
+            {
+                return Err(io::Error::last_os_error());
+            }
+
+            Ok(Self {
+                top: mmap.cast::<u8>().add(mmap_len),
+                len: Some(mmap_len),
+            })
+        }
+    }
+
+    pub unsafe fn from_top_ptr(top: *mut u8) -> io::Result<Self> {
+        Ok(Self { top, len: None })
+    }
+
+    pub fn top(&self) -> Option<*mut u8> {
+        Some(self.top)
+    }
 }
 
+impl Drop for FiberStack {
+    fn drop(&mut self) {
+        unsafe {
+            if let Some(len) = self.len {
+                let ret = libc::munmap(self.top.sub(len) as _, len);
+                debug_assert!(ret == 0);
+            }
+        }
+    }
+}
+
+pub struct Fiber;
+
+pub struct Suspend(*mut u8);
+
 extern "C" {
     fn wasmtime_fiber_init(
         top_of_stack: *mut u8,
@@ -59,97 +123,35 @@ where
     F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
 {
     unsafe {
-        let inner = Suspend { top_of_stack };
+        let inner = Suspend(top_of_stack);
         let initial = inner.take_resume::<A, B, C>();
         super::Suspend::<A, B, C>::execute(inner, initial, Box::from_raw(arg0.cast::<F>()))
     }
 }
 
 impl Fiber {
-    pub fn new<F, A, B, C>(stack_size: usize, func: F) -> io::Result<Self>
-    where
-        F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
-    {
-        let fiber = Self::alloc_with_stack(stack_size)?;
-        fiber.init(func);
-        Ok(fiber)
-    }
-
-    pub fn new_with_stack<F, A, B, C>(top_of_stack: *mut u8, func: F) -> io::Result<Self>
-    where
-        F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
-    {
-        let fiber = Self {
-            top_of_stack,
-            alloc_len: None,
-        };
-        fiber.init(func);
-        Ok(fiber)
-    }
-
-    fn init<F, A, B, C>(&self, func: F)
+    pub fn new<F, A, B, C>(stack: &FiberStack, func: F) -> io::Result<Self>
     where
         F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
     {
         unsafe {
             let data = Box::into_raw(Box::new(func)).cast();
-            wasmtime_fiber_init(self.top_of_stack, fiber_start::<F, A, B, C>, data);
+            wasmtime_fiber_init(stack.top, fiber_start::<F, A, B, C>, data);
         }
-    }
 
-    fn alloc_with_stack(stack_size: usize) -> io::Result<Self> {
-        unsafe {
-            // Round up our stack size request to the nearest multiple of the
-            // page size.
-            let page_size = libc::sysconf(libc::_SC_PAGESIZE) as usize;
-            let stack_size = if stack_size == 0 {
-                page_size
-            } else {
-                (stack_size + (page_size - 1)) & (!(page_size - 1))
-            };
-
-            // Add in one page for a guard page and then ask for some memory.
-            let mmap_len = stack_size + page_size;
-            let mmap = libc::mmap(
-                ptr::null_mut(),
-                mmap_len,
-                libc::PROT_NONE,
-                libc::MAP_ANON | libc::MAP_PRIVATE,
-                -1,
-                0,
-            );
-            if mmap == libc::MAP_FAILED {
-                return Err(io::Error::last_os_error());
-            }
-
-            let ret = Self {
-                top_of_stack: mmap.cast::<u8>().add(mmap_len),
-                alloc_len: Some(mmap_len),
-            };
-            let res = libc::mprotect(
-                mmap.cast::<u8>().add(page_size).cast(),
-                stack_size,
-                libc::PROT_READ | libc::PROT_WRITE,
-            );
-            if res != 0 {
-                Err(io::Error::last_os_error())
-            } else {
-                Ok(ret)
-            }
-        }
+        Ok(Self)
     }
 
-    pub(crate) fn resume<A, B, C>(&self, result: &Cell<RunResult<A, B, C>>) {
+    pub(crate) fn resume<A, B, C>(&self, stack: &FiberStack, result: &Cell<RunResult<A, B, C>>) {
         unsafe {
             // Store where our result is going at the very tip-top of the
             // stack, otherwise known as our reserved slot for this information.
             //
             // In the diagram above this is updating address 0xAff8
-            let addr = self.top_of_stack.cast::<usize>().offset(-1);
+            let addr = stack.top.cast::<usize>().offset(-1);
             addr.write(result as *const _ as usize);
 
-            wasmtime_fiber_switch(self.top_of_stack);
+            wasmtime_fiber_switch(stack.top);
 
             // null this out to help catch use-after-free
             addr.write(0);
@@ -157,23 +159,12 @@ impl Fiber {
     }
 }
 
-impl Drop for Fiber {
-    fn drop(&mut self) {
-        unsafe {
-            if let Some(alloc_len) = self.alloc_len {
-                let ret = libc::munmap(self.top_of_stack.sub(alloc_len) as _, alloc_len);
-                debug_assert!(ret == 0);
-            }
-        }
-    }
-}
-
 impl Suspend {
     pub(crate) fn switch<A, B, C>(&self, result: RunResult<A, B, C>) -> A {
         unsafe {
             // Calculate 0xAff8 and then write to it
             (*self.result_location::<A, B, C>()).set(result);
-            wasmtime_fiber_switch(self.top_of_stack);
+            wasmtime_fiber_switch(self.0);
             self.take_resume::<A, B, C>()
         }
     }
@@ -186,8 +177,8 @@ impl Suspend {
     }
 
     unsafe fn result_location<A, B, C>(&self) -> *const Cell<RunResult<A, B, C>> {
-        let ret = self.top_of_stack.cast::<*const u8>().offset(-1).read();
+        let ret = self.0.cast::<*const u8>().offset(-1).read();
         assert!(!ret.is_null());
-        return ret.cast();
+        ret.cast()
     }
 }
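
To make the stack allocation above concrete, here is the arithmetic worked
through with hypothetical numbers (assuming a 4 KiB page size; this note is
editorial and not part of the diff):

    // page_size = 4096, requested size = 5000
    // rounded size: (5000 + 4095) & !4095 = 8192   -> two usable pages
    // mmap_len:     8192 + 4096          = 12288   -> plus one guard page
    //
    // The whole mapping starts PROT_NONE; `mprotect` then makes everything
    // above the lowest page READ|WRITE. Overflowing the stack therefore
    // faults on the guard page instead of corrupting adjacent memory, and
    // `top` points at the end of the mapping (`mmap + mmap_len`).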

View File

@@ -7,6 +7,23 @@ use winapi::shared::winerror::ERROR_NOT_SUPPORTED;
 use winapi::um::fibersapi::*;
 use winapi::um::winbase::*;
 
+#[derive(Debug)]
+pub struct FiberStack(usize);
+
+impl FiberStack {
+    pub fn new(size: usize) -> io::Result<Self> {
+        Ok(Self(size))
+    }
+
+    pub unsafe fn from_top_ptr(_top: *mut u8) -> io::Result<Self> {
+        Err(io::Error::from_raw_os_error(ERROR_NOT_SUPPORTED as i32))
+    }
+
+    pub fn top(&self) -> Option<*mut u8> {
+        None
+    }
+}
+
 pub struct Fiber {
     fiber: LPVOID,
     state: Box<StartState>,
@@ -41,7 +58,7 @@ where
 }
 
 impl Fiber {
-    pub fn new<F, A, B, C>(stack_size: usize, func: F) -> io::Result<Self>
+    pub fn new<F, A, B, C>(stack: &FiberStack, func: F) -> io::Result<Self>
     where
         F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
     {
@@ -51,30 +68,25 @@ impl Fiber {
                 parent: Cell::new(ptr::null_mut()),
                 result_location: Cell::new(ptr::null()),
             });
 
             let fiber = CreateFiberEx(
                 0,
-                stack_size,
+                stack.0,
                 FIBER_FLAG_FLOAT_SWITCH,
                 Some(fiber_start::<F, A, B, C>),
                 &*state as *const StartState as *mut _,
             );
 
             if fiber.is_null() {
                 drop(Box::from_raw(state.initial_closure.get().cast::<F>()));
-                Err(io::Error::last_os_error())
-            } else {
-                Ok(Self { fiber, state })
-            }
+                return Err(io::Error::last_os_error());
+            }
+
+            Ok(Self { fiber, state })
         }
     }
 
-    pub fn new_with_stack<F, A, B, C>(_top_of_stack: *mut u8, _func: F) -> io::Result<Self>
-    where
-        F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
-    {
-        Err(io::Error::from_raw_os_error(ERROR_NOT_SUPPORTED as i32))
-    }
-
-    pub(crate) fn resume<A, B, C>(&self, result: &Cell<RunResult<A, B, C>>) {
+    pub(crate) fn resume<A, B, C>(&self, _stack: &FiberStack, result: &Cell<RunResult<A, B, C>>) {
         unsafe {
             let is_fiber = IsThreadAFiber() != 0;
             let parent_fiber = if is_fiber {
View File

@@ -13,6 +13,7 @@ edition = "2018"
 [dependencies]
 wasmtime-environ = { path = "../environ", version = "0.25.0" }
+wasmtime-fiber = { path = "../fiber", version = "0.25.0", optional = true }
 region = "2.1.0"
 libc = { version = "0.2.82", default-features = false }
 log = "0.4.8"
@@ -45,5 +46,7 @@ maintenance = { status = "actively-developed" }
 [features]
 default = []
 
+async = ["wasmtime-fiber"]
+
 # Enables support for userfaultfd in the pooling allocator when building on Linux
 uffd = ["userfaultfd"]

View File

@@ -87,13 +87,14 @@ pub enum InstantiationError {
 }
 
 /// An error while creating a fiber stack.
+#[cfg(feature = "async")]
 #[derive(Error, Debug)]
 pub enum FiberStackError {
     /// Insufficient resources available for the request.
     #[error("Insufficient resources: {0}")]
     Resource(anyhow::Error),
-    /// An error for when the allocator doesn't support custom fiber stacks.
-    #[error("Custom fiber stacks are not supported by the allocator")]
+    /// An error for when the allocator doesn't support fiber stacks.
+    #[error("fiber stacks are not supported by the allocator")]
     NotSupported,
     /// A limit on how many fibers are supported has been reached.
     #[error("Limit of {0} concurrent fibers has been reached")]
@@ -152,20 +153,16 @@ pub unsafe trait InstanceAllocator: Send + Sync {
     unsafe fn deallocate(&self, handle: &InstanceHandle);
 
     /// Allocates a fiber stack for calling async functions on.
-    ///
-    /// Returns the top of the fiber stack if successfully allocated.
-    fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError>;
+    #[cfg(feature = "async")]
+    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack, FiberStackError>;
 
-    /// Deallocates a fiber stack that was previously allocated.
+    /// Deallocates a fiber stack that was previously allocated with `allocate_fiber_stack`.
     ///
     /// # Safety
     ///
-    /// This function is unsafe because there are no guarantees that the given stack
-    /// is no longer in use.
-    ///
-    /// Additionally, passing a stack pointer that was not returned from `allocate_fiber_stack`
-    /// will lead to undefined behavior.
-    unsafe fn deallocate_fiber_stack(&self, stack: *mut u8);
+    /// The provided stack is required to have been allocated with `allocate_fiber_stack`.
+    #[cfg(feature = "async")]
+    unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack);
 }
 
 fn get_table_init_start(
@@ -539,12 +536,21 @@ unsafe fn initialize_vmcontext_globals(instance: &Instance) {
 #[derive(Clone)]
 pub struct OnDemandInstanceAllocator {
     mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
+    #[cfg(feature = "async")]
+    stack_size: usize,
 }
 
 impl OnDemandInstanceAllocator {
     /// Creates a new on-demand instance allocator.
-    pub fn new(mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>) -> Self {
-        Self { mem_creator }
+    pub fn new(
+        mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
+        #[cfg(feature = "async")] stack_size: usize,
+    ) -> Self {
+        Self {
+            mem_creator,
+            #[cfg(feature = "async")]
+            stack_size,
+        }
     }
 
     fn create_tables(module: &Module) -> PrimaryMap<DefinedTableIndex, Table> {
@@ -576,6 +582,16 @@ impl OnDemandInstanceAllocator {
     }
 }
 
+impl Default for OnDemandInstanceAllocator {
+    fn default() -> Self {
+        Self {
+            mem_creator: None,
+            #[cfg(feature = "async")]
+            stack_size: 0,
+        }
+    }
+}
+
 unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
     unsafe fn allocate(
         &self,
@@ -627,13 +643,18 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
         alloc::dealloc(handle.instance.cast(), layout);
     }
 
-    fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError> {
-        // The on-demand allocator does not support allocating fiber stacks
-        Err(FiberStackError::NotSupported)
+    #[cfg(feature = "async")]
+    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack, FiberStackError> {
+        if self.stack_size == 0 {
+            return Err(FiberStackError::NotSupported);
+        }
+
+        wasmtime_fiber::FiberStack::new(self.stack_size)
+            .map_err(|e| FiberStackError::Resource(e.into()))
     }
 
-    unsafe fn deallocate_fiber_stack(&self, _stack: *mut u8) {
-        // This should never be called as `allocate_fiber_stack` never returns success
-        unreachable!()
+    #[cfg(feature = "async")]
+    unsafe fn deallocate_fiber_stack(&self, _stack: &wasmtime_fiber::FiberStack) {
+        // The on-demand allocator has no further bookkeeping for fiber stacks
    }
 }
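
A quick sketch of how the reworked on-demand allocator behaves (hypothetical
usage of this internal API, assuming the runtime's `async` feature is
enabled):

    fn demo() -> Result<(), FiberStackError> {
        // A non-zero stack size enables fiber stack allocation; zero would
        // yield `FiberStackError::NotSupported`, as before this commit.
        let allocator = OnDemandInstanceAllocator::new(None, 1024 * 1024);

        // Now returns an owned `wasmtime_fiber::FiberStack` rather than a
        // raw pointer to the top of the stack.
        let stack = allocator.allocate_fiber_stack()?;

        // Deallocation is bookkeeping-only here; the mapping is released
        // when the `FiberStack` itself is dropped.
        unsafe { allocator.deallocate_fiber_stack(&stack) };
        Ok(())
    }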

View File

@@ -8,8 +8,8 @@
 //! when modules can be constrained based on configurable limits.
 
 use super::{
-    initialize_instance, initialize_vmcontext, FiberStackError, InstanceAllocationRequest,
-    InstanceAllocator, InstanceHandle, InstantiationError,
+    initialize_instance, initialize_vmcontext, InstanceAllocationRequest, InstanceAllocator,
+    InstanceHandle, InstantiationError,
 };
 use crate::{instance::Instance, Memory, Mmap, Table, VMContext};
 use anyhow::{anyhow, bail, Context, Result};
@@ -41,10 +41,13 @@
     }
 }
 
-use imp::{
-    commit_memory_pages, commit_stack_pages, commit_table_pages, decommit_memory_pages,
-    decommit_stack_pages, decommit_table_pages,
-};
+use imp::{commit_memory_pages, commit_table_pages, decommit_memory_pages, decommit_table_pages};
+
+#[cfg(all(feature = "async", unix))]
+use imp::{commit_stack_pages, decommit_stack_pages};
+
+#[cfg(feature = "async")]
+use super::FiberStackError;
 
 fn round_up_to_pow2(n: usize, to: usize) -> usize {
     debug_assert!(to > 0);
@@ -705,6 +708,7 @@ impl TablePool {
 ///
 /// The top of the stack (starting stack pointer) is returned when a stack is allocated
 /// from the pool.
+#[cfg(all(feature = "async", unix))]
 #[derive(Debug)]
 struct StackPool {
     mapping: Mmap,
@@ -714,13 +718,14 @@
     free_list: Mutex<Vec<usize>>,
 }
 
+#[cfg(all(feature = "async", unix))]
 impl StackPool {
     fn new(instance_limits: &InstanceLimits, stack_size: usize) -> Result<Self> {
         let page_size = region::page::size();
 
-        // On Windows, don't allocate any fiber stacks as native fibers are always used
         // Add a page to the stack size for the guard page when using fiber stacks
-        let stack_size = if cfg!(windows) || stack_size == 0 {
+        let stack_size = if stack_size == 0 {
             0
         } else {
             round_up_to_pow2(stack_size, page_size)
@@ -758,8 +763,10 @@
         })
     }
 
-    fn allocate(&self, strategy: PoolingAllocationStrategy) -> Result<*mut u8, FiberStackError> {
-        // Stacks are not supported if nothing was allocated
+    fn allocate(
+        &self,
+        strategy: PoolingAllocationStrategy,
+    ) -> Result<wasmtime_fiber::FiberStack, FiberStackError> {
         if self.stack_size == 0 {
             return Err(FiberStackError::NotSupported);
         }
@@ -787,18 +794,15 @@
             commit_stack_pages(bottom_of_stack, size_without_guard)
                 .map_err(FiberStackError::Resource)?;
 
-            // The top of the stack should be returned
-            Ok(bottom_of_stack.add(size_without_guard))
+            wasmtime_fiber::FiberStack::from_top_ptr(bottom_of_stack.add(size_without_guard))
+                .map_err(|e| FiberStackError::Resource(e.into()))
         }
     }
 
-    fn deallocate(&self, top_of_stack: *mut u8) {
-        debug_assert!(!top_of_stack.is_null());
-
-        unsafe {
+    fn deallocate(&self, stack: &wasmtime_fiber::FiberStack) {
         // Remove the guard page from the size
         let stack_size = self.stack_size - self.page_size;
-        let bottom_of_stack = top_of_stack.sub(stack_size);
+        let bottom_of_stack = unsafe { stack.top().unwrap().sub(stack_size) };
 
         let base = self.mapping.as_ptr() as usize;
         let start_of_stack = (bottom_of_stack as usize) - self.page_size;
@@ -813,7 +817,6 @@
         self.free_list.lock().unwrap().push(index);
     }
-    }
 }
 
 /// Implements the pooling instance allocator.
@@ -828,7 +831,10 @@ pub struct PoolingInstanceAllocator {
     instance_limits: InstanceLimits,
     // This is manually drop so that the pools unmap their memory before the page fault handler drops.
     instances: mem::ManuallyDrop<InstancePool>,
+    #[cfg(all(feature = "async", unix))]
     stacks: StackPool,
+    #[cfg(all(feature = "async", windows))]
+    stack_size: usize,
     #[cfg(all(feature = "uffd", target_os = "linux"))]
     _fault_handler: imp::PageFaultHandler,
 }
@@ -839,7 +845,7 @@ impl PoolingInstanceAllocator {
         strategy: PoolingAllocationStrategy,
         module_limits: ModuleLimits,
         mut instance_limits: InstanceLimits,
-        stack_size: usize,
+        #[cfg(feature = "async")] stack_size: usize,
     ) -> Result<Self> {
         if instance_limits.count == 0 {
             bail!("the instance count limit cannot be zero");
@@ -857,7 +863,6 @@
             min(instance_limits.memory_reservation_size, 0x200000000);
 
         let instances = InstancePool::new(&module_limits, &instance_limits)?;
-        let stacks = StackPool::new(&instance_limits, stack_size)?;
 
         #[cfg(all(feature = "uffd", target_os = "linux"))]
         let _fault_handler = imp::PageFaultHandler::new(&instances)?;
@@ -867,7 +872,10 @@
             module_limits,
             instance_limits,
             instances: mem::ManuallyDrop::new(instances),
-            stacks,
+            #[cfg(all(feature = "async", unix))]
+            stacks: StackPool::new(&instance_limits, stack_size)?,
+            #[cfg(all(feature = "async", windows))]
+            stack_size,
             #[cfg(all(feature = "uffd", target_os = "linux"))]
             _fault_handler,
         })
@@ -956,13 +964,31 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
         self.instances.deallocate(handle);
     }
 
-    fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError> {
+    #[cfg(all(feature = "async", unix))]
+    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack, FiberStackError> {
         self.stacks.allocate(self.strategy)
     }
 
-    unsafe fn deallocate_fiber_stack(&self, stack: *mut u8) {
+    #[cfg(all(feature = "async", unix))]
+    unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack) {
         self.stacks.deallocate(stack);
     }
+
+    #[cfg(all(feature = "async", windows))]
+    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack, FiberStackError> {
+        if self.stack_size == 0 {
+            return Err(FiberStackError::NotSupported);
+        }
+
+        // On windows, we don't use a stack pool as we use the native fiber implementation
+        wasmtime_fiber::FiberStack::new(self.stack_size)
+            .map_err(|e| FiberStackError::Resource(e.into()))
+    }
+
+    #[cfg(all(feature = "async", windows))]
+    unsafe fn deallocate_fiber_stack(&self, _stack: &wasmtime_fiber::FiberStack) {
+        // A no-op as we don't own the fiber stack on Windows
+    }
 }
 
 #[cfg(test)]
@@ -1470,7 +1496,7 @@ mod test {
         Ok(())
     }
 
-    #[cfg(all(unix, target_pointer_width = "64"))]
+    #[cfg(all(unix, target_pointer_width = "64", feature = "async"))]
     #[test]
     fn test_stack_pool() -> Result<()> {
         let pool = StackPool::new(
@@ -1497,7 +1523,10 @@
             let stack = pool
                 .allocate(PoolingAllocationStrategy::NextAvailable)
                 .expect("allocation should succeed");
-            assert_eq!(((stack as usize - base) / pool.stack_size) - 1, i);
+            assert_eq!(
+                ((stack.top().unwrap() as usize - base) / pool.stack_size) - 1,
+                i
+            );
             stacks.push(stack);
         }
@@ -1512,7 +1541,7 @@
         };
 
         for stack in stacks {
-            pool.deallocate(stack);
+            pool.deallocate(&stack);
         }
 
         assert_eq!(
@@ -1611,13 +1640,13 @@
         for _ in 0..10 {
             let stack = allocator.allocate_fiber_stack()?;
 
-            // The stack pointer is at the top, so decerement it first
-            let addr = stack.sub(1);
+            // The stack pointer is at the top, so decrement it first
+            let addr = stack.top().unwrap().sub(1);
 
             assert_eq!(*addr, 0);
             *addr = 1;
 
-            allocator.deallocate_fiber_stack(stack);
+            allocator.deallocate_fiber_stack(&stack);
         }
     }
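
For reference, the `StackPool` slot arithmetic works out as follows (a worked
example with hypothetical numbers: a 4 KiB page size and a 4 KiB requested
stack size, giving an 8 KiB per-slot stride including the guard page):

    // base = start of the pool mapping, stack_size (slot stride) = 8192
    //
    // Slot i = 2:
    //   start_of_stack  = base + 2 * 8192         // guard page lives here
    //   bottom_of_stack = start_of_stack + 4096   // first usable byte
    //   top             = bottom_of_stack + 4096  = base + 3 * 8192
    //
    // deallocate() inverts this from the top pointer:
    //   bottom_of_stack = top - (stack_size - page_size) = base + 2 * 8192 + 4096
    //   start_of_stack  = bottom_of_stack - page_size    = base + 2 * 8192
    //   index           = (start_of_stack - base) / stack_size = 2
    //
    // which matches the test's `((top - base) / stack_size) - 1 == i`.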

View File

@@ -48,11 +48,13 @@ pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
     decommit(addr, len, false)
 }
 
+#[cfg(feature = "async")]
 pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
     // A no-op as stack pages remain READ|WRITE
     Ok(())
 }
 
+#[cfg(feature = "async")]
 pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
     decommit(addr, len, false)
 }

View File

@@ -79,11 +79,13 @@ pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
     decommit(addr, len)
 }
 
+#[cfg(feature = "async")]
 pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
     // A no-op as stack pages remain READ|WRITE
     Ok(())
 }
 
+#[cfg(feature = "async")]
 pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
     decommit(addr, len)
 }

View File

@@ -58,11 +58,13 @@ pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
     decommit(addr, len, false)
 }
 
+#[cfg(feature = "async")]
 pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
     // A no-op as stack pages remain READ|WRITE
     Ok(())
 }
 
+#[cfg(feature = "async")]
 pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
     decommit(addr, len, false)
 }

View File

@@ -45,11 +45,3 @@ pub fn commit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
 pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
     decommit(addr, len)
 }
-
-pub fn commit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
-    commit(addr, len)
-}
-
-pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
-    decommit(addr, len)
-}

View File

@@ -38,7 +38,7 @@ pub use crate::export::*;
 pub use crate::externref::*;
 pub use crate::imports::Imports;
 pub use crate::instance::{
-    FiberStackError, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstanceLimits,
+    InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstanceLimits,
     InstantiationError, LinkError, ModuleLimits, OnDemandInstanceAllocator,
     PoolingAllocationStrategy, PoolingInstanceAllocator, RuntimeInstance,
 };

View File

@@ -86,8 +86,7 @@ unsafe extern "C" fn trap_handler(
         // handling, and reset our trap handling flag. Then we figure
         // out what to do based on the result of the trap handling.
         let pc = get_pc(context);
-        let jmp_buf =
-            info.jmp_buf_if_trap(pc, |handler| handler(signum, siginfo, context));
+        let jmp_buf = info.jmp_buf_if_trap(pc, |handler| handler(signum, siginfo, context));
 
         // Figure out what to do based on the result of this handling of
         // the trap. Note that our sentinel value of 1 means that the

View File

@@ -72,7 +72,7 @@ experimental_x64 = ["wasmtime-jit/experimental_x64"]
 # Enables support for "async stores" as well as defining host functions as
 # `async fn` and calling functions asynchronously.
-async = ["wasmtime-fiber"]
+async = ["wasmtime-fiber", "wasmtime-runtime/async"]
 
 # Enables userfaultfd support in the runtime's pooling allocator when building on Linux
 uffd = ["wasmtime-runtime/uffd"]

View File

@@ -1332,25 +1332,20 @@
         match self.allocation_strategy {
             InstanceAllocationStrategy::OnDemand => Ok(Box::new(OnDemandInstanceAllocator::new(
                 self.mem_creator.clone(),
+                #[cfg(feature = "async")]
+                self.async_stack_size,
             ))),
             InstanceAllocationStrategy::Pooling {
                 strategy,
                 module_limits,
                 instance_limits,
-            } => {
-                #[cfg(feature = "async")]
-                let stack_size = self.async_stack_size;
-
-                #[cfg(not(feature = "async"))]
-                let stack_size = 0;
-
-                Ok(Box::new(PoolingInstanceAllocator::new(
+            } => Ok(Box::new(PoolingInstanceAllocator::new(
                 strategy.into(),
                 module_limits.into(),
                 instance_limits.into(),
-                stack_size,
-            )?))
-            }
+                #[cfg(feature = "async")]
+                self.async_stack_size,
+            )?)),
         }
     }
 }
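
For context, `async_stack_size` is the embedder-facing knob feeding both
allocators (a hedged sketch; the setter name is assumed from the 0.25-era
`wasmtime::Config` API and is not shown in this diff):

    use wasmtime::Config;

    fn configure() -> Config {
        let mut config = Config::new();
        // Size of the stacks later handed out by `allocate_fiber_stack`;
        // requires the `async` feature of the `wasmtime` crate.
        config.async_stack_size(2 << 20); // 2 MiB
        config
    }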

View File

@@ -106,7 +106,7 @@ impl HostFunc {
 impl Drop for HostFunc {
     fn drop(&mut self) {
         // Host functions are always allocated with the default (on-demand) allocator
-        unsafe { OnDemandInstanceAllocator::new(None).deallocate(&self.instance) }
+        unsafe { OnDemandInstanceAllocator::default().deallocate(&self.instance) }
     }
 }

View File

@@ -219,7 +219,7 @@ impl Store {
     pub fn get<T: Any>(&self) -> Option<&T> {
         let values = self.inner.context_values.borrow();
 
-        // Safety: a context value cannot be removed once added and therefore the addres is
+        // Safety: a context value cannot be removed once added and therefore the address is
         // stable for the life of the store
         values
             .get(&TypeId::of::<T>())
@@ -740,9 +740,15 @@
         debug_assert!(self.async_support());
         debug_assert!(config.async_stack_size > 0);
 
-        type SuspendType = wasmtime_fiber::Suspend<Result<(), Trap>, (), Result<(), Trap>>;
+        let stack = self
+            .inner
+            .engine
+            .allocator()
+            .allocate_fiber_stack()
+            .map_err(|e| Trap::from(anyhow::Error::from(e)))?;
 
         let mut slot = None;
-        let func = |keep_going, suspend: &SuspendType| {
+        let fiber = wasmtime_fiber::Fiber::new(stack, |keep_going, suspend| {
             // First check and see if we were interrupted/dropped, and only
             // continue if we haven't been.
             keep_going?;
@@ -760,46 +766,19 @@
             slot = Some(func());
             Ok(())
-        };
-
-        let (fiber, stack) = match self.inner.engine.allocator().allocate_fiber_stack() {
-            Ok(stack) => {
-                // Use the returned stack and deallocate it when finished
-                (
-                    unsafe {
-                        wasmtime_fiber::Fiber::new_with_stack(stack, func)
-                            .map_err(|e| Trap::from(anyhow::Error::from(e)))?
-                    },
-                    stack,
-                )
-            }
-            Err(wasmtime_runtime::FiberStackError::NotSupported) => {
-                // The allocator doesn't support custom fiber stacks for the current platform
-                // Request that the fiber itself allocate the stack
-                (
-                    wasmtime_fiber::Fiber::new(config.async_stack_size, func)
-                        .map_err(|e| Trap::from(anyhow::Error::from(e)))?,
-                    std::ptr::null_mut(),
-                )
-            }
-            Err(e) => return Err(Trap::from(anyhow::Error::from(e))),
-        };
+        })
+        .map_err(|e| Trap::from(anyhow::Error::from(e)))?;
 
         // Once we have the fiber representing our synchronous computation, we
         // wrap that in a custom future implementation which does the
         // translation from the future protocol to our fiber API.
-        FiberFuture {
-            fiber,
-            store: self,
-            stack,
-        }
-        .await?;
+        FiberFuture { fiber, store: self }.await?;
 
         return Ok(slot.unwrap());
 
         struct FiberFuture<'a> {
             fiber: wasmtime_fiber::Fiber<'a, Result<(), Trap>, (), Result<(), Trap>>,
             store: &'a Store,
-            stack: *mut u8,
         }
@@ -807,7 +786,7 @@
             fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
                 // We need to carry over this `cx` into our fiber's runtime
-                // for when it trys to poll sub-futures that are created. Doing
+                // for when it tries to poll sub-futures that are created. Doing
                 // this must be done unsafely, however, since `cx` is only alive
                 // for this one singular function call. Here we do a `transmute`
                 // to extend the lifetime of `Context` so it can be stored in
@@ -864,13 +843,12 @@
                     // callers that they shouldn't be doing that.
                     debug_assert!(result.is_ok());
                 }
-                if !self.stack.is_null() {
                 unsafe {
                     self.store
                         .engine()
                         .allocator()
-                        .deallocate_fiber_stack(self.stack)
-                    };
+                        .deallocate_fiber_stack(self.fiber.stack());
                 }
             }
         }
@@ -999,7 +977,7 @@ impl fmt::Debug for Store {
 impl Drop for StoreInner {
     fn drop(&mut self) {
         let allocator = self.engine.allocator();
-        let ondemand = OnDemandInstanceAllocator::new(self.engine.config().mem_creator.clone());
+        let ondemand = OnDemandInstanceAllocator::default();
         for instance in self.instances.borrow().iter() {
             unsafe {
                 if instance.ondemand {

View File

@@ -62,10 +62,15 @@ fn create_handle(
     imports.functions = func_imports;
 
     unsafe {
+        let config = store.engine().config();
+
         // Use the on-demand allocator when creating handles associated with host objects
         // The configured instance allocator should only be used when creating module instances
         // as we don't want host objects to count towards instance limits.
-        let handle = OnDemandInstanceAllocator::new(store.engine().config().mem_creator.clone())
+        let handle = OnDemandInstanceAllocator::new(
+            config.mem_creator.clone(),
+            #[cfg(feature = "async")]
+            config.async_stack_size,
+        )
         .allocate(InstanceAllocationRequest {
             module: Arc::new(module),
             finished_functions: &finished_functions,
View File

@@ -276,7 +276,7 @@ pub fn create_function(
     unsafe {
         Ok((
-            OnDemandInstanceAllocator::new(None).allocate(InstanceAllocationRequest {
+            OnDemandInstanceAllocator::default().allocate(InstanceAllocationRequest {
                 module: Arc::new(module),
                 finished_functions: &finished_functions,
                 imports: Imports::default(),
@@ -308,7 +308,7 @@ pub unsafe fn create_raw_function(
     finished_functions.push(func);
 
     Ok(
-        OnDemandInstanceAllocator::new(None).allocate(InstanceAllocationRequest {
+        OnDemandInstanceAllocator::default().allocate(InstanceAllocationRequest {
             module: Arc::new(module),
             finished_functions: &finished_functions,
             imports: Imports::default(),