From e6dda413a45c5cb922c9bbeee5dc64eb982bda0c Mon Sep 17 00:00:00 2001
From: Peter Huene
Date: Sat, 20 Mar 2021 00:05:08 -0700
Subject: [PATCH] Code review feedback.

* Add an assert to `StackPool::deallocate` to ensure the fiber stack given
  to it comes from the pool.
* Remove outdated comment about Windows and stacks, as the allocator now
  returns fiber stacks.
* Remove conditional compilation around `stack_size` in the allocators as it
  was just clutter.
---
 crates/runtime/src/instance/allocator.rs      |  8 +----
 .../runtime/src/instance/allocator/pooling.rs | 25 +++++++++------
 crates/wasmtime/src/config.rs                 | 12 ++++---
 crates/wasmtime/src/trampoline.rs             | 31 +++++++++----------
 4 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/crates/runtime/src/instance/allocator.rs b/crates/runtime/src/instance/allocator.rs
index b8f2b3e3d8..e8e1e654af 100644
--- a/crates/runtime/src/instance/allocator.rs
+++ b/crates/runtime/src/instance/allocator.rs
@@ -536,19 +536,14 @@ unsafe fn initialize_vmcontext_globals(instance: &Instance) {
 #[derive(Clone)]
 pub struct OnDemandInstanceAllocator {
     mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
-    #[cfg(feature = "async")]
     stack_size: usize,
 }
 
 impl OnDemandInstanceAllocator {
     /// Creates a new on-demand instance allocator.
-    pub fn new(
-        mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
-        #[cfg(feature = "async")] stack_size: usize,
-    ) -> Self {
+    pub fn new(mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>, stack_size: usize) -> Self {
         Self {
             mem_creator,
-            #[cfg(feature = "async")]
             stack_size,
         }
     }
@@ -586,7 +581,6 @@ impl Default for OnDemandInstanceAllocator {
     fn default() -> Self {
         Self {
             mem_creator: None,
-            #[cfg(feature = "async")]
             stack_size: 0,
         }
     }
diff --git a/crates/runtime/src/instance/allocator/pooling.rs b/crates/runtime/src/instance/allocator/pooling.rs
index 70247a9e62..c63f4198a3 100644
--- a/crates/runtime/src/instance/allocator/pooling.rs
+++ b/crates/runtime/src/instance/allocator/pooling.rs
@@ -723,7 +723,6 @@ impl StackPool {
     fn new(instance_limits: &InstanceLimits, stack_size: usize) -> Result<Self> {
         let page_size = region::page::size();
 
-        // On Windows, don't allocate any fiber stacks as native fibers are always used
         // Add a page to the stack size for the guard page when using fiber stacks
         let stack_size = if stack_size == 0 {
             0
@@ -800,20 +799,28 @@ impl StackPool {
     }
 
     fn deallocate(&self, stack: &wasmtime_fiber::FiberStack) {
-        // Remove the guard page from the size
-        let stack_size = self.stack_size - self.page_size;
-        let bottom_of_stack = unsafe { stack.top().unwrap().sub(stack_size) };
+        let top = stack
+            .top()
+            .expect("fiber stack not allocated from the pool") as usize;
 
         let base = self.mapping.as_ptr() as usize;
-        let start_of_stack = (bottom_of_stack as usize) - self.page_size;
+        let len = self.mapping.len();
+        assert!(
+            top > base && top <= (base + len),
+            "fiber stack top pointer not in range"
+        );
 
-        debug_assert!(start_of_stack >= base && start_of_stack < (base + self.mapping.len()));
+        // Remove the guard page from the size
+        let stack_size = self.stack_size - self.page_size;
+        let bottom_of_stack = top - stack_size;
+        let start_of_stack = bottom_of_stack - self.page_size;
+        debug_assert!(start_of_stack >= base && start_of_stack < (base + len));
         debug_assert!((start_of_stack - base) % self.stack_size == 0);
 
         let index = (start_of_stack - base) / self.stack_size;
         debug_assert!(index < self.max_instances);
 
-        decommit_stack_pages(bottom_of_stack, stack_size).unwrap();
+        decommit_stack_pages(bottom_of_stack as _, stack_size).unwrap();
 
         self.free_list.lock().unwrap().push(index);
     }
@@ -833,7 +840,6 @@ pub struct PoolingInstanceAllocator {
     instances: mem::ManuallyDrop<InstancePool>,
     #[cfg(all(feature = "async", unix))]
     stacks: StackPool,
-    #[cfg(all(feature = "async", windows))]
     stack_size: usize,
     #[cfg(all(feature = "uffd", target_os = "linux"))]
     _fault_handler: imp::PageFaultHandler,
@@ -845,7 +851,7 @@ impl PoolingInstanceAllocator {
         strategy: PoolingAllocationStrategy,
         module_limits: ModuleLimits,
         mut instance_limits: InstanceLimits,
-        #[cfg(feature = "async")] stack_size: usize,
+        stack_size: usize,
     ) -> Result<Self> {
         if instance_limits.count == 0 {
             bail!("the instance count limit cannot be zero");
         }
@@ -874,7 +880,6 @@ impl PoolingInstanceAllocator {
             instances: mem::ManuallyDrop::new(instances),
             #[cfg(all(feature = "async", unix))]
             stacks: StackPool::new(&instance_limits, stack_size)?,
-            #[cfg(all(feature = "async", windows))]
             stack_size,
             #[cfg(all(feature = "uffd", target_os = "linux"))]
             _fault_handler,
diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs
index af5b48d797..05296216f9 100644
--- a/crates/wasmtime/src/config.rs
+++ b/crates/wasmtime/src/config.rs
@@ -1329,11 +1329,16 @@ impl Config {
     }
 
     pub(crate) fn build_allocator(&self) -> Result<Box<dyn InstanceAllocator>> {
+        #[cfg(feature = "async")]
+        let stack_size = self.async_stack_size;
+
+        #[cfg(not(feature = "async"))]
+        let stack_size = 0;
+
         match self.allocation_strategy {
             InstanceAllocationStrategy::OnDemand => Ok(Box::new(OnDemandInstanceAllocator::new(
                 self.mem_creator.clone(),
-                #[cfg(feature = "async")]
-                self.async_stack_size,
+                stack_size,
             ))),
             InstanceAllocationStrategy::Pooling {
                 strategy,
@@ -1343,8 +1348,7 @@ impl Config {
                 strategy.into(),
                 module_limits.into(),
                 instance_limits.into(),
-                #[cfg(feature = "async")]
-                self.async_stack_size,
+                stack_size,
             )?)),
         }
     }
diff --git a/crates/wasmtime/src/trampoline.rs b/crates/wasmtime/src/trampoline.rs
index 49b2b32e14..6bb97f855a 100644
--- a/crates/wasmtime/src/trampoline.rs
+++ b/crates/wasmtime/src/trampoline.rs
@@ -66,23 +66,20 @@ fn create_handle(
     // Use the on-demand allocator when creating handles associated with host objects
     // The configured instance allocator should only be used when creating module instances
     // as we don't want host objects to count towards instance limits.
-    let handle = OnDemandInstanceAllocator::new(
-        config.mem_creator.clone(),
-        #[cfg(feature = "async")]
-        config.async_stack_size,
-    )
-    .allocate(InstanceAllocationRequest {
-        module: Arc::new(module),
-        finished_functions: &finished_functions,
-        imports,
-        lookup_shared_signature: &|_| shared_signature_id.unwrap(),
-        host_state,
-        interrupts: store.interrupts(),
-        externref_activations_table: store.externref_activations_table()
-            as *const VMExternRefActivationsTable
-            as *mut _,
-        stack_map_registry: store.stack_map_registry() as *const StackMapRegistry as *mut _,
-    })?;
+    let handle = OnDemandInstanceAllocator::new(config.mem_creator.clone(), 0).allocate(
+        InstanceAllocationRequest {
+            module: Arc::new(module),
+            finished_functions: &finished_functions,
+            imports,
+            lookup_shared_signature: &|_| shared_signature_id.unwrap(),
+            host_state,
+            interrupts: store.interrupts(),
+            externref_activations_table: store.externref_activations_table()
+                as *const VMExternRefActivationsTable
+                as *mut _,
+            stack_map_registry: store.stack_map_registry() as *const StackMapRegistry as *mut _,
+        },
+    )?;
 
     Ok(store.add_instance(handle, true))
 }
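
For reviewers skimming the new `deallocate` hunk, the slot lookup boils down to a few
lines of pointer arithmetic. The following is a standalone sketch, not part of the
patch: `stack_index`, its parameters, and the sizes in `main` are made up for
illustration, while the real pool reads the equivalent values from its own memory
mapping and configuration.

    // Standalone sketch (not part of the patch): mirrors the pointer arithmetic that
    // the reworked `StackPool::deallocate` performs. The free function, its
    // parameters, and the sizes below are hypothetical.
    fn stack_index(base: usize, len: usize, stack_size: usize, page_size: usize, top: usize) -> usize {
        // A fiber stack handed back to the pool must point into the pool's mapping.
        assert!(
            top > base && top <= base + len,
            "fiber stack top pointer not in range"
        );

        // Each slot is `stack_size` bytes: one guard page followed by the usable
        // stack, with `top` addressing the end of the usable region.
        let usable = stack_size - page_size;
        let bottom_of_stack = top - usable;
        let start_of_stack = bottom_of_stack - page_size;
        debug_assert!((start_of_stack - base) % stack_size == 0);

        (start_of_stack - base) / stack_size
    }

    fn main() {
        let page_size = 4096;
        let stack_size = 0x10_0000 + page_size; // 1 MiB usable stack plus one guard page
        let base = 0x7000_0000;
        let len = 10 * stack_size; // room for ten stacks

        // The top of slot 2 sits exactly three slot lengths above the mapping base.
        let top = base + 3 * stack_size;
        assert_eq!(stack_index(base, len, stack_size, page_size, top), 2);
    }

Because a valid `top` lands exactly one slot boundary above `start_of_stack`, the
divisibility `debug_assert!` additionally catches (in debug builds) stacks that point
into the mapping but were never handed out by the pool.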