Code review feedback.

* Add an assert to `StackPool::deallocate` to ensure the fiber stack given to it
  comes from the pool.
* Remove an outdated comment about Windows and stacks, as the allocator now
  returns fiber stacks.
* Remove conditional compilation around `stack_size` in the allocators as it
  was just clutter.
This commit is contained in:
Peter Huene
2021-03-20 00:05:08 -07:00
parent f556bd18a7
commit e6dda413a4
4 changed files with 38 additions and 38 deletions

View File

@@ -536,19 +536,14 @@ unsafe fn initialize_vmcontext_globals(instance: &Instance) {
#[derive(Clone)] #[derive(Clone)]
pub struct OnDemandInstanceAllocator { pub struct OnDemandInstanceAllocator {
mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>, mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
#[cfg(feature = "async")]
stack_size: usize, stack_size: usize,
} }
impl OnDemandInstanceAllocator { impl OnDemandInstanceAllocator {
/// Creates a new on-demand instance allocator. /// Creates a new on-demand instance allocator.
pub fn new( pub fn new(mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>, stack_size: usize) -> Self {
mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
#[cfg(feature = "async")] stack_size: usize,
) -> Self {
Self { Self {
mem_creator, mem_creator,
#[cfg(feature = "async")]
stack_size, stack_size,
} }
} }
@@ -586,7 +581,6 @@ impl Default for OnDemandInstanceAllocator {
fn default() -> Self { fn default() -> Self {
Self { Self {
mem_creator: None, mem_creator: None,
#[cfg(feature = "async")]
stack_size: 0, stack_size: 0,
} }
} }

View File

@@ -723,7 +723,6 @@ impl StackPool {
fn new(instance_limits: &InstanceLimits, stack_size: usize) -> Result<Self> { fn new(instance_limits: &InstanceLimits, stack_size: usize) -> Result<Self> {
let page_size = region::page::size(); let page_size = region::page::size();
// On Windows, don't allocate any fiber stacks as native fibers are always used
// Add a page to the stack size for the guard page when using fiber stacks // Add a page to the stack size for the guard page when using fiber stacks
let stack_size = if stack_size == 0 { let stack_size = if stack_size == 0 {
0 0
@@ -800,20 +799,28 @@ impl StackPool {
} }
fn deallocate(&self, stack: &wasmtime_fiber::FiberStack) { fn deallocate(&self, stack: &wasmtime_fiber::FiberStack) {
// Remove the guard page from the size let top = stack
let stack_size = self.stack_size - self.page_size; .top()
let bottom_of_stack = unsafe { stack.top().unwrap().sub(stack_size) }; .expect("fiber stack not allocated from the pool") as usize;
let base = self.mapping.as_ptr() as usize; let base = self.mapping.as_ptr() as usize;
let start_of_stack = (bottom_of_stack as usize) - self.page_size; let len = self.mapping.len();
assert!(
top > base && top <= (base + len),
"fiber stack top pointer not in range"
);
debug_assert!(start_of_stack >= base && start_of_stack < (base + self.mapping.len())); // Remove the guard page from the size
let stack_size = self.stack_size - self.page_size;
let bottom_of_stack = top - stack_size;
let start_of_stack = bottom_of_stack - self.page_size;
debug_assert!(start_of_stack >= base && start_of_stack < (base + len));
debug_assert!((start_of_stack - base) % self.stack_size == 0); debug_assert!((start_of_stack - base) % self.stack_size == 0);
let index = (start_of_stack - base) / self.stack_size; let index = (start_of_stack - base) / self.stack_size;
debug_assert!(index < self.max_instances); debug_assert!(index < self.max_instances);
decommit_stack_pages(bottom_of_stack, stack_size).unwrap(); decommit_stack_pages(bottom_of_stack as _, stack_size).unwrap();
self.free_list.lock().unwrap().push(index); self.free_list.lock().unwrap().push(index);
} }
@@ -833,7 +840,6 @@ pub struct PoolingInstanceAllocator {
instances: mem::ManuallyDrop<InstancePool>, instances: mem::ManuallyDrop<InstancePool>,
#[cfg(all(feature = "async", unix))] #[cfg(all(feature = "async", unix))]
stacks: StackPool, stacks: StackPool,
#[cfg(all(feature = "async", windows))]
stack_size: usize, stack_size: usize,
#[cfg(all(feature = "uffd", target_os = "linux"))] #[cfg(all(feature = "uffd", target_os = "linux"))]
_fault_handler: imp::PageFaultHandler, _fault_handler: imp::PageFaultHandler,
@@ -845,7 +851,7 @@ impl PoolingInstanceAllocator {
strategy: PoolingAllocationStrategy, strategy: PoolingAllocationStrategy,
module_limits: ModuleLimits, module_limits: ModuleLimits,
mut instance_limits: InstanceLimits, mut instance_limits: InstanceLimits,
#[cfg(feature = "async")] stack_size: usize, stack_size: usize,
) -> Result<Self> { ) -> Result<Self> {
if instance_limits.count == 0 { if instance_limits.count == 0 {
bail!("the instance count limit cannot be zero"); bail!("the instance count limit cannot be zero");
@@ -874,7 +880,6 @@ impl PoolingInstanceAllocator {
instances: mem::ManuallyDrop::new(instances), instances: mem::ManuallyDrop::new(instances),
#[cfg(all(feature = "async", unix))] #[cfg(all(feature = "async", unix))]
stacks: StackPool::new(&instance_limits, stack_size)?, stacks: StackPool::new(&instance_limits, stack_size)?,
#[cfg(all(feature = "async", windows))]
stack_size, stack_size,
#[cfg(all(feature = "uffd", target_os = "linux"))] #[cfg(all(feature = "uffd", target_os = "linux"))]
_fault_handler, _fault_handler,

View File

@@ -1329,11 +1329,16 @@ impl Config {
} }
pub(crate) fn build_allocator(&self) -> Result<Box<dyn InstanceAllocator>> { pub(crate) fn build_allocator(&self) -> Result<Box<dyn InstanceAllocator>> {
#[cfg(feature = "async")]
let stack_size = self.async_stack_size;
#[cfg(not(feature = "async"))]
let stack_size = 0;
match self.allocation_strategy { match self.allocation_strategy {
InstanceAllocationStrategy::OnDemand => Ok(Box::new(OnDemandInstanceAllocator::new( InstanceAllocationStrategy::OnDemand => Ok(Box::new(OnDemandInstanceAllocator::new(
self.mem_creator.clone(), self.mem_creator.clone(),
#[cfg(feature = "async")] stack_size,
self.async_stack_size,
))), ))),
InstanceAllocationStrategy::Pooling { InstanceAllocationStrategy::Pooling {
strategy, strategy,
@@ -1343,8 +1348,7 @@ impl Config {
strategy.into(), strategy.into(),
module_limits.into(), module_limits.into(),
instance_limits.into(), instance_limits.into(),
#[cfg(feature = "async")] stack_size,
self.async_stack_size,
)?)), )?)),
} }
} }

View File

@@ -66,23 +66,20 @@ fn create_handle(
// Use the on-demand allocator when creating handles associated with host objects // Use the on-demand allocator when creating handles associated with host objects
// The configured instance allocator should only be used when creating module instances // The configured instance allocator should only be used when creating module instances
// as we don't want host objects to count towards instance limits. // as we don't want host objects to count towards instance limits.
let handle = OnDemandInstanceAllocator::new( let handle = OnDemandInstanceAllocator::new(config.mem_creator.clone(), 0).allocate(
config.mem_creator.clone(), InstanceAllocationRequest {
#[cfg(feature = "async")] module: Arc::new(module),
config.async_stack_size, finished_functions: &finished_functions,
) imports,
.allocate(InstanceAllocationRequest { lookup_shared_signature: &|_| shared_signature_id.unwrap(),
module: Arc::new(module), host_state,
finished_functions: &finished_functions, interrupts: store.interrupts(),
imports, externref_activations_table: store.externref_activations_table()
lookup_shared_signature: &|_| shared_signature_id.unwrap(), as *const VMExternRefActivationsTable
host_state, as *mut _,
interrupts: store.interrupts(), stack_map_registry: store.stack_map_registry() as *const StackMapRegistry as *mut _,
externref_activations_table: store.externref_activations_table() },
as *const VMExternRefActivationsTable )?;
as *mut _,
stack_map_registry: store.stack_map_registry() as *const StackMapRegistry as *mut _,
})?;
Ok(store.add_instance(handle, true)) Ok(store.add_instance(handle, true))
} }