Use deterministic randomness fuzzing the pooling allocator (#5247)

This commit updates the index allocation performed in the pooling
allocator with a few refactorings:

* With `cfg(fuzzing)` a deterministic rng is now used to improve
  reproducibility of fuzz test cases.
* The `Mutex` was pushed inside of `IndexAllocator`, renamed from
  `PoolingAllocationState`.
* Random number generation is now always performed through a `SmallRng`
  stored in the `IndexAllocator` instead of using `thread_rng`.
* The `is_empty` method has been removed in favor of an `Option`-based
  return on `alloc`.

This refactoring additionally encapsulates more implementation details
of `IndexAllocator`, making it easier to swap in alternate
implementations in the future (such as lock-free approaches, should
they prove worthwhile).
This commit is contained in:
Alex Crichton
2022-11-10 14:53:04 -06:00
committed by GitHub
parent 42e88c7b24
commit 7ec626b898
3 changed files with 141 additions and 125 deletions

View File

@@ -24,7 +24,7 @@ use wasmtime_environ::{
};
mod index_allocator;
use index_allocator::{PoolingAllocationState, SlotId};
use index_allocator::{IndexAllocator, SlotId};
cfg_if::cfg_if! {
if #[cfg(windows)] {
@@ -119,7 +119,7 @@ struct InstancePool {
mapping: Mmap,
instance_size: usize,
max_instances: usize,
index_allocator: Mutex<PoolingAllocationState>,
index_allocator: IndexAllocator,
memories: MemoryPool,
tables: TablePool,
linear_memory_keep_resident: usize,
@@ -148,10 +148,7 @@ impl InstancePool {
mapping,
instance_size,
max_instances,
index_allocator: Mutex::new(PoolingAllocationState::new(
config.strategy,
max_instances,
)),
index_allocator: IndexAllocator::new(config.strategy, max_instances),
memories: MemoryPool::new(&config.limits, tunables)?,
tables: TablePool::new(&config.limits)?,
linear_memory_keep_resident: config.linear_memory_keep_resident,
@@ -221,21 +218,18 @@ impl InstancePool {
&self,
req: InstanceAllocationRequest,
) -> Result<InstanceHandle, InstantiationError> {
let index = {
let mut alloc = self.index_allocator.lock().unwrap();
if alloc.is_empty() {
return Err(InstantiationError::Limit(self.max_instances as u32));
}
alloc.alloc(req.runtime_info.unique_id()).index()
};
let id = self
.index_allocator
.alloc(req.runtime_info.unique_id())
.ok_or_else(|| InstantiationError::Limit(self.max_instances as u32))?;
match unsafe { self.initialize_instance(index, req) } {
match unsafe { self.initialize_instance(id.index(), req) } {
Ok(handle) => Ok(handle),
Err(e) => {
// If we failed to initialize the instance, there's no need to drop
// it as it was never "allocated", but we still need to free the
// instance's slot.
self.index_allocator.lock().unwrap().free(SlotId(index));
self.index_allocator.free(id);
Err(e)
}
}
@@ -267,7 +261,7 @@ impl InstancePool {
// touched again until we write a fresh Instance in-place with
// std::ptr::write in allocate() above.
self.index_allocator.lock().unwrap().free(SlotId(index));
self.index_allocator.free(SlotId(index));
}
fn allocate_instance_resources(
@@ -839,7 +833,7 @@ struct StackPool {
stack_size: usize,
max_instances: usize,
page_size: usize,
index_allocator: Mutex<PoolingAllocationState>,
index_allocator: IndexAllocator,
async_stack_zeroing: bool,
async_stack_keep_resident: usize,
}
@@ -893,10 +887,10 @@ impl StackPool {
// here: stacks do not benefit from being allocated to the
// same compiled module with the same image (they always
// start zeroed just the same for everyone).
index_allocator: Mutex::new(PoolingAllocationState::new(
index_allocator: IndexAllocator::new(
PoolingAllocationStrategy::NextAvailable,
max_instances,
)),
),
})
}
@@ -905,13 +899,11 @@ impl StackPool {
return Err(FiberStackError::NotSupported);
}
let index = {
let mut alloc = self.index_allocator.lock().unwrap();
if alloc.is_empty() {
return Err(FiberStackError::Limit(self.max_instances as u32));
}
alloc.alloc(None).index()
};
let index = self
.index_allocator
.alloc(None)
.ok_or(FiberStackError::Limit(self.max_instances as u32))?
.index();
assert!(index < self.max_instances);
@@ -958,7 +950,7 @@ impl StackPool {
self.zero_stack(bottom_of_stack, stack_size);
}
self.index_allocator.lock().unwrap().free(SlotId(index));
self.index_allocator.free(SlotId(index));
}
fn zero_stack(&self, bottom: usize, size: usize) {
@@ -1199,8 +1191,8 @@ mod test {
assert_eq!(instances.max_instances, 3);
assert_eq!(
instances.index_allocator.lock().unwrap().testing_freelist(),
&[SlotId(0), SlotId(1), SlotId(2)]
instances.index_allocator.testing_freelist(),
[SlotId(0), SlotId(1), SlotId(2)]
);
let mut handles = Vec::new();
@@ -1224,10 +1216,7 @@ mod test {
);
}
assert_eq!(
instances.index_allocator.lock().unwrap().testing_freelist(),
&[]
);
assert_eq!(instances.index_allocator.testing_freelist(), []);
match instances.allocate(InstanceAllocationRequest {
runtime_info: &empty_runtime_info(module),
@@ -1249,8 +1238,8 @@ mod test {
}
assert_eq!(
instances.index_allocator.lock().unwrap().testing_freelist(),
&[SlotId(2), SlotId(1), SlotId(0)]
instances.index_allocator.testing_freelist(),
[SlotId(2), SlotId(1), SlotId(0)]
);
Ok(())
@@ -1356,8 +1345,8 @@ mod test {
assert_eq!(pool.page_size, native_page_size);
assert_eq!(
pool.index_allocator.lock().unwrap().testing_freelist(),
&[
pool.index_allocator.testing_freelist(),
[
SlotId(0),
SlotId(1),
SlotId(2),
@@ -1383,7 +1372,7 @@ mod test {
stacks.push(stack);
}
assert_eq!(pool.index_allocator.lock().unwrap().testing_freelist(), &[]);
assert_eq!(pool.index_allocator.testing_freelist(), []);
match pool.allocate().unwrap_err() {
FiberStackError::Limit(10) => {}
@@ -1395,8 +1384,8 @@ mod test {
}
assert_eq!(
pool.index_allocator.lock().unwrap().testing_freelist(),
&[
pool.index_allocator.testing_freelist(),
[
SlotId(9),
SlotId(8),
SlotId(7),