Clear affine slots when dropping a Module (#5321)
* Clear affine slots when dropping a `Module`

This commit implements a resource usage optimization for Wasmtime with the
pooling instance allocator by ensuring that when a `Module` is dropped its
backing virtual memory mappings are all removed. Currently when a `Module` is
dropped it releases a strong reference to its internal memory image, but the
memory image may stick around in individual pooling instance allocator slots.
When using the `Random` allocation strategy, for example, this means that the
memory images could stick around for a long time.

While not a pressing issue, this has resource usage implications for Wasmtime:
removing a `Module` does not guarantee that the memfd, if in use for a memory
image, is closed and deallocated within the kernel. Unfortunately simply
closing the memfd is not sufficient; the mappings into the address space
additionally all need to be removed for the kernel to release the resources
for the memfd. This means that to release all kernel-level resources for a
`Module`, every slot which has the memory image mapped in must be reset.

This problem isn't particularly present when using the `NextAvailable`
allocation strategy since the number of lingering memfds is proportional to
the maximum concurrent number of wasm instances. With the `Random` and
`ReuseAffinity` strategies, however, it's much more prominent because the
number of lingering memfds can reach the total number of slots available. This
can appear as a leak of kernel-level memory which can cause other system
instability.

To fix this issue this commit adds the necessary instrumentation to `Drop for
Module` to purge all references to the module in the pooling instance
allocator. All index allocation strategies now maintain affinity tracking to
ensure that, regardless of the strategy in use, a module that is dropped will
remove all of its memory mappings. A new allocation method was added to the
index allocator for allocating an index without setting affinity, and only
allocating affine slots. This is used to iterate over all the affine slots
without holding the global index lock for an unnecessarily long time while
mappings are removed.

* Review comments
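As background for the memfd discussion above, the following standalone,
Linux-only sketch (illustrative only, not part of this change; it assumes the
`libc` crate as a dependency) demonstrates the kernel behavior in question:
closing a memfd does not release its pages while a mapping of it remains.

    // Demonstrates that a memfd's kernel resources outlive its file
    // descriptor as long as a mapping exists. Linux-only; `libc` crate.
    use std::ffi::CString;

    fn main() {
        unsafe {
            let name = CString::new("image").unwrap();
            let fd = libc::memfd_create(name.as_ptr(), 0);
            assert!(fd >= 0);
            assert_eq!(libc::ftruncate(fd, 4096), 0);

            let ptr = libc::mmap(
                std::ptr::null_mut(),
                4096,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_SHARED,
                fd,
                0,
            );
            assert_ne!(ptr, libc::MAP_FAILED);

            // Close the memfd: the mapping still works, so the kernel
            // cannot reclaim the backing pages yet.
            assert_eq!(libc::close(fd), 0);
            *(ptr as *mut u8) = 42;
            assert_eq!(*(ptr as *const u8), 42);

            // Only once the last mapping is removed can the kernel free the
            // memfd's pages: hence `purge_module` must reset every slot.
            assert_eq!(libc::munmap(ptr, 4096), 0);
        }
    }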
@@ -499,14 +499,8 @@ impl MemoryImageSlot {
         // extent of the prior initialization image in order to preserve
         // resident memory that might come before or after the image.
         if self.image.as_ref() != maybe_image {
-            if let Some(image) = &self.image {
-                unsafe {
-                    image
-                        .remap_as_zeros_at(self.base)
-                        .map_err(|e| InstantiationError::Resource(e.into()))?;
-                }
-                self.image = None;
-            }
+            self.remove_image()
+                .map_err(|e| InstantiationError::Resource(e.into()))?;
         }

         // The next order of business is to ensure that `self.accessible` is
@@ -565,6 +559,16 @@ impl MemoryImageSlot {
         Ok(())
     }

+    pub(crate) fn remove_image(&mut self) -> Result<()> {
+        if let Some(image) = &self.image {
+            unsafe {
+                image.remap_as_zeros_at(self.base)?;
+            }
+            self.image = None;
+        }
+        Ok(())
+    }
+
     /// Resets this linear memory slot back to a "pristine state".
     ///
     /// This will reset the memory back to its original contents on Linux or
@@ -2,8 +2,7 @@ use crate::imports::Imports;
 use crate::instance::{Instance, InstanceHandle, RuntimeMemoryCreator};
 use crate::memory::{DefaultMemoryCreator, Memory};
 use crate::table::Table;
-use crate::ModuleRuntimeInfo;
-use crate::Store;
+use crate::{CompiledModuleId, ModuleRuntimeInfo, Store};
 use anyhow::Result;
 use std::alloc;
 use std::any::Any;
@@ -190,6 +189,13 @@ pub unsafe trait InstanceAllocator: Send + Sync {
     /// The provided stack is required to have been allocated with `allocate_fiber_stack`.
     #[cfg(feature = "async")]
     unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack);
+
+    /// Purges all lingering resources related to `module` from within this
+    /// allocator.
+    ///
+    /// Primarily present for the pooling allocator to remove mappings of
+    /// this module from slots in linear memory.
+    fn purge_module(&self, module: CompiledModuleId);
 }

 fn get_table_init_start(
@@ -593,4 +599,6 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
     unsafe fn deallocate_fiber_stack(&self, _stack: &wasmtime_fiber::FiberStack) {
         // The on-demand allocator has no further bookkeeping for fiber stacks
     }
+
+    fn purge_module(&self, _: CompiledModuleId) {}
 }
@@ -12,7 +12,7 @@ use super::{
     InstantiationError,
 };
 use crate::{instance::Instance, Memory, Mmap, Table};
-use crate::{MemoryImageSlot, ModuleRuntimeInfo, Store};
+use crate::{CompiledModuleId, MemoryImageSlot, ModuleRuntimeInfo, Store};
 use anyhow::{anyhow, bail, Context, Result};
 use libc::c_void;
 use std::convert::TryFrom;
@@ -560,6 +560,22 @@ impl InstancePool {

         bail!("{}", message)
     }
+
+    fn purge_module(&self, module: CompiledModuleId) {
+        // Purging everything related to `module` primarily means clearing out
+        // all of its memory images present in the virtual address space. Go
+        // through the index allocator for slots affine to `module` and reset
+        // them, freeing up the index when we're done.
+        //
+        // Note that this is only called when the specified `module` won't be
+        // allocated further (the module is being dropped) so this shouldn't hit
+        // any sort of infinite loop since this should be the final operation
+        // working with `module`.
+        while let Some(index) = self.index_allocator.alloc_affine_and_clear_affinity(module) {
+            self.memories.clear_images(index.0);
+            self.index_allocator.free(index);
+        }
+    }
 }

 /// Represents a pool of WebAssembly linear memories.
@@ -740,6 +756,26 @@ impl MemoryPool {
         let idx = instance_index * self.max_memories + (memory_index.as_u32() as usize);
         *self.image_slots[idx].lock().unwrap() = Some(slot);
     }
+
+    /// Resets all the images for the instance index slot specified to clear out
+    /// any prior mappings.
+    ///
+    /// This is used when a `Module` is dropped at the `wasmtime` layer to clear
+    /// out any remaining mappings and ensure that its memfd backing, if any, is
+    /// removed from the address space to avoid lingering references to it.
+    fn clear_images(&self, instance_index: usize) {
+        for i in 0..self.max_memories {
+            let index = DefinedMemoryIndex::from_u32(i as u32);
+
+            // Clear the image from the slot and, if successful, return it back
+            // to our state. Note that on failure here the whole slot will get
+            // paved over with an anonymous mapping.
+            let mut slot = self.take_memory_image_slot(instance_index, index);
+            if slot.remove_image().is_ok() {
+                self.return_memory_image_slot(instance_index, index, slot);
+            }
+        }
+    }
 }

 impl Drop for MemoryPool {
@@ -1116,6 +1152,10 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
     unsafe fn deallocate_fiber_stack(&self, _stack: &wasmtime_fiber::FiberStack) {
         // A no-op as we don't own the fiber stack on Windows
     }
+
+    fn purge_module(&self, module: CompiledModuleId) {
+        self.instances.purge_module(module);
+    }
 }

 #[cfg(test)]
@@ -43,49 +43,28 @@ pub struct IndexAllocator(Mutex<Inner>);

 #[derive(Debug)]
 struct Inner {
+    strategy: PoolingAllocationStrategy,
     rng: SmallRng,
-    state: State,
-}
-
-#[derive(Debug)]
-enum State {
-    NextAvailable(Vec<SlotId>),
-    Random(Vec<SlotId>),
-    /// Reuse-affinity policy state.
-    ///
-    /// The data structures here deserve a little explanation:
-    ///
-    /// - free_list: this is a vec of slot indices that are free, no
-    ///   matter their affinities (or no affinity at all).
-    /// - per_module: this is a hashmap of vecs of slot indices that
-    ///   are free, with affinity for particular module IDs. A slot may
-    ///   appear in zero or one of these lists.
-    /// - slot_state: indicates what state each slot is in: allocated
-    ///   (Taken), only in free_list (Empty), or in free_list and a
-    ///   per_module list (Affinity).
-    ///
-    /// The slot state tracks a slot's index in the global and
-    /// per-module freelists, so it can be efficiently removed from
-    /// both. We take some care to keep these up-to-date as well.
-    ///
-    /// On allocation, we first try to find a slot with affinity for
-    /// the given module ID, if any. If not, we pick a random slot
-    /// ID. This random choice is unbiased across all free slots.
-    ReuseAffinity {
-        /// Free-list of all slots. We use this to pick a victim when
-        /// we don't have an appropriate slot with the preferred
-        /// affinity.
-        free_list: Vec<SlotId>,
-        /// Invariant: any module ID in this hashmap must have a
-        /// non-empty list of free slots (otherwise we remove it). We
-        /// remove a module's freelist when we have no more slots with
-        /// affinity for that module.
-        per_module: HashMap<CompiledModuleId, Vec<SlotId>>,
-        /// The state of any given slot. Records indices in the above
-        /// list (empty) or two lists (with affinity), and these
-        /// indices are kept up-to-date to allow fast removal.
-        slot_state: Vec<SlotState>,
-    },
+
+    /// Free-list of all slots.
+    ///
+    /// We use this to pick a victim when we don't have an appropriate slot with
+    /// the preferred affinity.
+    free_list: Vec<SlotId>,
+
+    /// Affine slot management which tracks which slots are free and were last
+    /// used with the specified `CompiledModuleId`.
+    ///
+    /// Invariant: any module ID in this hashmap must have a non-empty list of
+    /// free slots (otherwise we remove it). We remove a module's freelist when
+    /// we have no more slots with affinity for that module.
+    per_module: HashMap<CompiledModuleId, Vec<SlotId>>,
+
+    /// The state of any given slot.
+    ///
+    /// Records indices in the above list (empty) or two lists (with affinity),
+    /// and these indices are kept up-to-date to allow fast removal.
+    slot_state: Vec<SlotState>,
 }

 #[derive(Clone, Debug)]
@@ -213,69 +192,15 @@ impl FreeSlotState {
     }
 }

-/// Internal: remove a slot-index from the global free list.
-fn remove_global_free_list_item(
-    slot_state: &mut Vec<SlotState>,
-    free_list: &mut Vec<SlotId>,
-    index: SlotId,
-) {
-    let free_list_index = slot_state[index.index()].unwrap_free().free_list_index();
-    assert_eq!(index, free_list.swap_remove(free_list_index.index()));
-    if free_list_index.index() < free_list.len() {
-        let replaced = free_list[free_list_index.index()];
-        slot_state[replaced.index()]
-            .unwrap_free_mut()
-            .update_free_list_index(free_list_index);
-    }
-}
-
-/// Internal: remove a slot-index from a per-module free list.
-fn remove_module_free_list_item(
-    slot_state: &mut Vec<SlotState>,
-    per_module: &mut HashMap<CompiledModuleId, Vec<SlotId>>,
-    id: CompiledModuleId,
-    index: SlotId,
-) {
-    debug_assert!(
-        per_module.contains_key(&id),
-        "per_module list for given module should not be empty"
-    );
-
-    let per_module_list = per_module.get_mut(&id).unwrap();
-    debug_assert!(!per_module_list.is_empty());
-
-    let per_module_index = slot_state[index.index()].unwrap_free().per_module_index();
-    assert_eq!(index, per_module_list.swap_remove(per_module_index.index()));
-    if per_module_index.index() < per_module_list.len() {
-        let replaced = per_module_list[per_module_index.index()];
-        slot_state[replaced.index()]
-            .unwrap_free_mut()
-            .update_per_module_index(per_module_index);
-    }
-    if per_module_list.is_empty() {
-        per_module.remove(&id);
-    }
+enum AllocMode {
+    ForceAffineAndClear,
+    AnySlot,
 }

 impl IndexAllocator {
     /// Create the default state for this strategy.
     pub fn new(strategy: PoolingAllocationStrategy, max_instances: usize) -> Self {
         let ids = (0..max_instances).map(|i| SlotId(i)).collect::<Vec<_>>();
-        let state = match strategy {
-            PoolingAllocationStrategy::NextAvailable => State::NextAvailable(ids),
-            PoolingAllocationStrategy::Random => State::Random(ids),
-            PoolingAllocationStrategy::ReuseAffinity => State::ReuseAffinity {
-                free_list: ids,
-                per_module: HashMap::new(),
-                slot_state: (0..max_instances)
-                    .map(|i| {
-                        SlotState::Free(FreeSlotState::NoAffinity {
-                            free_list_index: GlobalFreeListIndex(i),
-                        })
-                    })
-                    .collect(),
-            },
-        };
         // Use a deterministic seed during fuzzing to improve reproducibility of
         // test cases, but otherwise outside of fuzzing use a random seed to
         // shake things up.

@@ -285,120 +210,124 @@ impl IndexAllocator {
             rand::thread_rng().gen()
         };
         let rng = SmallRng::from_seed(seed);
-        IndexAllocator(Mutex::new(Inner { rng, state }))
+        IndexAllocator(Mutex::new(Inner {
+            rng,
+            strategy,
+            free_list: ids,
+            per_module: HashMap::new(),
+            slot_state: (0..max_instances)
+                .map(|i| {
+                    SlotState::Free(FreeSlotState::NoAffinity {
+                        free_list_index: GlobalFreeListIndex(i),
+                    })
+                })
+                .collect(),
+        }))
     }

-    /// Allocate a new slot.
-    pub fn alloc(&self, id: Option<CompiledModuleId>) -> Option<SlotId> {
+    /// Allocate a new index from this allocator optionally using `id` as an
+    /// affinity request if the allocation strategy supports it.
+    ///
+    /// Returns `None` if no more slots are available.
+    pub fn alloc(&self, module_id: Option<CompiledModuleId>) -> Option<SlotId> {
+        self._alloc(module_id, AllocMode::AnySlot)
+    }
+
+    /// Attempts to allocate a guaranteed-affine slot to the module `id`
+    /// specified.
+    ///
+    /// Returns `None` if there are no slots affine to `id`. The allocation of
+    /// this slot will not record the affinity to `id`, instead simply listing
+    /// it as taken. This is intended to be used for clearing out all affine
+    /// slots to a module.
+    pub fn alloc_affine_and_clear_affinity(&self, module_id: CompiledModuleId) -> Option<SlotId> {
+        self._alloc(Some(module_id), AllocMode::ForceAffineAndClear)
+    }
+
+    fn _alloc(&self, module_id: Option<CompiledModuleId>, mode: AllocMode) -> Option<SlotId> {
         let mut inner = self.0.lock().unwrap();
         let inner = &mut *inner;
-        match &mut inner.state {
-            State::NextAvailable(free_list) => free_list.pop(),
-            State::Random(free_list) => {
-                if free_list.len() == 0 {
-                    None
-                } else {
-                    let id = inner.rng.gen_range(0..free_list.len());
-                    Some(free_list.swap_remove(id))
-                }
-            }
-            State::ReuseAffinity {
-                free_list,
-                per_module,
-                slot_state,
-            } => {
-                if let Some(this_module) = id.and_then(|id| per_module.get_mut(&id)) {
-                    // There is a freelist of slots with affinity for
-                    // the requested module-ID. Pick the last one; any
-                    // will do, no need for randomness here.
-                    assert!(!this_module.is_empty());
-                    let slot_id = this_module.pop().expect("List should never be empty");
-                    if this_module.is_empty() {
-                        per_module.remove(&id.unwrap());
-                    }
-                    // Make sure to remove from the global
-                    // freelist. We already removed from the
-                    // per-module list above.
-                    remove_global_free_list_item(slot_state, free_list, slot_id);
-                    slot_state[slot_id.index()] = SlotState::Taken(id);
-                    Some(slot_id)
-                } else {
-                    if free_list.len() == 0 {
-                        return None;
-                    }
-                    // Pick a random free slot ID. Note that we do
-                    // this, rather than pick a victim module first,
-                    // to maintain an unbiased stealing distribution:
-                    // we want the likelihood of our taking a slot
-                    // from some other module's freelist to be
-                    // proportional to that module's freelist
-                    // length. Or in other words, every *slot* should
-                    // be equally likely to be stolen. The
-                    // alternative, where we pick the victim module
-                    // freelist first, means that either a module with
-                    // an affinity freelist of one slot has the same
-                    // chances of losing that slot as one with a
-                    // hundred slots; or else we need a weighted
-                    // random choice among modules, which is just as
-                    // complex as this process.
-                    //
-                    // We don't bother picking an empty slot (no
-                    // established affinity) before a random slot,
-                    // because this is more complex, and in the steady
-                    // state, all slots will see at least one
-                    // instantiation very quickly, so there will never
-                    // (past an initial phase) be a slot with no
-                    // affinity.
-                    let free_list_index = inner.rng.gen_range(0..free_list.len());
-                    let slot_id = free_list[free_list_index];
-                    // Remove from both the global freelist and
-                    // per-module freelist, if any.
-                    remove_global_free_list_item(slot_state, free_list, slot_id);
-                    if let &SlotState::Free(FreeSlotState::Affinity { module, .. }) =
-                        &slot_state[slot_id.index()]
-                    {
-                        remove_module_free_list_item(slot_state, per_module, module, slot_id);
-                    }
-                    slot_state[slot_id.index()] = SlotState::Taken(id);
-
-                    Some(slot_id)
-                }
-            }
-        }
+
+        // Determine which `SlotId` will be chosen first. Below the free list
+        // metadata will be updated with our choice.
+        let slot_id = match mode {
+            // If any slot is desired then the pooling allocation strategy
+            // determines which index is chosen.
+            AllocMode::AnySlot => match inner.strategy {
+                PoolingAllocationStrategy::NextAvailable => inner.pick_last_used()?,
+                PoolingAllocationStrategy::Random => inner.pick_random()?,
+
+                // First attempt an affine allocation where the slot
+                // returned was previously used by `id`, but if that fails
+                // pick a random free slot ID.
+                //
+                // Note that we do this to maintain an unbiased stealing
+                // distribution: we want the likelihood of our taking a slot
+                // from some other module's freelist to be proportional to
+                // that module's freelist length. Or in other words, every
+                // *slot* should be equally likely to be stolen. The
+                // alternative, where we pick the victim module freelist
+                // first, means that either a module with an affinity
+                // freelist of one slot has the same chances of losing that
+                // slot as one with a hundred slots; or else we need a
+                // weighted random choice among modules, which is just as
+                // complex as this process.
+                //
+                // We don't bother picking an empty slot (no established
+                // affinity) before a random slot, because this is more
+                // complex, and in the steady state, all slots will see at
+                // least one instantiation very quickly, so there will never
+                // (past an initial phase) be a slot with no affinity.
+                PoolingAllocationStrategy::ReuseAffinity => inner
+                    .pick_affine(module_id)
+                    .or_else(|| inner.pick_random())?,
+            },
+
+            // In this mode an affinity-based allocation is always performed as
+            // the purpose here is to clear out slots relevant to `module_id`
+            // during module teardown.
+            AllocMode::ForceAffineAndClear => inner.pick_affine(module_id)?,
+        };
+
+        // Update internal metadata about the allocation of `slot_id` to
+        // `module_id`, meaning that it's removed from the per-module freelist
+        // if it was previously affine and additionally it's removed from the
+        // global freelist.
+        inner.remove_global_free_list_item(slot_id);
+        if let &SlotState::Free(FreeSlotState::Affinity { module, .. }) =
+            &inner.slot_state[slot_id.index()]
+        {
+            inner.remove_module_free_list_item(module, slot_id);
+        }
+        inner.slot_state[slot_id.index()] = SlotState::Taken(match mode {
+            AllocMode::ForceAffineAndClear => None,
+            AllocMode::AnySlot => module_id,
+        });
+
+        Some(slot_id)
     }

     pub(crate) fn free(&self, index: SlotId) {
         let mut inner = self.0.lock().unwrap();
-        match &mut inner.state {
-            State::NextAvailable(free_list) | State::Random(free_list) => {
-                free_list.push(index);
-            }
-            State::ReuseAffinity {
-                per_module,
-                free_list,
-                slot_state,
-            } => {
-                let module_id = slot_state[index.index()].unwrap_module_id();
-
-                let free_list_index = GlobalFreeListIndex(free_list.len());
-                free_list.push(index);
-                if let Some(id) = module_id {
-                    let per_module_list = per_module
-                        .entry(id)
-                        .or_insert_with(|| Vec::with_capacity(1));
-                    let per_module_index = PerModuleFreeListIndex(per_module_list.len());
-                    per_module_list.push(index);
-                    slot_state[index.index()] = SlotState::Free(FreeSlotState::Affinity {
-                        module: id,
-                        free_list_index,
-                        per_module_index,
-                    });
-                } else {
-                    slot_state[index.index()] =
-                        SlotState::Free(FreeSlotState::NoAffinity { free_list_index });
-                }
-            }
-        }
+        let free_list_index = GlobalFreeListIndex(inner.free_list.len());
+        inner.free_list.push(index);
+        let module_id = inner.slot_state[index.index()].unwrap_module_id();
+        inner.slot_state[index.index()] = if let Some(id) = module_id {
+            let per_module_list = inner
+                .per_module
+                .entry(id)
+                .or_insert_with(|| Vec::with_capacity(1));
+            let per_module_index = PerModuleFreeListIndex(per_module_list.len());
+            per_module_list.push(index);
+            SlotState::Free(FreeSlotState::Affinity {
+                module: id,
+                free_list_index,
+                per_module_index,
+            })
+        } else {
+            SlotState::Free(FreeSlotState::NoAffinity { free_list_index })
+        };
     }

     /// For testing only, we want to be able to assert what is on the

@@ -406,10 +335,7 @@ impl IndexAllocator {
     #[cfg(test)]
     pub(crate) fn testing_freelist(&self) -> Vec<SlotId> {
         let inner = self.0.lock().unwrap();
-        match &inner.state {
-            State::NextAvailable(free_list) | State::Random(free_list) => free_list.clone(),
-            _ => panic!("Wrong kind of state"),
-        }
+        inner.free_list.clone()
     }

     /// For testing only, get the list of all modules with at least

@@ -417,18 +343,71 @@ impl IndexAllocator {
     #[cfg(test)]
     pub(crate) fn testing_module_affinity_list(&self) -> Vec<CompiledModuleId> {
         let inner = self.0.lock().unwrap();
-        match &inner.state {
-            State::NextAvailable(..) | State::Random(..) => {
-                panic!("Wrong kind of state")
-            }
-            State::ReuseAffinity { per_module, .. } => {
-                let mut ret = vec![];
-                for (module, list) in per_module {
-                    assert!(!list.is_empty());
-                    ret.push(*module);
-                }
-                ret
-            }
+        let mut ret = vec![];
+        for (module, list) in inner.per_module.iter() {
+            assert!(!list.is_empty());
+            ret.push(*module);
+        }
+        ret
+    }
+}
+
+impl Inner {
+    fn pick_last_used(&self) -> Option<SlotId> {
+        self.free_list.last().copied()
+    }
+
+    fn pick_random(&mut self) -> Option<SlotId> {
+        if self.free_list.len() == 0 {
+            return None;
+        }
+        let i = self.rng.gen_range(0..self.free_list.len());
+        Some(self.free_list[i])
+    }
+
+    /// Attempts to allocate a slot already affine to `id`, returning `None` if
+    /// `id` is `None` or if there are no affine slots.
+    fn pick_affine(&self, module_id: Option<CompiledModuleId>) -> Option<SlotId> {
+        let free = self.per_module.get(&module_id?)?;
+        free.last().copied()
+    }
+
+    /// Remove a slot-index from the global free list.
+    fn remove_global_free_list_item(&mut self, index: SlotId) {
+        let free_list_index = self.slot_state[index.index()]
+            .unwrap_free()
+            .free_list_index();
+        assert_eq!(index, self.free_list.swap_remove(free_list_index.index()));
+        if free_list_index.index() < self.free_list.len() {
+            let replaced = self.free_list[free_list_index.index()];
+            self.slot_state[replaced.index()]
+                .unwrap_free_mut()
+                .update_free_list_index(free_list_index);
+        }
+    }
+
+    /// Remove a slot-index from a per-module free list.
+    fn remove_module_free_list_item(&mut self, module_id: CompiledModuleId, index: SlotId) {
+        debug_assert!(
+            self.per_module.contains_key(&module_id),
+            "per_module list for given module should not be empty"
+        );
+
+        let per_module_list = self.per_module.get_mut(&module_id).unwrap();
+        debug_assert!(!per_module_list.is_empty());
+
+        let per_module_index = self.slot_state[index.index()]
+            .unwrap_free()
+            .per_module_index();
+        assert_eq!(index, per_module_list.swap_remove(per_module_index.index()));
+        if per_module_index.index() < per_module_list.len() {
+            let replaced = per_module_list[per_module_index.index()];
+            self.slot_state[replaced.index()]
+                .unwrap_free_mut()
+                .update_per_module_index(per_module_index);
+        }
+        if per_module_list.is_empty() {
+            self.per_module.remove(&module_id);
+        }
     }
 }

@@ -519,6 +498,28 @@ mod test {
         state.free(index);
     }

+    #[test]
+    fn clear_affine() {
+        let id_alloc = CompiledModuleIdAllocator::new();
+        let id = id_alloc.alloc();
+
+        for strat in [
+            PoolingAllocationStrategy::ReuseAffinity,
+            PoolingAllocationStrategy::NextAvailable,
+            PoolingAllocationStrategy::Random,
+        ] {
+            let state = IndexAllocator::new(strat, 100);
+
+            let index1 = state.alloc(Some(id)).unwrap();
+            let index2 = state.alloc(Some(id)).unwrap();
+            state.free(index2);
+            state.free(index1);
+            assert!(state.alloc_affine_and_clear_affinity(id).is_some());
+            assert!(state.alloc_affine_and_clear_affinity(id).is_some());
+            assert_eq!(state.alloc_affine_and_clear_affinity(id), None);
+        }
+    }
+
     #[test]
     fn test_affinity_allocation_strategy_random() {
         use rand::Rng;
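For orientation, here is a condensed sketch of how the pieces above fit
together. This is hypothetical glue code, not taken from the diff: the
`run_instance` and `drop_module` functions are invented for illustration, they
assume the `IndexAllocator` API shown above, and the pool types and error
handling are elided.

    // Instantiation path: prefer a slot whose last tenant was `module` so
    // its memory image mappings can be reused instead of re-established.
    fn run_instance(index_allocator: &IndexAllocator, module: CompiledModuleId) {
        if let Some(slot) = index_allocator.alloc(Some(module)) {
            // ... instantiate and run in `slot` ...
            // Returning the slot records affinity to `module` for next time.
            index_allocator.free(slot);
        }
    }

    // Teardown path (what `purge_module` does): claim each slot still affine
    // to `module`, which marks it taken with no affinity recorded, unmap its
    // images, then return it to the free list affinity-free.
    fn drop_module(index_allocator: &IndexAllocator, module: CompiledModuleId) {
        while let Some(slot) = index_allocator.alloc_affine_and_clear_affinity(module) {
            // ... clear_images(slot.0) in the memory pool ...
            index_allocator.free(slot);
        }
    }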