Fix incorrect use of MemoryIndex in the pooling allocator. (#3782)

This commit corrects a few places in the pooling instance allocator where a
`MemoryIndex` was used as if it were a `DefinedMemoryIndex`.

When the unstable `multi-memory` proposal is enabled, a module that imports a
memory can cause a newly allocated instance to use an incorrect base address
for its defined memories.

Triggering the bug requires enabling the unstable `multi-memory` proposal,
configuring the pooling instance allocator (not the default), and configuring
the module limits to allow imported memories (also not the default).

The fix is to replace all uses of `MemoryIndex` with `DefinedMemoryIndex` in
the pooling instance allocator.
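
For context, here is a hedged sketch of why the index spaces matter. The constants and
the `slot_base` helper below are illustrative stand-ins, not the actual wasmtime types:
a `MemoryIndex` counts imported memories first, while a `DefinedMemoryIndex` counts only
memories defined by the module, and the pool's per-instance memory slots are laid out by
defined index. Passing the module-wide index therefore lands one or more slots past the
intended base address whenever imports are present.

```rust
// Illustrative only: simplified slot arithmetic, not the wasmtime implementation.
// Assumed layout: each instance owns MAX_MEMORIES fixed-size slots, one per
// *defined* memory, addressed as slot = instance * MAX_MEMORIES + defined_index.
const MAX_MEMORIES: usize = 2; // hypothetical pool configuration
const MEMORY_SIZE: usize = 0x10000; // hypothetical per-slot reservation

fn slot_base(instance_index: usize, defined_index: usize) -> usize {
    assert!(defined_index < MAX_MEMORIES);
    (instance_index * MAX_MEMORIES + defined_index) * MEMORY_SIZE
}

fn main() {
    // Module imports one memory and defines one memory:
    //   MemoryIndex 0 -> the import, MemoryIndex 1 -> the defined memory
    //   DefinedMemoryIndex 0 -> the defined memory
    let num_imported = 1;
    let defined_index = 0;
    let module_index = num_imported + defined_index; // what the buggy path effectively used

    let correct = slot_base(0, defined_index);
    let wrong = slot_base(0, module_index);
    // The module-wide index points one whole slot past the instance's memory.
    assert_ne!(correct, wrong);
    println!("correct base = {correct:#x}, buggy base = {wrong:#x}");
}
```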

Several `debug_assert!` invocations have also been upgraded to `assert!` so that the
state of the pooling allocator is sanity checked even in release builds.
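
As a reminder of why that matters (a minimal, generic illustration rather than code from
this commit, with a hypothetical `checked_slot` helper): `debug_assert!` is compiled out
when debug assertions are disabled, as in typical `--release` builds, so only `assert!`
keeps these bounds checks active in production.

```rust
// Generic illustration; `checked_slot` is hypothetical, not a wasmtime API.
fn checked_slot(index: usize, max: usize) -> usize {
    debug_assert!(index < max); // dropped when debug_assertions are off
    assert!(index < max); // always checked, even in release builds
    index
}

fn main() {
    println!("{}", checked_slot(3, 8));
}
```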
Author: Peter Huene
Date: 2022-02-09 07:39:29 -08:00
Committed by: GitHub
Parent: 10198553c7
Commit: 1b27508a42
3 changed files with 79 additions and 33 deletions


@@ -20,8 +20,8 @@ use std::mem;
 use std::sync::Arc;
 use std::sync::Mutex;
 use wasmtime_environ::{
-    HostPtr, MemoryIndex, MemoryStyle, Module, PrimaryMap, Tunables, VMOffsets, VMOffsetsFields,
-    WASM_PAGE_SIZE,
+    DefinedMemoryIndex, HostPtr, MemoryStyle, Module, PrimaryMap, Tunables, VMOffsets,
+    VMOffsetsFields, WASM_PAGE_SIZE,
 };

 mod index_allocator;
@@ -339,7 +339,7 @@ impl InstancePool {
     }

     unsafe fn instance(&self, index: usize) -> &mut Instance {
-        debug_assert!(index < self.max_instances);
+        assert!(index < self.max_instances);
         &mut *(self.mapping.as_mut_ptr().add(index * self.instance_size) as *mut Instance)
     }
@@ -421,11 +421,11 @@ impl InstancePool {
         let addr = handle.instance as usize;
         let base = self.mapping.as_ptr() as usize;
-        debug_assert!(addr >= base && addr < base + self.mapping.len());
-        debug_assert!((addr - base) % self.instance_size == 0);
+        assert!(addr >= base && addr < base + self.mapping.len());
+        assert!((addr - base) % self.instance_size == 0);
         let index = (addr - base) / self.instance_size;
-        debug_assert!(index < self.max_instances);
+        assert!(index < self.max_instances);

         let instance = unsafe { &mut *handle.instance };
@@ -434,20 +434,20 @@ impl InstancePool {
             instance.memories.iter_mut().zip(self.memories.get(index))
         {
             let mut memory = mem::take(memory);
-            debug_assert!(memory.is_static());
+            assert!(memory.is_static());

             match memory {
                 Memory::Static {
                     memfd_slot: Some(mut memfd_slot),
                     ..
                 } => {
-                    let mem_idx = instance.module.memory_index(def_mem_idx);
                     // If there was any error clearing the memfd, just
                     // drop it here, and let the drop handler for the
                     // MemFdSlot unmap in a way that retains the
                     // address space reservation.
                     if memfd_slot.clear_and_remain_ready().is_ok() {
-                        self.memories.return_memfd_slot(index, mem_idx, memfd_slot);
+                        self.memories
+                            .return_memfd_slot(index, def_mem_idx, memfd_slot);
                     }
                 }
@@ -476,7 +476,7 @@ impl InstancePool {
         // Decommit any tables that were used
         for (table, base) in instance.tables.values_mut().zip(self.tables.get(index)) {
             let table = mem::take(table);
-            debug_assert!(table.is_static());
+            assert!(table.is_static());

             let size = round_up_to_pow2(
                 table.size() as usize * mem::size_of::<*mut u8>(),
@@ -509,7 +509,7 @@ impl InstancePool {
     ) -> Result<(), InstantiationError> {
         let module = instance.module.as_ref();

-        debug_assert!(instance.memories.is_empty());
+        assert!(instance.memories.is_empty());

         for (memory_index, plan) in module
             .memory_plans
@@ -522,14 +522,14 @@ impl InstancePool {
             let memory = unsafe {
                 std::slice::from_raw_parts_mut(
-                    memories.get_base(instance_idx, memory_index),
+                    memories.get_base(instance_idx, defined_index),
                     (max_pages as usize) * (WASM_PAGE_SIZE as usize),
                 )
             };

             if let Some(memfds) = maybe_memfds {
                 let image = memfds.get_memory_image(defined_index);
-                let mut slot = memories.take_memfd_slot(instance_idx, memory_index);
+                let mut slot = memories.take_memfd_slot(instance_idx, defined_index);
                 let initial_size = plan.memory.minimum * WASM_PAGE_SIZE as u64;

                 // If instantiation fails, we can propagate the error
@@ -564,7 +564,7 @@ impl InstancePool {
             }
         }

-        debug_assert!(instance.dropped_data.is_empty());
+        assert!(instance.dropped_data.is_empty());

         Ok(())
     }
@@ -576,7 +576,7 @@ impl InstancePool {
     ) -> Result<(), InstantiationError> {
         let module = instance.module.as_ref();

-        debug_assert!(instance.tables.is_empty());
+        assert!(instance.tables.is_empty());

         for plan in (&module.table_plans.values().as_slice()[module.num_imported_tables..]).iter() {
             let base = tables.next().unwrap();
@@ -594,7 +594,7 @@ impl InstancePool {
             );
         }

-        debug_assert!(instance.dropped_elements.is_empty());
+        assert!(instance.dropped_elements.is_empty());

         instance
             .dropped_elements
             .resize(module.passive_elements.len());
@@ -664,7 +664,7 @@ impl MemoryPool {
             0
         };

-        debug_assert!(
+        assert!(
             memory_size % region::page::size() == 0,
             "memory size {} is not a multiple of system page size",
             memory_size
@@ -729,10 +729,10 @@ impl MemoryPool {
         Ok(pool)
     }

-    fn get_base(&self, instance_index: usize, memory_index: MemoryIndex) -> *mut u8 {
-        debug_assert!(instance_index < self.max_instances);
+    fn get_base(&self, instance_index: usize, memory_index: DefinedMemoryIndex) -> *mut u8 {
+        assert!(instance_index < self.max_instances);
         let memory_index = memory_index.as_u32() as usize;
-        debug_assert!(memory_index < self.max_memories);
+        assert!(memory_index < self.max_memories);
         let idx = instance_index * self.max_memories + memory_index;
         let offset = self.initial_memory_offset + idx * self.memory_size;
         unsafe { self.mapping.as_mut_ptr().offset(offset as isize) }
@@ -740,12 +740,16 @@ impl MemoryPool {
     fn get<'a>(&'a self, instance_index: usize) -> impl Iterator<Item = *mut u8> + 'a {
         (0..self.max_memories)
-            .map(move |i| self.get_base(instance_index, MemoryIndex::from_u32(i as u32)))
+            .map(move |i| self.get_base(instance_index, DefinedMemoryIndex::from_u32(i as u32)))
     }

     /// Take ownership of the given memfd slot. Must be returned via
     /// `return_memfd_slot` when the instance is done using it.
-    fn take_memfd_slot(&self, instance_index: usize, memory_index: MemoryIndex) -> MemFdSlot {
+    fn take_memfd_slot(
+        &self,
+        instance_index: usize,
+        memory_index: DefinedMemoryIndex,
+    ) -> MemFdSlot {
         let idx = instance_index * self.max_memories + (memory_index.as_u32() as usize);
         let maybe_slot = self.memfd_slots[idx].lock().unwrap().take();
@@ -759,7 +763,12 @@ impl MemoryPool {
     }

     /// Return ownership of the given memfd slot.
-    fn return_memfd_slot(&self, instance_index: usize, memory_index: MemoryIndex, slot: MemFdSlot) {
+    fn return_memfd_slot(
+        &self,
+        instance_index: usize,
+        memory_index: DefinedMemoryIndex,
+        slot: MemFdSlot,
+    ) {
         assert!(!slot.is_dirty());
         let idx = instance_index * self.max_memories + (memory_index.as_u32() as usize);
         *self.memfd_slots[idx].lock().unwrap() = Some(slot);
@@ -831,7 +840,7 @@ impl TablePool {
     }

     fn get(&self, instance_index: usize) -> impl Iterator<Item = *mut u8> {
-        debug_assert!(instance_index < self.max_instances);
+        assert!(instance_index < self.max_instances);

         let base: *mut u8 = unsafe {
             self.mapping
@@ -929,7 +938,7 @@ impl StackPool {
             alloc.alloc(None).index()
         };

-        debug_assert!(index < self.max_instances);
+        assert!(index < self.max_instances);

         unsafe {
             // Remove the guard page from the size
@@ -964,11 +973,11 @@ impl StackPool {
         let stack_size = self.stack_size - self.page_size;
         let bottom_of_stack = top - stack_size;
         let start_of_stack = bottom_of_stack - self.page_size;
-        debug_assert!(start_of_stack >= base && start_of_stack < (base + len));
-        debug_assert!((start_of_stack - base) % self.stack_size == 0);
+        assert!(start_of_stack >= base && start_of_stack < (base + len));
+        assert!((start_of_stack - base) % self.stack_size == 0);

         let index = (start_of_stack - base) / self.stack_size;
-        debug_assert!(index < self.max_instances);
+        assert!(index < self.max_instances);

         decommit_stack_pages(bottom_of_stack as _, stack_size).unwrap();


@@ -224,14 +224,14 @@ impl Config {
     /// executing WebAssembly to periodically yield back according to the
     /// epoch configuration settings. This enables `Future::poll` to take at
     /// most a certain amount of time according to epoch configuration
-    /// setttings and when increments happen. The benefit of this approach is
+    /// settings and when increments happen. The benefit of this approach is
     /// that the instrumentation in compiled code is quite lightweight, but a
     /// downside can be that the scheduling is somewhat nondeterministic since
     /// increments are usually timer-based which are not always deterministic.
     ///
     /// Note that to prevent infinite execution of wasm it's recommended to
     /// place a timeout on the entire future representing executing wasm code
-    /// and the preriodic yields with epochs should ensure that when the
+    /// and the periodic yields with epochs should ensure that when the
     /// timeout is reached it's appropriately recognized.
     ///
     /// * Alternatively you can enable the
@@ -250,7 +250,7 @@ impl Config {
     ///
     /// Note that to prevent infinite execution of wasm it's recommended to
     /// place a timeout on the entire future representing executing wasm code
-    /// and the preriodic yields with epochs should ensure that when the
+    /// and the periodic yields with epochs should ensure that when the
     /// timeout is reached it's appropriately recognized.
     ///
     /// * Finally you can spawn futures into a thread pool. By doing this in a
@@ -387,7 +387,7 @@ impl Config {
     ///
     /// - Yield to the executor loop, then resume when the future is
     ///   next polled. See
-    ///   [`Store::epoch_dealdine_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
+    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
     ///
     /// The first is the default; set the second for the timeslicing
     /// behavior described above.
@@ -1084,7 +1084,7 @@ impl Config {
     /// linear memory.
     ///
     /// Note that this is a currently simple heuristic for optimizing the growth
-    /// of dynamic memories, primarily implemented for the memory64 propsal
+    /// of dynamic memories, primarily implemented for the memory64 proposal
     /// where all memories are currently "dynamic". This is unlikely to be a
     /// one-size-fits-all style approach and if you're an embedder running into
     /// issues with dynamic memories and growth and are interested in having
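
Since the doc comments above describe epoch-based interruption, a small usage sketch may
help; it assumes a roughly contemporary wasmtime API (`Config::epoch_interruption`,
`Store::set_epoch_deadline`, `Engine::increment_epoch`, and the
`Store::epoch_deadline_async_yield_and_update` method referenced in the docs), and exact
signatures vary across wasmtime versions. Treat it as illustrative, not an excerpt from
this change.

```rust
use std::thread;
use std::time::Duration;
use wasmtime::*;

fn main() -> anyhow::Result<()> {
    // Enable epoch-based interruption so long-running wasm can be stopped.
    let mut config = Config::new();
    config.epoch_interruption(true);
    let engine = Engine::new(&config)?;

    // A wasm function that loops forever.
    let module = Module::new(&engine, r#"(module (func (export "run") (loop (br 0))))"#)?;
    let mut store = Store::new(&engine, ());
    // Trap (the default deadline behavior) once the epoch advances past the current one.
    store.set_epoch_deadline(1);

    // Advance the epoch from another thread after a short timeout.
    let engine2 = engine.clone();
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(50));
        engine2.increment_epoch();
    });

    let instance = Instance::new(&mut store, &module, &[])?;
    let run = instance.get_typed_func::<(), (), _>(&mut store, "run")?;
    // The infinite loop is interrupted and the call returns an error (a trap).
    assert!(run.call(&mut store, ()).is_err());
    Ok(())
}
```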


@@ -511,3 +511,40 @@ fn preserve_data_segments() -> Result<()> {
     Ok(())
 }
+
+#[test]
+fn multi_memory_with_imported_memories() -> Result<()> {
+    // This test checks that the base address for the defined memory is correct for the instance
+    // despite the presence of an imported memory.
+    let mut config = Config::new();
+    config.allocation_strategy(InstanceAllocationStrategy::Pooling {
+        strategy: PoolingAllocationStrategy::NextAvailable,
+        module_limits: ModuleLimits {
+            memory_pages: 1,
+            imported_memories: 1,
+            memories: 1,
+            ..Default::default()
+        },
+        instance_limits: InstanceLimits { count: 1 },
+    });
+    config.wasm_multi_memory(true);
+
+    let engine = Engine::new(&config)?;
+
+    let module = Module::new(
+        &engine,
+        r#"(module (import "" "m1" (memory 0)) (memory (export "m2") 1))"#,
+    )?;
+
+    let mut store = Store::new(&engine, ());
+    let m1 = Memory::new(&mut store, MemoryType::new(0, None))?;
+    let instance = Instance::new(&mut store, &module, &[m1.into()])?;
+
+    let m2 = instance.get_memory(&mut store, "m2").unwrap();
+    m2.data_mut(&mut store)[0] = 0x42;
+
+    assert_eq!(m2.data(&store)[0], 0x42);
+
+    Ok(())
+}