From ff840b3d3b3a53d9b9fc20f1d3f54c9763eebdde Mon Sep 17 00:00:00 2001
From: Peter Huene
Date: Thu, 4 Mar 2021 14:01:42 -0800
Subject: [PATCH] More PR feedback changes.

* More use of `anyhow`.
* Change `make_accessible` into `protect_linear_memory` to better demonstrate what it is used for; this will make the uffd implementation make a little more sense.
* Remove `create_memory_map` in favor of just creating the `Mmap` instances in the pooling allocator. This also removes the need for `MAP_NORESERVE` in the uffd implementation.
* Moar comments.
* Remove `BasePointerIterator` in favor of `impl Iterator`.
* The uffd implementation now only monitors linear memory pages and will only receive faults on pages that could potentially be accessible and never on a statically known guard page.
* Stop allocating memory or table pools if the maximum limit of the memory or table is 0.
---
 crates/jit/src/code_memory.rs                 |   2 +-
 crates/runtime/src/instance.rs                |  14 +-
 crates/runtime/src/instance/allocator.rs      |   6 +-
 .../runtime/src/instance/allocator/pooling.rs | 350 ++++++++----------
 .../src/instance/allocator/pooling/linux.rs   |  67 +++-
 .../src/instance/allocator/pooling/uffd.rs    | 346 +++++++----------
 .../src/instance/allocator/pooling/unix.rs    |  69 +++-
 .../src/instance/allocator/pooling/windows.rs |  61 ++-
 crates/runtime/src/memory.rs                  |  30 +-
 crates/runtime/src/mmap.rs                    |  34 +-
 crates/wasmtime/src/trampoline/memory.rs      |   5 +-
 11 files changed, 484 insertions(+), 500 deletions(-)

diff --git a/crates/jit/src/code_memory.rs b/crates/jit/src/code_memory.rs
index f4ba5c3017..49a2d0ecd4 100644
--- a/crates/jit/src/code_memory.rs
+++ b/crates/jit/src/code_memory.rs
@@ -25,7 +25,7 @@ struct CodeMemoryEntry {
 impl CodeMemoryEntry {
     fn with_capacity(cap: usize) -> Result<Self, String> {
-        let mmap = ManuallyDrop::new(Mmap::with_at_least(cap)?);
+        let mmap = ManuallyDrop::new(Mmap::with_at_least(cap).map_err(|e| e.to_string())?);
         let registry = ManuallyDrop::new(UnwindRegistry::new(mmap.as_ptr() as usize));
         Ok(Self {
             mmap,
diff --git a/crates/runtime/src/instance.rs b/crates/runtime/src/instance.rs
index 313642a077..7c06c61211 100644
--- a/crates/runtime/src/instance.rs
+++ b/crates/runtime/src/instance.rs
@@ -67,10 +67,10 @@ pub(crate) struct Instance {
     /// Hosts can store arbitrary per-instance information here.
     host_state: Box<dyn Any>,

-    /// Stores guard page faults in memory relating to the instance.
-    /// This is used for the pooling allocator with uffd enabled on Linux.
+    /// Stores linear memory guard page faults for the pooling allocator with uffd enabled.
+    /// These pages need to be reset after the signal handler generates the out-of-bounds trap.
     #[cfg(all(feature = "uffd", target_os = "linux"))]
-    guard_page_faults: RefCell<Vec<(*mut u8, usize, unsafe fn(*mut u8, usize) -> bool)>>,
+    guard_page_faults: RefCell<Vec<(*mut u8, usize, fn(*mut u8, usize) -> anyhow::Result<()>)>>,

     /// Additional context used by compiled wasm code. This field is last, and
     /// represents a dynamically-sized array that extends beyond the nominal
@@ -821,7 +821,7 @@ impl Instance {
         &self,
         page_addr: *mut u8,
         size: usize,
-        reset: unsafe fn(*mut u8, usize) -> bool,
+        reset: fn(*mut u8, usize) -> anyhow::Result<()>,
     ) {
         self.guard_page_faults
             .borrow_mut()
@@ -837,11 +837,7 @@ impl Instance {
     pub(crate) fn reset_guard_pages(&self) -> anyhow::Result<()> {
         let mut faults = self.guard_page_faults.borrow_mut();
         for (addr, len, reset) in faults.drain(..) {
-            unsafe {
-                if !reset(addr, len) {
-                    anyhow::bail!("failed to reset previously faulted memory guard page");
-                }
-            }
+            reset(addr, len)?;
         }

         Ok(())
diff --git a/crates/runtime/src/instance/allocator.rs b/crates/runtime/src/instance/allocator.rs
index c835c14421..c70105dfb5 100644
--- a/crates/runtime/src/instance/allocator.rs
+++ b/crates/runtime/src/instance/allocator.rs
@@ -549,8 +549,10 @@ impl OnDemandInstanceAllocator {
         let mut memories: PrimaryMap<DefinedMemoryIndex, Memory> =
             PrimaryMap::with_capacity(module.memory_plans.len() - num_imports);
         for plan in &module.memory_plans.values().as_slice()[num_imports..] {
-            memories
-                .push(Memory::new_dynamic(plan, creator).map_err(InstantiationError::Resource)?);
+            memories.push(
+                Memory::new_dynamic(plan, creator)
+                    .map_err(|e| InstantiationError::Resource(e.to_string()))?,
+            );
         }
         Ok(memories)
     }
diff --git a/crates/runtime/src/instance/allocator/pooling.rs b/crates/runtime/src/instance/allocator/pooling.rs
index 5029d90667..c9e054cbc1 100644
--- a/crates/runtime/src/instance/allocator/pooling.rs
+++ b/crates/runtime/src/instance/allocator/pooling.rs
@@ -31,9 +31,7 @@ cfg_if::cfg_if! {
     } else if #[cfg(all(feature = "uffd", target_os = "linux"))] {
         mod uffd;
         use uffd as imp;
-        use imp::PageFaultHandler;
-        use super::{check_init_bounds, initialize_tables};
-        use wasmtime_environ::MemoryInitialization;
+        use imp::initialize_memory_pool;
     } else if #[cfg(target_os = "linux")] {
         mod linux;
         use linux as imp;
@@ -43,7 +41,10 @@ cfg_if::cfg_if! {
     }
 }

-use imp::{create_memory_map, decommit, make_accessible};
+use imp::{
+    commit_memory_pages, commit_stack_pages, commit_table_pages, decommit_memory_pages,
+    decommit_stack_pages, decommit_table_pages,
+};

 fn round_up_to_pow2(n: usize, to: usize) -> usize {
     debug_assert!(to > 0);
@@ -272,9 +273,9 @@ impl Default for InstanceLimits {
         Self {
             count: 1000,
             #[cfg(target_pointer_width = "32")]
-            memory_reservation_size: 0xA00000,
+            memory_reservation_size: 10 * (1 << 20), // 10 MiB
             #[cfg(target_pointer_width = "64")]
-            memory_reservation_size: 0x180000000,
+            memory_reservation_size: 6 * (1 << 30), // 6 GiB
         }
     }
 }
@@ -305,45 +306,6 @@ impl Default for PoolingAllocationStrategy {
     }
 }

-// Used to iterate the base address of instance memories and tables.
-struct BasePointerIterator {
-    base: *mut u8,
-    current: usize,
-    num: usize,
-    size: usize,
-}
-
-impl BasePointerIterator {
-    fn new(base: *mut u8, num: usize, size: usize) -> Self {
-        Self {
-            base,
-            current: 0,
-            num,
-            size,
-        }
-    }
-}
-
-impl Iterator for BasePointerIterator {
-    type Item = *mut u8;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        let current = self.current;
-        if current == self.num {
-            return None;
-        }
-
-        self.current += 1;
-
-        Some(unsafe { self.base.add(current * self.size) })
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let remaining = self.num - self.current;
-        (remaining, Some(remaining))
-    }
-}
-
 /// Represents a pool of maximal `Instance` structures.
 ///
 /// Each index in the pool provides enough space for a maximal `Instance`
@@ -395,8 +357,11 @@ impl InstancePool {
             .checked_mul(max_instances)
             .ok_or_else(|| anyhow!("total size of instance data exceeds addressable memory"))?;

+        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
+            .context("failed to create instance pool mapping")?;
+
         let pool = Self {
-            mapping: create_memory_map(allocation_size, allocation_size)?,
+            mapping,
             offsets,
             instance_size,
             max_instances,
@@ -414,15 +379,18 @@ impl InstancePool {
         Ok(pool)
     }

-    fn initialize(&self, index: usize, module: &Arc<Module>) {
+    unsafe fn instance(&self, index: usize) -> &mut Instance {
         debug_assert!(index < self.max_instances);
+        &mut *(self.mapping.as_mut_ptr().add(index * self.instance_size) as *mut Instance)
+    }

+    fn initialize(&self, index: usize, module: &Arc<Module>) {
         unsafe {
-            let instance_ptr = self.mapping.as_mut_ptr().add(index * self.instance_size);
+            let instance = self.instance(index);

             // Write a default instance with preallocated memory/table map storage to the ptr
             std::ptr::write(
-                instance_ptr as _,
+                instance as _,
                 Instance {
                     module: module.clone(),
                     offsets: self.offsets,
@@ -456,9 +424,7 @@ impl InstancePool {
         let host_state = std::mem::replace(&mut req.host_state, Box::new(()));

         unsafe {
-            debug_assert!(index < self.max_instances);
-            let instance =
-                &mut *(self.mapping.as_mut_ptr().add(index * self.instance_size) as *mut Instance);
+            let instance = self.instance(index);

             instance.module = req.module.clone();
             instance.offsets = VMOffsets::new(
@@ -490,47 +456,40 @@ impl InstancePool {
         let index = (addr - base) / self.instance_size;
         debug_assert!(index < self.max_instances);

-        unsafe {
-            // Decommit any linear memories that were used
-            for (mem, base) in (*handle.instance)
-                .memories
-                .values()
-                .zip(self.memories.get(index))
-            {
-                let size = (mem.size() * WASM_PAGE_SIZE) as usize;
-                if size > 0 {
-                    decommit(base, size);
-                }
-            }
+        let instance = unsafe { &mut *handle.instance };

-            // Decommit any tables that were used
-            let table_element_size = max_table_element_size();
-            for (table, base) in (*handle.instance)
-                .tables
-                .values()
-                .zip(self.tables.get(index))
-            {
-                let size = round_up_to_pow2(
-                    table.size() as usize * table_element_size,
-                    self.tables.page_size,
-                );
-                if size > 0 {
-                    decommit(base, size);
-                }
-            }
-
-            // Drop the host state
-            (*handle.instance).host_state = Box::new(());
+        // Decommit any linear memories that were used
+        for (mem, base) in instance.memories.values().zip(self.memories.get(index)) {
+            let size = (mem.size() * WASM_PAGE_SIZE) as usize;
+            decommit_memory_pages(base, size).unwrap();
         }

-        {
-            self.free_list.lock().unwrap().push(index);
+        instance.memories.clear();
+        instance.dropped_data.borrow_mut().clear();
+
+        // Decommit any tables that were used
+        let table_element_size = max_table_element_size();
+        for (table, base) in instance.tables.values().zip(self.tables.get(index)) {
+            let size = round_up_to_pow2(
+                table.size() as usize * table_element_size,
+                self.tables.page_size,
+            );
+
+            decommit_table_pages(base, size).unwrap();
         }
+
+        instance.tables.clear();
+        instance.dropped_elements.borrow_mut().clear();
+
+        // Drop any host state
+        instance.host_state = Box::new(());
+
+        self.free_list.lock().unwrap().push(index);
     }

     fn set_instance_memories(
         instance: &mut Instance,
-        mut memories: BasePointerIterator,
+        mut memories: impl Iterator<Item = *mut u8>,
         max_pages: u32,
     ) -> Result<(), InstantiationError> {
         let module = instance.module.as_ref();
@@ -541,19 +500,24 @@
             .reset_guard_pages()
             .map_err(|e| InstantiationError::Resource(e.to_string()))?;

-        instance.memories.clear();
+        debug_assert!(instance.memories.is_empty());

         for plan in
             (&module.memory_plans.values().as_slice()[module.num_imported_memories..]).iter()
         {
             instance.memories.push(
-                Memory::new_static(plan, memories.next().unwrap(), max_pages, make_accessible)
-                    .map_err(InstantiationError::Resource)?,
+                Memory::new_static(
+                    plan,
+                    memories.next().unwrap(),
+                    max_pages,
+                    commit_memory_pages,
+                )
+                .map_err(|e| InstantiationError::Resource(e.to_string()))?,
             );
         }

         let mut dropped_data = instance.dropped_data.borrow_mut();
-        dropped_data.clear();
+        debug_assert!(dropped_data.is_empty());
         dropped_data.resize(module.passive_data.len());

         Ok(())
@@ -561,22 +525,18 @@ impl InstancePool {

     fn set_instance_tables(
         instance: &mut Instance,
-        mut tables: BasePointerIterator,
+        mut tables: impl Iterator<Item = *mut u8>,
         max_elements: u32,
     ) -> Result<(), InstantiationError> {
         let module = instance.module.as_ref();

-        instance.tables.clear();
+        debug_assert!(instance.tables.is_empty());

         for plan in (&module.table_plans.values().as_slice()[module.num_imported_tables..]).iter()
         {
             let base = tables.next().unwrap();

-            // Make the table data accessible
-            if unsafe { !make_accessible(base, max_elements as usize * max_table_element_size()) } {
-                return Err(InstantiationError::Resource(
-                    "failed to make instance memory accessible".into(),
-                ));
-            }
+            commit_table_pages(base, max_elements as usize * max_table_element_size())
+                .map_err(|e| InstantiationError::Resource(e.to_string()))?;

             instance
                 .tables
@@ -584,7 +544,7 @@ impl InstancePool {
         }

         let mut dropped_elements = instance.dropped_elements.borrow_mut();
-        dropped_elements.clear();
+        debug_assert!(dropped_elements.is_empty());
         dropped_elements.resize(module.passive_elements.len());

         Ok(())
@@ -623,8 +583,31 @@ struct MemoryPool {

 impl MemoryPool {
     fn new(module_limits: &ModuleLimits, instance_limits: &InstanceLimits) -> Result<Self> {
-        let memory_size = usize::try_from(instance_limits.memory_reservation_size)
-            .map_err(|_| anyhow!("memory reservation size exceeds addressable memory"))?;
+        // The maximum module memory page count cannot exceed 65536 pages
+        if module_limits.memory_pages > 0x10000 {
+            bail!(
+                "module memory page limit of {} exceeds the maximum of 65536",
+                module_limits.memory_pages
+            );
+        }
+
+        // The maximum module memory page count cannot exceed the memory reservation size
+        if (module_limits.memory_pages * WASM_PAGE_SIZE) as u64
+            > instance_limits.memory_reservation_size
+        {
+            bail!(
+                "module memory page limit of {} pages exceeds the memory reservation size limit of {} bytes",
+                module_limits.memory_pages,
+                instance_limits.memory_reservation_size
+            );
+        }
+
+        let memory_size = if module_limits.memory_pages > 0 {
+            usize::try_from(instance_limits.memory_reservation_size)
+                .map_err(|_| anyhow!("memory reservation size exceeds addressable memory"))?
+        } else {
+            0
+        };

         debug_assert!(
             memory_size % region::page::size() == 0,
@@ -642,25 +625,36 @@ impl MemoryPool {
                 anyhow!("total size of memory reservation exceeds addressable memory")
             })?;

-        Ok(Self {
-            mapping: create_memory_map(0, allocation_size)?,
+        // Create a completely inaccessible region to start
+        let mapping = Mmap::accessible_reserved(0, allocation_size)
+            .context("failed to create memory pool mapping")?;
+
+        let pool = Self {
+            mapping,
             memory_size,
             max_memories,
             max_instances,
             max_wasm_pages: module_limits.memory_pages,
-        })
+        };
+
+        // uffd support requires some special setup for the memory pool
+        #[cfg(all(feature = "uffd", target_os = "linux"))]
+        initialize_memory_pool(&pool)?;
+
+        Ok(pool)
     }

-    fn get(&self, instance_index: usize) -> BasePointerIterator {
+    fn get(&self, instance_index: usize) -> impl Iterator<Item = *mut u8> {
         debug_assert!(instance_index < self.max_instances);

-        let base = unsafe {
+        let base: *mut u8 = unsafe {
             self.mapping
                 .as_mut_ptr()
                 .add(instance_index * self.memory_size * self.max_memories) as _
         };

-        BasePointerIterator::new(base, self.max_memories, self.memory_size)
+        let size = self.memory_size;
+        (0..self.max_memories).map(move |i| unsafe { base.add(i * size) })
     }
 }

@@ -668,9 +662,6 @@
 ///
 /// Each instance index into the pool returns an iterator over the base addresses
 /// of the instance's tables.
-///
-/// The userfault handler relies on how tables are stored in the mapping,
-/// so make sure the uffd implementation is kept up-to-date.
 #[derive(Debug)]
 struct TablePool {
     mapping: Mmap,
@@ -685,12 +676,16 @@ impl TablePool {
     fn new(module_limits: &ModuleLimits, instance_limits: &InstanceLimits) -> Result<Self> {
         let page_size = region::page::size();

-        let table_size = round_up_to_pow2(
-            max_table_element_size()
-                .checked_mul(module_limits.table_elements as usize)
-                .ok_or_else(|| anyhow!("table size exceeds addressable memory"))?,
-            page_size,
-        );
+        let table_size = if module_limits.table_elements > 0 {
+            round_up_to_pow2(
+                max_table_element_size()
+                    .checked_mul(module_limits.table_elements as usize)
+                    .ok_or_else(|| anyhow!("table size exceeds addressable memory"))?,
+                page_size,
+            )
+        } else {
+            0
+        };

         let max_instances = instance_limits.count as usize;
         let max_tables = module_limits.tables as usize;
@@ -700,26 +695,30 @@
             .and_then(|c| c.checked_mul(max_instances))
             .ok_or_else(|| anyhow!("total size of instance tables exceeds addressable memory"))?;

+        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
+            .context("failed to create table pool mapping")?;
+
         Ok(Self {
-            mapping: create_memory_map(0, allocation_size)?,
+            mapping,
             table_size,
             max_tables,
             max_instances,
-            page_size: region::page::size(),
+            page_size,
             max_elements: module_limits.table_elements,
         })
     }

-    fn get(&self, instance_index: usize) -> BasePointerIterator {
+    fn get(&self, instance_index: usize) -> impl Iterator<Item = *mut u8> {
         debug_assert!(instance_index < self.max_instances);

-        let base = unsafe {
+        let base: *mut u8 = unsafe {
             self.mapping
                 .as_mut_ptr()
                 .add(instance_index * self.table_size * self.max_tables) as _
         };

-        BasePointerIterator::new(base, self.max_tables, self.table_size)
+        let size = self.table_size;
+        (0..self.max_tables).map(move |i| unsafe { base.add(i * size) })
     }
 }

@@ -733,9 +732,6 @@
 ///
 /// The top of the stack (starting stack pointer) is returned when a stack is allocated
 /// from the pool.
-///
-/// The userfault handler relies on how stacks are stored in the mapping,
-/// so make sure the uffd implementation is kept up-to-date.
 #[derive(Debug)]
 struct StackPool {
     mapping: Mmap,
@@ -765,15 +761,18 @@ impl StackPool {
             .checked_mul(max_instances)
             .ok_or_else(|| anyhow!("total size of execution stacks exceeds addressable memory"))?;

-        let mapping = create_memory_map(allocation_size, allocation_size)?;
+        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
+            .context("failed to create stack pool mapping")?;

         // Set up the stack guard pages
-        unsafe {
-            for i in 0..max_instances {
-                // Make the stack guard page inaccessible
-                let bottom_of_stack = mapping.as_mut_ptr().add(i * stack_size);
-                region::protect(bottom_of_stack, page_size, region::Protection::NONE)
-                    .context("failed to protect stack guard page")?;
+        if allocation_size > 0 {
+            unsafe {
+                for i in 0..max_instances {
+                    // Make the stack guard page inaccessible
+                    let bottom_of_stack = mapping.as_mut_ptr().add(i * stack_size);
+                    region::protect(bottom_of_stack, page_size, region::Protection::NONE)
+                        .context("failed to protect stack guard page")?;
+                }
             }
         }

@@ -804,8 +803,19 @@ impl StackPool {
         debug_assert!(index < self.max_instances);

         unsafe {
-            // The top (end) of the stack should be returned
-            Ok(self.mapping.as_mut_ptr().add((index + 1) * self.stack_size))
+            // Remove the guard page from the size
+            let size_without_guard = self.stack_size - self.page_size;
+
+            let bottom_of_stack = self
+                .mapping
+                .as_mut_ptr()
+                .add((index * self.stack_size) + self.page_size);
+
+            commit_stack_pages(bottom_of_stack, size_without_guard)
+                .map_err(|e| FiberStackError::Resource(e.to_string()))?;
+
+            // The top of the stack should be returned
+            Ok(bottom_of_stack.add(size_without_guard))
         }
     }

@@ -826,18 +836,16 @@ impl StackPool {
             let index = (start_of_stack - base) / self.stack_size;
             debug_assert!(index < self.max_instances);

-            decommit(bottom_of_stack, stack_size);
+            decommit_stack_pages(bottom_of_stack, stack_size).unwrap();

-            {
-                self.free_list.lock().unwrap().push(index);
-            }
+            self.free_list.lock().unwrap().push(index);
         }
     }
 }

 /// Implements the pooling instance allocator.
 ///
-/// This allocator interinally maintains pools of instances, memories, tables, and stacks.
+/// This allocator internally maintains pools of instances, memories, tables, and stacks.
 ///
 /// Note: the resource pools are manually dropped so that the fault handler terminates correctly.
 #[derive(Debug)]
 pub struct PoolingInstanceAllocator {
     strategy: PoolingAllocationStrategy,
     module_limits: ModuleLimits,
     instance_limits: InstanceLimits,
+    // This is manually dropped so that the pools unmap their memory before the page fault handler is dropped.
     instances: mem::ManuallyDrop<InstancePool>,
-    stacks: mem::ManuallyDrop<StackPool>,
+    stacks: StackPool,
     #[cfg(all(feature = "uffd", target_os = "linux"))]
-    _fault_handler: PageFaultHandler,
+    _fault_handler: imp::PageFaultHandler,
 }

 impl PoolingInstanceAllocator {
@@ -874,37 +883,18 @@ impl PoolingInstanceAllocator {
         instance_limits.memory_reservation_size =
             min(instance_limits.memory_reservation_size, 0x200000000);

-        // The maximum module memory page count cannot exceed 65536 pages
-        if module_limits.memory_pages > 0x10000 {
-            bail!(
-                "module memory page limit of {} exceeds the maximum of 65536",
-                module_limits.memory_pages
-            );
-        }
-
-        // The maximum module memory page count cannot exceed the memory reservation size
-        if (module_limits.memory_pages * WASM_PAGE_SIZE) as u64
-            > instance_limits.memory_reservation_size
-        {
-            bail!(
-                "module memory page limit of {} pages exeeds the memory reservation size limit of {} bytes",
-                module_limits.memory_pages,
-                instance_limits.memory_reservation_size
-            );
-        }
-
         let instances = InstancePool::new(&module_limits, &instance_limits)?;
         let stacks = StackPool::new(&instance_limits, stack_size)?;

         #[cfg(all(feature = "uffd", target_os = "linux"))]
-        let _fault_handler = PageFaultHandler::new(&instances)?;
+        let _fault_handler = imp::PageFaultHandler::new(&instances)?;

         Ok(Self {
             strategy,
             module_limits,
             instance_limits,
             instances: mem::ManuallyDrop::new(instances),
-            stacks: mem::ManuallyDrop::new(stacks),
+            stacks,
             #[cfg(all(feature = "uffd", target_os = "linux"))]
             _fault_handler,
         })
@@ -917,7 +907,6 @@ impl Drop for PoolingInstanceAllocator {
         // This ensures that any fault handler thread monitoring the pool memory terminates
         unsafe {
             mem::ManuallyDrop::drop(&mut self.instances);
-            mem::ManuallyDrop::drop(&mut self.stacks);
         }
     }
 }
@@ -963,13 +952,13 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
         cfg_if::cfg_if! {
             if #[cfg(all(feature = "uffd", target_os = "linux"))] {
                 match &instance.module.memory_initialization {
-                    MemoryInitialization::Paged{ out_of_bounds, .. } => {
+                    wasmtime_environ::MemoryInitialization::Paged{ out_of_bounds, .. } => {
                         if !is_bulk_memory {
-                            check_init_bounds(instance)?;
+                            super::check_init_bounds(instance)?;
                         }

                         // Initialize the tables
-                        initialize_tables(instance)?;
+                        super::initialize_tables(instance)?;

                         // Don't initialize the memory; the fault handler will back the pages when accessed

@@ -1312,21 +1301,6 @@ mod test {
         assert_eq!(strat.next(1), 0);
     }

-    #[test]
-    fn test_base_pointer_iterator() {
-        let mut iter = BasePointerIterator::new(std::ptr::null_mut(), 5, 3);
-
-        assert_eq!(iter.next(), Some(0usize as _));
-        assert_eq!(iter.next(), Some(3usize as _));
-        assert_eq!(iter.next(), Some(6usize as _));
-        assert_eq!(iter.next(), Some(9usize as _));
-        assert_eq!(iter.next(), Some(12usize as _));
-        assert_eq!(iter.next(), None);
-
-        let mut iter = BasePointerIterator::new(std::ptr::null_mut(), 0, 10);
-        assert_eq!(iter.next(), None);
-    }
-
     #[cfg(target_pointer_width = "64")]
     #[test]
     fn test_instance_pool() -> Result<()> {
             memories: 1,
             globals: 0,
             table_elements: 10,
-            memory_pages: 10,
+            memory_pages: 1,
         };
         let instance_limits = InstanceLimits {
             count: 3,
-            memory_reservation_size: 4096,
+            memory_reservation_size: WASM_PAGE_SIZE as u64,
         };

         let instances = InstancePool::new(&module_limits, &instance_limits)?;
@@ -1366,7 +1340,7 @@ mod test {
         assert_eq!(instances.instance_size, 4096);
         assert_eq!(instances.max_instances, 3);

-        assert_eq!(&*instances.free_list.lock().unwrap(), &[0, 1, 2],);
+        assert_eq!(&*instances.free_list.lock().unwrap(), &[0, 1, 2]);

         let mut handles = Vec::new();
         let module = Arc::new(Module::default());
@@ -1397,7 +1371,7 @@ mod test {
             );
         }

-        assert_eq!(&*instances.free_list.lock().unwrap(), &[],);
+        assert_eq!(&*instances.free_list.lock().unwrap(), &[]);

         match instances.allocate(
             PoolingAllocationStrategy::NextAvailable,
@@ -1425,7 +1399,7 @@ mod test {
             instances.deallocate(&handle);
         }

-        assert_eq!(&*instances.free_list.lock().unwrap(), &[2, 1, 0],);
+        assert_eq!(&*instances.free_list.lock().unwrap(), &[2, 1, 0]);

         Ok(())
     }
@@ -1445,7 +1419,7 @@ mod test {
                 memories: 3,
                 globals: 0,
                 table_elements: 0,
-                memory_pages: 10,
+                memory_pages: 1,
             },
             &InstanceLimits {
                 count: 5,
@@ -1456,7 +1430,7 @@ mod test {
         assert_eq!(pool.memory_size, WASM_PAGE_SIZE as usize);
         assert_eq!(pool.max_memories, 3);
         assert_eq!(pool.max_instances, 5);
-        assert_eq!(pool.max_wasm_pages, 10);
+        assert_eq!(pool.max_wasm_pages, 1);

         let base = pool.mapping.as_ptr() as usize;
@@ -1554,7 +1528,7 @@ mod test {
             stacks.push(stack);
         }

-        assert_eq!(&*pool.free_list.lock().unwrap(), &[],);
+        assert_eq!(&*pool.free_list.lock().unwrap(), &[]);

         match pool
             .allocate(PoolingAllocationStrategy::NextAvailable)
@@ -1632,7 +1606,7 @@ mod test {
             )
             .map_err(|e| e.to_string())
            .expect_err("expected a failure constructing instance allocator"),
-            "module memory page limit of 2 pages exeeds the memory reservation size limit of 65536 bytes"
+            "module memory page limit of 2 pages exceeds the memory reservation size limit of 65536 bytes"
         );
     }
diff --git a/crates/runtime/src/instance/allocator/pooling/linux.rs b/crates/runtime/src/instance/allocator/pooling/linux.rs
index bd62f37dfb..324200efe4 100644
--- a/crates/runtime/src/instance/allocator/pooling/linux.rs
+++ b/crates/runtime/src/instance/allocator/pooling/linux.rs
@@ -1,23 +1,58 @@
-use crate::Mmap;
-use anyhow::{anyhow, Result};
+use anyhow::{bail, Context, Result};

-pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
-    region::protect(addr, len, region::Protection::READ_WRITE).is_ok()
+fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }
+
+    unsafe {
+        if protect {
+            region::protect(addr, len, region::Protection::NONE)
+                .context("failed to protect memory pages")?;
+        }
+
+        // On Linux, this is enough to cause the kernel to initialize the pages to 0 on next access
+        if libc::madvise(addr as _, len, libc::MADV_DONTNEED) != 0 {
+            bail!(
+                "madvise failed to decommit: {}",
+                std::io::Error::last_os_error()
+            );
+        }
+    }
+
+    Ok(())
 }

-pub unsafe fn decommit(addr: *mut u8, len: usize) {
-    region::protect(addr, len, region::Protection::NONE).unwrap();
+pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }

-    // On Linux, this is enough to cause the kernel to initialize the pages to 0 on next access
-    assert_eq!(
-        libc::madvise(addr as _, len, libc::MADV_DONTNEED),
-        0,
-        "madvise failed to mark pages as missing: {}",
-        std::io::Error::last_os_error()
-    );
+    // Just change the protection level to READ|WRITE
+    unsafe {
+        region::protect(addr, len, region::Protection::READ_WRITE)
+            .context("failed to make linear memory pages read/write")
+    }
 }

-pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap> {
-    Mmap::accessible_reserved(accessible_size, mapping_size)
-        .map_err(|e| anyhow!("failed to allocate pool memory: {}", e))
+pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, true)
+}
+
+pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as table pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, false)
+}
+
+pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as stack pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, false)
 }
diff --git a/crates/runtime/src/instance/allocator/pooling/uffd.rs b/crates/runtime/src/instance/allocator/pooling/uffd.rs
index b140a8d2a9..18e9eb3c2c 100644
--- a/crates/runtime/src/instance/allocator/pooling/uffd.rs
+++ b/crates/runtime/src/instance/allocator/pooling/uffd.rs
@@ -16,11 +16,10 @@
 //! 1. A user fault file descriptor is created to monitor specific areas of the address space.
 //! 2. A thread is spawned to continually read events from the user fault file descriptor.
 //! 3. When a page fault event is received, the handler thread calculates where the fault occurred:
-//!    a) If the fault occurs on a table page, it is handled by zeroing the page.
-//!    b) If the fault occurs on a linear memory page, it is handled by either copying the page from
+//!    a) If the fault occurs on a linear memory page, it is handled by either copying the page from
 //!       initialization data or zeroing it.
-//!    c) If the fault occurs on a guard page, the protection level of the guard page is changed to
-//!       force the kernel to signal SIGSEV on the next retry. The faulting page is recorded so the
+//!    b) If the fault occurs on a guard page, the protection level of the guard page is changed to
+//!       force the kernel to signal SIGBUS on the next retry. The faulting page is recorded so the
 //!       protection level can be reset in the future.
 //! 4. Faults to address space relating to an instance may occur from both Wasmtime (e.g. instance
 //!    initialization) or from WebAssembly code (e.g. reading from or writing to linear memory),
 //!
 //! This feature requires a Linux kernel 4.11 or newer to use.
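The flow described above is easier to follow with a condensed sketch of the monitoring loop. This is illustrative only, assuming the `userfaultfd` crate API this file already imports; the region arguments and the 4096-byte page size are hypothetical stand-ins for the real pool geometry:

    use userfaultfd::{Event, Uffd, UffdBuilder};

    fn monitor(region_start: *mut u8, region_len: usize) -> Result<(), userfaultfd::Error> {
        // Step 1: create a user fault descriptor and register the monitored region.
        let uffd: Uffd = UffdBuilder::new().create()?;
        uffd.register(region_start as _, region_len)?;

        // Step 2: read events in a loop (the real handler runs this on its own thread).
        loop {
            match uffd.read_event()? {
                // Step 3: back the faulting page (here simply zeroed; the real handler
                // aligns to the page boundary and may copy initialization data instead)
                // and wake the faulting thread.
                Some(Event::Pagefault { addr, .. }) => unsafe {
                    uffd.zeropage(addr, 4096, true)?;
                },
                // Step 4: stop once the monitored region is unmapped.
                Some(Event::Unmap { .. }) => return Ok(()),
                _ => continue,
            }
        }
    }
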
-use super::InstancePool;
-use crate::{instance::Instance, Mmap};
+use super::{InstancePool, MemoryPool};
+use crate::instance::Instance;
 use anyhow::{bail, Context, Result};
-use std::ptr;
 use std::thread;
 use userfaultfd::{Event, FeatureFlags, IoctlFlags, Uffd, UffdBuilder};
 use wasmtime_environ::{entity::EntityRef, wasm::DefinedMemoryIndex, MemoryInitialization};

 const WASM_PAGE_SIZE: usize = wasmtime_environ::WASM_PAGE_SIZE as usize;

-pub unsafe fn make_accessible(_addr: *mut u8, _len: usize) -> bool {
-    // A no-op when userfaultfd is used
-    true
-}
-
-pub unsafe fn reset_guard_page(addr: *mut u8, len: usize) -> bool {
-    // Guard pages are READ_WRITE with uffd until faulted
-    region::protect(addr, len, region::Protection::READ_WRITE).is_ok()
-}
-
-pub unsafe fn decommit(addr: *mut u8, len: usize) {
-    // Use MADV_DONTNEED to mark the pages as missing
-    // This will cause a missing page fault for next access on any page in the given range
-    assert_eq!(
-        libc::madvise(addr as _, len, libc::MADV_DONTNEED),
-        0,
-        "madvise failed to mark pages as missing: {}",
-        std::io::Error::last_os_error()
-    );
-}
-
-pub fn create_memory_map(_accessible_size: usize, mapping_size: usize) -> Result<Mmap> {
-    // Allocate a single read-write region at once
-    // As writable pages need to count towards commit charge, use MAP_NORESERVE to override.
-    // This implies that the kernel is configured to allow overcommit or else this allocation
-    // will almost certainly fail without a plethora of physical memory to back the allocation.
-    // The consequence of not reserving is that our process may segfault on any write to a memory
-    // page that cannot be backed (i.e. out of memory conditions).
-
-    if mapping_size == 0 {
-        return Ok(Mmap::new());
+fn decommit(addr: *mut u8, len: usize) -> Result<()> {
+    if len == 0 {
+        return Ok(());
     }

     unsafe {
-        let ptr = libc::mmap(
-            ptr::null_mut(),
-            mapping_size,
-            libc::PROT_READ | libc::PROT_WRITE,
-            libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_NORESERVE,
-            -1,
-            0,
-        );
-
-        if ptr as isize == -1_isize {
+        // On Linux, this is enough to cause the kernel to initialize the pages to 0 on next access
+        if libc::madvise(addr as _, len, libc::MADV_DONTNEED) != 0 {
             bail!(
-                "failed to allocate pool memory: mmap failed with {}",
+                "madvise failed to decommit: {}",
                 std::io::Error::last_os_error()
             );
         }
+    }

-        Ok(Mmap::from_raw(ptr as usize, mapping_size))
+    Ok(())
+}
+
+pub fn commit_memory_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as memory pages remain READ|WRITE with uffd
+    Ok(())
+}
+
+pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
+}
+
+pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as table pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
+}
+
+pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as stack pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
+}
+
+/// This is used to initialize the memory pool when uffd is enabled.
+///
+/// Without uffd, all of the memory pool's pages are initially protected with `NONE` to treat the entire
+/// range as guard pages. When an instance is created, the initial pages of the memory are
+/// changed to `READ_WRITE`.
+///
+/// With uffd, however, the potentially accessible pages of each linear memory are made `READ_WRITE` and
+/// the page fault handler will detect an out-of-bounds access and treat the page, temporarily,
+/// as a guard page.
+pub(super) fn initialize_memory_pool(pool: &MemoryPool) -> Result<()> {
+    if pool.memory_size == 0 || pool.max_wasm_pages == 0 {
+        return Ok(());
+    }
+
+    for i in 0..pool.max_instances {
+        for base in pool.get(i) {
+            unsafe {
+                region::protect(
+                    base as _,
+                    pool.max_wasm_pages as usize * WASM_PAGE_SIZE,
+                    region::Protection::READ_WRITE,
+                )
+                .context("failed to initialize memory pool for uffd")?;
+            }
+        }
+    }
+
+    Ok(())
+}
+
+/// This is used to reset a linear memory's guard page back to read-write as the page might be accessible
+/// again in the future depending on how the linear memory grows.
+fn reset_guard_page(addr: *mut u8, len: usize) -> Result<()> {
+    unsafe {
+        region::protect(addr, len, region::Protection::READ_WRITE)
+            .context("failed to reset guard page")
+    }
 }

 /// Represents a location of a page fault within monitored regions of memory.
-enum AddressLocation<'a> {
-    /// The address location is in a WebAssembly table page.
-    /// The fault handler will zero the page as tables are initialized at instantiation-time.
-    TablePage {
-        /// The address of the page being accessed.
-        page_addr: *mut u8,
-        /// The length of the page being accessed.
-        len: usize,
-    },
+enum FaultLocation<'a> {
     /// The address location is in a WebAssembly linear memory page.
     /// The fault handler will copy the pages from initialization data if necessary.
     MemoryPage {
@@ -121,12 +143,12 @@
     },
 }

-/// Used to resolve fault addresses to address locations.
+/// Used to resolve fault addresses to a location.
 ///
-/// This implementation relies heavily on how the various resource pools utilize their memory.
+/// This implementation relies heavily on how the linear memory pool organizes its memory.
 ///
 /// `usize` is used here instead of pointers to keep this `Send` as it gets sent to the handler thread.
-struct AddressLocator {
+struct FaultLocator {
     instances_start: usize,
     instance_size: usize,
     max_instances: usize,
@@ -134,19 +156,13 @@
     memories_end: usize,
     memory_size: usize,
     max_memories: usize,
-    tables_start: usize,
-    tables_end: usize,
-    table_size: usize,
-    page_size: usize,
 }

-impl AddressLocator {
+impl FaultLocator {
     fn new(instances: &InstancePool) -> Self {
         let instances_start = instances.mapping.as_ptr() as usize;
         let memories_start = instances.memories.mapping.as_ptr() as usize;
         let memories_end = memories_start + instances.memories.mapping.len();
-        let tables_start = instances.tables.mapping.as_ptr() as usize;
-        let tables_end = tables_start + instances.tables.mapping.len();

         // Should always have instances
         debug_assert!(instances_start != 0);
@@ -159,10 +175,6 @@
             memories_end,
             memory_size: instances.memories.memory_size,
             max_memories: instances.memories.max_memories,
-            tables_start,
-            tables_end,
-            table_size: instances.tables.table_size,
-            page_size: instances.tables.page_size,
         }
     }

@@ -174,7 +186,7 @@
     ///
     /// Of course a stray faulting memory access from a thread that does not own
     /// the instance might introduce a race, but this implementation considers
-    /// such to be a serious bug.
+    /// such to be a serious soundness bug not originating in this code.
     ///
     /// If the assumption holds true, accessing the instance data from the handler thread
     /// should, in theory, be safe.
@@ -183,8 +195,8 @@
         &*((self.instances_start + (index * self.instance_size)) as *const Instance)
     }

-    unsafe fn get_location(&self, addr: usize) -> Option<AddressLocation> {
-        // Check for a memory location
+    unsafe fn locate(&self, addr: usize) -> Option<FaultLocation> {
+        // Check for a linear memory location
         if addr >= self.memories_start && addr < self.memories_end {
             let index = (addr - self.memories_start) / self.memory_size;
             let memory_index = DefinedMemoryIndex::new(index % self.max_memories);
@@ -200,7 +212,7 @@
                 }
             });

-            return Some(AddressLocation::MemoryPage {
+            return Some(FaultLocation::MemoryPage {
                 page_addr: (memory_start + page_index * WASM_PAGE_SIZE) as _,
                 len: WASM_PAGE_SIZE,
                 instance,
@@ -209,19 +221,6 @@
             });
         }

-        // Check for a table location
-        if addr >= self.tables_start && addr < self.tables_end {
-            let index = (addr - self.tables_start) / self.table_size;
-            let table_start = self.tables_start + (index * self.table_size);
-            let table_offset = addr - table_start;
-            let page_index = table_offset / self.page_size;
-
-            return Some(AddressLocation::TablePage {
-                page_addr: (table_start + (page_index * self.page_size)) as _,
-                len: self.page_size,
-            });
-        }
-
         None
     }
 }
@@ -231,9 +230,9 @@
 /// Because the region being monitored is protected read-write, this needs to set the
 /// protection level to `NONE` before waking the page.
 ///
-/// This will cause the kernel to raise a SIGSEGV when retrying the fault.
+/// This will cause the kernel to raise a SIGBUS when retrying the fault.
 unsafe fn wake_guard_page_access(uffd: &Uffd, page_addr: *const u8, len: usize) -> Result<()> {
-    // Set the page to NONE to induce a SIGSEGV for the access on the next retry
+    // Set the page to NONE to induce a SIGBUS for the access on the next retry
     region::protect(page_addr, len, region::Protection::NONE)
         .context("failed to change guard page protection")?;

@@ -288,22 +287,11 @@ unsafe fn initialize_wasm_page(

 unsafe fn handle_page_fault(
     uffd: &Uffd,
-    locator: &AddressLocator,
+    locator: &FaultLocator,
     addr: *mut std::ffi::c_void,
 ) -> Result<()> {
-    match locator.get_location(addr as usize) {
-        Some(AddressLocation::TablePage { page_addr, len }) => {
-            log::trace!(
-                "handling fault in table at address {:p} on page {:p}",
-                addr,
-                page_addr,
-            );
-
-            // Tables are always initialized upon instantiation, so zero the page
-            uffd.zeropage(page_addr as _, len, true)
-                .context("failed to zero table page")?;
-        }
-        Some(AddressLocation::MemoryPage {
+    match locator.locate(addr as usize) {
+        Some(FaultLocation::MemoryPage {
             page_addr,
             len,
             instance,
@@ -340,7 +328,7 @@
     Ok(())
 }

-fn handler_thread(uffd: Uffd, locator: AddressLocator, mut registrations: usize) -> Result<()> {
+fn fault_handler_thread(uffd: Uffd, locator: FaultLocator) -> Result<()> {
     loop {
         match uffd.read_event().expect("failed to read event") {
             Some(Event::Unmap { start, end }) => {
@@ -348,13 +336,8 @@
                 let (start, end) = (start as usize, end as usize);

-                if (start == locator.memories_start && end == locator.memories_end)
-                    || (start == locator.tables_start && end == locator.tables_end)
-                {
-                    registrations -= 1;
-                    if registrations == 0 {
-                        break;
-                    }
+                if start == locator.memories_start && end == locator.memories_end {
+                    break;
                 } else {
panic!("unexpected memory region unmapped"); } @@ -385,53 +368,39 @@ impl PageFaultHandler { .create() .context("failed to create user fault descriptor")?; - // Register the ranges with the userfault fd - let mut registrations = 0; - for (start, len) in &[ - ( - instances.memories.mapping.as_ptr() as usize, - instances.memories.mapping.len(), - ), - ( - instances.tables.mapping.as_ptr() as usize, - instances.tables.mapping.len(), - ), - ] { - if *start == 0 || *len == 0 { - continue; - } + // Register the linear memory pool with the userfault fd + let start = instances.memories.mapping.as_ptr(); + let len = instances.memories.mapping.len(); + let thread = if !start.is_null() && len > 0 { let ioctls = uffd - .register(*start as _, *len) + .register(start as _, len) .context("failed to register user fault range")?; if !ioctls.contains(IoctlFlags::WAKE | IoctlFlags::COPY | IoctlFlags::ZEROPAGE) { bail!( - "required user fault ioctls not supported; found: {:?}", + "required user fault ioctls not supported by the kernel; found: {:?}", ioctls, ); } - registrations += 1; - } - - let thread = if registrations == 0 { - log::trace!("user fault handling disabled as there are no regions to monitor"); - None - } else { log::trace!( - "user fault handling enabled on {} memory regions", - registrations + "user fault handling enabled on linear memory pool at {:p} with size {}", + start, + len ); - let locator = AddressLocator::new(&instances); + let locator = FaultLocator::new(&instances); Some( thread::Builder::new() .name("page fault handler".into()) - .spawn(move || handler_thread(uffd, locator, registrations)) + .spawn(move || fault_handler_thread(uffd, locator)) .context("failed to spawn page fault handler thread")?, ) + } else { + log::trace!("user fault handling disabled as there is no linear memory pool"); + None }; Ok(Self { thread }) @@ -442,7 +411,7 @@ impl Drop for PageFaultHandler { fn drop(&mut self) { // The handler thread should terminate once all monitored regions of memory are unmapped. // The pooling instance allocator ensures that the regions are unmapped prior to dropping - // the user fault handler. + // the page fault handler. 
         if let Some(thread) = self.thread.take() {
             thread
                 .join()
                 .expect("failed to join page fault handler thread")
                 .expect("page fault handler thread failed");
         }
     }
 }

 #[cfg(test)]
 mod test {
     use super::*;
     use crate::{
-        table::max_table_element_size, Imports, InstanceAllocationRequest, InstanceLimits,
-        ModuleLimits, PoolingAllocationStrategy, VMSharedSignatureIndex,
+        Imports, InstanceAllocationRequest, InstanceLimits, ModuleLimits,
+        PoolingAllocationStrategy, VMSharedSignatureIndex,
     };
+    use std::ptr;
     use std::sync::Arc;
-    use wasmtime_environ::{
-        entity::PrimaryMap,
-        wasm::{Memory, Table, TableElementType, WasmType},
-        MemoryPlan, MemoryStyle, Module, TablePlan, TableStyle,
-    };
+    use wasmtime_environ::{entity::PrimaryMap, wasm::Memory, MemoryPlan, MemoryStyle, Module};

     #[cfg(target_pointer_width = "64")]
     #[test]
             imported_globals: 0,
             types: 0,
             functions: 0,
-            tables: 3,
+            tables: 0,
             memories: 2,
             globals: 0,
-            table_elements: 1000,
+            table_elements: 0,
             memory_pages: 2,
         };
         let instance_limits = InstanceLimits {
             count: 3,
             memory_reservation_size: (WASM_PAGE_SIZE * 10) as u64,
         };

         let instances =
             InstancePool::new(&module_limits, &instance_limits).expect("should allocate");

-        let locator = AddressLocator::new(&instances);
+        let locator = FaultLocator::new(&instances);

         assert_eq!(locator.instances_start, instances.mapping.as_ptr() as usize);
         assert_eq!(locator.instance_size, 4096);
         assert_eq!(locator.max_instances, 3);
         assert_eq!(
             locator.memories_start,
             instances.memories.mapping.as_ptr() as usize
         );
         assert_eq!(
             locator.memories_end,
             locator.memories_start + instances.memories.mapping.len()
         );
         assert_eq!(locator.memory_size, WASM_PAGE_SIZE * 10);
         assert_eq!(locator.max_memories, 2);
-        assert_eq!(
-            locator.tables_start,
-            instances.tables.mapping.as_ptr() as usize
-        );
-        assert_eq!(
-            locator.tables_end,
-            locator.tables_start + instances.tables.mapping.len()
-        );
-        assert_eq!(locator.table_size, 8192);

         unsafe {
-            assert!(locator.get_location(0).is_none());
-            assert!(locator
-                .get_location(std::cmp::max(locator.memories_end, locator.tables_end))
-                .is_none());
+            assert!(locator.locate(0).is_none());
+            assert!(locator.locate(locator.memories_end).is_none());

             let mut module = Module::new();
@@ -535,25 +490,13 @@
                 });
             }

-            for _ in 0..module_limits.tables {
-                module.table_plans.push(TablePlan {
-                    table: Table {
-                        wasm_ty: WasmType::FuncRef,
-                        ty: TableElementType::Func,
-                        minimum: 800,
-                        maximum: Some(900),
-                    },
-                    style: TableStyle::CallerChecksSignature,
-                });
-            }
-
             module_limits.validate(&module).expect("should validate");

             let mut handles = Vec::new();
             let module = Arc::new(module);
             let finished_functions = &PrimaryMap::new();

-            // Allocate the maximum number of instances with the maxmimum number of memories and tables
+            // Allocate the maximum number of instances with the maximum number of memories
             for _ in 0..instances.max_instances {
                 handles.push(
                     instances
                         .allocate(
@@ -570,9 +513,9 @@
                             },
                             lookup_shared_signature: &|_| VMSharedSignatureIndex::default(),
                             host_state: Box::new(()),
-                            interrupts: std::ptr::null(),
-                            externref_activations_table: std::ptr::null_mut(),
-                            stack_map_registry: std::ptr::null_mut(),
+                            interrupts: ptr::null(),
+                            externref_activations_table: ptr::null_mut(),
+                            stack_map_registry: ptr::null_mut(),
                         },
                     )
                     .expect("instance should allocate"),
@@ -587,8 +530,8 @@
                     + (memory_index * locator.memory_size);

                 // Test for access to first page
-                match locator.get_location(memory_start + 10000) {
-                    Some(AddressLocation::MemoryPage {
+                match locator.locate(memory_start + 10000) {
+                    Some(FaultLocation::MemoryPage {
                         page_addr,
                         len,
                         instance: _,
@@ -604,8 +547,8 @@
                 }

                 // Test for access to second page
-                match locator.get_location(memory_start + 1024 + WASM_PAGE_SIZE) {
-                    Some(AddressLocation::MemoryPage {
+                match locator.locate(memory_start + 1024 + WASM_PAGE_SIZE) {
+                    Some(FaultLocation::MemoryPage {
                         page_addr,
                         len,
                         instance: _,
@@ -621,8 +564,8 @@
                 }

                 // Test for guard page
-                match locator.get_location(memory_start + 10 + 9 * WASM_PAGE_SIZE) {
-                    Some(AddressLocation::MemoryPage {
+                match locator.locate(memory_start + 10 + 9 * WASM_PAGE_SIZE) {
+                    Some(FaultLocation::MemoryPage {
                         page_addr,
                         len,
                         instance: _,
@@ -639,33 +582,6 @@
                 }
             }

-            // Validate table locations
-            for instance_index in 0..instances.max_instances {
-                for table_index in 0..instances.tables.max_tables {
-                    let table_start = locator.tables_start
-                        + (instance_index * locator.table_size * instances.tables.max_tables)
-                        + (table_index * locator.table_size);
-
-                    // Check for an access of index 107 (first page)
-                    match locator.get_location(table_start + (107 * max_table_element_size())) {
-                        Some(AddressLocation::TablePage { page_addr, len }) => {
-                            assert_eq!(page_addr, table_start as _);
-                            assert_eq!(len, locator.page_size);
-                        }
-                        _ => panic!("expected a table page location"),
-                    }
-
-                    // Check for an access of index 799 (second page)
-                    match locator.get_location(table_start + (799 * max_table_element_size())) {
-                        Some(AddressLocation::TablePage { page_addr, len }) => {
-                            assert_eq!(page_addr, (table_start + locator.page_size) as _);
-                            assert_eq!(len, locator.page_size);
-                        }
-                        _ => panic!("expected a table page location"),
-                    }
-                }
-            }
-
             for handle in handles.drain(..) {
                 instances.deallocate(&handle);
             }
diff --git a/crates/runtime/src/instance/allocator/pooling/unix.rs b/crates/runtime/src/instance/allocator/pooling/unix.rs
index 9cc68b3361..957aea8e1b 100644
--- a/crates/runtime/src/instance/allocator/pooling/unix.rs
+++ b/crates/runtime/src/instance/allocator/pooling/unix.rs
@@ -1,27 +1,64 @@
-use crate::Mmap;
-use anyhow::{anyhow, Result};
+use anyhow::{bail, Context, Result};

-pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
-    region::protect(addr, len, region::Protection::READ_WRITE).is_ok()
-}
+fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }

-pub unsafe fn decommit(addr: *mut u8, len: usize) {
-    assert_eq!(
+    if unsafe {
         libc::mmap(
             addr as _,
             len,
-            libc::PROT_NONE,
+            if protect {
+                libc::PROT_NONE
+            } else {
+                libc::PROT_READ | libc::PROT_WRITE
+            },
             libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_FIXED,
             -1,
             0,
-        ) as *mut u8,
-        addr,
-        "mmap failed to remap pages: {}",
-        std::io::Error::last_os_error()
-    );
+        ) as *mut u8
+    } != addr
+    {
+        bail!(
+            "mmap failed to remap pages: {}",
+            std::io::Error::last_os_error()
+        );
+    }
+
+    Ok(())
 }

-pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap> {
-    Mmap::accessible_reserved(accessible_size, mapping_size)
-        .map_err(|e| anyhow!("failed to allocate pool memory: {}", e))
+pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }
+
+    // Just change the protection level to READ|WRITE
+    unsafe {
+        region::protect(addr, len, region::Protection::READ_WRITE)
+            .context("failed to make linear memory pages read/write")
+    }
+}
+
+pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, true)
+}
+
+pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as table pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, false)
+}
+
+pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as stack pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, false)
 }
diff --git a/crates/runtime/src/instance/allocator/pooling/windows.rs b/crates/runtime/src/instance/allocator/pooling/windows.rs
index 159f00b63f..286cd459fe 100644
--- a/crates/runtime/src/instance/allocator/pooling/windows.rs
+++ b/crates/runtime/src/instance/allocator/pooling/windows.rs
@@ -1,22 +1,55 @@
-use crate::Mmap;
-use anyhow::{anyhow, Result};
+use anyhow::{bail, Result};
 use winapi::um::memoryapi::{VirtualAlloc, VirtualFree};
 use winapi::um::winnt::{MEM_COMMIT, MEM_DECOMMIT, PAGE_READWRITE};

-pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
-    // This doesn't use the `region` crate because the memory needs to be committed
-    !VirtualAlloc(addr as _, len, MEM_COMMIT, PAGE_READWRITE).is_null()
+pub fn commit(addr: *mut u8, len: usize) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }
+
+    // Memory needs to be committed, so don't use the `region` crate
+    if unsafe { VirtualAlloc(addr as _, len, MEM_COMMIT, PAGE_READWRITE).is_null() } {
+        bail!("failed to commit memory as read/write");
+    }
+
+    Ok(())
 }

-pub unsafe fn decommit(addr: *mut u8, len: usize) {
-    assert!(
-        VirtualFree(addr as _, len, MEM_DECOMMIT) != 0,
-        "failed to decommit memory pages: {}",
-        std::io::Error::last_os_error()
-    );
+pub fn decommit(addr: *mut u8, len: usize) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }
+
+    if unsafe { VirtualFree(addr as _, len, MEM_DECOMMIT) } == 0 {
+        bail!(
+            "failed to decommit memory pages: {}",
+            std::io::Error::last_os_error()
+        );
+    }
+
+    Ok(())
 }

-pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap> {
-    Mmap::accessible_reserved(accessible_size, mapping_size)
-        .map_err(|e| anyhow!("failed to allocate pool memory: {}", e))
+pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    commit(addr, len)
+}
+
+pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
+}
+
+pub fn commit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
+    commit(addr, len)
+}
+
+pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
+}
+
+pub fn commit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
+    commit(addr, len)
+}
+
+pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
 }
diff --git a/crates/runtime/src/memory.rs b/crates/runtime/src/memory.rs
index 7d248b1361..ba7bab4700 100644
--- a/crates/runtime/src/memory.rs
+++ b/crates/runtime/src/memory.rs
@@ -4,6 +4,7 @@
 use crate::mmap::Mmap;
 use crate::vmcontext::VMMemoryDefinition;
+use anyhow::Result;
 use more_asserts::{assert_ge, assert_le};
 use std::cell::{Cell, RefCell};
 use std::cmp::min;
@@ -13,7 +14,7 @@ use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};
 /// A memory allocator
 pub trait RuntimeMemoryCreator: Send + Sync {
     /// Create new RuntimeLinearMemory
-    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>, String>;
+    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>>;
 }

 /// A default memory allocator used by Wasmtime
 pub struct DefaultMemoryCreator;

 impl RuntimeMemoryCreator for DefaultMemoryCreator {
     /// Create new MmapMemory
-    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>, String> {
-        Ok(Box::new(MmapMemory::new(plan)?) as Box<dyn RuntimeLinearMemory>)
+    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>> {
+        Ok(Box::new(MmapMemory::new(plan)?) as _)
     }
 }

@@ -65,7 +66,7 @@ struct WasmMmap {
 impl MmapMemory {
     /// Create a new linear memory instance with specified minimum and maximum number of wasm pages.
-    pub fn new(plan: &MemoryPlan) -> Result<Self, String> {
+    pub fn new(plan: &MemoryPlan) -> Result<Self> {
         // `maximum` cannot be set to more than `65536` pages.
         assert_le!(plan.memory.minimum, WASM_MAX_PAGES);
         assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= WASM_MAX_PAGES);
@@ -177,7 +178,7 @@ enum MemoryStorage {
         base: *mut u8,
         size: Cell<u32>,
         maximum: u32,
-        make_accessible: unsafe fn(*mut u8, usize) -> bool,
+        make_accessible: fn(*mut u8, usize) -> Result<()>,
     },
     Dynamic(Box<dyn RuntimeLinearMemory>),
 }
@@ -189,10 +190,7 @@ pub struct Memory {

 impl Memory {
     /// Create a new dynamic (movable) memory instance for the specified plan.
-    pub fn new_dynamic(
-        plan: &MemoryPlan,
-        creator: &dyn RuntimeMemoryCreator,
-    ) -> Result<Self, String> {
+    pub fn new_dynamic(plan: &MemoryPlan, creator: &dyn RuntimeMemoryCreator) -> Result<Self> {
         Ok(Self {
             storage: MemoryStorage::Dynamic(creator.new_memory(plan)?),
         })
@@ -203,14 +201,10 @@ impl Memory {
         plan: &MemoryPlan,
         base: *mut u8,
         maximum: u32,
-        make_accessible: unsafe fn(*mut u8, usize) -> bool,
-    ) -> Result<Self, String> {
+        make_accessible: fn(*mut u8, usize) -> Result<()>,
+    ) -> Result<Self> {
         if plan.memory.minimum > 0 {
-            if unsafe {
-                !make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize)
-            } {
-                return Err("memory cannot be made accessible".into());
-            }
+            make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize)?;
         }

         Ok(Self {
@@ -258,9 +252,7 @@ impl Memory {
                 let start = usize::try_from(old_size).unwrap() * WASM_PAGE_SIZE as usize;
                 let len = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;

-                if unsafe { !make_accessible(base.add(start), len) } {
-                    return None;
-                }
+                make_accessible(unsafe { base.add(start) }, len).ok()?;

                 size.set(new_size);
diff --git a/crates/runtime/src/mmap.rs b/crates/runtime/src/mmap.rs
index e60e87e378..b0612f91c9 100644
--- a/crates/runtime/src/mmap.rs
+++ b/crates/runtime/src/mmap.rs
@@ -1,6 +1,7 @@
 //! Low-level abstraction for allocating and managing zero-filled pages
 //! of memory.

+use anyhow::{bail, Result};
 use more_asserts::assert_le;
 use more_asserts::assert_lt;
 use std::io;
@@ -38,7 +39,7 @@ impl Mmap {
     }

     /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
-    pub fn with_at_least(size: usize) -> Result<Self, String> {
+    pub fn with_at_least(size: usize) -> Result<Self> {
         let page_size = region::page::size();
         let rounded_size = round_up_to_page_size(size, page_size);
         Self::accessible_reserved(rounded_size, rounded_size)
@@ -48,10 +49,7 @@ impl Mmap {
     /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
     /// must be native page-size multiples.
     #[cfg(not(target_os = "windows"))]
-    pub fn accessible_reserved(
-        accessible_size: usize,
-        mapping_size: usize,
-    ) -> Result<Self, String> {
+    pub fn accessible_reserved(accessible_size: usize, mapping_size: usize) -> Result<Self> {
         let page_size = region::page::size();
         assert_le!(accessible_size, mapping_size);
         assert_eq!(mapping_size & (page_size - 1), 0);
@@ -76,7 +74,7 @@ impl Mmap {
                 )
             };
             if ptr as isize == -1_isize {
-                return Err(io::Error::last_os_error().to_string());
+                bail!("mmap failed: {}", io::Error::last_os_error());
             }

             Self {
@@ -96,7 +94,7 @@ impl Mmap {
                 )
             };
             if ptr as isize == -1_isize {
-                return Err(io::Error::last_os_error().to_string());
+                bail!("mmap failed: {}", io::Error::last_os_error());
             }

             let mut result = Self {
@@ -117,10 +115,7 @@ impl Mmap {
     /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
     /// must be native page-size multiples.
     #[cfg(target_os = "windows")]
-    pub fn accessible_reserved(
-        accessible_size: usize,
-        mapping_size: usize,
-    ) -> Result<Self, String> {
+    pub fn accessible_reserved(accessible_size: usize, mapping_size: usize) -> Result<Self> {
         use winapi::um::memoryapi::VirtualAlloc;
         use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE};

@@ -144,7 +139,7 @@ impl Mmap {
                 )
             };
             if ptr.is_null() {
-                return Err(io::Error::last_os_error().to_string());
+                bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
             }

             Self {
@@ -156,7 +151,7 @@ impl Mmap {
             let ptr =
                 unsafe { VirtualAlloc(ptr::null_mut(), mapping_size, MEM_RESERVE, PAGE_NOACCESS) };
             if ptr.is_null() {
-                return Err(io::Error::last_os_error().to_string());
+                bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
             }

             let mut result = Self {
@@ -177,7 +172,7 @@ impl Mmap {
     /// `start` and `len` must be native page-size multiples and describe a range within
     /// `self`'s reserved memory.
     #[cfg(not(target_os = "windows"))]
-    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
+    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<()> {
         let page_size = region::page::size();
         assert_eq!(start & (page_size - 1), 0);
         assert_eq!(len & (page_size - 1), 0);
@@ -186,15 +181,18 @@ impl Mmap {
         // Commit the accessible size.
         let ptr = self.ptr as *const u8;
-        unsafe { region::protect(ptr.add(start), len, region::Protection::READ_WRITE) }
-            .map_err(|e| e.to_string())
+        unsafe {
+            region::protect(ptr.add(start), len, region::Protection::READ_WRITE)?;
+        }
+
+        Ok(())
     }

     /// Make the memory starting at `start` and extending for `len` bytes accessible.
     /// `start` and `len` must be native page-size multiples and describe a range within
     /// `self`'s reserved memory.
     #[cfg(target_os = "windows")]
-    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
+    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<()> {
         use winapi::ctypes::c_void;
         use winapi::um::memoryapi::VirtualAlloc;
         use winapi::um::winnt::{MEM_COMMIT, PAGE_READWRITE};
@@ -216,7 +214,7 @@ impl Mmap {
         }
         .is_null()
         {
-            return Err(io::Error::last_os_error().to_string());
+            bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
         }

         Ok(())
diff --git a/crates/wasmtime/src/trampoline/memory.rs b/crates/wasmtime/src/trampoline/memory.rs
index af2b954792..37c14c51f6 100644
--- a/crates/wasmtime/src/trampoline/memory.rs
+++ b/crates/wasmtime/src/trampoline/memory.rs
@@ -3,7 +3,7 @@ use crate::memory::{LinearMemory, MemoryCreator};
 use crate::trampoline::StoreInstanceHandle;
 use crate::Store;
 use crate::{Limits, MemoryType};
-use anyhow::Result;
+use anyhow::{anyhow, Result};
 use wasmtime_environ::entity::PrimaryMap;
 use wasmtime_environ::{wasm, MemoryPlan, MemoryStyle, Module, WASM_PAGE_SIZE};
 use wasmtime_runtime::{RuntimeLinearMemory, RuntimeMemoryCreator, VMMemoryDefinition};
@@ -57,7 +57,7 @@ impl RuntimeLinearMemory for LinearMemoryProxy {
 pub(crate) struct MemoryCreatorProxy(pub Arc<dyn MemoryCreator>);

 impl RuntimeMemoryCreator for MemoryCreatorProxy {
-    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>, String> {
+    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>> {
         let ty = MemoryType::new(Limits::new(plan.memory.minimum, plan.memory.maximum));
         let reserved_size_in_bytes = match plan.style {
             MemoryStyle::Static { bound } => Some(bound as u64 * WASM_PAGE_SIZE as u64),
@@ -66,5 +66,6 @@ impl RuntimeMemoryCreator for MemoryCreatorProxy {
         self.0
             .new_memory(ty, reserved_size_in_bytes, plan.offset_guard_size)
             .map(|mem| Box::new(LinearMemoryProxy { mem }) as Box<dyn RuntimeLinearMemory>)
+            .map_err(|e| anyhow!(e))
     }
 }
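Taken together, the platform modules above now expose one commit/decommit pair per resource type (memory, table, stack). A minimal sketch of how the pooling allocator drives that API when recycling a linear memory slot — the `recycle_memory_slot` name, pointer, and byte counts here are hypothetical; the real call sites are `InstancePool::deallocate` and `Memory::new_static` in this patch:

    use anyhow::Result;

    // Assumes the platform module's functions are in scope, as in pooling.rs:
    // use imp::{commit_memory_pages, decommit_memory_pages};
    fn recycle_memory_slot(slot_base: *mut u8, used_bytes: usize, minimum_bytes: usize) -> Result<()> {
        // Return the pages used by the previous instance to the kernel so they
        // read back as zero on next access (madvise/mmap/VirtualFree per platform).
        decommit_memory_pages(slot_base, used_bytes)?;

        // Make the next instance's minimum memory accessible again. On Linux/Unix
        // this is an mprotect to READ|WRITE, with uffd it is a no-op, and on
        // Windows it is a VirtualAlloc(MEM_COMMIT).
        commit_memory_pages(slot_base, minimum_bytes)
    }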