diff --git a/lib/runtime/src/memory.rs b/lib/runtime/src/memory.rs
index 50ac0d14a6..09bd3a6468 100644
--- a/lib/runtime/src/memory.rs
+++ b/lib/runtime/src/memory.rs
@@ -4,7 +4,6 @@
 use crate::mmap::Mmap;
 use crate::vmcontext::VMMemoryDefinition;
-use region;
 use std::string::String;
 use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};
 
@@ -58,23 +57,8 @@ impl LinearMemory {
         let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
         let mapped_pages = plan.memory.minimum as usize;
         let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
-        let unmapped_pages = minimum_pages - mapped_pages;
-        let unmapped_bytes = unmapped_pages * WASM_PAGE_SIZE as usize;
-        let inaccessible_bytes = unmapped_bytes + offset_guard_bytes;
-        let mmap = Mmap::with_size(request_bytes)?;
-
-        // Make the unmapped and offset-guard pages inaccessible.
-        if request_bytes != 0 {
-            unsafe {
-                region::protect(
-                    mmap.as_ptr().add(mapped_bytes),
-                    inaccessible_bytes,
-                    region::Protection::None,
-                )
-            }
-            .expect("unable to make memory inaccessible");
-        }
+        let mmap = Mmap::accessible_reserved(mapped_bytes, request_bytes)?;
 
         Ok(Self {
             mmap,
@@ -117,29 +101,25 @@ impl LinearMemory {
             return None;
         }
 
-        let new_bytes = new_pages as usize * WASM_PAGE_SIZE as usize;
+        let delta_bytes = cast::usize(delta) * WASM_PAGE_SIZE as usize;
+        let prev_bytes = cast::usize(prev_pages) * WASM_PAGE_SIZE as usize;
+        let new_bytes = cast::usize(new_pages) * WASM_PAGE_SIZE as usize;
 
         if new_bytes > self.mmap.len() - self.offset_guard_size {
-            // If we have no maximum, this is a "dynamic" heap, and it's allowed to move.
+            // If the new size is within the declared maximum, but needs more memory than we
+            // have on hand, it's a dynamic heap and it can move.
             let guard_bytes = self.offset_guard_size;
             let request_bytes = new_bytes.checked_add(guard_bytes)?;
 
-            let mut new_mmap = Mmap::with_size(request_bytes).ok()?;
-
-            // Make the offset-guard pages inaccessible.
-            unsafe {
-                region::protect(
-                    new_mmap.as_ptr().add(new_bytes),
-                    guard_bytes,
-                    region::Protection::None,
-                )
-            }
-            .expect("unable to make memory inaccessible");
+            let mut new_mmap = Mmap::accessible_reserved(new_bytes, request_bytes).ok()?;
 
             let copy_len = self.mmap.len() - self.offset_guard_size;
             new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.mmap.as_slice()[..copy_len]);
 
             self.mmap = new_mmap;
+        } else {
+            // Make the newly allocated pages accessible.
+            self.mmap.make_accessible(prev_bytes, delta_bytes).ok()?;
         }
 
         self.current = new_pages;
diff --git a/lib/runtime/src/mmap.rs b/lib/runtime/src/mmap.rs
index cb11aedadb..8faefaab95 100644
--- a/lib/runtime/src/mmap.rs
+++ b/lib/runtime/src/mmap.rs
@@ -34,64 +34,196 @@ impl Mmap {
         }
     }
 
-    /// Create a new `Mmap` pointing to at least `size` bytes of memory,
+    /// Create a new `Mmap` pointing to at least `size` bytes of accessible memory,
     /// suitably sized and aligned for memory protection.
-    #[cfg(not(target_os = "windows"))]
     pub fn with_size(size: usize) -> Result<Self, String> {
+        Self::accessible_reserved(size, size)
+    }
+
+    /// Create a new `Mmap` pointing to at least `accessible_size` bytes of accessible memory,
+    /// within a reserved mapping of at least `mapping_size` bytes, suitably sized and aligned
+    /// for memory protection.
+    #[cfg(not(target_os = "windows"))]
+    pub fn accessible_reserved(
+        accessible_size: usize,
+        mapping_size: usize,
+    ) -> Result<Self, String> {
+        assert!(accessible_size <= mapping_size);
+
         // Mmap may return EINVAL if the size is zero, so just
         // special-case that.
-        if size == 0 {
+        if mapping_size == 0 {
             return Ok(Self::new());
         }
 
         let page_size = region::page::size();
-        let alloc_size = round_up_to_page_size(size, page_size);
-        let ptr = unsafe {
-            libc::mmap(
-                ptr::null_mut(),
-                alloc_size,
-                libc::PROT_READ | libc::PROT_WRITE,
-                libc::MAP_PRIVATE | libc::MAP_ANON,
-                -1,
-                0,
-            )
-        };
-        if ptr as isize == -1isize {
-            Err(errno::errno().to_string())
-        } else {
-            Ok(Self {
+        let rounded_mapping_size = round_up_to_page_size(mapping_size, page_size);
+
+        Ok(if accessible_size == mapping_size {
+            // Allocate a single read-write region at once.
+            let ptr = unsafe {
+                libc::mmap(
+                    ptr::null_mut(),
+                    rounded_mapping_size,
+                    libc::PROT_READ | libc::PROT_WRITE,
+                    libc::MAP_PRIVATE | libc::MAP_ANON,
+                    -1,
+                    0,
+                )
+            };
+            if ptr as isize == -1_isize {
+                return Err(errno::errno().to_string());
+            }
+
+            Self {
                 ptr: ptr as *mut u8,
-                len: alloc_size,
-            })
-        }
+                len: rounded_mapping_size,
+            }
+        } else {
+            // Reserve the mapping size.
+            let ptr = unsafe {
+                libc::mmap(
+                    ptr::null_mut(),
+                    rounded_mapping_size,
+                    libc::PROT_NONE,
+                    libc::MAP_PRIVATE | libc::MAP_ANON,
+                    -1,
+                    0,
+                )
+            };
+            if ptr as isize == -1_isize {
+                return Err(errno::errno().to_string());
+            }
+
+            let result = Self {
+                ptr: ptr as *mut u8,
+                len: rounded_mapping_size,
+            };
+
+            if accessible_size != 0 {
+                // Commit the accessible size.
+                let rounded_accessible_size = round_up_to_page_size(accessible_size, page_size);
+                unsafe {
+                    region::protect(
+                        result.ptr,
+                        rounded_accessible_size,
+                        region::Protection::ReadWrite,
+                    )
+                }
+                .map_err(|e| e.to_string())?;
+            }
+
+            result
+        })
     }
 
-    /// Create a new `Mmap` pointing to at least `size` bytes of memory,
-    /// suitably sized and aligned for memory protection.
+    /// Create a new `Mmap` pointing to at least `accessible_size` bytes of accessible memory,
+    /// within a reserved mapping of at least `mapping_size` bytes, suitably sized and aligned
+    /// for memory protection.
     #[cfg(target_os = "windows")]
-    pub fn with_size(size: usize) -> Result<Self, String> {
+    pub fn accessible_reserved(
+        accessible_size: usize,
+        mapping_size: usize,
+    ) -> Result<Self, String> {
+        assert!(accessible_size <= mapping_size);
+
         use winapi::um::memoryapi::VirtualAlloc;
-        use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_READWRITE};
+        use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE};
+
+        let page_size = region::page::size();
+        let rounded_mapping_size = round_up_to_page_size(mapping_size, page_size);
+
+        Ok(if accessible_size == mapping_size {
+            // Allocate a single read-write region at once.
+            let ptr = unsafe {
+                VirtualAlloc(
+                    ptr::null_mut(),
+                    rounded_mapping_size,
+                    MEM_RESERVE | MEM_COMMIT,
+                    PAGE_READWRITE,
+                )
+            };
+            if ptr.is_null() {
+                return Err(errno::errno().to_string());
+            }
+
+            Self {
+                ptr: ptr as *mut u8,
+                len: rounded_mapping_size,
+            }
+        } else {
+            // Reserve the mapping size.
+            let ptr = unsafe {
+                VirtualAlloc(
+                    ptr::null_mut(),
+                    rounded_mapping_size,
+                    MEM_RESERVE,
+                    PAGE_NOACCESS,
+                )
+            };
+            if ptr.is_null() {
+                return Err(errno::errno().to_string());
+            }
+
+            let result = Self {
+                ptr: ptr as *mut u8,
+                len: rounded_mapping_size,
+            };
+
+            if accessible_size != 0 {
+                // Commit the accessible size.
+                let rounded_accessible_size = round_up_to_page_size(accessible_size, page_size);
+                if unsafe { VirtualAlloc(ptr, rounded_accessible_size, MEM_COMMIT, PAGE_READWRITE) }
+                    .is_null()
+                {
+                    return Err(errno::errno().to_string());
+                }
+            }
+
+            result
+        })
+    }
+
+    /// Make the memory starting at `start` and extending for `len` bytes accessible.
+    #[cfg(not(target_os = "windows"))]
+    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
+        // `region::protect` may return an error if the size is zero, so just
+        // special-case that.
+        if len == 0 {
+            return Ok(());
+        }
 
         let page_size = region::page::size();
 
-        // VirtualAlloc always rounds up to the next multiple of the page size
-        let ptr = unsafe {
-            VirtualAlloc(
-                ptr::null_mut(),
-                size,
-                MEM_COMMIT | MEM_RESERVE,
-                PAGE_READWRITE,
-            )
-        };
-        if !ptr.is_null() {
-            Ok(Self {
-                ptr: ptr as *mut u8,
-                len: round_up_to_page_size(size, page_size),
-            })
-        } else {
-            Err(errno::errno().to_string())
+        assert_eq!(start % page_size, 0);
+        assert_eq!(len % page_size, 0);
+        assert!(len < self.len);
+        assert!(start < self.len - len);
+
+        // Commit the accessible size.
+        unsafe { region::protect(self.ptr.add(start), len, region::Protection::ReadWrite) }
+            .map_err(|e| e.to_string())
+    }
+
+    /// Make the memory starting at `start` and extending for `len` bytes accessible.
+    #[cfg(target_os = "windows")]
+    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
+        use winapi::um::memoryapi::VirtualAlloc;
+        use winapi::um::winnt::{MEM_COMMIT, PAGE_READWRITE};
+
+        let page_size = region::page::size();
+
+        assert_eq!(start % page_size, 0);
+        assert_eq!(len % page_size, 0);
+        assert!(len < self.len);
+        assert!(start < self.len - len);
+
+        // Commit the accessible size.
+        if unsafe { VirtualAlloc(self.ptr.add(start), len, MEM_COMMIT, PAGE_READWRITE) }.is_null() {
+            return Err(errno::errno().to_string());
+        }
+
+        Ok(())
     }
 
     /// Return the allocated memory as a slice of u8.
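
Note for reviewers: the net effect of the patch is that a linear memory reserves its whole mapping (heap plus offset-guard region) up front and commits pages only as the heap grows, so `grow` can stay in place while the request fits inside the reservation. A hypothetical usage sketch of the new `Mmap` API follows; the import path, page counts, and the `demo` wrapper are illustrative assumptions, not part of the patch:

```rust
use wasmtime_runtime::mmap::Mmap; // assumed import path for lib/runtime/src/mmap.rs

fn demo() -> Result<(), String> {
    let page = region::page::size();

    // Reserve 16 pages of address space; make only the first 4 accessible.
    let mut mem = Mmap::accessible_reserved(4 * page, 16 * page)?;

    // The committed prefix is readable and writable.
    mem.as_mut_slice()[0] = 1;

    // Commit 2 more pages in place: no copy, and the base address is stable.
    mem.make_accessible(4 * page, 2 * page)?;

    // `with_size(n)` is now shorthand for `accessible_reserved(n, n)`,
    // i.e. a fully committed mapping.
    let _eager = Mmap::with_size(4 * page)?;

    Ok(())
}
```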
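The OS technique underneath `accessible_reserved` is the classic reserve-then-commit pattern: a `PROT_NONE` reservation followed by `mprotect` commits on POSIX, and `MEM_RESERVE` followed by `MEM_COMMIT` on Windows. A minimal standalone sketch of that pattern using the `libc` crate directly, POSIX-only and with assertions standing in for real error handling:

```rust
use std::ptr;

fn main() {
    let page = unsafe { libc::sysconf(libc::_SC_PAGESIZE) } as usize;
    let reserved = 16 * page; // total address-space reservation
    let committed = 4 * page; // initially accessible prefix

    unsafe {
        // Reserve address space only; no access rights yet.
        let base = libc::mmap(
            ptr::null_mut(),
            reserved,
            libc::PROT_NONE,
            libc::MAP_PRIVATE | libc::MAP_ANON,
            -1,
            0,
        );
        assert_ne!(base, libc::MAP_FAILED);

        // Commit the prefix by granting read/write access.
        assert_eq!(
            libc::mprotect(base, committed, libc::PROT_READ | libc::PROT_WRITE),
            0
        );

        // The committed prefix is usable; touching past it would fault.
        *(base as *mut u8) = 42;

        libc::munmap(base, reserved);
    }
}
```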