Move the creation of linear-memory guard pages into the mmap module.
Create linear-memory pages as PROT_NONE and then make them accessible on demand.
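The idea in isolation: reserve the whole linear-memory region up front with PROT_NONE so nothing is accessible by default, then commit only the pages that should be usable by upgrading their protection. The sketch below is a hypothetical, stand-alone, Unix-only illustration written directly against the libc crate (names like `reserved` and `committed` are invented for the example); the actual change routes this through the `Mmap` type's new `accessible_reserved` and `make_accessible` methods, and uses VirtualAlloc with MEM_RESERVE/MEM_COMMIT on Windows.

    // Hypothetical stand-alone sketch (not the crate's code): reserve with
    // PROT_NONE, then commit an accessible prefix with mprotect.
    use std::ptr;

    fn main() -> Result<(), String> {
        const WASM_PAGE: usize = 64 * 1024;
        let reserved = 16 * WASM_PAGE; // total reservation, mostly inaccessible
        let committed = 2 * WASM_PAGE; // prefix that should be usable now

        // Reserve address space with no access rights.
        let base = unsafe {
            libc::mmap(
                ptr::null_mut(),
                reserved,
                libc::PROT_NONE,
                libc::MAP_PRIVATE | libc::MAP_ANON,
                -1,
                0,
            )
        };
        if base == libc::MAP_FAILED {
            return Err("mmap failed".to_string());
        }

        // Commit the accessible prefix by upgrading its protection.
        if unsafe { libc::mprotect(base, committed, libc::PROT_READ | libc::PROT_WRITE) } != 0 {
            return Err("mprotect failed".to_string());
        }

        // Writes inside the committed prefix succeed; touching the PROT_NONE
        // tail would fault, which is how out-of-bounds accesses are caught.
        unsafe { base.cast::<u8>().write(42) };

        unsafe { libc::munmap(base, reserved) };
        Ok(())
    }

Accesses that land in the still-PROT_NONE tail, including the offset-guard region, fault instead of touching memory, which is what the guard pages are for.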
@@ -4,7 +4,6 @@
 
 use crate::mmap::Mmap;
 use crate::vmcontext::VMMemoryDefinition;
-use region;
 use std::string::String;
 use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};
 
@@ -58,23 +57,8 @@ impl LinearMemory {
         let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
         let mapped_pages = plan.memory.minimum as usize;
         let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
-        let unmapped_pages = minimum_pages - mapped_pages;
-        let unmapped_bytes = unmapped_pages * WASM_PAGE_SIZE as usize;
-        let inaccessible_bytes = unmapped_bytes + offset_guard_bytes;
 
-        let mmap = Mmap::with_size(request_bytes)?;
+        let mmap = Mmap::accessible_reserved(mapped_bytes, request_bytes)?;
 
-        // Make the unmapped and offset-guard pages inaccessible.
-        if request_bytes != 0 {
-            unsafe {
-                region::protect(
-                    mmap.as_ptr().add(mapped_bytes),
-                    inaccessible_bytes,
-                    region::Protection::None,
-                )
-            }
-            .expect("unable to make memory inaccessible");
-        }
-
         Ok(Self {
             mmap,
@@ -117,29 +101,25 @@ impl LinearMemory {
             return None;
         }
 
-        let new_bytes = new_pages as usize * WASM_PAGE_SIZE as usize;
+        let delta_bytes = cast::usize(delta) * WASM_PAGE_SIZE as usize;
+        let prev_bytes = cast::usize(prev_pages) * WASM_PAGE_SIZE as usize;
+        let new_bytes = cast::usize(new_pages) * WASM_PAGE_SIZE as usize;
 
         if new_bytes > self.mmap.len() - self.offset_guard_size {
-            // If we have no maximum, this is a "dynamic" heap, and it's allowed to move.
+            // If the new size is within the declared maximum, but needs more memory than we
+            // have on hand, it's a dynamic heap and it can move.
             let guard_bytes = self.offset_guard_size;
             let request_bytes = new_bytes.checked_add(guard_bytes)?;
 
-            let mut new_mmap = Mmap::with_size(request_bytes).ok()?;
-
-            // Make the offset-guard pages inaccessible.
-            unsafe {
-                region::protect(
-                    new_mmap.as_ptr().add(new_bytes),
-                    guard_bytes,
-                    region::Protection::None,
-                )
-            }
-            .expect("unable to make memory inaccessible");
+            let mut new_mmap = Mmap::accessible_reserved(new_bytes, request_bytes).ok()?;
 
             let copy_len = self.mmap.len() - self.offset_guard_size;
             new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.mmap.as_slice()[..copy_len]);
 
             self.mmap = new_mmap;
+        } else {
+            // Make the newly allocated pages accessible.
+            self.mmap.make_accessible(prev_bytes, delta_bytes).ok()?;
         }
 
         self.current = new_pages;
@@ -34,64 +34,196 @@ impl Mmap {
         }
     }
 
-    /// Create a new `Mmap` pointing to at least `size` bytes of memory,
-    /// suitably sized and aligned for memory protection.
-    #[cfg(not(target_os = "windows"))]
-    pub fn with_size(size: usize) -> Result<Self, String> {
-        // Mmap may return EINVAL if the size is zero, so just
-        // special-case that.
-        if size == 0 {
-            return Ok(Self::new());
-        }
-
-        let page_size = region::page::size();
-        let alloc_size = round_up_to_page_size(size, page_size);
-        let ptr = unsafe {
-            libc::mmap(
-                ptr::null_mut(),
-                alloc_size,
-                libc::PROT_READ | libc::PROT_WRITE,
-                libc::MAP_PRIVATE | libc::MAP_ANON,
-                -1,
-                0,
-            )
-        };
-        if ptr as isize == -1isize {
-            Err(errno::errno().to_string())
-        } else {
-            Ok(Self {
-                ptr: ptr as *mut u8,
-                len: alloc_size,
-            })
-        }
-    }
-
-    /// Create a new `Mmap` pointing to at least `size` bytes of memory,
-    /// suitably sized and aligned for memory protection.
-    #[cfg(target_os = "windows")]
-    pub fn with_size(size: usize) -> Result<Self, String> {
-        use winapi::um::memoryapi::VirtualAlloc;
-        use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_READWRITE};
-
-        let page_size = region::page::size();
-
-        // VirtualAlloc always rounds up to the next multiple of the page size
-        let ptr = unsafe {
-            VirtualAlloc(
-                ptr::null_mut(),
-                size,
-                MEM_COMMIT | MEM_RESERVE,
-                PAGE_READWRITE,
-            )
-        };
-        if !ptr.is_null() {
-            Ok(Self {
-                ptr: ptr as *mut u8,
-                len: round_up_to_page_size(size, page_size),
-            })
-        } else {
-            Err(errno::errno().to_string())
-        }
-    }
+    /// Create a new `Mmap` pointing to at least `size` bytes of accessible memory,
+    /// suitably sized and aligned for memory protection.
+    pub fn with_size(size: usize) -> Result<Self, String> {
+        Self::accessible_reserved(size, size)
+    }
+
+    /// Create a new `Mmap` pointing to at least `accessible_size` bytes of accessible memory,
+    /// within a reserved mapping of at least `mapping_size` bytes, suitably sized and aligned
+    /// for memory protection.
+    #[cfg(not(target_os = "windows"))]
+    pub fn accessible_reserved(
+        accessible_size: usize,
+        mapping_size: usize,
+    ) -> Result<Self, String> {
+        assert!(accessible_size <= mapping_size);
+
+        // Mmap may return EINVAL if the size is zero, so just
+        // special-case that.
+        if mapping_size == 0 {
+            return Ok(Self::new());
+        }
+
+        let page_size = region::page::size();
+        let rounded_mapping_size = round_up_to_page_size(mapping_size, page_size);
+
+        Ok(if accessible_size == mapping_size {
+            // Allocate a single read-write region at once.
+            let ptr = unsafe {
+                libc::mmap(
+                    ptr::null_mut(),
+                    rounded_mapping_size,
+                    libc::PROT_READ | libc::PROT_WRITE,
+                    libc::MAP_PRIVATE | libc::MAP_ANON,
+                    -1,
+                    0,
+                )
+            };
+            if ptr as isize == -1_isize {
+                return Err(errno::errno().to_string());
+            }
+
+            Self {
+                ptr: ptr as *mut u8,
+                len: rounded_mapping_size,
+            }
+        } else {
+            // Reserve the mapping size.
+            let ptr = unsafe {
+                libc::mmap(
+                    ptr::null_mut(),
+                    rounded_mapping_size,
+                    libc::PROT_NONE,
+                    libc::MAP_PRIVATE | libc::MAP_ANON,
+                    -1,
+                    0,
+                )
+            };
+            if ptr as isize == -1_isize {
+                return Err(errno::errno().to_string());
+            }
+
+            let result = Self {
+                ptr: ptr as *mut u8,
+                len: rounded_mapping_size,
+            };
+
+            if accessible_size != 0 {
+                // Commit the accessible size.
+                let rounded_accessible_size = round_up_to_page_size(accessible_size, page_size);
+                unsafe {
+                    region::protect(
+                        result.ptr,
+                        rounded_accessible_size,
+                        region::Protection::ReadWrite,
+                    )
+                }
+                .map_err(|e| e.to_string())?;
+            }
+
+            result
+        })
+    }
+
+    /// Create a new `Mmap` pointing to at least `accessible_size` bytes of accessible memory,
+    /// within a reserved mapping of at least `mapping_size` bytes, suitably sized and aligned
+    /// for memory protection.
+    #[cfg(target_os = "windows")]
+    pub fn accessible_reserved(
+        accessible_size: usize,
+        mapping_size: usize,
+    ) -> Result<Self, String> {
+        assert!(accessible_size <= mapping_size);
+
+        use winapi::um::memoryapi::VirtualAlloc;
+        use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE};
+
+        let page_size = region::page::size();
+        let rounded_mapping_size = round_up_to_page_size(mapping_size, page_size);
+
+        Ok(if accessible_size == mapping_size {
+            // Allocate a single read-write region at once.
+            let ptr = unsafe {
+                VirtualAlloc(
+                    ptr::null_mut(),
+                    rounded_mapping_size,
+                    MEM_RESERVE | MEM_COMMIT,
+                    PAGE_READWRITE,
+                )
+            };
+            if ptr.is_null() {
+                return Err(errno::errno().to_string());
+            }
+
+            Self {
+                ptr: ptr as *mut u8,
+                len: rounded_mapping_size,
+            }
+        } else {
+            // Reserve the mapping size.
+            let ptr = unsafe {
+                VirtualAlloc(
+                    ptr::null_mut(),
+                    rounded_mapping_size,
+                    MEM_RESERVE,
+                    PAGE_NOACCESS,
+                )
+            };
+            if ptr.is_null() {
+                return Err(errno::errno().to_string());
+            }
+
+            let result = Self {
+                ptr: ptr as *mut u8,
+                len: rounded_mapping_size,
+            };
+
+            if accessible_size != 0 {
+                // Commit the accessible size.
+                let rounded_accessible_size = round_up_to_page_size(accessible_size, page_size);
+                if unsafe { VirtualAlloc(ptr, rounded_accessible_size, MEM_COMMIT, PAGE_READWRITE) }
+                    .is_null()
+                {
+                    return Err(errno::errno().to_string());
+                }
+            }
+
+            result
+        })
+    }
+
+    /// Make the memory starting at `start` and extending for `len` bytes accessible.
+    #[cfg(not(target_os = "windows"))]
+    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
+        // Mmap may return EINVAL if the size is zero, so just
+        // special-case that.
+        if len == 0 {
+            return Ok(());
+        }
+
+        let page_size = region::page::size();
+
+        assert_eq!(start % page_size, 0);
+        assert_eq!(len % page_size, 0);
+        assert!(len < self.len);
+        assert!(start < self.len - len);
+
+        // Commit the accessible size.
+        unsafe { region::protect(self.ptr.add(start), len, region::Protection::ReadWrite) }
+            .map_err(|e| e.to_string())
+    }
+
+    /// Make the memory starting at `start` and extending for `len` bytes accessible.
+    #[cfg(target_os = "windows")]
+    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
+        use winapi::um::memoryapi::VirtualAlloc;
+        use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE};
+
+        let page_size = region::page::size();
+
+        assert_eq!(start % page_size, 0);
+        assert_eq!(len % page_size, 0);
+        assert!(len < self.len);
+        assert!(start < self.len - len);
+
+        // Commit the accessible size.
+        if unsafe { VirtualAlloc(self.ptr.add(start), len, MEM_COMMIT, PAGE_READWRITE) }.is_null() {
+            return Err(errno::errno().to_string());
+        }
+
+        Ok(())
+    }
 
     /// Return the allocated memory as a slice of u8.