Rewrite linear memory handling in terms of simple mmap/VirtualAlloc.
The memmap crate doesn't make it straightforward to have part of a mapped region be writable and the rest read-only. Since this is a fairly boutique use case, and we don't need much code, just use the low-level APIs directly. Also, introduce a concept of "tunables" for adjusting the parameters of the runtime.
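As a rough illustration of the technique (a minimal sketch, not the code in this commit: it assumes a Unix target, page-aligned sizes, and the libc and errno crates that this commit adds as dependencies; the alloc_with_guard helper name is hypothetical): reserve one contiguous read-write anonymous mapping, then drop write access on the trailing guard pages so out-of-bounds accesses fault instead of corrupting memory.

    extern crate errno;
    extern crate libc;

    use std::ptr;

    /// Map `heap_bytes + guard_bytes` of zeroed memory, with the trailing
    /// `guard_bytes` made read-only. Both sizes are assumed to be multiples
    /// of the system page size.
    fn alloc_with_guard(heap_bytes: usize, guard_bytes: usize) -> Result<*mut u8, String> {
        unsafe {
            // One anonymous, read-write mapping covering heap + guard.
            let ptr = libc::mmap(
                ptr::null_mut(),
                heap_bytes + guard_bytes,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
                -1,
                0,
            );
            if ptr == libc::MAP_FAILED {
                return Err(errno::errno().to_string());
            }
            // Strip write permission from the guard tail only; the heap part
            // stays writable. The diff below does the same job with
            // region::protect, which also covers Windows.
            if libc::mprotect(
                (ptr as *mut u8).add(heap_bytes) as *mut libc::c_void,
                guard_bytes,
                libc::PROT_READ,
            ) != 0
            {
                return Err(errno::errno().to_string());
            }
            Ok(ptr as *mut u8)
        }
    }

On Windows the same split is a VirtualAlloc with PAGE_READWRITE followed by VirtualProtect with PAGE_READONLY on the tail.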
@@ -10,14 +10,14 @@ license = "Apache-2.0 WITH LLVM-exception"
 readme = "README.md"
 
 [dependencies]
-cranelift-codegen = "0.25.0"
-cranelift-entity = "0.25.0"
-cranelift-wasm = "0.25.0"
+cranelift-codegen = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
+cranelift-entity = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
+cranelift-wasm = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
 wasmtime-environ = { path = "../environ" }
 region = "1.0.0"
-memmap = "0.7.0"
 lazy_static = "1.2.0"
+libc = "0.2.44"
+errno = "0.2.4"
 
 [build-dependencies]
 cmake = "0.1.35"
 
@@ -28,16 +28,16 @@ impl Instance {
         module: &Module,
         compilation: &Compilation,
         data_initializers: &[DataInitializer],
-    ) -> Self {
+    ) -> Result<Self, String> {
         let mut result = Self {
             tables: PrimaryMap::new(),
             memories: PrimaryMap::new(),
             globals: Vec::new(),
         };
         result.instantiate_tables(module, compilation, &module.table_elements);
-        result.instantiate_memories(module, data_initializers);
+        result.instantiate_memories(module, data_initializers)?;
         result.instantiate_globals(module);
-        result
+        Ok(result)
     }
 
     /// Allocate memory in `self` for just the tables of the current module.
@@ -48,10 +48,9 @@ impl Instance {
         table_initializers: &[TableElements],
     ) {
         debug_assert!(self.tables.is_empty());
-        // TODO: Enable this once PrimaryMap supports this.
-        //self.tables.reserve_exact(module.tables.len());
+        self.tables.reserve_exact(module.tables.len());
         for table in module.tables.values() {
-            let len = table.size;
+            let len = table.minimum as usize;
             let mut v = Vec::with_capacity(len);
             v.resize(len, 0);
             self.tables.push(v);
@@ -70,13 +69,16 @@ impl Instance {
     }
 
     /// Allocate memory in `instance` for just the memories of the current module.
-    fn instantiate_memories(&mut self, module: &Module, data_initializers: &[DataInitializer]) {
+    fn instantiate_memories(
+        &mut self,
+        module: &Module,
+        data_initializers: &[DataInitializer],
+    ) -> Result<(), String> {
         debug_assert!(self.memories.is_empty());
         // Allocate the underlying memory and initialize it to all zeros.
-        // TODO: Enable this once PrimaryMap supports it.
-        //self.memories.reserve_exact(module.memories.len());
-        for memory in module.memories.values() {
-            let v = LinearMemory::new(memory.pages_count as u32, memory.maximum.map(|m| m as u32));
+        self.memories.reserve_exact(module.memory_plans.len());
+        for plan in module.memory_plans.values() {
+            let v = LinearMemory::new(&plan)?;
             self.memories.push(v);
         }
         for init in data_initializers {
@@ -85,6 +87,7 @@ impl Instance {
             let to_init = &mut mem_mut[init.offset..init.offset + init.data.len()];
             to_init.copy_from_slice(init.data);
         }
+        Ok(())
     }
 
     /// Allocate memory in `instance` for just the globals of the current module,
@@ -30,7 +30,7 @@
 extern crate cranelift_codegen;
 extern crate cranelift_entity;
 extern crate cranelift_wasm;
-extern crate memmap;
+extern crate errno;
 extern crate region;
 extern crate wasmtime_environ;
 #[cfg(not(feature = "std"))]
@@ -1,92 +1,225 @@
-use memmap;
+use errno;
+use libc;
+use region;
 use std::fmt;
+use std::mem;
+use std::ptr;
+use std::slice;
+use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};
 
-const PAGE_SIZE: u32 = 65536;
-const MAX_PAGES: u32 = 65536;
+/// Round `size` up to the nearest multiple of `page_size`.
+fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
+    (size + (page_size - 1)) & !(page_size - 1)
+}
+
+/// A simple struct consisting of a page-aligned pointer to page-aligned
+/// and initially-zeroed memory and a length.
+struct PtrLen {
+    ptr: *mut u8,
+    len: usize,
+}
+
+impl PtrLen {
+    /// Create a new `PtrLen` pointing to at least `size` bytes of memory,
+    /// suitably sized and aligned for memory protection.
+    #[cfg(not(target_os = "windows"))]
+    fn with_size(size: usize) -> Result<Self, String> {
+        let page_size = region::page::size();
+        let alloc_size = round_up_to_page_size(size, page_size);
+        unsafe {
+            let ptr = libc::mmap(
+                ptr::null_mut(),
+                alloc_size,
+                libc::PROT_READ | libc::PROT_WRITE,
+                libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
+                -1,
+                0,
+            );
+            if mem::transmute::<_, isize>(ptr) != -1isize {
+                Ok(Self {
+                    ptr: ptr as *mut u8,
+                    len: alloc_size,
+                })
+            } else {
+                Err(errno::errno().to_string())
+            }
+        }
+    }
+
+    #[cfg(target_os = "windows")]
+    fn with_size(size: usize) -> Result<Self, String> {
+        use winapi::um::memoryapi::VirtualAlloc;
+        use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_READWRITE};
+
+        let page_size = region::page::size();
+
+        // VirtualAlloc always rounds up to the next multiple of the page size.
+        let ptr = unsafe {
+            VirtualAlloc(
+                ptr::null_mut(),
+                size,
+                MEM_COMMIT | MEM_RESERVE,
+                PAGE_READWRITE,
+            )
+        };
+        if !ptr.is_null() {
+            Ok(Self {
+                ptr: ptr as *mut u8,
+                len: round_up_to_page_size(size, page_size),
+            })
+        } else {
+            Err(errno::errno().to_string())
+        }
+    }
+
+    fn as_slice(&self) -> &[u8] {
+        unsafe { slice::from_raw_parts(self.ptr, self.len) }
+    }
+
+    fn as_mut_slice(&mut self) -> &mut [u8] {
+        unsafe { slice::from_raw_parts_mut(self.ptr, self.len) }
+    }
+}
+
+impl Drop for PtrLen {
+    #[cfg(not(target_os = "windows"))]
+    fn drop(&mut self) {
+        let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.len) };
+        assert_eq!(r, 0);
+    }
+
+    #[cfg(target_os = "windows")]
+    fn drop(&mut self) {
+        use winapi::ctypes::c_void;
+        use winapi::um::memoryapi::VirtualFree;
+        use winapi::um::winnt::MEM_RELEASE;
+        // With MEM_RELEASE the size must be 0 and the entire reservation is
+        // released; VirtualFree returns nonzero on success.
+        let r = unsafe { VirtualFree(self.ptr as *mut c_void, 0, MEM_RELEASE) };
+        assert_ne!(r, 0);
+    }
+}
 
 /// A linear memory instance.
 ///
 /// This linear memory has a stable base address and at the same time allows
 /// for dynamic growth.
 pub struct LinearMemory {
-    mmap: memmap::MmapMut,
+    ptrlen: PtrLen,
     current: u32,
     maximum: Option<u32>,
+    offset_guard_size: usize,
 }
 
 impl LinearMemory {
-    /// Create a new linear memory instance with specified initial and maximum number of pages.
-    ///
-    /// `maximum` cannot be set to more than `65536` pages.
-    pub fn new(initial: u32, maximum: Option<u32>) -> Self {
-        assert!(initial <= MAX_PAGES);
-        assert!(maximum.is_none() || maximum.unwrap() <= MAX_PAGES);
+    /// Create a new linear memory instance with specified minimum and maximum number of pages.
+    pub fn new(plan: &MemoryPlan) -> Result<Self, String> {
+        // `maximum` cannot be set to more than `65536` pages.
+        assert!(plan.memory.minimum <= WASM_MAX_PAGES);
+        assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= WASM_MAX_PAGES);
 
-        let len = PAGE_SIZE * match maximum {
-            Some(val) => val,
-            None => initial,
-        };
-        let mmap = memmap::MmapMut::map_anon(len as usize).unwrap();
-        Self {
-            mmap,
-            current: initial,
-            maximum,
-        }
+        let offset_guard_bytes = plan.offset_guard_size as usize;
+
+        let minimum_pages = match plan.style {
+            MemoryStyle::Dynamic => plan.memory.minimum,
+            MemoryStyle::Static { bound } => {
+                assert!(bound >= plan.memory.minimum);
+                bound
+            }
+        } as usize;
+        let minimum_bytes = minimum_pages.checked_mul(WASM_PAGE_SIZE as usize).unwrap();
+        let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
+        let mapped_pages = plan.memory.minimum as usize;
+        let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
+        let unmapped_pages = minimum_pages - mapped_pages;
+        let unmapped_bytes = unmapped_pages * WASM_PAGE_SIZE as usize;
+        let inaccessible_bytes = unmapped_bytes + offset_guard_bytes;
+
+        let ptrlen = PtrLen::with_size(request_bytes)?;
+
+        // Make the unmapped and offset-guard pages inaccessible.
+        unsafe {
+            region::protect(
+                ptrlen.ptr.add(mapped_bytes),
+                inaccessible_bytes,
+                region::Protection::Read,
+            ).expect("unable to make memory readonly");
+        }
+
+        Ok(Self {
+            ptrlen,
+            current: plan.memory.minimum,
+            maximum: plan.memory.maximum,
+            offset_guard_size: offset_guard_bytes,
+        })
     }
 
     /// Returns the base address of this linear memory.
     pub fn base_addr(&mut self) -> *mut u8 {
-        self.mmap.as_mut_ptr()
+        self.ptrlen.ptr
     }
 
     /// Returns the number of allocated wasm pages.
     pub fn current_size(&self) -> u32 {
-        self.current
+        assert_eq!(self.ptrlen.len % WASM_PAGE_SIZE as usize, 0);
+        let num_pages = self.ptrlen.len / WASM_PAGE_SIZE as usize;
+        assert_eq!(num_pages as u32 as usize, num_pages);
+        num_pages as u32
     }
 
     /// Grow memory by the specified number of pages.
     ///
     /// Returns `None` if memory can't be grown by the specified number
     /// of pages.
-    pub fn grow(&mut self, add_pages: u32) -> Option<u32> {
-        let new_pages = match self.current.checked_add(add_pages) {
+    pub fn grow(&mut self, delta: u32) -> Option<u32> {
+        let new_pages = match self.current.checked_add(delta) {
             Some(new_pages) => new_pages,
             // Linear memory size overflow.
             None => return None,
         };
-        if let Some(val) = self.maximum {
-            if new_pages > val {
-                return None;
-            }
-        } else {
-            // Wasm linear memories are never allowed to grow beyond what is
-            // indexable. If the memory has no maximum, enforce the greatest
-            // limit here.
-            if new_pages >= 65536 {
-                return None;
-            }
-        }
+        let prev_pages = self.current;
 
-        let prev_pages = self.current;
-        let new_bytes = (new_pages * PAGE_SIZE) as usize;
+        if let Some(maximum) = self.maximum {
+            if new_pages > maximum {
+                // Linear memory size would exceed the declared maximum.
+                return None;
+            }
+        }
+
+        // Wasm linear memories are never allowed to grow beyond what is
+        // indexable. If the memory has no maximum, enforce the greatest
+        // limit here.
+        if new_pages >= WASM_MAX_PAGES {
+            // Linear memory size would exceed the index range.
+            return None;
+        }
+
+        let new_bytes = new_pages as usize * WASM_PAGE_SIZE as usize;
 
-        if self.mmap.len() < new_bytes {
-            // If we have no maximum, this is a "dynamic" heap, and it's allowed
-            // to move.
+        if new_bytes > self.ptrlen.len {
+            // If we have no maximum, this is a "dynamic" heap, and it's allowed to move.
             assert!(self.maximum.is_none());
-            let mut new_mmap = memmap::MmapMut::map_anon(new_bytes).unwrap();
-            new_mmap.copy_from_slice(&self.mmap);
-            self.mmap = new_mmap;
+            let mapped_pages = self.current as usize;
+            let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
+            let guard_bytes = self.offset_guard_size;
+
+            let mut new_ptrlen = PtrLen::with_size(new_bytes).ok()?;
+
+            // Copy the old contents into the start of the (larger) new
+            // allocation before any of it is made read-only; the slice
+            // lengths must match for copy_from_slice.
+            new_ptrlen.as_mut_slice()[..self.ptrlen.len]
+                .copy_from_slice(self.ptrlen.as_slice());
+
+            // Make the offset-guard pages inaccessible.
+            unsafe {
+                region::protect(
+                    new_ptrlen.ptr.add(mapped_bytes),
+                    guard_bytes,
+                    region::Protection::Read,
+                ).expect("unable to make memory readonly");
+            }
+
+            self.ptrlen = new_ptrlen;
         }
 
         self.current = new_pages;
 
-        // Ensure that newly allocated area is zeroed.
-        let new_start_offset = (prev_pages * PAGE_SIZE) as usize;
-        let new_end_offset = (new_pages * PAGE_SIZE) as usize;
-        for i in new_start_offset..new_end_offset {
-            assert!(self.mmap[i] == 0);
-        }
-
         Some(prev_pages)
     }
 }
@@ -102,12 +235,25 @@ impl fmt::Debug for LinearMemory {
 
 impl AsRef<[u8]> for LinearMemory {
     fn as_ref(&self) -> &[u8] {
-        &self.mmap
+        self.ptrlen.as_slice()
     }
 }
 
 impl AsMut<[u8]> for LinearMemory {
     fn as_mut(&mut self) -> &mut [u8] {
-        &mut self.mmap
+        self.ptrlen.as_mut_slice()
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_round_up_to_page_size() {
+        assert_eq!(round_up_to_page_size(0, 4096), 0);
+        assert_eq!(round_up_to_page_size(1, 4096), 4096);
+        assert_eq!(round_up_to_page_size(4096, 4096), 4096);
+        assert_eq!(round_up_to_page_size(4097, 4096), 8192);
+    }
+}
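The "tunables" mentioned in the commit message don't appear in this excerpt. As a loose, hypothetical sketch of the concept (field names and defaults here are illustrative assumptions, not the actual wasmtime-environ API), tunables are just a struct of runtime parameters that memory planning consults when picking a MemoryStyle and offset-guard size:

    /// Hypothetical illustration of the "tunables" idea: knobs for the
    /// runtime's memory planning. These names and defaults are assumptions
    /// for the sketch, not the real definitions.
    pub struct Tunables {
        /// Heap size bound, in wasm pages, up to which a static
        /// (non-moving) linear memory is used.
        pub static_memory_bound: u32,
        /// Size in bytes of the guard region placed after linear memory.
        pub offset_guard_size: u64,
    }

    impl Default for Tunables {
        fn default() -> Self {
            Self {
                // 64 KiB wasm pages; illustrative values only.
                static_memory_bound: 0x1_0000,
                offset_guard_size: 0x8000_0000,
            }
        }
    }

A MemoryPlan would then pair a wasm memory declaration with the style and guard size derived from such knobs, which is what LinearMemory::new consumes above.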