More infrastructure.

Improve handling of memory.grow/size, add a standalone wast runner, and
improve the test harness.
commit 7faa15d7ac (parent 83f8a31010)
Author: Dan Gohman
Date:   2018-12-03 04:59:40 -08:00
15 changed files with 316 additions and 82 deletions


@@ -19,7 +19,6 @@ region = "1.0.0"
 lazy_static = "1.2.0"
 libc = { version = "0.2.44", default-features = false }
 errno = "0.2.4"
-cast = { version = "0.2.2", default-features = false }
 memoffset = "0.2.1"

 [build-dependencies]


@@ -36,6 +36,10 @@ where
     Ok(compilation)
 }

+extern "C" {
+    pub fn __rust_probestack();
+}
+
 /// Performs the relocations inside the function bytecode, provided the necessary metadata
 fn relocate<F>(
     compilation: &mut Compilation,
@@ -76,6 +80,7 @@ fn relocate<F>(
             FloorF64 => wasmtime_f64_floor as usize,
             TruncF64 => wasmtime_f64_trunc as usize,
             NearestF64 => wasmtime_f64_nearest as usize,
+            Probestack => __rust_probestack as usize,
             other => panic!("unexpected libcall: {}", other),
         }
     }
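For background, not part of the diff: Cranelift emits a Probestack libcall for functions whose stack frames exceed a guard page, and the relocation above binds that libcall to __rust_probestack, the stack-probing routine from Rust's compiler builtins. A minimal sketch of the kind of function that needs probing (sizes are illustrative, not from the commit):

// Hypothetical illustration: a frame larger than one guard page (commonly
// 4 KiB) must touch its stack pages in order; otherwise the allocation can
// jump past the OS guard page, turning stack overflow into silent memory
// corruption instead of a trap. __rust_probestack performs those touches.
fn big_frame() -> u8 {
    // 64 KiB of locals: large enough that the compiled code calls the
    // stack-probe routine before using the frame.
    let buf = [0u8; 64 * 1024];
    buf[buf.len() - 1]
}

fn main() {
    assert_eq!(big_frame(), 0);
}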


@@ -42,7 +42,6 @@ extern crate lazy_static;
 extern crate libc;
 #[macro_use]
 extern crate memoffset;
-extern crate cast;

 mod code;
 mod execute;


@@ -2,7 +2,6 @@
 //!
 //! `LinearMemory` is to WebAssembly linear memories what `Table` is to WebAssembly tables.

-use cast;
 use mmap::Mmap;
 use region;
 use std::string::String;
@@ -63,9 +62,7 @@ impl LinearMemory {

     /// Returns the number of allocated wasm pages.
     pub fn size(&self) -> u32 {
-        assert_eq!(self.mmap.len() % WASM_PAGE_SIZE as usize, 0);
-        let num_pages = self.mmap.len() / WASM_PAGE_SIZE as usize;
-        cast::u32(num_pages).unwrap()
+        self.current
     }

     /// Grow memory by the specified amount of pages.
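A note on the change above, not part of the diff: once the mapping also contains inaccessible offset-guard pages, mmap.len() / WASM_PAGE_SIZE over-counts, so the page count is tracked directly in the `current` field instead of being derived. A standalone sketch of that invariant, using a hypothetical Heap stand-in for LinearMemory:

const WASM_PAGE_SIZE: usize = 0x10000; // 64 KiB

// Hypothetical stand-in for LinearMemory: the mapping holds the accessible
// pages plus the guard region, so wasm pages are counted separately.
struct Heap {
    mapping_len: usize,       // accessible bytes + offset_guard_size
    offset_guard_size: usize, // trailing inaccessible bytes
    current: u32,             // wasm pages currently accessible
}

impl Heap {
    fn size(&self) -> u32 {
        // Deriving this from mapping_len would count the guard pages too.
        self.current
    }
}

fn main() {
    let heap = Heap {
        mapping_len: 3 * WASM_PAGE_SIZE,
        offset_guard_size: WASM_PAGE_SIZE,
        current: 2,
    };
    assert_eq!(heap.size(), 2);
    // Accessible bytes exclude the guard region.
    assert_eq!(heap.mapping_len - heap.offset_guard_size, 2 * WASM_PAGE_SIZE);
}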
@@ -97,27 +94,25 @@ impl LinearMemory {
         let new_bytes = new_pages as usize * WASM_PAGE_SIZE as usize;

-        if new_bytes > self.mmap.len() {
+        if new_bytes > self.mmap.len() - self.offset_guard_size {
             // If we have no maximum, this is a "dynamic" heap, and it's allowed to move.
             assert!(self.maximum.is_none());
-            let mapped_pages = self.current as usize;
-            let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
             let guard_bytes = self.offset_guard_size;
+            let request_bytes = new_bytes.checked_add(guard_bytes)?;

-            let mut new_mmap = Mmap::with_size(new_bytes).ok()?;
+            let mut new_mmap = Mmap::with_size(request_bytes).ok()?;

             // Make the offset-guard pages inaccessible.
             unsafe {
                 region::protect(
-                    new_mmap.as_ptr().add(mapped_bytes),
+                    new_mmap.as_ptr().add(new_bytes),
                     guard_bytes,
-                    region::Protection::Read,
-                ).expect("unable to make memory readonly");
+                    region::Protection::None,
+                ).expect("unable to make memory inaccessible");
             }

-            new_mmap
-                .as_mut_slice()
-                .copy_from_slice(self.mmap.as_slice());
+            let copy_len = self.mmap.len() - self.offset_guard_size;
+            new_mmap.as_mut_slice()[..copy_len]
+                .copy_from_slice(&self.mmap.as_slice()[..copy_len]);

             self.mmap = new_mmap;
         }
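For context, not part of the diff: a heap with no declared maximum is a "dynamic" heap and may move when it grows, so growth allocates a fresh mapping of new_bytes accessible memory followed by guard_bytes of guard pages, now protected with Protection::None so that reads as well as writes trap, and copies only the accessible prefix of the old mapping. A standalone sketch of the request-size arithmetic (numbers are illustrative):

const WASM_PAGE_SIZE: usize = 0x10000; // 64 KiB

// Bytes to map when growing to `new_pages`: the accessible pages followed by
// an inaccessible offset-guard region. The checked arithmetic mirrors the
// diff's checked_add: an overflowing request must fail the grow (memory.grow
// then reports failure) rather than wrap around to a small allocation.
fn request_bytes(new_pages: usize, guard_bytes: usize) -> Option<usize> {
    let new_bytes = new_pages.checked_mul(WASM_PAGE_SIZE)?;
    new_bytes.checked_add(guard_bytes)
}

fn main() {
    // Growing to 2 pages with a 64 KiB guard maps three pages in total; only
    // the first two are accessible.
    assert_eq!(request_bytes(2, WASM_PAGE_SIZE), Some(3 * WASM_PAGE_SIZE));
    // Overflow is reported as failure, not wrapped.
    assert_eq!(request_bytes(usize::MAX, WASM_PAGE_SIZE), None);
}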


@@ -18,17 +18,25 @@ pub struct VMMemory {
 }

 #[cfg(test)]
-mod test {
+mod test_vmmemory {
     use super::VMMemory;
     use std::mem::size_of;
     use wasmtime_environ::VMOffsets;

     #[test]
     fn check_vmmemory_offsets() {
-        let offsets = VMOffsets::new(size_of::<*mut u8>());
-        assert_eq!(size_of::<VMMemory>(), offsets.size_of_vmmemory());
-        assert_eq!(offset_of!(VMMemory, base), offsets.vmmemory_base());
+        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
+        assert_eq!(
+            size_of::<VMMemory>(),
+            usize::from(offsets.size_of_vmmemory())
+        );
+        assert_eq!(
+            offset_of!(VMMemory, base),
+            usize::from(offsets.vmmemory_base())
+        );
         assert_eq!(
             offset_of!(VMMemory, current_length),
-            offsets.vmmemory_current_length()
+            usize::from(offsets.vmmemory_current_length())
         );
     }
 }
@@ -74,13 +82,14 @@ pub struct VMGlobal {
 }

 #[cfg(test)]
-mod test {
-    use std::mem::align_of;
+mod test_vmglobal {
+    use super::VMGlobal;
+    use std::mem::{align_of, size_of};
+    use wasmtime_environ::VMOffsets;

     #[test]
     fn check_vmglobal_alignment() {
-        assert!(align_of::<VMGlobal>() <= align_of::<i32>());
+        assert!(align_of::<VMGlobal>() >= align_of::<i32>());
         assert!(align_of::<VMGlobal>() >= align_of::<i64>());
         assert!(align_of::<VMGlobal>() >= align_of::<f32>());
         assert!(align_of::<VMGlobal>() >= align_of::<f64>());
@@ -88,8 +97,11 @@ mod test {

     #[test]
     fn check_vmglobal_offsets() {
-        let offsets = VMOffsets::new(size_of::<*mut u8>());
-        assert_eq!(size_of::<VMGlobal>(), offsets.size_of_vmglobal());
+        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
+        assert_eq!(
+            size_of::<VMGlobal>(),
+            usize::from(offsets.size_of_vmglobal())
+        );
     }
 }
@@ -110,17 +122,22 @@ pub struct VMTable {
 }

 #[cfg(test)]
-mod test {
+mod test_vmtable {
     use super::VMTable;
     use std::mem::size_of;
     use wasmtime_environ::VMOffsets;

     #[test]
     fn check_vmtable_offsets() {
-        let offsets = VMOffsets::new(size_of::<*mut u8>());
-        assert_eq!(size_of::<VMTable>(), offsets.size_of_vmtable());
-        assert_eq!(offset_of!(VMTable, base), offsets.vmtable_base());
+        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
+        assert_eq!(size_of::<VMTable>(), usize::from(offsets.size_of_vmtable()));
+        assert_eq!(
+            offset_of!(VMTable, base),
+            usize::from(offsets.vmtable_base())
+        );
         assert_eq!(
             offset_of!(VMTable, current_elements),
-            offsets.vmtable_current_elements()
+            usize::from(offsets.vmtable_current_elements())
         );
     }
 }
@@ -173,16 +190,26 @@ pub struct VMContext {

 #[cfg(test)]
 mod test {
     use super::VMContext;
     use std::mem::size_of;
     use wasmtime_environ::VMOffsets;

     #[test]
     fn check_vmctx_offsets() {
-        let offsets = VMOffsets::new(size_of::<*mut u8>());
-        assert_eq!(size_of::<VMContext>(), offsets.size_of_vmctx());
-        assert_eq!(offset_of!(VMContext, memories), offsets.vmctx_memories());
-        assert_eq!(offset_of!(VMContext, globals), offsets.vmctx_globals());
-        assert_eq!(offset_of!(VMContext, tables), offsets.vmctx_tables());
-        assert_eq!(offset_of!(VMContext, instance), offsets.vmctx_instance());
+        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
+        assert_eq!(size_of::<VMContext>(), usize::from(offsets.size_of_vmctx()));
+        assert_eq!(
+            offset_of!(VMContext, memories),
+            usize::from(offsets.vmctx_memories())
+        );
+        assert_eq!(
+            offset_of!(VMContext, globals),
+            usize::from(offsets.vmctx_globals())
+        );
+        assert_eq!(
+            offset_of!(VMContext, tables),
+            usize::from(offsets.vmctx_tables())
+        );
     }
 }
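A note on the pattern running through these test updates, not part of the diff itself: VMOffsets::new now takes the pointer size as a u8 and the offset accessors return narrow integers, so the tests compare them against size_of/offset_of! (both usize) through the lossless usize::from rather than an `as` cast that could silently truncate. A minimal sketch of that shape, with a hypothetical FakeOffsets standing in for wasmtime_environ::VMOffsets:

use std::mem::size_of;

// Hypothetical stand-in for wasmtime_environ::VMOffsets: the offsets are
// small, so they are computed and stored as u8 for the target pointer size.
struct FakeOffsets {
    pointer_size: u8,
}

impl FakeOffsets {
    fn new(pointer_size: u8) -> Self {
        Self { pointer_size }
    }

    /// Offset of the second pointer-sized field in a two-pointer struct.
    fn second_field(&self) -> u8 {
        self.pointer_size
    }
}

fn main() {
    let offsets = FakeOffsets::new(size_of::<*mut u8>() as u8);
    // usize::from is a lossless u8 -> usize conversion; unlike `as`, it can
    // never truncate, which is why the tests above prefer it.
    assert_eq!(size_of::<*mut u8>(), usize::from(offsets.second_field()));
}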