Start a wast testing harness and add some tests.
This implements a minimal wast testing harness in tests/wast.rs, which runs the wast tests under tests/wast. It also adds tests for trapping in a variety of ways, and fixes several bugs exposed by those tests.
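As a rough, hypothetical sketch of what such a harness can look like (the run_wast_file helper and everything beyond the tests/wast directory layout are illustrative, not taken from this commit):

// Hypothetical sketch of tests/wast.rs: discover every *.wast file under
// tests/wast and hand it to a helper.
use std::fs;
use std::path::Path;

// Stand-in for the real per-file driver (parse the script, instantiate the
// module, and check its assert_return / assert_trap directives); elided here.
fn run_wast_file(path: &Path) -> Result<(), String> {
    let _ = path;
    Ok(())
}

#[test]
fn run_wast_tests() {
    let dir = Path::new("tests").join("wast");
    for entry in fs::read_dir(&dir).expect("reading tests/wast") {
        let path = entry.expect("directory entry").path();
        if path.extension().and_then(|e| e.to_str()) == Some("wast") {
            run_wast_file(&path).unwrap_or_else(|e| panic!("{}: {}", path.display(), e));
        }
    }
}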
@@ -13,10 +13,11 @@ readme = "README.md"
cranelift-codegen = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
cranelift-entity = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
cranelift-wasm = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
cranelift-frontend = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
wasmtime-environ = { path = "../environ" }
region = "1.0.0"
lazy_static = "1.2.0"
libc = "0.2.44"
libc = { version = "0.2.44", default-features = false }
errno = "0.2.4"

[build-dependencies]
@@ -400,6 +400,11 @@ HandleTrap(CONTEXT* context)

    RecordTrap(pc, codeSegment);

    // Unwind calls longjmp, so it doesn't run the automatic
    // sAlreadyHandlingTrap cleanups, so reset it manually before doing
    // a longjmp.
    sAlreadyHandlingTrap = false;

#if defined(__APPLE__)
    // Reroute the PC to run the Unwind function on the main stack after the
    // handler exits. This doesn't yet work for stack overflow traps, because
lib/execute/src/code.rs (new file, 72 lines)
@@ -0,0 +1,72 @@
//! Memory management for executable code.

use mmap::Mmap;
use region;
use std::cmp;
use std::mem;
use std::slice;
use std::string::String;
use std::vec::Vec;

/// Memory manager for executable code.
pub struct Code {
    current: Mmap,
    mmaps: Vec<Mmap>,
    position: usize,
    published: usize,
}

impl Code {
    /// Create a new `Code` instance.
    pub fn new() -> Self {
        Self {
            current: Mmap::new(),
            mmaps: Vec::new(),
            position: 0,
            published: 0,
        }
    }

    /// Allocate `size` bytes of memory which can be made executable later by
    /// calling `publish()`.
    /// TODO: alignment
    pub fn allocate(&mut self, size: usize) -> Result<*mut u8, String> {
        if self.current.len() - self.position < size {
            self.mmaps.push(mem::replace(
                &mut self.current,
                Mmap::with_size(cmp::max(0x10000, size.next_power_of_two()))?,
            ));
            self.position = 0;
        }
        let old_position = self.position;
        self.position += size;
        Ok(self.current.as_mut_slice()[old_position..self.position].as_mut_ptr())
    }

    /// Allocate enough memory to hold a copy of `slice` and copy the data into it.
    /// TODO: Reorganize the code that calls this to emit code directly into the
    /// mmap region rather than into a Vec that we need to copy in.
    pub fn allocate_copy_of_slice(&mut self, slice: &[u8]) -> Result<&mut [u8], String> {
        let ptr = self.allocate(slice.len())?;
        let new = unsafe { slice::from_raw_parts_mut(ptr, slice.len()) };
        new.copy_from_slice(slice);
        Ok(new)
    }

    /// Make all allocated memory executable.
    pub fn publish(&mut self) {
        self.mmaps
            .push(mem::replace(&mut self.current, Mmap::new()));
        self.position = 0;

        for m in &mut self.mmaps[self.published..] {
            if !m.as_ptr().is_null() {
                unsafe {
                    region::protect(m.as_mut_ptr(), m.len(), region::Protection::ReadExecute)
                        .expect("unable to make memory executable");
                }
            }
        }
        self.published = self.mmaps.len();
    }
}
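For orientation, a minimal sketch of how this allocator is intended to be used, mirroring the call sites in invoke.rs further down; the `compiled_bytes` parameter and the no-argument ABI are illustrative assumptions rather than part of this commit:

// Hypothetical helper: copy freshly compiled machine code into memory owned
// by `Code`, flip it to read+execute, and jump to it.
fn run_compiled(compiled_bytes: &[u8]) -> Result<(), String> {
    let mut code = Code::new();
    // Reserve space and copy the machine code while the pages are still writable.
    let exec = code.allocate_copy_of_slice(compiled_bytes)?.as_ptr();
    // Make every pending mapping read+execute before jumping into it.
    code.publish();
    // Assumes the compiled code follows the Rust `fn()` ABI (no args, no returns).
    let func = unsafe { std::mem::transmute::<*const u8, fn()>(exec) };
    func();
    Ok(())
}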
@@ -1,19 +1,21 @@
//! TODO: Move the contents of this file to other files, as "execute.rs" is
//! no longer a descriptive filename.

use code::Code;
use cranelift_codegen::binemit::Reloc;
use cranelift_codegen::isa::TargetIsa;
use cranelift_entity::{EntityRef, PrimaryMap};
use cranelift_wasm::{DefinedFuncIndex, FuncIndex, MemoryIndex, TableIndex};
use cranelift_wasm::{DefinedFuncIndex, MemoryIndex, TableIndex};
use instance::Instance;
use invoke::{invoke_by_index, InvokeOutcome};
use memory::LinearMemory;
use region::protect;
use region::Protection;
use signalhandlers::{ensure_eager_signal_handlers, ensure_full_signal_handlers, TrapContext};
use std::mem::transmute;
use std::ptr::{self, write_unaligned};
use std::string::String;
use std::vec::Vec;
use traphandlers::call_wasm;
use wasmtime_environ::{
    compile_module, Compilation, Export, Module, ModuleTranslation, Relocation, RelocationTarget,
    compile_module, Compilation, Module, ModuleTranslation, Relocation, RelocationTarget,
};

/// Executes a module that has been translated with the `wasmtime-environ` environment
@@ -112,7 +114,7 @@ extern "C" fn current_memory(memory_index: u32, vmctx: *mut *mut u8) -> u32 {

/// Create the VmCtx data structure for the JIT'd code to use. This must
/// match the VmCtx layout in the environment.
fn make_vmctx(instance: &mut Instance, mem_base_addrs: &mut [*mut u8]) -> Vec<*mut u8> {
fn make_vmctx(instance: &mut Instance) -> Vec<*mut u8> {
    debug_assert!(
        instance.tables.len() <= 1,
        "non-default tables is not supported"
@@ -128,7 +130,7 @@ fn make_vmctx(instance: &mut Instance, mem_base_addrs: &mut [*mut u8]) -> Vec<*m
    let mut vmctx = Vec::new();
    vmctx.push(instance.globals.as_mut_ptr());
    // FIXME: These need to be VMMemory now
    vmctx.push(mem_base_addrs.as_mut_ptr() as *mut u8);
    vmctx.push(instance.mem_base_addrs.as_mut_ptr() as *mut u8);
    // FIXME: These need to be VMTable now
    vmctx.push(default_table_ptr);
    vmctx.push(default_table_len as *mut u8);
@@ -139,6 +141,8 @@ fn make_vmctx(instance: &mut Instance, mem_base_addrs: &mut [*mut u8]) -> Vec<*m

/// Prepares the execution context.
pub fn finish_instantiation(
    code: &mut Code,
    isa: &TargetIsa,
    module: &Module,
    compilation: &Compilation,
    instance: &mut Instance,
@@ -164,67 +168,25 @@ pub fn finish_instantiation(
    }

    // Collect all memory base addresses into a Vec.
    let mut mem_base_addrs = instance
    instance.mem_base_addrs = instance
        .memories
        .values_mut()
        .map(LinearMemory::base_addr)
        .collect::<Vec<_>>();

    let mut vmctx = make_vmctx(instance, &mut mem_base_addrs);
    let mut vmctx = make_vmctx(instance);

    if let Some(start_index) = module.start_func {
        execute_by_index(module, compilation, &mut vmctx, start_index)?;
        let result = invoke_by_index(code, isa, module, compilation, &mut vmctx, start_index, &[])?;
        match result {
            InvokeOutcome::Returned { values } => {
                assert!(values.is_empty());
            }
            InvokeOutcome::Trapped { message } => {
                return Err(format!("start function trapped: {}", message));
            }
        }
    }

    Ok(vmctx)
}

/// Jumps to the code region of memory and executes the exported function
pub fn execute(
    module: &Module,
    compilation: &Compilation,
    vmctx: &mut Vec<*mut u8>,
    function: &str,
) -> Result<(), String> {
    let fn_index = match module.exports.get(function) {
        Some(Export::Function(index)) => *index,
        Some(_) => return Err(format!("exported item \"{}\" is not a function", function)),
        None => return Err(format!("no export named \"{}\"", function)),
    };

    execute_by_index(module, compilation, vmctx, fn_index)
}

fn execute_by_index(
    module: &Module,
    compilation: &Compilation,
    vmctx: &mut Vec<*mut u8>,
    fn_index: FuncIndex,
) -> Result<(), String> {
    let code_buf =
        &compilation.functions[module
            .defined_func_index(fn_index)
            .expect("imported start functions not supported yet")];

    let mut traps = TrapContext {
        triedToInstallSignalHandlers: false,
        haveSignalHandlers: false,
    };

    // Rather than writing inline assembly to jump to the code region, we use the fact that
    // the Rust ABI for calling a function with no arguments and no return values matches the one
    // of the generated code. Thanks to this, we can transmute the code region into a first-class
    // Rust function and call it.
    unsafe {
        // Ensure that our signal handlers are ready for action.
        ensure_eager_signal_handlers();
        ensure_full_signal_handlers(&mut traps);
        if !traps.haveSignalHandlers {
            return Err("failed to install signal handlers".to_string());
        }

        let func = transmute::<_, fn(*const *mut u8)>(code_buf.as_ptr());
        call_wasm(|| func(vmctx.as_mut_ptr()))?;
    }
    Ok(())
}
@@ -6,6 +6,7 @@ use cranelift_entity::EntityRef;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::{GlobalIndex, MemoryIndex, TableIndex};
use memory::LinearMemory;
use std::string::String;
use std::vec::Vec;
use wasmtime_environ::{Compilation, DataInitializer, Module, TableElements};
@@ -20,6 +21,9 @@ pub struct Instance {

    /// WebAssembly global variable data.
    pub globals: Vec<u8>,

    /// Memory base address vector pointed to by vmctx.
    pub mem_base_addrs: Vec<*mut u8>,
}

impl Instance {
@@ -33,6 +37,7 @@ impl Instance {
            tables: PrimaryMap::new(),
            memories: PrimaryMap::new(),
            globals: Vec::new(),
            mem_base_addrs: Vec::new(),
        };
        result.instantiate_tables(module, compilation, &module.table_elements);
        result.instantiate_memories(module, data_initializers)?;
lib/execute/src/invoke.rs (new file, 271 lines)
@@ -0,0 +1,271 @@
//! Support for invoking wasm functions from outside a wasm module.

use code::Code;
use cranelift_codegen::ir::InstBuilder;
use cranelift_codegen::{binemit, ir, isa, Context};
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};
use cranelift_wasm::FuncIndex;
use signalhandlers::{ensure_eager_signal_handlers, ensure_full_signal_handlers, TrapContext};
use std::mem;
use std::ptr;
use std::string::String;
use std::vec::Vec;
use traphandlers::call_wasm;
use wasmtime_environ::{Compilation, Export, Module, RelocSink};

/// A runtime value.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Value {
    /// A runtime value with type i32.
    I32(i32),
    /// A runtime value with type i64.
    I64(i64),
    /// A runtime value with type f32.
    F32(u32),
    /// A runtime value with type f64.
    F64(u64),
}

impl Value {
    /// Return the type of this `Value`.
    pub fn value_type(self) -> ir::Type {
        match self {
            Value::I32(_) => ir::types::I32,
            Value::I64(_) => ir::types::I64,
            Value::F32(_) => ir::types::F32,
            Value::F64(_) => ir::types::F64,
        }
    }

    /// Assuming this `Value` holds an `i32`, return that value.
    pub fn unwrap_i32(self) -> i32 {
        match self {
            Value::I32(x) => x,
            _ => panic!("unwrapping value of type {} as i32", self.value_type()),
        }
    }

    /// Assuming this `Value` holds an `i64`, return that value.
    pub fn unwrap_i64(self) -> i64 {
        match self {
            Value::I64(x) => x,
            _ => panic!("unwrapping value of type {} as i64", self.value_type()),
        }
    }

    /// Assuming this `Value` holds an `f32`, return that value.
    pub fn unwrap_f32(self) -> u32 {
        match self {
            Value::F32(x) => x,
            _ => panic!("unwrapping value of type {} as f32", self.value_type()),
        }
    }

    /// Assuming this `Value` holds an `f64`, return that value.
    pub fn unwrap_f64(self) -> u64 {
        match self {
            Value::F64(x) => x,
            _ => panic!("unwrapping value of type {} as f64", self.value_type()),
        }
    }
}

/// The result of invoking a wasm function.
#[derive(Debug)]
pub enum InvokeOutcome {
    /// The function returned normally. Its return values are provided.
    Returned {
        /// The return values.
        values: Vec<Value>,
    },
    /// A trap occurred while the function was executing.
    Trapped {
        /// The trap message.
        message: String,
    },
}

/// Jumps to the code region of memory and invokes the exported function
pub fn invoke(
    code: &mut Code,
    isa: &isa::TargetIsa,
    module: &Module,
    compilation: &Compilation,
    vmctx: &mut Vec<*mut u8>,
    function: &str,
    args: &[Value],
) -> Result<InvokeOutcome, String> {
    let fn_index = match module.exports.get(function) {
        Some(Export::Function(index)) => *index,
        Some(_) => return Err(format!("exported item \"{}\" is not a function", function)),
        None => return Err(format!("no export named \"{}\"", function)),
    };

    invoke_by_index(code, isa, module, compilation, vmctx, fn_index, args)
}

pub fn invoke_by_index(
    code: &mut Code,
    isa: &isa::TargetIsa,
    module: &Module,
    compilation: &Compilation,
    vmctx: &mut Vec<*mut u8>,
    fn_index: FuncIndex,
    args: &[Value],
) -> Result<InvokeOutcome, String> {
    let code_buf =
        &compilation.functions[module
            .defined_func_index(fn_index)
            .expect("imported start functions not supported yet")];
    let sig = &module.signatures[module.functions[fn_index]];

    let exec_code_buf = code.allocate_copy_of_slice(&code_buf)?.as_ptr();

    // TODO: Move this out to be done once per thread rather than per call.
    let mut traps = TrapContext {
        triedToInstallSignalHandlers: false,
        haveSignalHandlers: false,
    };

    // Rather than writing inline assembly to jump to the code region, we use the fact that
    // the Rust ABI for calling a function with no arguments and no return values matches the one
    // of the generated code. Thanks to this, we can transmute the code region into a first-class
    // Rust function and call it.
    // Ensure that our signal handlers are ready for action.
    ensure_eager_signal_handlers();
    ensure_full_signal_handlers(&mut traps);
    if !traps.haveSignalHandlers {
        return Err("failed to install signal handlers".to_string());
    }

    call_through_wrapper(
        code,
        isa,
        exec_code_buf as usize,
        vmctx.as_ptr() as usize,
        args,
        &sig,
    )
}

fn call_through_wrapper(
    code: &mut Code,
    isa: &isa::TargetIsa,
    callee: usize,
    vmctx: usize,
    args: &[Value],
    sig: &ir::Signature,
) -> Result<InvokeOutcome, String> {
    for (index, value) in args.iter().enumerate() {
        assert_eq!(value.value_type(), sig.params[index].value_type);
    }

    let wrapper_sig = ir::Signature::new(isa.frontend_config().default_call_conv);
    let mut context = Context::new();
    context.func = ir::Function::with_name_signature(ir::ExternalName::user(0, 0), wrapper_sig);

    let value_size = 8;
    let mut results_vec = Vec::new();
    results_vec.resize(sig.returns.len(), 0i64);

    let mut fn_builder_ctx = FunctionBuilderContext::new();
    {
        let mut builder = FunctionBuilder::new(&mut context.func, &mut fn_builder_ctx);
        let block0 = builder.create_ebb();

        builder.append_ebb_params_for_function_params(block0);

        builder.switch_to_block(block0);
        builder.seal_block(block0);

        let mut callee_args = Vec::new();
        let pointer_type = isa.pointer_type();

        let callee_value = builder.ins().iconst(pointer_type, callee as i64);

        for value in args {
            match value {
                Value::I32(i) => {
                    callee_args.push(builder.ins().iconst(ir::types::I32, i64::from(*i)))
                }
                Value::I64(i) => callee_args.push(builder.ins().iconst(ir::types::I64, *i)),
                Value::F32(i) => callee_args.push(
                    builder
                        .ins()
                        .f32const(ir::immediates::Ieee32::with_bits(*i)),
                ),
                Value::F64(i) => callee_args.push(
                    builder
                        .ins()
                        .f64const(ir::immediates::Ieee64::with_bits(*i)),
                ),
            }
        }

        let vmctx_value = builder.ins().iconst(pointer_type, vmctx as i64);
        callee_args.push(vmctx_value);

        let new_sig = builder.import_signature(sig.clone());

        // TODO: It's possible to make this a direct call. We just need Cranelift
        // to support functions declared with an immediate integer address.
        let call = builder
            .ins()
            .call_indirect(new_sig, callee_value, &callee_args);

        let results = builder.func.dfg.inst_results(call).to_vec();

        let results_vec_value = builder
            .ins()
            .iconst(pointer_type, results_vec.as_ptr() as i64);

        let mut mflags = ir::MemFlags::new();
        mflags.set_notrap();
        mflags.set_aligned();
        for (i, r) in results.iter().enumerate() {
            builder
                .ins()
                .store(mflags, *r, results_vec_value, (i * value_size) as i32);
        }

        builder.ins().return_(&[]);
    }

    let mut code_buf: Vec<u8> = Vec::new();
    let mut reloc_sink = RelocSink::new();
    let mut trap_sink = binemit::NullTrapSink {};
    context
        .compile_and_emit(isa, &mut code_buf, &mut reloc_sink, &mut trap_sink)
        .map_err(|e| e.to_string())?;
    assert!(reloc_sink.func_relocs.is_empty());

    let exec_code_buf = code.allocate_copy_of_slice(&code_buf)?.as_ptr();
    code.publish();

    let func = unsafe { mem::transmute::<_, fn()>(exec_code_buf) };

    Ok(match call_wasm(func) {
        Ok(()) => {
            let mut values = Vec::with_capacity(sig.returns.len());

            for (index, abi_param) in sig.returns.iter().enumerate() {
                let v = unsafe {
                    let ptr = (results_vec.as_ptr() as *const u8).add(index * value_size);

                    match abi_param.value_type {
                        ir::types::I32 => Value::I32(ptr::read(ptr as *const i32)),
                        ir::types::I64 => Value::I64(ptr::read(ptr as *const i64)),
                        ir::types::F32 => Value::F32(ptr::read(ptr as *const u32)),
                        ir::types::F64 => Value::F64(ptr::read(ptr as *const u64)),
                        other => panic!("unsupported value type {:?}", other),
                    }
                };

                values.push(v);
            }

            InvokeOutcome::Returned { values }
        }
        Err(message) => InvokeOutcome::Trapped { message },
    })
}
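To show how this API is meant to be driven (for example by the wast harness), here is a hedged sketch; the `code`, `isa`, `module`, `compilation`, and `vmctx` values are assumed to come from compile_and_link_module / finish_instantiation, and the export name and expected result are illustrative:

// Sketch: call an exported `add` function with two i32 arguments and check
// both possible outcomes.
fn check_add(
    code: &mut Code,
    isa: &isa::TargetIsa,
    module: &Module,
    compilation: &Compilation,
    vmctx: &mut Vec<*mut u8>,
) -> Result<(), String> {
    let outcome = invoke(
        code,
        isa,
        module,
        compilation,
        vmctx,
        "add",
        &[Value::I32(2), Value::I32(3)],
    )?;
    match outcome {
        InvokeOutcome::Returned { values } => assert_eq!(values, vec![Value::I32(5)]),
        InvokeOutcome::Trapped { message } => panic!("unexpected trap: {}", message),
    }
    Ok(())
}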
@@ -29,6 +29,7 @@

extern crate cranelift_codegen;
extern crate cranelift_entity;
extern crate cranelift_frontend;
extern crate cranelift_wasm;
extern crate errno;
extern crate region;
@@ -40,14 +41,19 @@ extern crate alloc;
extern crate lazy_static;
extern crate libc;

mod code;
mod execute;
mod instance;
mod invoke;
mod memory;
mod mmap;
mod signalhandlers;
mod traphandlers;

pub use execute::{compile_and_link_module, execute, finish_instantiation};
pub use code::Code;
pub use execute::{compile_and_link_module, finish_instantiation};
pub use instance::Instance;
pub use invoke::{invoke, InvokeOutcome, Value};
pub use traphandlers::{call_wasm, LookupCodeSegment, RecordTrap, Unwind};

#[cfg(not(feature = "std"))]
@@ -1,108 +1,17 @@
use errno;
use libc;
//! Memory management for linear memory.

use mmap::Mmap;
use region;
use std::fmt;
use std::mem;
use std::ptr;
use std::slice;
use std::string::String;
use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};

/// Round `size` up to the nearest multiple of `page_size`.
fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
    (size + (page_size - 1)) & !(page_size - 1)
}

/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
struct PtrLen {
    ptr: *mut u8,
    len: usize,
}

impl PtrLen {
    /// Create a new `PtrLen` pointing to at least `size` bytes of memory,
    /// suitably sized and aligned for memory protection.
    #[cfg(not(target_os = "windows"))]
    fn with_size(size: usize) -> Result<Self, String> {
        let page_size = region::page::size();
        let alloc_size = round_up_to_page_size(size, page_size);
        unsafe {
            let ptr = libc::mmap(
                ptr::null_mut(),
                alloc_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANON,
                -1,
                0,
            );
            if mem::transmute::<_, isize>(ptr) != -1isize {
                Ok(Self {
                    ptr: ptr as *mut u8,
                    len: alloc_size,
                })
            } else {
                Err(errno::errno().to_string())
            }
        }
    }

    #[cfg(target_os = "windows")]
    fn with_size(size: usize) -> Result<Self, String> {
        use winapi::um::memoryapi::VirtualAlloc;
        use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_READWRITE};

        let page_size = region::page::size();

        // VirtualAlloc always rounds up to the next multiple of the page size
        let ptr = unsafe {
            VirtualAlloc(
                ptr::null_mut(),
                size,
                MEM_COMMIT | MEM_RESERVE,
                PAGE_READWRITE,
            )
        };
        if !ptr.is_null() {
            Ok(Self {
                ptr: ptr as *mut u8,
                len: round_up_to_page_size(size, page_size),
            })
        } else {
            Err(errno::errno().to_string())
        }
    }

    fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr, self.len) }
    }
}

impl Drop for PtrLen {
    #[cfg(not(target_os = "windows"))]
    fn drop(&mut self) {
        let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.len) };
        assert_eq!(r, 0);
    }

    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        use winapi::um::memoryapi::VirtualFree;
        use winapi::um::winnt::MEM_RELEASE;
        let r = unsafe { VirtualFree(self.ptr, self.len, MEM_RELEASE) };
        assert_eq!(r, 0);
    }
}

/// A linear memory instance.
///
/// This linear memory has a stable base address and at the same time allows
/// for dynamic growth.
pub struct LinearMemory {
    ptrlen: PtrLen,
    mmap: Mmap,
    current: u32,
    maximum: Option<u32>,
    offset_guard_size: usize,
@@ -132,19 +41,19 @@ impl LinearMemory {
        let unmapped_bytes = unmapped_pages * WASM_PAGE_SIZE as usize;
        let inaccessible_bytes = unmapped_bytes + offset_guard_bytes;

        let ptrlen = PtrLen::with_size(request_bytes)?;
        let mmap = Mmap::with_size(request_bytes)?;

        // Make the unmapped and offset-guard pages inaccessible.
        unsafe {
            region::protect(
                ptrlen.ptr.add(mapped_bytes),
                mmap.as_ptr().add(mapped_bytes),
                inaccessible_bytes,
                region::Protection::Read,
            ).expect("unable to make memory readonly");
                region::Protection::None,
            ).expect("unable to make memory inaccessible");
        }

        Ok(Self {
            ptrlen,
            mmap,
            current: plan.memory.minimum,
            maximum: plan.memory.maximum,
            offset_guard_size: offset_guard_bytes,
@@ -153,13 +62,13 @@ impl LinearMemory {

    /// Returns the base address of this linear memory.
    pub fn base_addr(&mut self) -> *mut u8 {
        self.ptrlen.ptr
        self.mmap.as_mut_ptr()
    }

    /// Returns the number of allocated wasm pages.
    pub fn current_size(&self) -> u32 {
        assert_eq!(self.ptrlen.len % WASM_PAGE_SIZE as usize, 0);
        let num_pages = self.ptrlen.len / WASM_PAGE_SIZE as usize;
        assert_eq!(self.mmap.len() % WASM_PAGE_SIZE as usize, 0);
        let num_pages = self.mmap.len() / WASM_PAGE_SIZE as usize;
        assert_eq!(num_pages as u32 as usize, num_pages);
        num_pages as u32
    }
@@ -193,29 +102,29 @@ impl LinearMemory {

        let new_bytes = new_pages as usize * WASM_PAGE_SIZE as usize;

        if new_bytes > self.ptrlen.len {
        if new_bytes > self.mmap.len() {
            // If we have no maximum, this is a "dynamic" heap, and it's allowed to move.
            assert!(self.maximum.is_none());
            let mapped_pages = self.current as usize;
            let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
            let guard_bytes = self.offset_guard_size;

            let mut new_ptrlen = PtrLen::with_size(new_bytes).ok()?;
            let mut new_mmap = Mmap::with_size(new_bytes).ok()?;

            // Make the offset-guard pages inaccessible.
            unsafe {
                region::protect(
                    new_ptrlen.ptr.add(mapped_bytes),
                    new_mmap.as_ptr().add(mapped_bytes),
                    guard_bytes,
                    region::Protection::Read,
                ).expect("unable to make memory readonly");
            }

            new_ptrlen
            new_mmap
                .as_mut_slice()
                .copy_from_slice(self.ptrlen.as_slice());
                .copy_from_slice(self.mmap.as_slice());

            self.ptrlen = new_ptrlen;
            self.mmap = new_mmap;
        }

        self.current = new_pages;
@@ -235,25 +144,12 @@ impl fmt::Debug for LinearMemory {

impl AsRef<[u8]> for LinearMemory {
    fn as_ref(&self) -> &[u8] {
        self.ptrlen.as_slice()
        self.mmap.as_slice()
    }
}

impl AsMut<[u8]> for LinearMemory {
    fn as_mut(&mut self) -> &mut [u8] {
        self.ptrlen.as_mut_slice()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_up_to_page_size() {
        assert_eq!(round_up_to_page_size(0, 4096), 0);
        assert_eq!(round_up_to_page_size(1, 4096), 4096);
        assert_eq!(round_up_to_page_size(4096, 4096), 4096);
        assert_eq!(round_up_to_page_size(4097, 4096), 8192);
        self.mmap.as_mut_slice()
    }
}
lib/execute/src/mmap.rs (new file, 136 lines)
@@ -0,0 +1,136 @@
//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.

use errno;
use libc;
use region;
use std::mem;
use std::ptr;
use std::slice;
use std::string::String;

/// Round `size` up to the nearest multiple of `page_size`.
fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
    (size + (page_size - 1)) & !(page_size - 1)
}

/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
pub struct Mmap {
    ptr: *mut u8,
    len: usize,
}

impl Mmap {
    pub fn new() -> Self {
        Self {
            ptr: ptr::null_mut(),
            len: 0,
        }
    }

    /// Create a new `Mmap` pointing to at least `size` bytes of memory,
    /// suitably sized and aligned for memory protection.
    #[cfg(not(target_os = "windows"))]
    pub fn with_size(size: usize) -> Result<Self, String> {
        let page_size = region::page::size();
        let alloc_size = round_up_to_page_size(size, page_size);
        unsafe {
            let ptr = libc::mmap(
                ptr::null_mut(),
                alloc_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANON,
                -1,
                0,
            );
            if mem::transmute::<_, isize>(ptr) != -1isize {
                Ok(Self {
                    ptr: ptr as *mut u8,
                    len: alloc_size,
                })
            } else {
                Err(errno::errno().to_string())
            }
        }
    }

    #[cfg(target_os = "windows")]
    pub fn with_size(size: usize) -> Result<Self, String> {
        use winapi::um::memoryapi::VirtualAlloc;
        use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_READWRITE};

        let page_size = region::page::size();

        // VirtualAlloc always rounds up to the next multiple of the page size
        let ptr = unsafe {
            VirtualAlloc(
                ptr::null_mut(),
                size,
                MEM_COMMIT | MEM_RESERVE,
                PAGE_READWRITE,
            )
        };
        if !ptr.is_null() {
            Ok(Self {
                ptr: ptr as *mut u8,
                len: round_up_to_page_size(size, page_size),
            })
        } else {
            Err(errno::errno().to_string())
        }
    }

    pub fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr, self.len) }
    }

    pub fn as_ptr(&self) -> *const u8 {
        self.ptr
    }

    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.ptr
    }

    pub fn len(&self) -> usize {
        self.len
    }
}

impl Drop for Mmap {
    #[cfg(not(target_os = "windows"))]
    fn drop(&mut self) {
        if !self.ptr.is_null() {
            let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.len) };
            assert_eq!(r, 0, "munmap failed: {}", errno::errno());
        }
    }

    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        if !self.ptr.is_null() {
            use winapi::um::memoryapi::VirtualFree;
            use winapi::um::winnt::MEM_RELEASE;
            let r = unsafe { VirtualFree(self.ptr, self.len, MEM_RELEASE) };
            assert_eq!(r, 0);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_up_to_page_size() {
        assert_eq!(round_up_to_page_size(0, 4096), 0);
        assert_eq!(round_up_to_page_size(1, 4096), 4096);
        assert_eq!(round_up_to_page_size(4096, 4096), 4096);
        assert_eq!(round_up_to_page_size(4097, 4096), 8192);
    }
}
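As a usage note, the allocate-then-protect pattern that LinearMemory builds on looks roughly like this (Unix-only sketch; the 64 KiB size and single guard page are illustrative):

fn reserve_with_guard() -> Result<Mmap, String> {
    let page = region::page::size();
    // One wasm page of zeroed read+write memory plus one trailing guard page.
    let map = Mmap::with_size(0x10000 + page)?;
    unsafe {
        // Make the guard page inaccessible so out-of-bounds accesses trap
        // instead of silently touching adjacent memory.
        region::protect(map.as_ptr().add(0x10000), page, region::Protection::None)
            .expect("unable to make guard page inaccessible");
    }
    Ok(map)
}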
@@ -6,6 +6,7 @@ use signalhandlers::{jmp_buf, CodeSegment};
use std::cell::{Cell, RefCell};
use std::mem;
use std::ptr;
use std::string::String;

// Currently we use setjmp/longjmp to unwind out of a signal handler
// and back to the point where WebAssembly was called (via `call_wasm`).