Initial support for function, table, memory, and global imports.

Dan Gohman
2018-12-08 17:38:28 -05:00
parent 93f33141e9
commit 56850d481d
45 changed files with 3181 additions and 2181 deletions

View File

@@ -1,12 +1,10 @@
//! Memory management for executable code.
use mmap::Mmap;
use region;
use std::cmp;
use std::mem;
use std::string::String;
use std::vec::Vec;
use vmcontext::VMFunctionBody;
use std::{cmp, mem};
use wasmtime_runtime::{Mmap, VMFunctionBody};
/// Memory manager for executable code.
pub struct Code {
@@ -47,7 +45,7 @@ impl Code {
}
/// Convert a mutable slice from u8 to VMFunctionBody.
fn as_mut_vmfunc_slice(slice: &mut [u8]) -> &mut [VMFunctionBody] {
fn view_as_mut_vmfunc_slice(slice: &mut [u8]) -> &mut [VMFunctionBody] {
let byte_ptr: *mut [u8] = slice;
let body_ptr = byte_ptr as *mut [VMFunctionBody];
unsafe { &mut *body_ptr }
@@ -62,7 +60,7 @@ impl Code {
) -> Result<&mut [VMFunctionBody], String> {
let new = self.allocate(slice.len())?;
new.copy_from_slice(slice);
Ok(Self::as_mut_vmfunc_slice(new))
Ok(Self::view_as_mut_vmfunc_slice(new))
}
/// Make all allocated memory executable.

View File

@@ -1,22 +1,29 @@
use cranelift_codegen::ir;
use cranelift_wasm::Global;
use vmcontext::{VMFunctionBody, VMGlobal, VMMemory, VMTable};
use wasmtime_environ::{MemoryPlan, TablePlan};
use wasmtime_runtime::{
VMContext, VMFunctionBody, VMGlobalDefinition, VMMemoryDefinition, VMTableDefinition,
};
/// An exported function.
pub struct FunctionExport {
/// The address of the native-code function.
pub address: *const VMFunctionBody,
/// The function signature declaration, used for compatibility checking.
pub signature: ir::Signature,
}
/// The value of an export passed from one instance to another.
pub enum ExportValue {
pub enum Export {
/// A function export value.
Function {
/// The address of the native-code function.
address: *const VMFunctionBody,
/// The function signature declaration, used for compatibility checking.
signature: ir::Signature,
},
Function(FunctionExport),
/// A table export value.
Table {
/// The address of the table descriptor.
address: *mut VMTable,
address: *mut VMTableDefinition,
/// Pointer to the containing VMContext.
vmctx: *mut VMContext,
/// The table declaration, used for compatibility checking.
table: TablePlan,
},
@@ -24,7 +31,9 @@ pub enum ExportValue {
/// A memory export value.
Memory {
/// The address of the memory descriptor.
address: *mut VMMemory,
address: *mut VMMemoryDefinition,
/// Pointer to the containing VMContext.
vmctx: *mut VMContext,
/// The memory declaration, used for compatibility checking.
memory: MemoryPlan,
},
@@ -32,45 +41,57 @@ pub enum ExportValue {
/// A global export value.
Global {
/// The address of the global storage.
address: *mut VMGlobal,
address: *mut VMGlobalDefinition,
/// The global declaration, used for compatibility checking.
global: Global,
},
}
impl ExportValue {
impl Export {
/// Construct a function export value.
pub fn function(address: *const VMFunctionBody, signature: ir::Signature) -> Self {
ExportValue::Function { address, signature }
Export::Function(FunctionExport { address, signature })
}
/// Construct a table export value.
pub fn table(address: *mut VMTable, table: TablePlan) -> Self {
ExportValue::Table { address, table }
pub fn table(address: *mut VMTableDefinition, vmctx: *mut VMContext, table: TablePlan) -> Self {
Export::Table {
address,
vmctx,
table,
}
}
/// Construct a memory export value.
pub fn memory(address: *mut VMMemory, memory: MemoryPlan) -> Self {
ExportValue::Memory { address, memory }
pub fn memory(
address: *mut VMMemoryDefinition,
vmctx: *mut VMContext,
memory: MemoryPlan,
) -> Self {
Export::Memory {
address,
vmctx,
memory,
}
}
/// Construct a global export value.
pub fn global(address: *mut VMGlobal, global: Global) -> Self {
ExportValue::Global { address, global }
pub fn global(address: *mut VMGlobalDefinition, global: Global) -> Self {
Export::Global { address, global }
}
}
/// Import resolver connects imports with available exported values.
pub trait Resolver {
/// Resolve the given module/field combo.
fn resolve(&mut self, module: &str, field: &str) -> Option<ExportValue>;
fn resolve(&mut self, module: &str, field: &str) -> Option<Export>;
}
/// `Resolver` implementation that always resolves to `None`.
pub struct NullResolver {}
impl Resolver for NullResolver {
fn resolve(&mut self, _module: &str, _field: &str) -> Option<ExportValue> {
fn resolve(&mut self, _module: &str, _field: &str) -> Option<Export> {
None
}
}
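
For context, a minimal `Resolver` implementation beyond `NullResolver` might serve pre-registered exports from a map. A sketch, assuming the `Export` and `Resolver` definitions above; the keying scheme and the `MapResolver` name are illustrative only:

use std::collections::HashMap;

pub struct MapResolver {
    exports: HashMap<String, Export>,
}

impl MapResolver {
    pub fn new() -> Self {
        Self { exports: HashMap::new() }
    }
    pub fn insert(&mut self, module: &str, field: &str, export: Export) {
        self.exports.insert(format!("{}/{}", module, field), export);
    }
}

impl Resolver for MapResolver {
    fn resolve(&mut self, module: &str, field: &str) -> Option<Export> {
        // `Export` holds raw pointers and plan data, so rather than assume it
        // is Clone, this sketch hands out each registered export at most once.
        self.exports.remove(&format!("{}/{}", module, field))
    }
}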

View File

@@ -1,65 +0,0 @@
//! Support for reading the value of a wasm global from outside the module.
use action::{ActionError, RuntimeValue};
use cranelift_codegen::ir;
use cranelift_entity::EntityRef;
use cranelift_wasm::GlobalIndex;
use instance::Instance;
use wasmtime_environ::{Export, Module};
/// Reads the value of the named global variable in `module`.
pub fn get(
module: &Module,
instance: &mut Instance,
global_name: &str,
) -> Result<RuntimeValue, ActionError> {
let global_index = match module.exports.get(global_name) {
Some(Export::Global(index)) => *index,
Some(_) => {
return Err(ActionError::Kind(format!(
"exported item \"{}\" is not a global",
global_name
)))
}
None => {
return Err(ActionError::Field(format!(
"no export named \"{}\"",
global_name
)))
}
};
get_by_index(module, instance, global_index)
}
/// Reads the value of the indexed global variable in `module`.
pub fn get_by_index(
module: &Module,
instance: &mut Instance,
global_index: GlobalIndex,
) -> Result<RuntimeValue, ActionError> {
unsafe {
let vmctx = &mut *instance.vmctx();
let vmglobal = vmctx.global(global_index);
let definition = vmglobal.get_definition(module.is_imported_global(global_index));
Ok(
match module
.globals
.get(global_index)
.ok_or_else(|| ActionError::Index(global_index.index() as u64))?
.ty
{
ir::types::I32 => RuntimeValue::I32(*definition.as_i32()),
ir::types::I64 => RuntimeValue::I64(*definition.as_i64()),
ir::types::F32 => RuntimeValue::F32(*definition.as_f32_bits()),
ir::types::F64 => RuntimeValue::F64(*definition.as_f64_bits()),
other => {
return Err(ActionError::Type(format!(
"global with type {} not supported",
other
)))
}
},
)
}
}
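
A hypothetical caller of the `get` entry point above (a sketch only; assumes `RuntimeValue` derives `Debug`):

fn print_global(module: &Module, instance: &mut Instance) -> Result<(), ActionError> {
    // Look up the export named "g" and read its current value.
    let value = get(module, instance, "g")?;
    println!("g = {:?}", value);
    Ok(())
}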

View File

@@ -1,30 +0,0 @@
use cranelift_entity::PrimaryMap;
use cranelift_wasm::{FuncIndex, GlobalIndex, MemoryIndex, TableIndex};
use vmcontext::{VMFunctionBody, VMGlobal, VMMemory, VMTable};
/// Resolved import pointers.
#[derive(Debug)]
pub struct Imports {
/// Resolved addresses for imported functions.
pub functions: PrimaryMap<FuncIndex, *const VMFunctionBody>,
/// Resolved addresses for imported tables.
pub tables: PrimaryMap<TableIndex, *mut VMTable>,
/// Resolved addresses for imported globals.
pub globals: PrimaryMap<GlobalIndex, *mut VMGlobal>,
/// Resolved addresses for imported memories.
pub memories: PrimaryMap<MemoryIndex, *mut VMMemory>,
}
impl Imports {
pub fn new() -> Self {
Self {
functions: PrimaryMap::new(),
tables: PrimaryMap::new(),
globals: PrimaryMap::new(),
memories: PrimaryMap::new(),
}
}
}
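
The `PrimaryMap` pattern used by `Imports` is worth a standalone sketch: pushing values in import order yields densely numbered entity keys that line up with the module's own function/table/global/memory indices. (`FuncIndex` here is a local stand-in built with `entity_impl!`.)

use cranelift_entity::{entity_impl, PrimaryMap};

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
struct FuncIndex(u32);
entity_impl!(FuncIndex);

fn main() {
    let mut functions: PrimaryMap<FuncIndex, usize> = PrimaryMap::new();
    // Keys are handed out in push order: 0, 1, 2, ...
    let k0 = functions.push(0xdead);
    let k1 = functions.push(0xbeef);
    assert_eq!(functions[k0], 0xdead);
    assert_eq!(functions[k1], 0xbeef);
}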

View File

@@ -1,235 +0,0 @@
//! An `Instance` contains all the runtime state used by execution of a wasm
//! module.
use cranelift_entity::EntityRef;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::{DefinedFuncIndex, FuncIndex, GlobalIndex, MemoryIndex, TableIndex};
use imports::Imports;
use memory::LinearMemory;
use sig_registry::SignatureRegistry;
use std::ptr;
use std::slice;
use std::string::String;
use table::Table;
use vmcontext::{VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMGlobal, VMMemory, VMTable};
use wasmtime_environ::{DataInitializer, Module};
/// An Instance of a WebAssembly module.
#[derive(Debug)]
pub struct Instance {
/// WebAssembly linear memory data.
memories: PrimaryMap<MemoryIndex, LinearMemory>,
/// WebAssembly table data.
tables: PrimaryMap<TableIndex, Table>,
/// Function Signature IDs.
/// FIXME: This should be shared across instances rather than per-Instance.
sig_registry: SignatureRegistry,
/// Memory base address vector pointed to by vmctx.
vmctx_memories: PrimaryMap<MemoryIndex, VMMemory>,
/// WebAssembly global variable data.
vmctx_globals: PrimaryMap<GlobalIndex, VMGlobal>,
/// Table storage base address vector pointed to by vmctx.
vmctx_tables: PrimaryMap<TableIndex, VMTable>,
/// Pointer values for resolved imports.
imports: Imports,
/// Pointers to functions in executable memory.
allocated_functions: PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>,
/// Context pointer used by JIT code.
vmctx: VMContext,
}
impl Instance {
/// Create a new `Instance`. In order to complete instantiation, call
/// `invoke_start_function`. `allocated_functions` holds the function bodies
/// which have been placed in executable memory.
pub fn new(
module: &Module,
allocated_functions: PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>,
data_initializers: &[DataInitializer],
imports: Imports,
) -> Result<Self, String> {
let mut sig_registry = instantiate_signatures(module);
let mut memories = instantiate_memories(module, data_initializers)?;
let mut tables = instantiate_tables(module, &allocated_functions, &mut sig_registry);
let mut vmctx_memories = memories
.values_mut()
.map(LinearMemory::vmmemory)
.collect::<PrimaryMap<MemoryIndex, _>>();
let mut vmctx_globals = instantiate_globals(module);
let mut vmctx_tables = tables
.values_mut()
.map(Table::vmtable)
.collect::<PrimaryMap<TableIndex, _>>();
let vmctx_memories_ptr = vmctx_memories.values_mut().into_slice().as_mut_ptr();
let vmctx_globals_ptr = vmctx_globals.values_mut().into_slice().as_mut_ptr();
let vmctx_tables_ptr = vmctx_tables.values_mut().into_slice().as_mut_ptr();
let signature_ids_ptr = sig_registry.vmsignature_ids();
Ok(Self {
memories,
tables,
sig_registry,
vmctx_memories,
vmctx_globals,
vmctx_tables,
imports,
allocated_functions,
vmctx: VMContext::new(
vmctx_memories_ptr,
vmctx_globals_ptr,
vmctx_tables_ptr,
signature_ids_ptr,
),
})
}
/// Return the vmctx pointer to be passed into JIT code.
pub fn vmctx(&mut self) -> &mut VMContext {
&mut self.vmctx
}
/// Return the offset from the vmctx pointer to its containing Instance.
pub(crate) fn vmctx_offset() -> isize {
offset_of!(Self, vmctx) as isize
}
/// Return the pointer to executable memory for the given function index.
pub(crate) fn get_allocated_function(
&self,
index: DefinedFuncIndex,
) -> Option<&[VMFunctionBody]> {
self.allocated_functions
.get(index)
.map(|(ptr, len)| unsafe { slice::from_raw_parts(*ptr, *len) })
}
/// Return the pointer to executable memory for the given function index.
pub(crate) fn get_imported_function(&self, index: FuncIndex) -> Option<*const VMFunctionBody> {
self.imports.functions.get(index).cloned()
}
/// Grow memory by the specified number of pages.
///
/// Returns `None` if memory can't be grown by the specified number
/// of pages.
pub fn memory_grow(&mut self, memory_index: MemoryIndex, delta: u32) -> Option<u32> {
let result = self
.memories
.get_mut(memory_index)
.unwrap_or_else(|| panic!("no memory for index {}", memory_index.index()))
.grow(delta);
// Keep current the VMContext pointers used by JIT code.
self.vmctx_memories[memory_index] = self.memories[memory_index].vmmemory();
result
}
/// Returns the number of allocated wasm pages.
pub fn memory_size(&mut self, memory_index: MemoryIndex) -> u32 {
self.memories
.get(memory_index)
.unwrap_or_else(|| panic!("no memory for index {}", memory_index.index()))
.size()
}
/// Returns a slice of the contents of allocated linear memory.
pub fn inspect_memory(&self, memory_index: MemoryIndex, address: usize, len: usize) -> &[u8] {
&self
.memories
.get(memory_index)
.unwrap_or_else(|| panic!("no memory for index {}", memory_index.index()))
.as_ref()[address..address + len]
}
/// Shows the value of a global variable.
pub fn inspect_global(&self, global_index: GlobalIndex) -> &VMGlobal {
&self.vmctx_globals[global_index]
}
}
fn instantiate_signatures(module: &Module) -> SignatureRegistry {
let mut sig_registry = SignatureRegistry::new();
for (sig_index, sig) in module.signatures.iter() {
sig_registry.register(sig_index, sig);
}
sig_registry
}
/// Allocate memory for just the memories of the current module.
fn instantiate_memories(
module: &Module,
data_initializers: &[DataInitializer],
) -> Result<PrimaryMap<MemoryIndex, LinearMemory>, String> {
let mut memories = PrimaryMap::with_capacity(module.memory_plans.len());
for plan in module.memory_plans.values() {
memories.push(LinearMemory::new(&plan)?);
}
for init in data_initializers {
debug_assert!(init.base.is_none(), "globalvar base not supported yet");
let mem_mut = memories[init.memory_index].as_mut();
let to_init = &mut mem_mut[init.offset..init.offset + init.data.len()];
to_init.copy_from_slice(init.data);
}
Ok(memories)
}
/// Allocate memory for just the tables of the current module.
fn instantiate_tables(
module: &Module,
allocated_functions: &PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>,
sig_registry: &mut SignatureRegistry,
) -> PrimaryMap<TableIndex, Table> {
let mut tables = PrimaryMap::with_capacity(module.table_plans.len());
for table in module.table_plans.values() {
tables.push(Table::new(table));
}
for init in &module.table_elements {
debug_assert!(init.base.is_none(), "globalvar base not supported yet");
let slice = tables[init.table_index].as_mut();
let subslice = &mut slice[init.offset..init.offset + init.elements.len()];
for (i, func_idx) in init.elements.iter().enumerate() {
let callee_sig = module.functions[*func_idx];
let func_ptr = allocated_functions[module
.defined_func_index(*func_idx)
.expect("table element initializer with imported function not supported yet")]
.0;
let type_id = sig_registry.lookup(callee_sig);
subslice[i] = VMCallerCheckedAnyfunc { func_ptr, type_id };
}
}
tables
}
/// Allocate memory for just the globals of the current module,
/// without any initializers applied yet.
fn instantiate_globals(module: &Module) -> PrimaryMap<GlobalIndex, VMGlobal> {
let mut vmctx_globals = PrimaryMap::with_capacity(module.globals.len());
for (index, global) in module.globals.iter() {
if module.is_imported_global(index) {
// FIXME: get the actual import
vmctx_globals.push(VMGlobal::import(ptr::null_mut()));
} else {
vmctx_globals.push(VMGlobal::definition(global));
}
}
vmctx_globals
}
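
Note that `vmctx` is stored inline in `Instance`, so JIT code passes around a `*mut VMContext` and the runtime recovers the owning `Instance` by subtracting `vmctx_offset()`. A sketch of that container-of computation (the real lookup is `VMContext::instance`):

unsafe fn instance_from_vmctx(vmctx: *mut VMContext) -> *mut Instance {
    // offset_of!(Instance, vmctx) is the byte distance from the start of the
    // Instance to its vmctx field; walking backwards recovers the base.
    (vmctx as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance
}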

View File

@@ -1,232 +0,0 @@
//! Support for invoking wasm functions from outside a wasm module.
use action::{ActionError, ActionOutcome, RuntimeValue};
use code::Code;
use cranelift_codegen::ir::InstBuilder;
use cranelift_codegen::{binemit, ir, isa, Context};
use cranelift_entity::EntityRef;
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};
use cranelift_wasm::FuncIndex;
use instance::Instance;
use signalhandlers::{ensure_eager_signal_handlers, ensure_full_signal_handlers, TrapContext};
use std::mem;
use std::ptr;
use std::vec::Vec;
use traphandlers::call_wasm;
use vmcontext::{VMContext, VMFunctionBody};
use wasmtime_environ::{CompileError, Export, Module, RelocSink};
/// Calls the given named function, passing the given arguments and
/// returning its results.
pub fn invoke(
code: &mut Code,
isa: &isa::TargetIsa,
module: &Module,
instance: &mut Instance,
function: &str,
args: &[RuntimeValue],
) -> Result<ActionOutcome, ActionError> {
let fn_index = match module.exports.get(function) {
Some(Export::Function(index)) => *index,
Some(_) => {
return Err(ActionError::Kind(format!(
"exported item \"{}\" is not a function",
function
)))
}
None => {
return Err(ActionError::Field(format!(
"no export named \"{}\"",
function
)))
}
};
invoke_by_index(code, isa, module, instance, fn_index, args)
}
/// Invoke the WebAssembly start function of the instance, if one is present.
pub fn invoke_start_function(
code: &mut Code,
isa: &isa::TargetIsa,
module: &Module,
instance: &mut Instance,
) -> Result<ActionOutcome, ActionError> {
if let Some(start_index) = module.start_func {
invoke_by_index(code, isa, module, instance, start_index, &[])
} else {
// No start function, just return nothing.
Ok(ActionOutcome::Returned { values: vec![] })
}
}
/// Calls the given indexed function, passing the given arguments and
/// returning its results.
pub fn invoke_by_index(
code: &mut Code,
isa: &isa::TargetIsa,
module: &Module,
instance: &mut Instance,
fn_index: FuncIndex,
args: &[RuntimeValue],
) -> Result<ActionOutcome, ActionError> {
let exec_code_buf = match module.defined_func_index(fn_index) {
Some(def_fn_index) => instance
.get_allocated_function(def_fn_index)
.ok_or_else(|| ActionError::Index(def_fn_index.index() as u64))?
.as_ptr(),
None => instance
.get_imported_function(fn_index)
.ok_or_else(|| ActionError::Index(fn_index.index() as u64))?,
};
let sig = &module.signatures[module.functions[fn_index]];
// TODO: Move this out to be done once per thread rather than per call.
let mut traps = TrapContext {
triedToInstallSignalHandlers: false,
haveSignalHandlers: false,
};
// Rather than writing inline assembly to jump to the code region, we use the fact that
// the Rust ABI for calling a function with no arguments and no return values matches the one
// of the generated code. Thanks to this, we can transmute the code region into a first-class
// Rust function and call it.
// Ensure that our signal handlers are ready for action.
ensure_eager_signal_handlers();
ensure_full_signal_handlers(&mut traps);
if !traps.haveSignalHandlers {
return Err(ActionError::Resource(
"failed to install signal handlers".to_string(),
));
}
call_through_wrapper(code, isa, exec_code_buf, instance, args, &sig)
}
fn call_through_wrapper(
code: &mut Code,
isa: &isa::TargetIsa,
callee: *const VMFunctionBody,
instance: &mut Instance,
args: &[RuntimeValue],
sig: &ir::Signature,
) -> Result<ActionOutcome, ActionError> {
let vmctx = instance.vmctx() as *mut VMContext;
for (index, value) in args.iter().enumerate() {
assert_eq!(value.value_type(), sig.params[index].value_type);
}
let wrapper_sig = ir::Signature::new(isa.frontend_config().default_call_conv);
let mut context = Context::new();
context.func = ir::Function::with_name_signature(ir::ExternalName::user(0, 0), wrapper_sig);
let value_size = 8;
let mut results_vec = Vec::new();
results_vec.resize(sig.returns.len(), 0i64);
let mut fn_builder_ctx = FunctionBuilderContext::new();
{
let mut builder = FunctionBuilder::new(&mut context.func, &mut fn_builder_ctx);
let block0 = builder.create_ebb();
builder.append_ebb_params_for_function_params(block0);
builder.switch_to_block(block0);
builder.seal_block(block0);
let mut callee_args = Vec::new();
let pointer_type = isa.pointer_type();
let callee_value = builder.ins().iconst(pointer_type, callee as i64);
for value in args {
match value {
RuntimeValue::I32(i) => {
callee_args.push(builder.ins().iconst(ir::types::I32, i64::from(*i)))
}
RuntimeValue::I64(i) => callee_args.push(builder.ins().iconst(ir::types::I64, *i)),
RuntimeValue::F32(i) => callee_args.push(
builder
.ins()
.f32const(ir::immediates::Ieee32::with_bits(*i)),
),
RuntimeValue::F64(i) => callee_args.push(
builder
.ins()
.f64const(ir::immediates::Ieee64::with_bits(*i)),
),
}
}
let vmctx_value = builder.ins().iconst(pointer_type, vmctx as i64);
callee_args.push(vmctx_value);
let new_sig = builder.import_signature(sig.clone());
// TODO: It's possible to make this a direct call. We just need Cranelift
// to support functions declared with an immediate integer address.
let call = builder
.ins()
.call_indirect(new_sig, callee_value, &callee_args);
let results = builder.func.dfg.inst_results(call).to_vec();
let results_vec_value = builder
.ins()
.iconst(pointer_type, results_vec.as_ptr() as i64);
let mut mflags = ir::MemFlags::new();
mflags.set_notrap();
mflags.set_aligned();
for (i, r) in results.iter().enumerate() {
builder
.ins()
.store(mflags, *r, results_vec_value, (i * value_size) as i32);
}
builder.ins().return_(&[]);
}
let mut code_buf: Vec<u8> = Vec::new();
let mut reloc_sink = RelocSink::new();
let mut trap_sink = binemit::NullTrapSink {};
context
.compile_and_emit(isa, &mut code_buf, &mut reloc_sink, &mut trap_sink)
.map_err(|error| ActionError::Compile(CompileError::Codegen(error)))?;
assert!(reloc_sink.func_relocs.is_empty());
let exec_code_buf = code
.allocate_copy_of_byte_slice(&code_buf)
.map_err(ActionError::Resource)?
.as_ptr();
code.publish();
let func: fn() = unsafe { mem::transmute(exec_code_buf) };
Ok(match call_wasm(func) {
Ok(()) => {
let mut values = Vec::with_capacity(sig.returns.len());
for (index, abi_param) in sig.returns.iter().enumerate() {
let v = unsafe {
let ptr = results_vec.as_ptr().add(index * value_size);
match abi_param.value_type {
ir::types::I32 => RuntimeValue::I32(ptr::read(ptr as *const i32)),
ir::types::I64 => RuntimeValue::I64(ptr::read(ptr as *const i64)),
ir::types::F32 => RuntimeValue::F32(ptr::read(ptr as *const u32)),
ir::types::F64 => RuntimeValue::F64(ptr::read(ptr as *const u64)),
other => panic!("unsupported value type {:?}", other),
}
};
values.push(v);
}
ActionOutcome::Returned { values }
}
Err(message) => ActionOutcome::Trapped { message },
})
}
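
The closing transmute in `call_through_wrapper` relies on the generated wrapper taking no arguments and returning nothing, so its ABI matches a plain Rust `fn()`. A minimal round-trip showing just the mechanics (no machine code is emitted here; an ordinary function's address stands in for the executable buffer):

use std::mem;

fn wrapper_body() {
    println!("inside the wrapper");
}

fn main() {
    // Pretend this pointer came from executable memory, as in `invoke` above.
    let exec_code_buf = wrapper_body as *const u8;
    let func: fn() = unsafe { mem::transmute(exec_code_buf) };
    func();
}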

View File

@@ -28,18 +28,12 @@ extern crate cranelift_codegen;
extern crate cranelift_entity;
extern crate cranelift_frontend;
extern crate cranelift_wasm;
extern crate errno;
extern crate region;
extern crate wasmtime_environ;
extern crate wasmtime_runtime;
#[cfg(not(feature = "std"))]
#[macro_use]
extern crate alloc;
#[macro_use]
extern crate lazy_static;
extern crate libc;
#[macro_use]
extern crate memoffset;
extern crate cast;
extern crate failure;
#[macro_use]
extern crate failure_derive;
@@ -47,30 +41,13 @@ extern crate failure_derive;
mod action;
mod code;
mod export;
mod get;
mod imports;
mod instance;
mod invoke;
mod libcalls;
mod link;
mod memory;
mod mmap;
mod sig_registry;
mod signalhandlers;
mod table;
mod traphandlers;
mod vmcontext;
mod world;
pub use action::{ActionError, ActionOutcome, RuntimeValue};
pub use code::Code;
pub use export::{ExportValue, NullResolver, Resolver};
pub use get::{get, get_by_index};
pub use instance::Instance;
pub use invoke::{invoke, invoke_by_index, invoke_start_function};
pub use export::{Export, NullResolver, Resolver};
pub use link::link_module;
pub use traphandlers::{call_wasm, LookupCodeSegment, RecordTrap, Unwind};
pub use vmcontext::{VMContext, VMFunctionBody, VMGlobal, VMMemory, VMTable};
pub use world::InstanceWorld;
#[cfg(not(feature = "std"))]

View File

@@ -1,77 +0,0 @@
//! Runtime library calls. Note that the JIT may sometimes perform these inline
//! rather than calling them, particularly when CPUs have special instructions
//! which compute them directly.
pub extern "C" fn wasmtime_f32_ceil(x: f32) -> f32 {
x.ceil()
}
pub extern "C" fn wasmtime_f32_floor(x: f32) -> f32 {
x.floor()
}
pub extern "C" fn wasmtime_f32_trunc(x: f32) -> f32 {
x.trunc()
}
#[allow(clippy::float_arithmetic, clippy::float_cmp)]
pub extern "C" fn wasmtime_f32_nearest(x: f32) -> f32 {
// Rust doesn't have a nearest function, so do it manually.
if x == 0.0 {
// Preserve the sign of zero.
x
} else {
// Nearest is either ceil or floor, depending on which is nearer; ties round to even.
let u = x.ceil();
let d = x.floor();
let um = (x - u).abs();
let dm = (x - d).abs();
if um < dm
|| (um == dm && {
let h = u / 2.;
h.floor() == h
})
{
u
} else {
d
}
}
}
pub extern "C" fn wasmtime_f64_ceil(x: f64) -> f64 {
x.ceil()
}
pub extern "C" fn wasmtime_f64_floor(x: f64) -> f64 {
x.floor()
}
pub extern "C" fn wasmtime_f64_trunc(x: f64) -> f64 {
x.trunc()
}
#[allow(clippy::float_arithmetic, clippy::float_cmp)]
pub extern "C" fn wasmtime_f64_nearest(x: f64) -> f64 {
// Rust doesn't have a nearest function, so do it manually.
if x == 0.0 {
// Preserve the sign of zero.
x
} else {
// Nearest is either ceil or floor, depending on which is nearer; ties round to even.
let u = x.ceil();
let d = x.floor();
let um = (x - u).abs();
let dm = (x - d).abs();
if um < dm
|| (um == dm && {
let h = u / 2.;
h.floor() == h
})
{
u
} else {
d
}
}
}
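
A few spot checks make the round-half-to-even behavior concrete (a test sketch, not part of the commit):

#[cfg(test)]
mod test_nearest {
    use super::{wasmtime_f32_nearest, wasmtime_f64_nearest};

    #[test]
    fn ties_round_to_even() {
        assert_eq!(wasmtime_f32_nearest(2.5), 2.0); // tie: round down to even
        assert_eq!(wasmtime_f32_nearest(3.5), 4.0); // tie: round up to even
        assert_eq!(wasmtime_f64_nearest(-2.5), -2.0);
        assert_eq!(wasmtime_f64_nearest(0.6), 1.0); // not a tie: nearest wins
        // The sign of zero is preserved.
        assert!(wasmtime_f32_nearest(-0.0).is_sign_negative());
    }
}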

View File

@@ -1,18 +1,16 @@
use cranelift_codegen::binemit::Reloc;
use cranelift_entity::{EntityRef, PrimaryMap};
use cranelift_wasm::{
DefinedFuncIndex, Global, GlobalInit, Memory, MemoryIndex, Table, TableElementType,
};
use export::{ExportValue, Resolver};
use imports::Imports;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::{DefinedFuncIndex, Global, GlobalInit, Memory, Table, TableElementType};
use export::{Export, FunctionExport, Resolver};
use std::ptr::write_unaligned;
use std::string::String;
use std::vec::Vec;
use vmcontext::VMContext;
use vmcontext::{VMFunctionBody, VMGlobal, VMMemory, VMTable};
use wasmtime_environ::{
MemoryPlan, MemoryStyle, Module, Relocation, RelocationTarget, Relocations, TablePlan,
TableStyle,
};
use wasmtime_runtime::libcalls;
use wasmtime_runtime::{Imports, VMFunctionBody, VMGlobalImport, VMMemoryImport, VMTableImport};
/// A link error, such as incompatible or unmatched imports/exports.
#[derive(Fail, Debug)]
@@ -22,29 +20,28 @@ pub struct LinkError(String);
/// Links a module that has been compiled with `compiled_module` in `wasmtime-environ`.
pub fn link_module(
module: &Module,
allocated_functions: &PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>,
allocated_functions: &PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,
relocations: Relocations,
resolver: &mut Resolver,
) -> Result<Imports, LinkError> {
let mut imports = Imports::new();
let mut function_imports = PrimaryMap::with_capacity(module.imported_funcs.len());
for (index, (ref module_name, ref field)) in module.imported_funcs.iter() {
match resolver.resolve(module_name, field) {
Some(export_value) => match export_value {
ExportValue::Function { address, signature } => {
Export::Function(FunctionExport { address, signature }) => {
let import_signature = &module.signatures[module.functions[index]];
if signature != *import_signature {
// TODO: If the difference is in the calling convention,
// we could emit a wrapper function to fix it up.
return Err(LinkError(
format!("{}/{}: exported function with signature {} incompatible with function import with signature {}",
module_name, field,
signature, import_signature)
));
}
imports.functions.push(address);
function_imports.push(address);
}
ExportValue::Table { .. }
| ExportValue::Memory { .. }
| ExportValue::Global { .. } => {
Export::Table { .. } | Export::Memory { .. } | Export::Global { .. } => {
return Err(LinkError(format!(
"{}/{}: export not compatible with function import",
module_name, field
@@ -60,41 +57,15 @@ pub fn link_module(
}
}
for (index, (ref module_name, ref field)) in module.imported_globals.iter() {
match resolver.resolve(module_name, field) {
Some(export_value) => match export_value {
ExportValue::Global { address, global } => {
let imported_global = module.globals[index];
if !is_global_compatible(&global, &imported_global) {
return Err(LinkError(format!(
"{}/{}: exported global incompatible with global import",
module_name, field
)));
}
imports.globals.push(address as *mut VMGlobal);
}
ExportValue::Table { .. }
| ExportValue::Memory { .. }
| ExportValue::Function { .. } => {
return Err(LinkError(format!(
"{}/{}: exported global incompatible with global import",
module_name, field
)));
}
},
None => {
return Err(LinkError(format!(
"no provided import global for {}/{}",
module_name, field
)))
}
}
}
let mut table_imports = PrimaryMap::with_capacity(module.imported_tables.len());
for (index, (ref module_name, ref field)) in module.imported_tables.iter() {
match resolver.resolve(module_name, field) {
Some(export_value) => match export_value {
ExportValue::Table { address, table } => {
Export::Table {
address,
vmctx,
table,
} => {
let import_table = &module.table_plans[index];
if !is_table_compatible(&table, import_table) {
return Err(LinkError(format!(
@@ -102,11 +73,12 @@ pub fn link_module(
module_name, field,
)));
}
imports.tables.push(address as *mut VMTable);
table_imports.push(VMTableImport {
from: address,
vmctx,
});
}
ExportValue::Global { .. }
| ExportValue::Memory { .. }
| ExportValue::Function { .. } => {
Export::Global { .. } | Export::Memory { .. } | Export::Function { .. } => {
return Err(LinkError(format!(
"{}/{}: export not compatible with table import",
module_name, field
@@ -122,10 +94,15 @@ pub fn link_module(
}
}
let mut memory_imports = PrimaryMap::with_capacity(module.imported_memories.len());
for (index, (ref module_name, ref field)) in module.imported_memories.iter() {
match resolver.resolve(module_name, field) {
Some(export_value) => match export_value {
ExportValue::Memory { address, memory } => {
Export::Memory {
address,
vmctx,
memory,
} => {
let import_memory = &module.memory_plans[index];
if !is_memory_compatible(&memory, import_memory) {
return Err(LinkError(format!(
@@ -133,11 +110,12 @@ pub fn link_module(
module_name, field
)));
}
imports.memories.push(address as *mut VMMemory);
memory_imports.push(VMMemoryImport {
from: address,
vmctx,
});
}
ExportValue::Table { .. }
| ExportValue::Global { .. }
| ExportValue::Function { .. } => {
Export::Table { .. } | Export::Global { .. } | Export::Function { .. } => {
return Err(LinkError(format!(
"{}/{}: export not compatible with memory import",
module_name, field
@@ -153,6 +131,43 @@ pub fn link_module(
}
}
let mut global_imports = PrimaryMap::with_capacity(module.imported_globals.len());
for (index, (ref module_name, ref field)) in module.imported_globals.iter() {
match resolver.resolve(module_name, field) {
Some(export_value) => match export_value {
Export::Global { address, global } => {
let imported_global = module.globals[index];
if !is_global_compatible(&global, &imported_global) {
return Err(LinkError(format!(
"{}/{}: exported global incompatible with global import",
module_name, field
)));
}
global_imports.push(VMGlobalImport { from: address });
}
Export::Table { .. } | Export::Memory { .. } | Export::Function { .. } => {
return Err(LinkError(format!(
"{}/{}: exported global incompatible with global import",
module_name, field
)));
}
},
None => {
return Err(LinkError(format!(
"no provided import global for {}/{}",
module_name, field
)))
}
}
}
let imports = Imports::new(
function_imports,
table_imports,
memory_imports,
global_imports,
);
// Apply relocations, now that we have virtual addresses for everything.
relocate(&imports, allocated_functions, relocations, module);
@@ -277,22 +292,27 @@ fn is_memory_compatible(exported: &MemoryPlan, imported: &MemoryPlan) -> bool {
/// Performs the relocations inside the function bytecode, provided the necessary metadata.
fn relocate(
imports: &Imports,
allocated_functions: &PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>,
allocated_functions: &PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,
relocations: PrimaryMap<DefinedFuncIndex, Vec<Relocation>>,
module: &Module,
) {
for (i, function_relocs) in relocations.into_iter() {
for r in function_relocs {
use self::libcalls::*;
let target_func_address: usize = match r.reloc_target {
RelocationTarget::UserFunc(index) => match module.defined_func_index(index) {
Some(f) => allocated_functions[f].0 as usize,
Some(f) => {
let fatptr: *const [VMFunctionBody] = allocated_functions[f];
fatptr as *const VMFunctionBody as usize
}
None => imports.functions[index] as usize,
},
RelocationTarget::MemoryGrow => wasmtime_memory_grow as usize,
RelocationTarget::MemorySize => wasmtime_memory_size as usize,
RelocationTarget::Memory32Grow => wasmtime_memory32_grow as usize,
RelocationTarget::Memory32Size => wasmtime_memory32_size as usize,
RelocationTarget::ImportedMemory32Grow => wasmtime_imported_memory32_grow as usize,
RelocationTarget::ImportedMemory32Size => wasmtime_imported_memory32_size as usize,
RelocationTarget::LibCall(libcall) => {
use cranelift_codegen::ir::LibCall::*;
use libcalls::*;
match libcall {
CeilF32 => wasmtime_f32_ceil as usize,
FloorF32 => wasmtime_f32_floor as usize,
@@ -308,7 +328,8 @@ fn relocate(
}
};
let body = allocated_functions[i].0;
let fatptr: *const [VMFunctionBody] = allocated_functions[i];
let body = fatptr as *const VMFunctionBody;
match r.reloc {
#[cfg(target_pointer_width = "64")]
Reloc::Abs8 => unsafe {
@@ -340,21 +361,3 @@ fn relocate(
extern "C" {
pub fn __rust_probestack();
}
/// The implementation of memory.grow.
extern "C" fn wasmtime_memory_grow(size: u32, memory_index: u32, vmctx: *mut VMContext) -> u32 {
let instance = unsafe { (&mut *vmctx).instance() };
let memory_index = MemoryIndex::new(memory_index as usize);
instance
.memory_grow(memory_index, size)
.unwrap_or(u32::max_value())
}
/// The implementation of memory.size.
extern "C" fn wasmtime_memory_size(memory_index: u32, vmctx: *mut VMContext) -> u32 {
let instance = unsafe { (&mut *vmctx).instance() };
let memory_index = MemoryIndex::new(memory_index as usize);
instance.memory_size(memory_index)
}
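
In isolation, applying an `Abs8` relocation means writing the 64-bit target address plus addend into the function body at the relocation's offset. A standalone sketch with hypothetical values (the final check assumes a little-endian host):

use std::ptr::write_unaligned;

fn main() {
    let mut body = vec![0u8; 16];
    let target_func_address: usize = 0x1122_3344_5566_7788;
    let r_offset = 4usize; // relocation offset within the function body
    let r_addend = 0i64;
    unsafe {
        let reloc_address = body.as_mut_ptr().add(r_offset);
        let reloc_abs = (target_func_address as u64).wrapping_add(r_addend as u64);
        write_unaligned(reloc_address as *mut u64, reloc_abs);
    }
    assert_eq!(&body[4..12], &0x1122_3344_5566_7788u64.to_le_bytes());
}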

View File

@@ -1,143 +0,0 @@
//! Memory management for linear memories.
//!
//! `LinearMemory` is to WebAssembly linear memories what `Table` is to WebAssembly tables.
use mmap::Mmap;
use region;
use std::string::String;
use vmcontext::VMMemory;
use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};
/// A linear memory instance.
#[derive(Debug)]
pub struct LinearMemory {
mmap: Mmap,
current: u32,
maximum: Option<u32>,
offset_guard_size: usize,
}
impl LinearMemory {
/// Create a new linear memory instance with specified minimum and maximum number of pages.
pub fn new(plan: &MemoryPlan) -> Result<Self, String> {
// Neither `minimum` nor `maximum` may exceed `65536` pages.
assert!(plan.memory.minimum <= WASM_MAX_PAGES);
assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= WASM_MAX_PAGES);
let offset_guard_bytes = plan.offset_guard_size as usize;
let minimum_pages = match plan.style {
MemoryStyle::Dynamic => plan.memory.minimum,
MemoryStyle::Static { bound } => {
assert!(bound >= plan.memory.minimum);
bound
}
} as usize;
let minimum_bytes = minimum_pages.checked_mul(WASM_PAGE_SIZE as usize).unwrap();
let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
let mapped_pages = plan.memory.minimum as usize;
let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
let unmapped_pages = minimum_pages - mapped_pages;
let unmapped_bytes = unmapped_pages * WASM_PAGE_SIZE as usize;
let inaccessible_bytes = unmapped_bytes + offset_guard_bytes;
let mmap = Mmap::with_size(request_bytes)?;
// Make the unmapped and offset-guard pages inaccessible.
unsafe {
region::protect(
mmap.as_ptr().add(mapped_bytes),
inaccessible_bytes,
region::Protection::None,
)
}
.expect("unable to make memory inaccessible");
Ok(Self {
mmap,
current: plan.memory.minimum,
maximum: plan.memory.maximum,
offset_guard_size: offset_guard_bytes,
})
}
/// Returns the number of allocated wasm pages.
pub fn size(&self) -> u32 {
self.current
}
/// Grow memory by the specified number of pages.
///
/// Returns `None` if memory can't be grown by the specified number
/// of pages.
pub fn grow(&mut self, delta: u32) -> Option<u32> {
let new_pages = match self.current.checked_add(delta) {
Some(new_pages) => new_pages,
// Linear memory size overflow.
None => return None,
};
let prev_pages = self.current;
if let Some(maximum) = self.maximum {
if new_pages > maximum {
// Linear memory size would exceed the declared maximum.
return None;
}
}
// Wasm linear memories are never allowed to grow beyond what is
// indexable. If the memory has no maximum, enforce the greatest
// limit here.
if new_pages >= WASM_MAX_PAGES {
// Linear memory size would exceed the index range.
return None;
}
let new_bytes = new_pages as usize * WASM_PAGE_SIZE as usize;
if new_bytes > self.mmap.len() - self.offset_guard_size {
// If we have no maximum, this is a "dynamic" heap, and it's allowed to move.
assert!(self.maximum.is_none());
let guard_bytes = self.offset_guard_size;
let request_bytes = new_bytes.checked_add(guard_bytes)?;
let mut new_mmap = Mmap::with_size(request_bytes).ok()?;
// Make the offset-guard pages inaccessible.
unsafe {
region::protect(
new_mmap.as_ptr().add(new_bytes),
guard_bytes,
region::Protection::None,
)
}
.expect("unable to make memory inaccessible");
let copy_len = self.mmap.len() - self.offset_guard_size;
new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.mmap.as_slice()[..copy_len]);
self.mmap = new_mmap;
}
self.current = new_pages;
Some(prev_pages)
}
/// Return a `VMMemory` for exposing the memory to JIT code.
pub fn vmmemory(&mut self) -> VMMemory {
VMMemory::definition(self.mmap.as_mut_ptr(), self.mmap.len())
}
}
impl AsRef<[u8]> for LinearMemory {
fn as_ref(&self) -> &[u8] {
self.mmap.as_slice()
}
}
impl AsMut<[u8]> for LinearMemory {
fn as_mut(&mut self) -> &mut [u8] {
self.mmap.as_mut_slice()
}
}
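
The grow path above reduces to this page arithmetic; a standalone sketch of the three checks (overflow, declared maximum, 32-bit index range):

const WASM_MAX_PAGES: u32 = 0x1_0000; // 65536 pages of 64 KiB = 4 GiB

fn checked_grow(current: u32, maximum: Option<u32>, delta: u32) -> Option<u32> {
    // Mirrors the checks in `grow` above.
    let new_pages = current.checked_add(delta)?; // linear memory size overflow
    if let Some(maximum) = maximum {
        if new_pages > maximum {
            return None; // would exceed the declared maximum
        }
    }
    if new_pages >= WASM_MAX_PAGES {
        return None; // would exceed the 32-bit index range
    }
    Some(current) // like memory.grow, returns the previous size in pages
}

fn main() {
    assert_eq!(checked_grow(1, Some(10), 2), Some(1));
    assert_eq!(checked_grow(9, Some(10), 2), None);
    assert_eq!(checked_grow(u32::max_value(), None, 1), None); // overflow
}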

View File

@@ -1,136 +0,0 @@
//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.
use errno;
use libc;
use region;
use std::ptr;
use std::slice;
use std::string::String;
/// Round `size` up to the nearest multiple of `page_size`.
fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
(size + (page_size - 1)) & !(page_size - 1)
}
/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
#[derive(Debug)]
pub struct Mmap {
ptr: *mut u8,
len: usize,
}
impl Mmap {
pub fn new() -> Self {
Self {
ptr: ptr::null_mut(),
len: 0,
}
}
/// Create a new `Mmap` pointing to at least `size` bytes of memory,
/// suitably sized and aligned for memory protection.
#[cfg(not(target_os = "windows"))]
pub fn with_size(size: usize) -> Result<Self, String> {
let page_size = region::page::size();
let alloc_size = round_up_to_page_size(size, page_size);
let ptr = unsafe {
libc::mmap(
ptr::null_mut(),
alloc_size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_PRIVATE | libc::MAP_ANON,
-1,
0,
)
};
if ptr as isize == -1isize {
Err(errno::errno().to_string())
} else {
Ok(Self {
ptr: ptr as *mut u8,
len: alloc_size,
})
}
}
#[cfg(target_os = "windows")]
pub fn with_size(size: usize) -> Result<Self, String> {
use winapi::um::memoryapi::VirtualAlloc;
use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_READWRITE};
let page_size = region::page::size();
// VirtualAlloc always rounds up to the next multiple of the page size
let ptr = unsafe {
VirtualAlloc(
ptr::null_mut(),
size,
MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE,
)
};
if !ptr.is_null() {
Ok(Self {
ptr: ptr as *mut u8,
len: round_up_to_page_size(size, page_size),
})
} else {
Err(errno::errno().to_string())
}
}
pub fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.ptr, self.len) }
}
pub fn as_mut_slice(&mut self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.ptr, self.len) }
}
pub fn as_ptr(&self) -> *const u8 {
self.ptr
}
pub fn as_mut_ptr(&mut self) -> *mut u8 {
self.ptr
}
pub fn len(&self) -> usize {
self.len
}
}
impl Drop for Mmap {
#[cfg(not(target_os = "windows"))]
fn drop(&mut self) {
if !self.ptr.is_null() {
let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.len) };
assert_eq!(r, 0, "munmap failed: {}", errno::errno());
}
}
#[cfg(target_os = "windows")]
fn drop(&mut self) {
if !self.ptr.is_null() {
use winapi::um::memoryapi::VirtualFree;
use winapi::um::winnt::MEM_RELEASE;
let r = unsafe { VirtualFree(self.ptr as *mut _, 0, MEM_RELEASE) };
// VirtualFree returns nonzero on success, and MEM_RELEASE requires a size of 0.
assert_ne!(r, 0);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_round_up_to_page_size() {
assert_eq!(round_up_to_page_size(0, 4096), 0);
assert_eq!(round_up_to_page_size(1, 4096), 4096);
assert_eq!(round_up_to_page_size(4096, 4096), 4096);
assert_eq!(round_up_to_page_size(4097, 4096), 8192);
}
}
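
A usage sketch for `Mmap::with_size`, exercising the rounding and zero-fill guarantees described above:

fn main() -> Result<(), String> {
    let mut mmap = Mmap::with_size(1)?; // rounds up to a full page
    assert!(mmap.len() >= region::page::size());
    assert!(mmap.as_slice().iter().all(|&b| b == 0)); // freshly zero-filled
    mmap.as_mut_slice()[0] = 42;
    Ok(())
}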

View File

@@ -1,52 +0,0 @@
//! Implement a registry of function signatures, for fast indirect call
//! signature checking.
use cast;
use cranelift_codegen::ir;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::SignatureIndex;
use std::collections::{hash_map, HashMap};
use vmcontext::VMSignatureId;
#[derive(Debug)]
pub struct SignatureRegistry {
signature_hash: HashMap<ir::Signature, VMSignatureId>,
signature_ids: PrimaryMap<SignatureIndex, VMSignatureId>,
}
impl SignatureRegistry {
pub fn new() -> Self {
Self {
signature_hash: HashMap::new(),
signature_ids: PrimaryMap::new(),
}
}
pub fn vmsignature_ids(&mut self) -> *mut VMSignatureId {
self.signature_ids.values_mut().into_slice().as_mut_ptr()
}
/// Register the given signature.
pub fn register(&mut self, sig_index: SignatureIndex, sig: &ir::Signature) {
// TODO: Refactor this interface so that we're not passing in redundant
// information.
debug_assert_eq!(sig_index.index(), self.signature_ids.len());
use cranelift_entity::EntityRef;
let len = self.signature_hash.len();
let sig_id = match self.signature_hash.entry(sig.clone()) {
hash_map::Entry::Occupied(entry) => *entry.get(),
hash_map::Entry::Vacant(entry) => {
let sig_id = cast::u32(len).unwrap();
entry.insert(sig_id);
sig_id
}
};
self.signature_ids.push(sig_id);
}
/// Return the identifying runtime index for the given signature.
pub fn lookup(&mut self, sig_index: SignatureIndex) -> VMSignatureId {
self.signature_ids[sig_index]
}
}
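
The point of the registry is that structurally identical signatures collapse to one `VMSignatureId`, so the caller-side check for `call_indirect` is a single integer comparison. A sketch against a freshly created registry (indices must be registered in order, per the `debug_assert_eq!` above):

fn demo(registry: &mut SignatureRegistry, sig: &ir::Signature) {
    use cranelift_entity::EntityRef;
    use cranelift_wasm::SignatureIndex;
    // Register the same signature under two module-level indices...
    registry.register(SignatureIndex::new(0), sig);
    registry.register(SignatureIndex::new(1), sig);
    // ...and both resolve to the same runtime id.
    assert_eq!(
        registry.lookup(SignatureIndex::new(0)),
        registry.lookup(SignatureIndex::new(1))
    );
}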

View File

@@ -1,101 +0,0 @@
//! Interface to low-level signal-handling mechanisms.
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
use std::borrow::{Borrow, BorrowMut};
use std::sync::RwLock;
include!(concat!(env!("OUT_DIR"), "/signalhandlers.rs"));
struct InstallState {
tried: bool,
success: bool,
}
impl InstallState {
fn new() -> Self {
Self {
tried: false,
success: false,
}
}
}
lazy_static! {
static ref EAGER_INSTALL_STATE: RwLock<InstallState> = RwLock::new(InstallState::new());
static ref LAZY_INSTALL_STATE: RwLock<InstallState> = RwLock::new(InstallState::new());
}
/// This function performs the low-overhead signal handler initialization that we
/// want to do eagerly to ensure a more-deterministic global process state. This
/// is especially relevant for signal handlers since handler ordering depends on
/// installation order: the wasm signal handler must run *before* the other crash
handlers, and since POSIX signal handlers work LIFO, this function needs to be
/// called at the end of the startup process, after other handlers have been
/// installed. This function can thus be called multiple times, having no effect
/// after the first call.
pub fn ensure_eager_signal_handlers() {
let mut locked = EAGER_INSTALL_STATE.write().unwrap();
let state = locked.borrow_mut();
if state.tried {
return;
}
state.tried = true;
assert!(!state.success);
if !unsafe { EnsureEagerSignalHandlers() } {
return;
}
state.success = true;
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
fn ensure_darwin_mach_ports() {
let mut locked = LAZY_INSTALL_STATE.write().unwrap();
let state = locked.borrow_mut();
if state.tried {
return;
}
state.tried = true;
assert!(!state.success);
if !unsafe { EnsureDarwinMachPorts() } {
return;
}
state.success = true;
}
/// Assuming `ensure_eager_signal_handlers` has already been called,
/// this function performs the full installation of signal handlers which must
/// be performed per-thread. This operation may incur some overhead and
/// so should be done only when needed to use wasm.
pub fn ensure_full_signal_handlers(cx: &mut TrapContext) {
if cx.triedToInstallSignalHandlers {
return;
}
cx.triedToInstallSignalHandlers = true;
assert!(!cx.haveSignalHandlers);
{
let locked = EAGER_INSTALL_STATE.read().unwrap();
let state = locked.borrow();
assert!(state.tried);
if !state.success {
return;
}
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
ensure_darwin_mach_ports();
cx.haveSignalHandlers = true;
}
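
The install-once pattern shared by both states above, reduced to its essentials (a sketch, not the generated-bindings code):

use std::sync::RwLock;

struct InstallState {
    tried: bool,
    success: bool,
}

fn ensure_once(state: &RwLock<InstallState>, install: impl FnOnce() -> bool) {
    let mut s = state.write().unwrap();
    if s.tried {
        return; // later calls are no-ops whether or not installation succeeded
    }
    s.tried = true;
    s.success = install();
}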

View File

@@ -1,58 +0,0 @@
//! Memory management for tables.
//!
//! `Table` is to WebAssembly tables what `LinearMemory` is to WebAssembly linear memories.
use cranelift_wasm::TableElementType;
use vmcontext::{VMCallerCheckedAnyfunc, VMTable};
use wasmtime_environ::{TablePlan, TableStyle};
/// A table instance.
#[derive(Debug)]
pub struct Table {
vec: Vec<VMCallerCheckedAnyfunc>,
maximum: Option<u32>,
}
impl Table {
/// Create a new table instance with specified minimum and maximum number of elements.
pub fn new(plan: &TablePlan) -> Self {
match plan.table.ty {
TableElementType::Func => (),
TableElementType::Val(ty) => {
unimplemented!("tables of types other than anyfunc ({})", ty)
}
};
match plan.style {
TableStyle::CallerChecksSignature => {
let mut vec = Vec::new();
vec.resize(
plan.table.minimum as usize,
VMCallerCheckedAnyfunc::default(),
);
Self {
vec,
maximum: plan.table.maximum,
}
}
}
}
/// Return a `VMTable` for exposing the table to JIT code.
pub fn vmtable(&mut self) -> VMTable {
VMTable::definition(self.vec.as_mut_ptr() as *mut u8, self.vec.len())
}
}
impl AsRef<[VMCallerCheckedAnyfunc]> for Table {
fn as_ref(&self) -> &[VMCallerCheckedAnyfunc] {
self.vec.as_slice()
}
}
impl AsMut<[VMCallerCheckedAnyfunc]> for Table {
fn as_mut(&mut self) -> &mut [VMCallerCheckedAnyfunc] {
self.vec.as_mut_slice()
}
}

View File

@@ -1,102 +0,0 @@
//! WebAssembly trap handling, which is built on top of the lower-level
//! signalhandling mechanisms.
use libc::c_int;
use signalhandlers::{jmp_buf, CodeSegment};
use std::cell::{Cell, RefCell};
use std::mem;
use std::ptr;
use std::string::String;
// Currently we use setjmp/longjmp to unwind out of a signal handler
// and back to the point where WebAssembly was called (via `call_wasm`).
// This works because WebAssembly code currently does not use any EH
// or require any cleanups, and we never unwind through non-wasm frames.
// In the future, we'll likely replace this with fancier stack unwinding.
extern "C" {
fn setjmp(env: *mut jmp_buf) -> c_int;
fn longjmp(env: *const jmp_buf, val: c_int) -> !;
}
#[derive(Copy, Clone, Debug)]
struct TrapData {
pc: *const u8,
}
thread_local! {
static TRAP_DATA: Cell<TrapData> = Cell::new(TrapData { pc: ptr::null() });
static JMP_BUFS: RefCell<Vec<jmp_buf>> = RefCell::new(Vec::new());
}
/// Record the Trap code and wasm bytecode offset in TLS somewhere
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn RecordTrap(pc: *const u8, _codeSegment: *const CodeSegment) {
// TODO: Look up the wasm bytecode offset and trap code and record them instead.
TRAP_DATA.with(|data| data.set(TrapData { pc }));
}
/// Initiate an unwind.
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn Unwind() {
JMP_BUFS.with(|bufs| {
let buf = bufs.borrow_mut().pop().unwrap();
unsafe { longjmp(&buf, 1) };
})
}
/// Return the CodeSegment containing the given pc, if any exist in the process.
/// This method does not take a lock.
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn LookupCodeSegment(_pc: *const ::std::os::raw::c_void) -> *const CodeSegment {
// TODO: Implement this.
-1isize as *const CodeSegment
}
/// A simple guard to ensure that `JMP_BUFS` is reset when we're done.
struct ScopeGuard {
orig_num_bufs: usize,
}
impl ScopeGuard {
fn new() -> Self {
Self {
orig_num_bufs: JMP_BUFS.with(|bufs| bufs.borrow().len()),
}
}
}
impl Drop for ScopeGuard {
fn drop(&mut self) {
let orig_num_bufs = self.orig_num_bufs;
JMP_BUFS.with(|bufs| {
bufs.borrow_mut()
.resize(orig_num_bufs, unsafe { mem::zeroed() })
});
}
}
/// Call the wasm function pointed to by `f`.
pub fn call_wasm<F>(f: F) -> Result<(), String>
where
F: FnOnce(),
{
// In case wasm code calls Rust that panics and unwinds past this point,
// ensure that JMP_BUFS is unwound to its incoming state.
let _guard = ScopeGuard::new();
JMP_BUFS.with(|bufs| {
let mut buf = unsafe { mem::uninitialized() };
if unsafe { setjmp(&mut buf) } != 0 {
return TRAP_DATA.with(|data| Err(format!("wasm trap at {:?}", data.get().pc)));
}
bufs.borrow_mut().push(buf);
f();
Ok(())
})
}
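
A usage sketch for `call_wasm`: the closure runs with a `longjmp` target installed, so a trap raised in the signal handler unwinds back out as an `Err`:

fn run(func: fn()) {
    match call_wasm(func) {
        Ok(()) => println!("wasm returned normally"),
        Err(message) => println!("wasm trapped: {}", message),
    }
}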

View File

@@ -1,601 +0,0 @@
//! This file declares `VMContext` and several related structs which contain
//! fields that JIT code accesses directly.
use cranelift_entity::EntityRef;
use cranelift_wasm::{Global, GlobalIndex, GlobalInit, MemoryIndex, TableIndex};
use instance::Instance;
use std::fmt;
use std::ptr;
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
#[repr(C)]
pub struct VMFunctionBody(u8);
#[cfg(test)]
mod test_vmfunction_body {
use super::VMFunctionBody;
use std::mem::size_of;
#[test]
fn check_vmfunction_body_offsets() {
assert_eq!(size_of::<VMFunctionBody>(), 1);
}
}
/// The fields a JIT needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryDefinition {
/// The start address.
base: *mut u8,
/// The current size of linear memory in bytes.
current_length: usize,
}
#[cfg(test)]
mod test_vmmemory_definition {
use super::VMMemoryDefinition;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmmemory_definition_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMMemoryDefinition>(),
usize::from(offsets.size_of_vmmemory_definition())
);
assert_eq!(
offset_of!(VMMemoryDefinition, base),
usize::from(offsets.vmmemory_definition_base())
);
assert_eq!(
offset_of!(VMMemoryDefinition, current_length),
usize::from(offsets.vmmemory_definition_current_length())
);
}
}
/// The fields a JIT needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
/// A pointer to the imported memory description.
from: *mut VMMemoryDefinition,
}
#[cfg(test)]
mod test_vmmemory_import {
use super::VMMemoryImport;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmmemory_import_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMMemoryImport>(),
usize::from(offsets.size_of_vmmemory_import())
);
assert_eq!(
offset_of!(VMMemoryImport, from),
usize::from(offsets.vmmemory_import_from())
);
}
}
/// The main fields a JIT needs to access to utilize a WebAssembly linear
/// memory. It must know whether the memory is defined within the instance
/// or imported.
#[repr(C)]
pub union VMMemory {
/// A linear memory defined within the instance.
definition: VMMemoryDefinition,
/// An imported linear memory.
import: VMMemoryImport,
}
#[cfg(test)]
mod test_vmmemory {
use super::VMMemory;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmmemory_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMMemory>(),
usize::from(offsets.size_of_vmmemory())
);
}
}
impl VMMemory {
/// Construct a `VMMemoryDefinition` variant of `VMMemory`.
pub fn definition(base: *mut u8, current_length: usize) -> Self {
Self {
definition: VMMemoryDefinition {
base,
current_length,
},
}
}
/// Construct a `VMMemoryImport` variant of `VMMemory`.
pub fn import(from: *mut VMMemoryDefinition) -> Self {
Self {
import: VMMemoryImport { from },
}
}
/// Get the underlying `VMMemoryDefinition`.
pub unsafe fn get_definition(&mut self, is_import: bool) -> &mut VMMemoryDefinition {
if is_import {
&mut *self.import.from
} else {
&mut self.definition
}
}
}
impl fmt::Debug for VMMemory {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "VMMemory {{")?;
write!(f, " definition: {:?},", unsafe { self.definition })?;
write!(f, " import: {:?},", unsafe { self.import })?;
write!(f, "}}")?;
Ok(())
}
}
/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug, Copy, Clone)]
#[repr(C, align(8))]
pub struct VMGlobalDefinition {
storage: [u8; 8],
// If more elements are added here, remember to add offset_of tests below!
}
#[cfg(test)]
mod test_vmglobal_definition {
use super::VMGlobalDefinition;
use std::mem::{align_of, size_of};
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmglobal_definition_alignment() {
assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
}
#[test]
fn check_vmglobal_definition_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMGlobalDefinition>(),
usize::from(offsets.size_of_vmglobal_definition())
);
}
}
impl VMGlobalDefinition {
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i32(&mut self) -> &mut i32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut i32)
}
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i64(&mut self) -> &mut i64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut i64)
}
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32(&mut self) -> &mut f32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut f32)
}
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32_bits(&mut self) -> &mut u32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut u32)
}
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64(&mut self) -> &mut f64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut f64)
}
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64_bits(&mut self) -> &mut u64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut u64)
}
}
/// The fields a JIT needs to access to utilize a WebAssembly global
/// variable imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
/// A pointer to the imported global variable description.
from: *mut VMGlobalDefinition,
}
#[cfg(test)]
mod test_vmglobal_import {
use super::VMGlobalImport;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmglobal_import_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMGlobalImport>(),
usize::from(offsets.size_of_vmglobal_import())
);
assert_eq!(
offset_of!(VMGlobalImport, from),
usize::from(offsets.vmglobal_import_from())
);
}
}
/// The main fields a JIT needs to access to utilize a WebAssembly global
/// variable. It must know whether the global variable is defined within the
/// instance or imported.
#[repr(C)]
pub union VMGlobal {
/// A global variable defined within the instance.
definition: VMGlobalDefinition,
/// An imported global variable.
import: VMGlobalImport,
}
#[cfg(test)]
mod test_vmglobal {
use super::VMGlobal;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmglobal_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMGlobal>(),
usize::from(offsets.size_of_vmglobal())
);
}
}
impl VMGlobal {
/// Construct a `VMGlobalDefinition` variant of `VMGlobal`.
pub fn definition(global: &Global) -> Self {
let mut result = VMGlobalDefinition { storage: [0; 8] };
match global.initializer {
GlobalInit::I32Const(x) => *unsafe { result.as_i32() } = x,
GlobalInit::I64Const(x) => *unsafe { result.as_i64() } = x,
GlobalInit::F32Const(x) => *unsafe { result.as_f32_bits() } = x,
GlobalInit::F64Const(x) => *unsafe { result.as_f64_bits() } = x,
GlobalInit::GetGlobal(_x) => unimplemented!("globals init with get_global"),
GlobalInit::Import => panic!("attempting to initialize imported global"),
}
Self { definition: result }
}
/// Construct a `VMGlobalImport` variant of `VMGlobal`.
pub fn import(from: *mut VMGlobalDefinition) -> Self {
Self {
import: VMGlobalImport { from },
}
}
/// Get the underlying `VMGlobalDefinition`.
pub unsafe fn get_definition(&mut self, is_import: bool) -> &mut VMGlobalDefinition {
if is_import {
&mut *self.import.from
} else {
&mut self.definition
}
}
}
impl fmt::Debug for VMGlobal {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "VMGlobal {{")?;
write!(f, " definition: {:?},", unsafe { self.definition })?;
write!(f, " import: {:?},", unsafe { self.import })?;
write!(f, "}}")?;
Ok(())
}
}
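
A round-trip sketch tying `VMGlobal::definition` to the accessors above: initialize a defined i32 global, then read it back through `get_definition` the way the global-access support code does (a test sketch; `Global`'s fields are as used elsewhere in this commit):

#[cfg(test)]
mod test_vmglobal_roundtrip {
    use super::VMGlobal;
    use cranelift_codegen::ir;
    use cranelift_wasm::{Global, GlobalInit};

    #[test]
    fn i32_roundtrip() {
        let global = Global {
            ty: ir::types::I32,
            mutability: true,
            initializer: GlobalInit::I32Const(7),
        };
        // `false`: this is a defined global, not an import.
        let mut vmglobal = VMGlobal::definition(&global);
        unsafe {
            assert_eq!(*vmglobal.get_definition(false).as_i32(), 7);
        }
    }
}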
/// The fields a JIT needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
base: *mut u8,
current_elements: usize,
}
#[cfg(test)]
mod test_vmtable_definition {
use super::VMTableDefinition;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmtable_definition_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMTableDefinition>(),
usize::from(offsets.size_of_vmtable_definition())
);
assert_eq!(
offset_of!(VMTableDefinition, base),
usize::from(offsets.vmtable_definition_base())
);
assert_eq!(
offset_of!(VMTableDefinition, current_elements),
usize::from(offsets.vmtable_definition_current_elements())
);
}
}
/// The fields a JIT needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
/// A pointer to the imported table description.
from: *mut VMTableDefinition,
}
#[cfg(test)]
mod test_vmtable_import {
use super::VMTableImport;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmtable_import_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMTableImport>(),
usize::from(offsets.size_of_vmtable_import())
);
assert_eq!(
offset_of!(VMTableImport, from),
usize::from(offsets.vmtable_import_from())
);
}
}
/// The main fields a JIT needs to access to utilize a WebAssembly table.
/// It must know whether the table is defined within the instance
/// or imported.
#[repr(C)]
pub union VMTable {
/// A table defined within the instance.
definition: VMTableDefinition,
/// An imported table.
import: VMTableImport,
}
#[cfg(test)]
mod test_vmtable {
use super::VMTable;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmtable_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(size_of::<VMTable>(), usize::from(offsets.size_of_vmtable()));
}
}
impl VMTable {
/// Construct a `VMTableDefinition` variant of `VMTable`.
pub fn definition(base: *mut u8, current_elements: usize) -> Self {
Self {
definition: VMTableDefinition {
base,
current_elements,
},
}
}
/// Construct a `VMTableImport` variant of `VMTable`.
pub fn import(from: *mut VMTableDefinition) -> Self {
Self {
import: VMTableImport { from },
}
}
/// Get the underlying `VMTableDefinition`.
pub unsafe fn get_definition(&mut self, is_import: bool) -> &mut VMTableDefinition {
if is_import {
&mut *self.import.from
} else {
&mut self.definition
}
}
}
impl fmt::Debug for VMTable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "VMTable {{")?;
write!(f, " definition: {:?},", unsafe { self.definition })?;
write!(f, " import: {:?},", unsafe { self.import })?;
write!(f, "}}")?;
Ok(())
}
}
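/// Example usage (a sketch added for illustration; not part of the
/// original commit): a table defined in-instance, backed here by a stack
/// array standing in for the instance's element storage.
#[cfg(test)]
mod example_vmtable_usage {
    use super::VMTable;

    #[test]
    fn define_and_read_back() {
        let mut storage = [0u8; 32];
        let mut table = VMTable::definition(storage.as_mut_ptr(), 4);
        // `is_import: false`: the descriptor lives in this `VMTable`.
        let def = unsafe { table.get_definition(false) };
        assert_eq!(def.current_elements, 4);
        assert_eq!(def.base, storage.as_mut_ptr());
    }
}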
/// The type of the `type_id` field in `VMCallerCheckedAnyfunc`.
pub type VMSignatureId = u32;
#[cfg(test)]
mod test_vmsignature_id {
use super::VMSignatureId;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmsignature_id_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMSignatureId>(),
usize::from(offsets.size_of_vmsignature_id())
);
}
}
/// The VM caller-checked "anyfunc" record, for caller-side signature checking.
/// It consists of the actual function pointer and a signature id to be checked
/// by the caller.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMCallerCheckedAnyfunc {
/// The address of the callee's function body.
pub func_ptr: *const VMFunctionBody,
/// The signature id to be checked by the caller.
pub type_id: VMSignatureId,
// If more elements are added here, remember to add offset_of tests below!
}
#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
use super::VMCallerCheckedAnyfunc;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmcaller_checked_anyfunc_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMCallerCheckedAnyfunc>(),
usize::from(offsets.size_of_vmcaller_checked_anyfunc())
);
assert_eq!(
offset_of!(VMCallerCheckedAnyfunc, func_ptr),
usize::from(offsets.vmcaller_checked_anyfunc_func_ptr())
);
assert_eq!(
offset_of!(VMCallerCheckedAnyfunc, type_id),
usize::from(offsets.vmcaller_checked_anyfunc_type_id())
);
}
}
impl Default for VMCallerCheckedAnyfunc {
fn default() -> Self {
Self {
func_ptr: ptr::null(),
type_id: 0,
}
}
}
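/// Example (a sketch added for illustration; not part of the original
/// commit): the check a caller performs before an indirect call through a
/// `VMCallerCheckedAnyfunc`. The `expected` id would come from the
/// caller's module; the actual jump through `func_ptr` is emitted by the
/// JIT and is not shown here.
#[cfg(test)]
mod example_caller_check {
    use super::{VMCallerCheckedAnyfunc, VMSignatureId};

    fn signatures_match(anyfunc: &VMCallerCheckedAnyfunc, expected: VMSignatureId) -> bool {
        // A null `func_ptr` marks an uninitialized table slot; treat it as
        // a mismatch so the caller traps instead of jumping to null.
        !anyfunc.func_ptr.is_null() && anyfunc.type_id == expected
    }

    #[test]
    fn default_never_matches() {
        assert!(!signatures_match(&VMCallerCheckedAnyfunc::default(), 0));
    }
}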
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has pointers to the globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// TODO: The number of memories, globals, tables, and signature IDs does
/// not change dynamically, and pointer arrays are not indexed dynamically,
/// so these fields could all be contiguously allocated.
#[derive(Debug)]
#[repr(C)]
pub struct VMContext {
/// A pointer to an array of `VMMemory` instances, indexed by
/// WebAssembly memory index.
memories: *mut VMMemory,
/// A pointer to an array of `VMGlobal` instances, indexed by
/// WebAssembly global index.
globals: *mut VMGlobal,
/// A pointer to an array of `VMTable` instances, indexed by
/// WebAssembly table index.
tables: *mut VMTable,
/// Signature identifiers for signature-checking indirect calls.
signature_ids: *mut u32,
// If more elements are added here, remember to add offset_of tests below!
}
#[cfg(test)]
mod test_vmctx {
use super::VMContext;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmctx_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(size_of::<VMContext>(), usize::from(offsets.size_of_vmctx()));
assert_eq!(
offset_of!(VMContext, memories),
usize::from(offsets.vmctx_memories())
);
assert_eq!(
offset_of!(VMContext, globals),
usize::from(offsets.vmctx_globals())
);
assert_eq!(
offset_of!(VMContext, tables),
usize::from(offsets.vmctx_tables())
);
assert_eq!(
offset_of!(VMContext, signature_ids),
usize::from(offsets.vmctx_signature_ids())
);
}
}
impl VMContext {
/// Create a new `VMContext` instance.
pub fn new(
memories: *mut VMMemory,
globals: *mut VMGlobal,
tables: *mut VMTable,
signature_ids: *mut u32,
) -> Self {
Self {
memories,
globals,
tables,
signature_ids,
}
}
/// Return a mutable reference to global variable `index`.
pub unsafe fn global(&mut self, index: GlobalIndex) -> &mut VMGlobal {
&mut *self.globals.add(index.index())
}
/// Return a mutable reference to linear memory `index`.
pub unsafe fn memory(&mut self, index: MemoryIndex) -> &mut VMMemory {
&mut *self.memories.add(index.index())
}
/// Return a mutable reference to table `index`.
pub unsafe fn table(&mut self, index: TableIndex) -> &mut VMTable {
&mut *self.tables.add(index.index())
}
/// Return a mutable reference to the associated `Instance`.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn instance(&mut self) -> &mut Instance {
&mut *((self as *mut Self as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance)
}
}
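/// Example usage (a sketch added for illustration; not part of the
/// original commit): assemble a `VMContext` over a one-element globals
/// array and read the global back through it, much as JIT code does via
/// the `VMOffsets`-computed field offsets. The `EntityRef::new` index
/// construction is assumed from `cranelift_entity`.
#[cfg(test)]
mod example_vmctx_usage {
    use super::{VMContext, VMGlobal, VMGlobalDefinition};
    use cranelift_entity::EntityRef;
    use cranelift_wasm::GlobalIndex;
    use std::ptr;

    #[test]
    fn read_global_through_vmctx() {
        let mut globals = [VMGlobal {
            definition: VMGlobalDefinition { storage: [0; 8] },
        }];
        let mut vmctx = VMContext::new(
            ptr::null_mut(), // no memories in this sketch
            globals.as_mut_ptr(),
            ptr::null_mut(), // no tables
            ptr::null_mut(), // no signature ids
        );
        let global = unsafe { vmctx.global(GlobalIndex::new(0)) };
        // Freshly zeroed storage reads back as an i32 zero.
        assert_eq!(unsafe { *global.get_definition(false).as_i32() }, 0);
    }
}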

View File

@@ -1,17 +1,30 @@
use action::{ActionError, ActionOutcome, RuntimeValue};
use code::Code;
use cranelift_codegen::isa;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::{DefinedFuncIndex, GlobalIndex, MemoryIndex};
use cranelift_codegen::ir::InstBuilder;
use cranelift_codegen::Context;
use cranelift_codegen::{binemit, ir, isa};
use cranelift_entity::{BoxedSlice, EntityRef, PrimaryMap};
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};
use cranelift_wasm::{
DefinedFuncIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, FuncIndex,
GlobalIndex, MemoryIndex, TableIndex,
};
use export::Resolver;
use get::get;
use instance::Instance;
use invoke::{invoke, invoke_start_function};
use link::link_module;
use std::str;
use vmcontext::{VMFunctionBody, VMGlobal};
use std::cmp::max;
use std::collections::HashMap;
use std::slice;
use std::string::String;
use std::vec::Vec;
use std::{mem, ptr};
use wasmtime_environ::{
compile_module, Compilation, CompileError, Module, ModuleEnvironment, Tunables,
compile_module, Compilation, CompileError, Export, Module, ModuleEnvironment, RelocSink,
Tunables,
};
use wasmtime_runtime::{
wasmtime_call_trampoline, wasmtime_init_eager, wasmtime_init_finish, Instance, VMContext,
VMFunctionBody, VMGlobalDefinition, VMGlobalImport, VMMemoryDefinition, VMMemoryImport,
VMTableDefinition, VMTableImport,
};
/// A module, an instance of that module, and accompanying compilation artifacts.
@@ -20,10 +33,19 @@ use wasmtime_environ::{
pub struct InstanceWorld {
module: Module,
instance: Instance,
/// Pointers to functions in executable memory.
finished_functions: BoxedSlice<DefinedFuncIndex, *const VMFunctionBody>,
/// Trampolines for calling into JIT code.
trampolines: TrampolinePark,
}
impl InstanceWorld {
/// Create a new `InstanceWorld` by compiling the wasm module in `data` and instantiating it.
///
/// The resulting world's `finished_functions` holds the function bodies
/// which have been placed in executable memory and linked.
pub fn new(
code: &mut Code,
isa: &isa::TargetIsa,
@@ -33,57 +55,133 @@ impl InstanceWorld {
let mut module = Module::new();
// TODO: Allow the tunables to be overridden.
let tunables = Tunables::default();
let instance = {
// TODO: Untie this.
let ((mut compilation, relocations), lazy_data_initializers) = {
let (lazy_function_body_inputs, lazy_data_initializers) = {
let environ = ModuleEnvironment::new(isa, &mut module, tunables);
let translation = environ
.translate(&data)
.map_err(|error| ActionError::Compile(CompileError::Wasm(error)))?;
(
translation.lazy.function_body_inputs,
translation.lazy.data_initializers,
)
};
(
compile_module(&module, &lazy_function_body_inputs, isa)
.map_err(ActionError::Compile)?,
lazy_data_initializers,
)
};
let allocated_functions =
allocate_functions(code, compilation).map_err(ActionError::Resource)?;
let resolved = link_module(&module, &allocated_functions, relocations, resolver)
.map_err(ActionError::Link)?;
let mut instance = Instance::new(
&module,
allocated_functions,
&lazy_data_initializers,
resolved,
)
.map_err(ActionError::Resource)?;
// The WebAssembly spec specifies that the start function is
// invoked automatically at instantiation time.
match invoke_start_function(code, isa, &module, &mut instance)? {
ActionOutcome::Returned { .. } => {}
ActionOutcome::Trapped { message } => {
// Instantiation fails if the start function traps.
return Err(ActionError::Start(message));
}
}
instance
};
Ok(Self { module, instance })
let (compilation, relocations) = compile_module(&module, &lazy_function_body_inputs, isa)
.map_err(ActionError::Compile)?;
let allocated_functions =
allocate_functions(code, compilation).map_err(ActionError::Resource)?;
let imports = link_module(&module, &allocated_functions, relocations, resolver)
.map_err(ActionError::Link)?;
let finished_functions: BoxedSlice<DefinedFuncIndex, *const VMFunctionBody> =
allocated_functions
.into_iter()
.map(|(_index, allocated)| {
let fatptr: *const [VMFunctionBody] = *allocated;
fatptr as *const VMFunctionBody
})
.collect::<PrimaryMap<_, _>>()
.into_boxed_slice();
let instance = Instance::new(
&module,
&finished_functions,
imports,
&lazy_data_initializers,
)
.map_err(ActionError::Resource)?;
let fn_builder_ctx = FunctionBuilderContext::new();
let mut result = Self {
module,
instance,
finished_functions,
trampolines: TrampolinePark {
memo: HashMap::new(),
fn_builder_ctx,
},
};
// The WebAssembly spec specifies that the start function is
// invoked automatically at instantiation time.
match result.invoke_start_function(code, isa)? {
ActionOutcome::Returned { .. } => {}
ActionOutcome::Trapped { message } => {
// Instantiation fails if the start function traps.
return Err(ActionError::Start(message));
}
}
Ok(result)
}
fn get_imported_function(&self, index: FuncIndex) -> Option<*const VMFunctionBody> {
if index.index() < self.module.imported_funcs.len() {
Some(unsafe { self.instance.vmctx().imported_function(index) })
} else {
None
}
}
// TODO: Add an accessor for table elements.
#[allow(dead_code)]
fn get_imported_table(&self, index: TableIndex) -> Option<&VMTableImport> {
if index.index() < self.module.imported_tables.len() {
Some(unsafe { self.instance.vmctx().imported_table(index) })
} else {
None
}
}
fn get_imported_memory(&self, index: MemoryIndex) -> Option<&VMMemoryImport> {
if index.index() < self.module.imported_memories.len() {
Some(unsafe { self.instance.vmctx().imported_memory(index) })
} else {
None
}
}
fn get_imported_global(&self, index: GlobalIndex) -> Option<&VMGlobalImport> {
if index.index() < self.module.imported_globals.len() {
Some(unsafe { self.instance.vmctx().imported_global(index) })
} else {
None
}
}
fn get_finished_function(&self, index: DefinedFuncIndex) -> Option<*const VMFunctionBody> {
self.finished_functions.get(index).cloned()
}
// TODO: Add an accessor for table elements.
#[allow(dead_code)]
fn get_defined_table(&self, index: DefinedTableIndex) -> Option<&VMTableDefinition> {
if self.module.table_index(index).index() < self.module.table_plans.len() {
Some(unsafe { self.instance.vmctx().table(index) })
} else {
None
}
}
fn get_defined_memory(&self, index: DefinedMemoryIndex) -> Option<&VMMemoryDefinition> {
if self.module.memory_index(index).index() < self.module.memory_plans.len() {
Some(unsafe { self.instance.vmctx().memory(index) })
} else {
None
}
}
fn get_defined_global(&self, index: DefinedGlobalIndex) -> Option<&VMGlobalDefinition> {
if self.module.global_index(index).index() < self.module.globals.len() {
Some(unsafe { self.instance.vmctx().global(index) })
} else {
None
}
}
/// Invoke a function in this `InstanceWorld` by name.
@@ -94,40 +192,362 @@ impl InstanceWorld {
function_name: &str,
args: &[RuntimeValue],
) -> Result<ActionOutcome, ActionError> {
invoke(
code,
isa,
&self.module,
&mut self.instance,
&function_name,
args,
)
let fn_index = match self.module.exports.get(function_name) {
Some(Export::Function(index)) => *index,
Some(_) => {
return Err(ActionError::Kind(format!(
"exported item \"{}\" is not a function",
function_name
)))
}
None => {
return Err(ActionError::Field(format!(
"no export named \"{}\"",
function_name
)))
}
};
self.invoke_by_index(code, isa, fn_index, args)
}
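// Usage sketch (hypothetical embedder code, not part of this commit;
// `world`, `code`, and `isa` are assumed to be set up as in `new` above):
//
//     let outcome = world.invoke(&mut code, &*isa, "add",
//                                &[RuntimeValue::I32(1), RuntimeValue::I32(2)])?;
//     match outcome {
//         ActionOutcome::Returned { values } => println!("returned {:?}", values),
//         ActionOutcome::Trapped { message } => println!("trapped: {}", message),
//     }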
/// Invoke the WebAssembly start function of the instance, if one is present.
fn invoke_start_function(
&mut self,
code: &mut Code,
isa: &isa::TargetIsa,
) -> Result<ActionOutcome, ActionError> {
if let Some(start_index) = self.module.start_func {
self.invoke_by_index(code, isa, start_index, &[])
} else {
// No start function, just return nothing.
Ok(ActionOutcome::Returned { values: vec![] })
}
}
/// Calls the given indexed function, passing the given arguments and
/// returning its results.
fn invoke_by_index(
&mut self,
code: &mut Code,
isa: &isa::TargetIsa,
fn_index: FuncIndex,
args: &[RuntimeValue],
) -> Result<ActionOutcome, ActionError> {
let callee_address = match self.module.defined_func_index(fn_index) {
Some(def_fn_index) => self
.get_finished_function(def_fn_index)
.ok_or_else(|| ActionError::Index(def_fn_index.index() as u64))?,
None => self
.get_imported_function(fn_index)
.ok_or_else(|| ActionError::Index(fn_index.index() as u64))?,
};
// Rather than writing inline assembly to jump to the code region, we rely on
// the fact that the Rust ABI for calling a function with no arguments and no
// return values matches that of the generated trampoline, so the runtime can
// call the trampoline as if it were an ordinary Rust function.
// Ensure that our signal handlers are ready for action.
wasmtime_init_eager();
wasmtime_init_finish(self.instance.vmctx_mut());
let signature = &self.module.signatures[self.module.functions[fn_index]];
let vmctx: *mut VMContext = self.instance.vmctx_mut();
for (index, value) in args.iter().enumerate() {
assert_eq!(value.value_type(), signature.params[index].value_type);
}
// TODO: Support values larger than u64.
let mut values_vec: Vec<u64> = Vec::new();
let value_size = mem::size_of::<u64>();
values_vec.resize(max(signature.params.len(), signature.returns.len()), 0u64);
// Store the argument values into `values_vec`.
for (index, arg) in args.iter().enumerate() {
unsafe {
let ptr = values_vec.as_mut_ptr().add(index);
match arg {
RuntimeValue::I32(x) => ptr::write(ptr as *mut i32, *x),
RuntimeValue::I64(x) => ptr::write(ptr as *mut i64, *x),
RuntimeValue::F32(x) => ptr::write(ptr as *mut u32, *x),
RuntimeValue::F64(x) => ptr::write(ptr as *mut u64, *x),
}
}
}
// Store the vmctx value into `values_vec`.
unsafe {
let ptr = values_vec.as_mut_ptr().add(args.len());
ptr::write(ptr as *mut usize, vmctx as usize)
}
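// Layout sketch (illustrative, assuming the `vmctx` parameter is appended
// last, as in the trampoline below): for a callee signature
// `(i32, i32, vmctx) -> i64`, `values_vec` has max(3, 1) = 3 u64 slots.
// The two wasm arguments occupy slots 0 and 1, the `vmctx` pointer is
// written to slot 2 (= args.len()), and after the call the i64 result is
// read back from slot 0.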
// Get the trampoline to call for this function.
let exec_code_buf =
self.trampolines
.get(code, isa, callee_address, &signature, value_size)?;
// Make all JIT code produced thus far executable.
code.publish();
// Call the trampoline.
if let Err(message) = unsafe {
wasmtime_call_trampoline(
exec_code_buf,
values_vec.as_mut_ptr() as *mut u8,
self.instance.vmctx_mut(),
)
} {
return Ok(ActionOutcome::Trapped { message });
}
// Load the return values out of `values_vec`.
let values = signature
.returns
.iter()
.enumerate()
.map(|(index, abi_param)| unsafe {
let ptr = values_vec.as_ptr().add(index);
match abi_param.value_type {
ir::types::I32 => RuntimeValue::I32(ptr::read(ptr as *const i32)),
ir::types::I64 => RuntimeValue::I64(ptr::read(ptr as *const i64)),
ir::types::F32 => RuntimeValue::F32(ptr::read(ptr as *const u32)),
ir::types::F64 => RuntimeValue::F64(ptr::read(ptr as *const u64)),
other => panic!("unsupported value type {:?}", other),
}
})
.collect();
Ok(ActionOutcome::Returned { values })
}
/// Read a global in this `InstanceWorld` by name.
pub fn get(&mut self, global_name: &str) -> Result<RuntimeValue, ActionError> {
get(&self.module, &mut self.instance, global_name)
pub fn get(&self, global_name: &str) -> Result<RuntimeValue, ActionError> {
let global_index = match self.module.exports.get(global_name) {
Some(Export::Global(index)) => *index,
Some(_) => {
return Err(ActionError::Kind(format!(
"exported item \"{}\" is not a global",
global_name
)))
}
None => {
return Err(ActionError::Field(format!(
"no export named \"{}\"",
global_name
)))
}
};
self.get_by_index(global_index)
}
/// Reads the value of the indexed global variable in `module`.
pub fn get_by_index(&self, global_index: GlobalIndex) -> Result<RuntimeValue, ActionError> {
let global_address = match self.module.defined_global_index(global_index) {
Some(def_global_index) => self
.get_defined_global(def_global_index)
.ok_or_else(|| ActionError::Index(def_global_index.index() as u64))?,
None => {
let from: *const VMGlobalDefinition = self
.get_imported_global(global_index)
.ok_or_else(|| ActionError::Index(global_index.index() as u64))?
.from;
from
}
};
let global_def = unsafe { &*global_address };
unsafe {
Ok(
match self
.module
.globals
.get(global_index)
.ok_or_else(|| ActionError::Index(global_index.index() as u64))?
.ty
{
ir::types::I32 => RuntimeValue::I32(*global_def.as_i32()),
ir::types::I64 => RuntimeValue::I64(*global_def.as_i64()),
ir::types::F32 => RuntimeValue::F32(*global_def.as_f32_bits()),
ir::types::F64 => RuntimeValue::F64(*global_def.as_f64_bits()),
other => {
return Err(ActionError::Type(format!(
"global with type {} not supported",
other
)))
}
},
)
}
}
/// Returns a slice of the contents of allocated linear memory.
pub fn inspect_memory(&self, memory_index: MemoryIndex, address: usize, len: usize) -> &[u8] {
self.instance.inspect_memory(memory_index, address, len)
}
pub fn inspect_memory(
&self,
memory_index: MemoryIndex,
address: usize,
len: usize,
) -> Result<&[u8], ActionError> {
let memory_address = match self.module.defined_memory_index(memory_index) {
Some(def_memory_index) => self
.get_defined_memory(def_memory_index)
.ok_or_else(|| ActionError::Index(def_memory_index.index() as u64))?,
None => {
let from: *const VMMemoryDefinition = self
.get_imported_memory(memory_index)
.ok_or_else(|| ActionError::Index(memory_index.index() as u64))?
.from;
from
}
};
let memory_def = unsafe { &*memory_address };
/// Shows the value of a global variable.
pub fn inspect_global(&self, global_index: GlobalIndex) -> &VMGlobal {
self.instance.inspect_global(global_index)
Ok(unsafe {
&slice::from_raw_parts(memory_def.base, memory_def.current_length)
[address..address + len]
})
}
}
fn allocate_functions(
code: &mut Code,
compilation: Compilation,
) -> Result<PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>, String> {
) -> Result<PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>, String> {
let mut result = PrimaryMap::with_capacity(compilation.functions.len());
for (_, body) in compilation.functions.into_iter() {
let slice = code.allocate_copy_of_byte_slice(body)?;
result.push((slice.as_mut_ptr(), slice.len()));
let fatptr: *mut [VMFunctionBody] = code.allocate_copy_of_byte_slice(body)?;
result.push(fatptr);
}
Ok(result)
}
struct TrampolinePark {
/// Memoized per-function trampolines.
memo: HashMap<*const VMFunctionBody, *const VMFunctionBody>,
/// The `FunctionBuilderContext`, shared between function compilations.
fn_builder_ctx: FunctionBuilderContext,
}
impl TrampolinePark {
fn get(
&mut self,
code: &mut Code,
isa: &isa::TargetIsa,
callee_address: *const VMFunctionBody,
signature: &ir::Signature,
value_size: usize,
) -> Result<*const VMFunctionBody, ActionError> {
use std::collections::hash_map::Entry::{Occupied, Vacant};
Ok(match self.memo.entry(callee_address) {
Occupied(entry) => *entry.get(),
Vacant(entry) => {
let body = make_trampoline(
&mut self.fn_builder_ctx,
code,
isa,
callee_address,
signature,
value_size,
)?;
entry.insert(body);
body
}
})
}
}
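/// Build and JIT-compile a trampoline that bridges from the Rust ABI to
/// the callee's wasm calling convention: it receives a pointer to a
/// `values_vec` plus a `vmctx`, loads the arguments from `values_vec`,
/// calls `callee_address`, and stores the results back into `values_vec`.
/// `TrampolinePark::get` above memoizes the result per callee address.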
fn make_trampoline(
fn_builder_ctx: &mut FunctionBuilderContext,
code: &mut Code,
isa: &isa::TargetIsa,
callee_address: *const VMFunctionBody,
signature: &ir::Signature,
value_size: usize,
) -> Result<*const VMFunctionBody, ActionError> {
let pointer_type = isa.pointer_type();
let mut wrapper_sig = ir::Signature::new(isa.frontend_config().default_call_conv);
// Add the `values_vec` parameter.
wrapper_sig.params.push(ir::AbiParam::new(pointer_type));
// Add the `vmctx` parameter.
wrapper_sig.params.push(ir::AbiParam::special(
pointer_type,
ir::ArgumentPurpose::VMContext,
));
let mut context = Context::new();
context.func = ir::Function::with_name_signature(ir::ExternalName::user(0, 0), wrapper_sig);
{
let mut builder = FunctionBuilder::new(&mut context.func, fn_builder_ctx);
let block0 = builder.create_ebb();
builder.append_ebb_params_for_function_params(block0);
builder.switch_to_block(block0);
builder.seal_block(block0);
let mut callee_args = Vec::new();
let pointer_type = isa.pointer_type();
let (values_vec_ptr_val, vmctx_ptr_val) = {
let params = builder.func.dfg.ebb_params(block0);
(params[0], params[1])
};
// Load the argument values out of `values_vec`.
let mflags = ir::MemFlags::trusted();
for (i, r) in signature.params.iter().enumerate() {
let value = match r.purpose {
ir::ArgumentPurpose::Normal => builder.ins().load(
r.value_type,
mflags,
values_vec_ptr_val,
(i * value_size) as i32,
),
ir::ArgumentPurpose::VMContext => vmctx_ptr_val,
other => panic!("unsupported argument purpose {}", other),
};
callee_args.push(value);
}
let new_sig = builder.import_signature(signature.clone());
// TODO: It's possible to make this a direct call. We just need Cranelift
// to support functions declared with an immediate integer address.
// ExternalName::Absolute(u64). Let's do it.
let callee_value = builder.ins().iconst(pointer_type, callee_address as i64);
let call = builder
.ins()
.call_indirect(new_sig, callee_value, &callee_args);
let results = builder.func.dfg.inst_results(call).to_vec();
// Store the return values into `values_vec`.
let mflags = ir::MemFlags::trusted();
for (i, r) in results.iter().enumerate() {
builder
.ins()
.store(mflags, *r, values_vec_ptr_val, (i * value_size) as i32);
}
builder.ins().return_(&[]);
builder.finalize()
}
let mut code_buf: Vec<u8> = Vec::new();
let mut reloc_sink = RelocSink::new();
let mut trap_sink = binemit::NullTrapSink {};
context
.compile_and_emit(isa, &mut code_buf, &mut reloc_sink, &mut trap_sink)
.map_err(|error| ActionError::Compile(CompileError::Codegen(error)))?;
assert!(reloc_sink.func_relocs.is_empty());
Ok(code
.allocate_copy_of_byte_slice(&code_buf)
.map_err(ActionError::Resource)?
.as_ptr())
}
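// For illustration (a hypothetical sketch, not output captured from this
// commit): for a callee signature `(i32, i32) -> i64` and 8-byte value
// slots, the wrapper built by `make_trampoline` has roughly this shape in
// Cranelift IR, where `notrap aligned` corresponds to `MemFlags::trusted()`:
//
//     function u0:0(i64, i64 vmctx) {
//         sig0 = (i32, i32, i64 vmctx) -> i64
//     ebb0(v0: i64, v1: i64):
//         v2 = load.i32 notrap aligned v0          ; arg 0 from values_vec+0
//         v3 = load.i32 notrap aligned v0+8        ; arg 1 from values_vec+8
//         v4 = iconst.i64 <callee_address>
//         v5 = call_indirect sig0, v4(v2, v3, v1)  ; vmctx passed through
//         store notrap aligned v5, v0              ; result back to values_vec+0
//         return
//     }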