Initial reorg.

This is largely the same as #305, but updated for the current tree.
Dan Gohman
2019-11-07 17:11:06 -08:00
parent 2c69546a24
commit 22641de629
351 changed files with 52 additions and 52 deletions

@@ -0,0 +1,104 @@
use crate::vmcontext::{
VMContext, VMFunctionBody, VMGlobalDefinition, VMMemoryDefinition, VMTableDefinition,
};
use cranelift_codegen::ir;
use cranelift_wasm::Global;
use wasmtime_environ::{MemoryPlan, TablePlan};
/// The value of an export passed from one instance to another.
#[derive(Debug, Clone)]
pub enum Export {
/// A function export value.
Function {
/// The address of the native-code function.
address: *const VMFunctionBody,
/// Pointer to the containing `VMContext`.
vmctx: *mut VMContext,
/// The function signature declaration, used for compatibility checking.
signature: ir::Signature,
},
/// A table export value.
Table {
/// The address of the table descriptor.
definition: *mut VMTableDefinition,
/// Pointer to the containing `VMContext`.
vmctx: *mut VMContext,
/// The table declaration, used for compatibility checking.
table: TablePlan,
},
/// A memory export value.
Memory {
/// The address of the memory descriptor.
definition: *mut VMMemoryDefinition,
/// Pointer to the containing `VMContext`.
vmctx: *mut VMContext,
/// The memory declaration, used for compatibility checking.
memory: MemoryPlan,
},
/// A global export value.
Global {
/// The address of the global storage.
definition: *mut VMGlobalDefinition,
/// Pointer to the containing `VMContext`.
vmctx: *mut VMContext,
/// The global declaration, used for compatibility checking.
global: Global,
},
}
impl Export {
/// Construct a function export value.
pub fn function(
address: *const VMFunctionBody,
vmctx: *mut VMContext,
signature: ir::Signature,
) -> Self {
Self::Function {
address,
vmctx,
signature,
}
}
/// Construct a table export value.
pub fn table(
definition: *mut VMTableDefinition,
vmctx: *mut VMContext,
table: TablePlan,
) -> Self {
Self::Table {
definition,
vmctx,
table,
}
}
/// Construct a memory export value.
pub fn memory(
definition: *mut VMMemoryDefinition,
vmctx: *mut VMContext,
memory: MemoryPlan,
) -> Self {
Self::Memory {
definition,
vmctx,
memory,
}
}
/// Construct a global export value.
pub fn global(
definition: *mut VMGlobalDefinition,
vmctx: *mut VMContext,
global: Global,
) -> Self {
Self::Global {
definition,
vmctx,
global,
}
}
}
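
A minimal usage sketch (illustrative only): the signature uses cranelift's `CallConv::SystemV`, and null pointers stand in for the native-code address and owning `VMContext`, which in practice come from a compiled and instantiated module.

use core::ptr;
use cranelift_codegen::{ir, isa};

// Hypothetical stand-ins; real values are produced by an Instance.
let sig = ir::Signature::new(isa::CallConv::SystemV);
let export = Export::function(ptr::null(), ptr::null_mut(), sig);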

@@ -0,0 +1,54 @@
use crate::instance::InstanceHandle;
use crate::vmcontext::{VMFunctionImport, VMGlobalImport, VMMemoryImport, VMTableImport};
use crate::HashSet;
use cranelift_entity::{BoxedSlice, PrimaryMap};
use cranelift_wasm::{FuncIndex, GlobalIndex, MemoryIndex, TableIndex};
/// Resolved import pointers.
#[derive(Clone)]
pub struct Imports {
/// The set of instances that the imports depend on.
pub dependencies: HashSet<InstanceHandle>,
/// Resolved addresses for imported functions.
pub functions: BoxedSlice<FuncIndex, VMFunctionImport>,
/// Resolved addresses for imported tables.
pub tables: BoxedSlice<TableIndex, VMTableImport>,
/// Resolved addresses for imported memories.
pub memories: BoxedSlice<MemoryIndex, VMMemoryImport>,
/// Resolved addresses for imported globals.
pub globals: BoxedSlice<GlobalIndex, VMGlobalImport>,
}
impl Imports {
/// Construct a new `Imports` instance.
pub fn new(
dependencies: HashSet<InstanceHandle>,
function_imports: PrimaryMap<FuncIndex, VMFunctionImport>,
table_imports: PrimaryMap<TableIndex, VMTableImport>,
memory_imports: PrimaryMap<MemoryIndex, VMMemoryImport>,
global_imports: PrimaryMap<GlobalIndex, VMGlobalImport>,
) -> Self {
Self {
dependencies,
functions: function_imports.into_boxed_slice(),
tables: table_imports.into_boxed_slice(),
memories: memory_imports.into_boxed_slice(),
globals: global_imports.into_boxed_slice(),
}
}
/// Construct a new `Imports` instance with no imports.
pub fn none() -> Self {
Self {
dependencies: HashSet::new(),
functions: PrimaryMap::new().into_boxed_slice(),
tables: PrimaryMap::new().into_boxed_slice(),
memories: PrimaryMap::new().into_boxed_slice(),
globals: PrimaryMap::new().into_boxed_slice(),
}
}
}
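
For a module with no imports, `Imports::none()` gives the empty set; a minimal sketch:

let imports = Imports::none();
// No instance dependencies and all import maps are empty.
assert!(imports.dependencies.is_empty());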

File diff suppressed because it is too large.

@@ -0,0 +1,120 @@
//! The GDB JIT compilation interface: a low-level module that exposes the
//! `__jit_debug_register_code()` and `__jit_debug_descriptor` symbols used to
//! register and unregister generated object images with debuggers.
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::ptr;
#[repr(C)]
struct JITCodeEntry {
next_entry: *mut JITCodeEntry,
prev_entry: *mut JITCodeEntry,
symfile_addr: *const u8,
symfile_size: u64,
}
const JIT_NOACTION: u32 = 0;
const JIT_REGISTER_FN: u32 = 1;
const JIT_UNREGISTER_FN: u32 = 2;
#[repr(C)]
struct JITDescriptor {
version: u32,
action_flag: u32,
relevant_entry: *mut JITCodeEntry,
first_entry: *mut JITCodeEntry,
}
#[no_mangle]
#[used]
static mut __jit_debug_descriptor: JITDescriptor = JITDescriptor {
version: 1,
action_flag: JIT_NOACTION,
relevant_entry: ptr::null_mut(),
first_entry: ptr::null_mut(),
};
#[no_mangle]
#[inline(never)]
extern "C" fn __jit_debug_register_code() {
// Hack to not allow inlining even when Rust wants to do it in release mode.
let x = 3;
unsafe {
core::ptr::read_volatile(&x);
}
}
/// Registration of a JIT image.
pub struct GdbJitImageRegistration {
entry: *mut JITCodeEntry,
file: Vec<u8>,
}
impl GdbJitImageRegistration {
/// Registers a JIT image using `__jit_debug_register_code`.
pub fn register(file: Vec<u8>) -> Self {
Self {
entry: unsafe { register_gdb_jit_image(&file) },
file,
}
}
/// Returns the JIT image used in this registration.
pub fn file(&self) -> &[u8] {
&self.file
}
}
impl Drop for GdbJitImageRegistration {
fn drop(&mut self) {
unsafe {
unregister_gdb_jit_image(self.entry);
}
}
}
unsafe fn register_gdb_jit_image(file: &[u8]) -> *mut JITCodeEntry {
// Create a code entry for the file, which gives the start and size of the symbol file.
let entry = Box::into_raw(Box::new(JITCodeEntry {
next_entry: __jit_debug_descriptor.first_entry,
prev_entry: ptr::null_mut(),
symfile_addr: file.as_ptr(),
symfile_size: file.len() as u64,
}));
// Add it to the linked list in the JIT descriptor.
if !__jit_debug_descriptor.first_entry.is_null() {
(*__jit_debug_descriptor.first_entry).prev_entry = entry;
}
__jit_debug_descriptor.first_entry = entry;
// Point the relevant_entry field of the descriptor at the entry.
__jit_debug_descriptor.relevant_entry = entry;
// Set action_flag to JIT_REGISTER and call __jit_debug_register_code.
__jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
__jit_debug_register_code();
__jit_debug_descriptor.action_flag = JIT_NOACTION;
__jit_debug_descriptor.relevant_entry = ptr::null_mut();
entry
}
unsafe fn unregister_gdb_jit_image(entry: *mut JITCodeEntry) {
// Remove the code entry corresponding to the code from the linked list.
if !(*entry).prev_entry.is_null() {
(*(*entry).prev_entry).next_entry = (*entry).next_entry;
} else {
__jit_debug_descriptor.first_entry = (*entry).next_entry;
}
if !(*entry).next_entry.is_null() {
(*(*entry).next_entry).prev_entry = (*entry).prev_entry;
}
// Point the relevant_entry field of the descriptor at the code entry.
__jit_debug_descriptor.relevant_entry = entry;
// Set action_flag to JIT_UNREGISTER and call __jit_debug_register_code.
__jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
__jit_debug_register_code();
__jit_debug_descriptor.action_flag = JIT_NOACTION;
__jit_debug_descriptor.relevant_entry = ptr::null_mut();
let _box = Box::from_raw(entry);
}
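
A hedged usage sketch; `load_debug_object()` is a hypothetical helper that yields a complete in-memory object image (for example an ELF file carrying DWARF) and is not part of this crate.

let image: Vec<u8> = load_debug_object(); // hypothetical helper
let registration = GdbJitImageRegistration::register(image);
assert!(!registration.file().is_empty());
// Dropping the registration unregisters the image from the debugger's list.
drop(registration);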

crates/runtime/src/lib.rs

@@ -0,0 +1,73 @@
//! Runtime library support for Wasmtime.
#![deny(missing_docs, trivial_numeric_casts, unused_extern_crates)]
#![warn(unused_import_braces)]
#![cfg_attr(feature = "std", deny(unstable_features))]
#![cfg_attr(feature = "clippy", plugin(clippy(conf_file = "../../clippy.toml")))]
#![cfg_attr(
feature = "cargo-clippy",
allow(clippy::new_without_default, clippy::new_without_default_derive)
)]
#![cfg_attr(
feature = "cargo-clippy",
warn(
clippy::float_arithmetic,
clippy::mut_mut,
clippy::nonminimal_bool,
clippy::option_map_unwrap_or,
clippy::option_map_unwrap_or_else,
clippy::print_stdout,
clippy::unicode_not_nfc,
clippy::use_self
)
)]
#![cfg_attr(not(feature = "std"), no_std)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate memoffset;
extern crate alloc;
mod export;
mod imports;
mod instance;
mod jit_int;
mod memory;
mod mmap;
mod sig_registry;
mod signalhandlers;
mod table;
mod trap_registry;
mod traphandlers;
mod vmcontext;
pub mod libcalls;
pub use crate::export::Export;
pub use crate::imports::Imports;
pub use crate::instance::{InstanceHandle, InstantiationError, LinkError};
pub use crate::jit_int::GdbJitImageRegistration;
pub use crate::mmap::Mmap;
pub use crate::sig_registry::SignatureRegistry;
pub use crate::signalhandlers::{wasmtime_init_eager, wasmtime_init_finish};
pub use crate::trap_registry::{get_mut_trap_registry, get_trap_registry, TrapRegistrationGuard};
pub use crate::traphandlers::{wasmtime_call, wasmtime_call_trampoline};
pub use crate::vmcontext::{
VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport, VMGlobalDefinition,
VMGlobalImport, VMInvokeArgument, VMMemoryDefinition, VMMemoryImport, VMSharedSignatureIndex,
VMTableDefinition, VMTableImport,
};
#[cfg(not(feature = "std"))]
use hashbrown::{hash_map, HashMap, HashSet};
#[cfg(feature = "std")]
use std::collections::{hash_map, HashMap, HashSet};
#[cfg(not(feature = "std"))]
use spin::{RwLock, RwLockReadGuard, RwLockWriteGuard};
#[cfg(feature = "std")]
use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
/// Version number of this crate.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

@@ -0,0 +1,139 @@
//! Runtime library calls. Note that wasm compilers may sometimes perform these
//! inline rather than calling them, particularly when CPUs have special
//! instructions which compute them directly.
use crate::vmcontext::VMContext;
use cranelift_wasm::{DefinedMemoryIndex, MemoryIndex};
/// Implementation of f32.ceil
pub extern "C" fn wasmtime_f32_ceil(x: f32) -> f32 {
x.ceil()
}
/// Implementation of f32.floor
pub extern "C" fn wasmtime_f32_floor(x: f32) -> f32 {
x.floor()
}
/// Implementation of f32.trunc
pub extern "C" fn wasmtime_f32_trunc(x: f32) -> f32 {
x.trunc()
}
/// Implementation of f32.nearest
#[allow(clippy::float_arithmetic, clippy::float_cmp)]
pub extern "C" fn wasmtime_f32_nearest(x: f32) -> f32 {
// Rust doesn't have a nearest function, so do it manually.
if x == 0.0 {
// Preserve the sign of zero.
x
} else {
// Nearest is either ceil or floor depending on which is nearest or even.
let u = x.ceil();
let d = x.floor();
let um = (x - u).abs();
let dm = (x - d).abs();
if um < dm
|| (um == dm && {
let h = u / 2.;
h.floor() == h
})
{
u
} else {
d
}
}
}
/// Implementation of f64.ceil
pub extern "C" fn wasmtime_f64_ceil(x: f64) -> f64 {
x.ceil()
}
/// Implementation of f64.floor
pub extern "C" fn wasmtime_f64_floor(x: f64) -> f64 {
x.floor()
}
/// Implementation of f64.trunc
pub extern "C" fn wasmtime_f64_trunc(x: f64) -> f64 {
x.trunc()
}
/// Implementation of f64.nearest
#[allow(clippy::float_arithmetic, clippy::float_cmp)]
pub extern "C" fn wasmtime_f64_nearest(x: f64) -> f64 {
// Rust doesn't have a nearest function, so do it manually.
if x == 0.0 {
// Preserve the sign of zero.
x
} else {
// Nearest is either ceil or floor depending on which is nearest or even.
let u = x.ceil();
let d = x.floor();
let um = (x - u).abs();
let dm = (x - d).abs();
if um < dm
|| (um == dm && {
let h = u / 2.;
h.floor() == h
})
{
u
} else {
d
}
}
}
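
The tie-breaking branch above implements round-half-to-even, matching wasm's `fN.nearest`; a small sketch of the expected results:

// Ties round to the even neighbor; other values go to the closer one.
assert_eq!(wasmtime_f32_nearest(2.5), 2.0);
assert_eq!(wasmtime_f32_nearest(3.5), 4.0);
assert_eq!(wasmtime_f64_nearest(0.25), 0.0);
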
/// Implementation of memory.grow for locally-defined 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_memory32_grow(
vmctx: *mut VMContext,
delta: u32,
memory_index: u32,
) -> u32 {
let instance = (&mut *vmctx).instance();
let memory_index = DefinedMemoryIndex::from_u32(memory_index);
instance
.memory_grow(memory_index, delta)
.unwrap_or(u32::max_value())
}
/// Implementation of memory.grow for imported 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_imported_memory32_grow(
vmctx: *mut VMContext,
delta: u32,
memory_index: u32,
) -> u32 {
let instance = (&mut *vmctx).instance();
let memory_index = MemoryIndex::from_u32(memory_index);
instance
.imported_memory_grow(memory_index, delta)
.unwrap_or(u32::max_value())
}
/// Implementation of memory.size for locally-defined 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_memory32_size(vmctx: *mut VMContext, memory_index: u32) -> u32 {
let instance = (&mut *vmctx).instance();
let memory_index = DefinedMemoryIndex::from_u32(memory_index);
instance.memory_size(memory_index)
}
/// Implementation of memory.size for imported 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_imported_memory32_size(
vmctx: *mut VMContext,
memory_index: u32,
) -> u32 {
let instance = (&mut *vmctx).instance();
let memory_index = MemoryIndex::from_u32(memory_index);
instance.imported_memory_size(memory_index)
}

@@ -0,0 +1,143 @@
//! Memory management for linear memories.
//!
//! `LinearMemory` is to WebAssembly linear memories what `Table` is to WebAssembly tables.
use crate::mmap::Mmap;
use crate::vmcontext::VMMemoryDefinition;
use alloc::string::String;
use core::convert::TryFrom;
use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};
/// A linear memory instance.
#[derive(Debug)]
pub struct LinearMemory {
// The underlying allocation.
mmap: Mmap,
// The current logical size in wasm pages of this linear memory.
current: u32,
// The optional maximum size in wasm pages of this linear memory.
maximum: Option<u32>,
// Size in bytes of extra guard pages after the end to optimize loads and stores with
// constant offsets.
offset_guard_size: usize,
// Records whether we're using a bounds-checking strategy which requires
// handlers to catch trapping accesses.
pub(crate) needs_signal_handlers: bool,
}
impl LinearMemory {
/// Create a new linear memory instance with specified minimum and maximum number of wasm pages.
pub fn new(plan: &MemoryPlan) -> Result<Self, String> {
// Neither `minimum` nor `maximum` may exceed `WASM_MAX_PAGES` (65536) pages.
assert!(plan.memory.minimum <= WASM_MAX_PAGES);
assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= WASM_MAX_PAGES);
let offset_guard_bytes = plan.offset_guard_size as usize;
// If we have an offset guard, or if we're doing the static memory
// allocation strategy, we need signal handlers to catch out of bounds
// accesses.
let needs_signal_handlers = offset_guard_bytes > 0
|| match plan.style {
MemoryStyle::Dynamic => false,
MemoryStyle::Static { .. } => true,
};
let minimum_pages = match plan.style {
MemoryStyle::Dynamic => plan.memory.minimum,
MemoryStyle::Static { bound } => {
assert!(bound >= plan.memory.minimum);
bound
}
} as usize;
let minimum_bytes = minimum_pages.checked_mul(WASM_PAGE_SIZE as usize).unwrap();
let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
let mapped_pages = plan.memory.minimum as usize;
let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
let mmap = Mmap::accessible_reserved(mapped_bytes, request_bytes)?;
Ok(Self {
mmap,
current: plan.memory.minimum,
maximum: plan.memory.maximum,
offset_guard_size: offset_guard_bytes,
needs_signal_handlers,
})
}
/// Returns the number of allocated wasm pages.
pub fn size(&self) -> u32 {
self.current
}
/// Grow memory by the specified amount of wasm pages.
///
/// Returns `None` if memory can't be grown by the specified amount
/// of wasm pages.
pub fn grow(&mut self, delta: u32) -> Option<u32> {
// Optimization of memory.grow 0 calls.
if delta == 0 {
return Some(self.current);
}
let new_pages = match self.current.checked_add(delta) {
Some(new_pages) => new_pages,
// Linear memory size overflow.
None => return None,
};
let prev_pages = self.current;
if let Some(maximum) = self.maximum {
if new_pages > maximum {
// Linear memory size would exceed the declared maximum.
return None;
}
}
// Wasm linear memories are never allowed to grow beyond what is
// indexable. If the memory has no maximum, enforce the greatest
// limit here.
if new_pages >= WASM_MAX_PAGES {
// Linear memory size would exceed the index range.
return None;
}
let delta_bytes = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
let prev_bytes = usize::try_from(prev_pages).unwrap() * WASM_PAGE_SIZE as usize;
let new_bytes = usize::try_from(new_pages).unwrap() * WASM_PAGE_SIZE as usize;
if new_bytes > self.mmap.len() - self.offset_guard_size {
// If the new size is within the declared maximum, but needs more memory than we
// have on hand, it's a dynamic heap and it can move.
let guard_bytes = self.offset_guard_size;
let request_bytes = new_bytes.checked_add(guard_bytes)?;
let mut new_mmap = Mmap::accessible_reserved(new_bytes, request_bytes).ok()?;
let copy_len = self.mmap.len() - self.offset_guard_size;
new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.mmap.as_slice()[..copy_len]);
self.mmap = new_mmap;
} else if delta_bytes > 0 {
// Make the newly allocated pages accessible.
self.mmap.make_accessible(prev_bytes, delta_bytes).ok()?;
}
self.current = new_pages;
Some(prev_pages)
}
/// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
pub fn vmmemory(&mut self) -> VMMemoryDefinition {
VMMemoryDefinition {
base: self.mmap.as_mut_ptr(),
current_length: self.current as usize * WASM_PAGE_SIZE as usize,
}
}
}
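
A minimal sketch of the page/byte bookkeeping `grow` performs, assuming wasmtime_environ's 64 KiB `WASM_PAGE_SIZE` and a reserved mapping that is already large enough (so no remapping is needed):

use wasmtime_environ::WASM_PAGE_SIZE;

let prev_pages: u32 = 1;
let delta: u32 = 2;
let prev_bytes = prev_pages as usize * WASM_PAGE_SIZE as usize; // 65_536
let delta_bytes = delta as usize * WASM_PAGE_SIZE as usize;     // 131_072
// In this case `grow` calls `self.mmap.make_accessible(prev_bytes, delta_bytes)`
// and then bumps `self.current` from 1 to 3 pages.
assert_eq!(prev_bytes + delta_bytes, 3 * WASM_PAGE_SIZE as usize);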

crates/runtime/src/mmap.rs

@@ -0,0 +1,275 @@
//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use core::ptr;
use core::slice;
#[cfg(not(target_os = "windows"))]
use libc;
use region;
use std::io;
/// Round `size` up to the nearest multiple of `page_size`.
fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
(size + (page_size - 1)) & !(page_size - 1)
}
/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
#[derive(Debug)]
pub struct Mmap {
ptr: *mut u8,
len: usize,
}
impl Mmap {
/// Construct a new empty instance of `Mmap`.
pub fn new() -> Self {
// Rust's slices require non-null pointers, even when empty. `Vec`
// contains code to create a non-null dangling pointer value when
// constructed empty, so we reuse that here.
Self {
ptr: Vec::new().as_mut_ptr(),
len: 0,
}
}
/// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
pub fn with_at_least(size: usize) -> Result<Self, String> {
let page_size = region::page::size();
let rounded_size = round_up_to_page_size(size, page_size);
Self::accessible_reserved(rounded_size, rounded_size)
}
/// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
/// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
/// must be native page-size multiples.
#[cfg(not(target_os = "windows"))]
pub fn accessible_reserved(
accessible_size: usize,
mapping_size: usize,
) -> Result<Self, String> {
let page_size = region::page::size();
assert!(accessible_size <= mapping_size);
assert_eq!(mapping_size & (page_size - 1), 0);
assert_eq!(accessible_size & (page_size - 1), 0);
// Mmap may return EINVAL if the size is zero, so just
// special-case that.
if mapping_size == 0 {
return Ok(Self::new());
}
Ok(if accessible_size == mapping_size {
// Allocate a single read-write region at once.
let ptr = unsafe {
libc::mmap(
ptr::null_mut(),
mapping_size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_PRIVATE | libc::MAP_ANON,
-1,
0,
)
};
if ptr as isize == -1_isize {
return Err(io::Error::last_os_error().to_string());
}
Self {
ptr: ptr as *mut u8,
len: mapping_size,
}
} else {
// Reserve the mapping size.
let ptr = unsafe {
libc::mmap(
ptr::null_mut(),
mapping_size,
libc::PROT_NONE,
libc::MAP_PRIVATE | libc::MAP_ANON,
-1,
0,
)
};
if ptr as isize == -1_isize {
return Err(io::Error::last_os_error().to_string());
}
let mut result = Self {
ptr: ptr as *mut u8,
len: mapping_size,
};
if accessible_size != 0 {
// Commit the accessible size.
result.make_accessible(0, accessible_size)?;
}
result
})
}
/// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
/// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
/// must be native page-size multiples.
#[cfg(target_os = "windows")]
pub fn accessible_reserved(
accessible_size: usize,
mapping_size: usize,
) -> Result<Self, String> {
use winapi::um::memoryapi::VirtualAlloc;
use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE};
let page_size = region::page::size();
assert!(accessible_size <= mapping_size);
assert_eq!(mapping_size & (page_size - 1), 0);
assert_eq!(accessible_size & (page_size - 1), 0);
Ok(if accessible_size == mapping_size {
// Allocate a single read-write region at once.
let ptr = unsafe {
VirtualAlloc(
ptr::null_mut(),
mapping_size,
MEM_RESERVE | MEM_COMMIT,
PAGE_READWRITE,
)
};
if ptr.is_null() {
return Err(io::Error::last_os_error().to_string());
}
Self {
ptr: ptr as *mut u8,
len: mapping_size,
}
} else {
// Reserve the mapping size.
let ptr =
unsafe { VirtualAlloc(ptr::null_mut(), mapping_size, MEM_RESERVE, PAGE_NOACCESS) };
if ptr.is_null() {
return Err(io::Error::last_os_error().to_string());
}
let mut result = Self {
ptr: ptr as *mut u8,
len: mapping_size,
};
if accessible_size != 0 {
// Commit the accessible size.
result.make_accessible(0, accessible_size)?;
}
result
})
}
/// Make the memory starting at `start` and extending for `len` bytes accessible.
/// `start` and `len` must be native page-size multiples and describe a range within
/// `self`'s reserved memory.
#[cfg(not(target_os = "windows"))]
pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
let page_size = region::page::size();
assert_eq!(start & (page_size - 1), 0);
assert_eq!(len & (page_size - 1), 0);
assert!(len <= self.len);
assert!(start <= self.len - len);
// Commit the accessible size.
unsafe { region::protect(self.ptr.add(start), len, region::Protection::ReadWrite) }
.map_err(|e| e.to_string())
}
/// Make the memory starting at `start` and extending for `len` bytes accessible.
/// `start` and `len` must be native page-size multiples and describe a range within
/// `self`'s reserved memory.
#[cfg(target_os = "windows")]
pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
use winapi::ctypes::c_void;
use winapi::um::memoryapi::VirtualAlloc;
use winapi::um::winnt::{MEM_COMMIT, PAGE_READWRITE};
let page_size = region::page::size();
assert_eq!(start & (page_size - 1), 0);
assert_eq!(len & (page_size - 1), 0);
assert!(len <= self.len);
assert!(start <= self.len - len);
// Commit the accessible size.
if unsafe {
VirtualAlloc(
self.ptr.add(start) as *mut c_void,
len,
MEM_COMMIT,
PAGE_READWRITE,
)
}
.is_null()
{
return Err(io::Error::last_os_error().to_string());
}
Ok(())
}
/// Return the allocated memory as a slice of u8.
pub fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.ptr, self.len) }
}
/// Return the allocated memory as a mutable slice of u8.
pub fn as_mut_slice(&mut self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.ptr, self.len) }
}
/// Return the allocated memory as a pointer to u8.
pub fn as_ptr(&self) -> *const u8 {
self.ptr
}
/// Return the allocated memory as a mutable pointer to u8.
pub fn as_mut_ptr(&mut self) -> *mut u8 {
self.ptr
}
/// Return the length of the allocated memory.
pub fn len(&self) -> usize {
self.len
}
}
impl Drop for Mmap {
#[cfg(not(target_os = "windows"))]
fn drop(&mut self) {
if self.len != 0 {
let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.len) };
assert_eq!(r, 0, "munmap failed: {}", io::Error::last_os_error());
}
}
#[cfg(target_os = "windows")]
fn drop(&mut self) {
if self.len != 0 {
use winapi::ctypes::c_void;
use winapi::um::memoryapi::VirtualFree;
use winapi::um::winnt::MEM_RELEASE;
// With MEM_RELEASE the size must be 0, and VirtualFree returns nonzero on success.
let r = unsafe { VirtualFree(self.ptr as *mut c_void, 0, MEM_RELEASE) };
assert_ne!(r, 0, "VirtualFree failed: {}", io::Error::last_os_error());
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_round_up_to_page_size() {
assert_eq!(round_up_to_page_size(0, 4096), 0);
assert_eq!(round_up_to_page_size(1, 4096), 4096);
assert_eq!(round_up_to_page_size(4096, 4096), 4096);
assert_eq!(round_up_to_page_size(4097, 4096), 8192);
}
}
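
A hedged usage sketch of the `Mmap` API above: reserve several pages, commit only a prefix, and commit more later (the page size comes from `region::page::size()` rather than an assumed constant).

let page = region::page::size();
// Reserve four pages but make only the first accessible.
let mut mmap = Mmap::accessible_reserved(page, 4 * page).unwrap();
mmap.as_mut_slice()[0] = 0xAA;
// Later, commit the second page as well.
mmap.make_accessible(page, page).unwrap();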

@@ -0,0 +1,44 @@
//! Implement a registry of function signatures, for fast indirect call
//! signature checking.
use crate::vmcontext::VMSharedSignatureIndex;
use crate::{hash_map, HashMap};
use core::convert::TryFrom;
use cranelift_codegen::ir;
/// WebAssembly requires that the caller and callee signatures in an indirect
/// call must match. To implement this efficiently, keep a registry of all
/// signatures, shared by all instances, so that call sites can just do an
/// index comparison.
#[derive(Debug)]
pub struct SignatureRegistry {
signature_hash: HashMap<ir::Signature, VMSharedSignatureIndex>,
}
impl SignatureRegistry {
/// Create a new `SignatureRegistry`.
pub fn new() -> Self {
Self {
signature_hash: HashMap::new(),
}
}
/// Register a signature and return its unique index.
pub fn register(&mut self, sig: &ir::Signature) -> VMSharedSignatureIndex {
let len = self.signature_hash.len();
match self.signature_hash.entry(sig.clone()) {
hash_map::Entry::Occupied(entry) => *entry.get(),
hash_map::Entry::Vacant(entry) => {
// Keep `signature_hash` len under 2**32 -- VMSharedSignatureIndex::new(core::u32::MAX)
// is reserved for VMSharedSignatureIndex::default().
debug_assert!(
len < core::u32::MAX as usize,
"Invariant check: signature_hash.len() < core::u32::MAX"
);
let sig_id = VMSharedSignatureIndex::new(u32::try_from(len).unwrap());
entry.insert(sig_id);
sig_id
}
}
}
}
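
A minimal sketch, using `CallConv::SystemV` only to build an example signature: registering the same signature twice yields the same shared index, which is why indirect-call checks reduce to an integer comparison.

use cranelift_codegen::{ir, isa};

let mut registry = SignatureRegistry::new();
let sig = ir::Signature::new(isa::CallConv::SystemV);
let a = registry.register(&sig);
let b = registry.register(&sig);
// Equal signatures map to a single VMSharedSignatureIndex.
assert_eq!(a, b);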

@@ -0,0 +1,130 @@
//! Interface to low-level signal-handling mechanisms.
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
use crate::vmcontext::VMContext;
use crate::RwLock;
use core::borrow::{Borrow, BorrowMut};
use core::cell::Cell;
#[derive(Default)]
struct TrapContext {
tried_to_install_signal_handlers: Cell<bool>,
have_signal_handlers: Cell<bool>,
}
extern "C" {
fn EnsureEagerSignalHandlers() -> libc::c_int;
#[cfg(any(target_os = "macos", target_os = "ios"))]
fn EnsureDarwinMachPorts() -> libc::c_int;
}
struct InstallState {
tried: bool,
success: bool,
}
impl InstallState {
fn new() -> Self {
Self {
tried: false,
success: false,
}
}
}
lazy_static! {
static ref EAGER_INSTALL_STATE: RwLock<InstallState> = RwLock::new(InstallState::new());
static ref LAZY_INSTALL_STATE: RwLock<InstallState> = RwLock::new(InstallState::new());
}
/// This function performs the low-overhead signal handler initialization that we
/// want to do eagerly to ensure a more-deterministic global process state. This
/// is especially relevant for signal handlers since handler ordering depends on
/// installation order: the wasm signal handler must run *before* the other crash
/// handlers and since POSIX signal handlers work LIFO, this function needs to be
/// called at the end of the startup process, after other handlers have been
/// installed. This function can thus be called multiple times, having no effect
/// after the first call.
#[no_mangle]
pub extern "C" fn wasmtime_init_eager() {
let mut locked = EAGER_INSTALL_STATE.write().unwrap();
let state = locked.borrow_mut();
if state.tried {
return;
}
state.tried = true;
assert!(!state.success);
if unsafe { EnsureEagerSignalHandlers() == 0 } {
return;
}
state.success = true;
}
thread_local! {
static TRAP_CONTEXT: TrapContext = TrapContext::default();
}
/// Assuming `wasmtime_init_eager` has already been called,
/// this function performs the full installation of signal handlers which must
/// be performed per-thread. This operation may incur some overhead and
/// so should be done only when needed to use wasm.
#[no_mangle]
pub extern "C" fn wasmtime_init_finish(vmctx: &mut VMContext) {
if !TRAP_CONTEXT.with(|cx| cx.tried_to_install_signal_handlers.get()) {
TRAP_CONTEXT.with(|cx| {
cx.tried_to_install_signal_handlers.set(true);
assert!(!cx.have_signal_handlers.get());
});
{
let locked = EAGER_INSTALL_STATE.read().unwrap();
let state = locked.borrow();
assert!(
state.tried,
"call wasmtime_init_eager before calling wasmtime_init_finish"
);
if !state.success {
return;
}
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
ensure_darwin_mach_ports();
TRAP_CONTEXT.with(|cx| {
cx.have_signal_handlers.set(true);
})
}
let instance = unsafe { vmctx.instance() };
let have_signal_handlers = TRAP_CONTEXT.with(|cx| cx.have_signal_handlers.get());
if !have_signal_handlers && instance.needs_signal_handlers() {
panic!("failed to install signal handlers");
}
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
fn ensure_darwin_mach_ports() {
let mut locked = LAZY_INSTALL_STATE.write().unwrap();
let state = locked.borrow_mut();
if state.tried {
return;
}
state.tried = true;
assert!(!state.success);
if unsafe { EnsureDarwinMachPorts() != 0 } {
return;
}
state.success = true;
}

crates/runtime/src/table.rs

@@ -0,0 +1,101 @@
//! Memory management for tables.
//!
//! `Table` is to WebAssembly tables what `LinearMemory` is to WebAssembly linear memories.
use crate::vmcontext::{VMCallerCheckedAnyfunc, VMTableDefinition};
use alloc::vec::Vec;
use core::convert::{TryFrom, TryInto};
use cranelift_wasm::TableElementType;
use wasmtime_environ::{TablePlan, TableStyle};
/// A table instance.
#[derive(Debug)]
pub struct Table {
vec: Vec<VMCallerCheckedAnyfunc>,
maximum: Option<u32>,
}
impl Table {
/// Create a new table instance with specified minimum and maximum number of elements.
pub fn new(plan: &TablePlan) -> Self {
match plan.table.ty {
TableElementType::Func => (),
TableElementType::Val(ty) => {
unimplemented!("tables of types other than anyfunc ({})", ty)
}
};
match plan.style {
TableStyle::CallerChecksSignature => Self {
vec: vec![
VMCallerCheckedAnyfunc::default();
usize::try_from(plan.table.minimum).unwrap()
],
maximum: plan.table.maximum,
},
}
}
/// Returns the number of allocated elements.
pub fn size(&self) -> u32 {
self.vec.len().try_into().unwrap()
}
/// Grow table by the specified amount of elements.
///
/// Returns `None` if table can't be grown by the specified amount
/// of elements.
pub fn grow(&mut self, delta: u32) -> Option<u32> {
let new_len = match self.size().checked_add(delta) {
Some(len) => {
if let Some(max) = self.maximum {
if len > max {
return None;
}
}
len
}
None => {
return None;
}
};
self.vec.resize(
usize::try_from(new_len).unwrap(),
VMCallerCheckedAnyfunc::default(),
);
Some(new_len)
}
/// Get reference to the specified element.
///
/// Returns `None` if the index is out of bounds.
pub fn get(&self, index: u32) -> Option<&VMCallerCheckedAnyfunc> {
self.vec.get(index as usize)
}
/// Get mutable reference to the specified element.
///
/// Returns `None` if the index is out of bounds.
pub fn get_mut(&mut self, index: u32) -> Option<&mut VMCallerCheckedAnyfunc> {
self.vec.get_mut(index as usize)
}
/// Return a `VMTableDefinition` for exposing the table to compiled wasm code.
pub fn vmtable(&mut self) -> VMTableDefinition {
VMTableDefinition {
base: self.vec.as_mut_ptr() as *mut u8,
current_elements: self.vec.len().try_into().unwrap(),
}
}
}
impl AsRef<[VMCallerCheckedAnyfunc]> for Table {
fn as_ref(&self) -> &[VMCallerCheckedAnyfunc] {
self.vec.as_slice()
}
}
impl AsMut<[VMCallerCheckedAnyfunc]> for Table {
fn as_mut(&mut self) -> &mut [VMCallerCheckedAnyfunc] {
self.vec.as_mut_slice()
}
}

@@ -0,0 +1,71 @@
use crate::HashMap;
use crate::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use cranelift_codegen::ir;
use lazy_static::lazy_static;
lazy_static! {
static ref REGISTRY: RwLock<TrapRegistry> = RwLock::new(TrapRegistry::default());
}
/// The registry maintains descriptions of traps in currently allocated functions.
#[derive(Default)]
pub struct TrapRegistry {
traps: HashMap<usize, TrapDescription>,
}
/// Description of a trap.
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct TrapDescription {
/// Location of the trap in source binary module.
pub source_loc: ir::SourceLoc,
/// Code of the trap.
pub trap_code: ir::TrapCode,
}
/// RAII guard for deregistering traps
pub struct TrapRegistrationGuard(usize);
impl TrapRegistry {
/// Registers a new trap.
/// Returns a RAII guard that deregisters the trap when dropped.
pub fn register_trap(
&mut self,
address: usize,
source_loc: ir::SourceLoc,
trap_code: ir::TrapCode,
) -> TrapRegistrationGuard {
let entry = TrapDescription {
source_loc,
trap_code,
};
let previous_trap = self.traps.insert(address, entry);
assert!(previous_trap.is_none());
TrapRegistrationGuard(address)
}
fn deregister_trap(&mut self, address: usize) {
assert!(self.traps.remove(&address).is_some());
}
/// Gets a trap description at given address.
pub fn get_trap(&self, address: usize) -> Option<TrapDescription> {
self.traps.get(&address).copied()
}
}
impl Drop for TrapRegistrationGuard {
fn drop(&mut self) {
let mut registry = get_mut_trap_registry();
registry.deregister_trap(self.0);
}
}
/// Gets a guarded writable reference to the trap registry.
pub fn get_mut_trap_registry() -> RwLockWriteGuard<'static, TrapRegistry> {
REGISTRY.write().expect("trap registry lock got poisoned")
}
/// Gets a guarded readable reference to the trap registry.
pub fn get_trap_registry() -> RwLockReadGuard<'static, TrapRegistry> {
REGISTRY.read().expect("trap registry lock got poisoned")
}
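
A minimal sketch using only the API above: register a trap for a hypothetical code address, look it up, and let the RAII guard deregister it on drop.

use cranelift_codegen::ir;

let addr = 0x1234_usize; // hypothetical native-code address
let guard = get_mut_trap_registry().register_trap(
    addr,
    ir::SourceLoc::default(),
    ir::TrapCode::HeapOutOfBounds,
);
assert!(get_trap_registry().get_trap(addr).is_some());
drop(guard); // deregisters the trap
assert!(get_trap_registry().get_trap(addr).is_none());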

@@ -0,0 +1,170 @@
//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.
use crate::trap_registry::get_trap_registry;
use crate::trap_registry::TrapDescription;
use crate::vmcontext::{VMContext, VMFunctionBody};
use alloc::string::{String, ToString};
use core::cell::Cell;
use core::ptr;
use cranelift_codegen::ir;
extern "C" {
fn WasmtimeCallTrampoline(
vmctx: *mut u8,
callee: *const VMFunctionBody,
values_vec: *mut u8,
) -> i32;
fn WasmtimeCall(vmctx: *mut u8, callee: *const VMFunctionBody) -> i32;
}
thread_local! {
static RECORDED_TRAP: Cell<Option<TrapDescription>> = Cell::new(None);
static JMP_BUF: Cell<*const u8> = Cell::new(ptr::null());
static RESET_GUARD_PAGE: Cell<bool> = Cell::new(false);
}
/// Check if there is a trap at the given PC.
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn CheckIfTrapAtAddress(_pc: *const u8) -> i8 {
// TODO: stack overflow can happen at any random time (i.e. in malloc() in memory.grow)
// and it's really hard to determine if the cause was stack overflow and if it happened
// in WebAssembly module.
// So, let's assume that any untrusted code called from WebAssembly doesn't trap.
// Then, if we have called some WebAssembly code, it means the trap is stack overflow.
JMP_BUF.with(|ptr| !ptr.get().is_null()) as i8
}
/// Record the trap code and wasm bytecode offset in thread-local storage.
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn RecordTrap(pc: *const u8, reset_guard_page: bool) {
// TODO: please see explanation in CheckIfTrapAtAddress.
let registry = get_trap_registry();
let trap_desc = registry
.get_trap(pc as usize)
.unwrap_or_else(|| TrapDescription {
source_loc: ir::SourceLoc::default(),
trap_code: ir::TrapCode::StackOverflow,
});
if reset_guard_page {
RESET_GUARD_PAGE.with(|v| v.set(true));
}
RECORDED_TRAP.with(|data| {
assert_eq!(
data.get(),
None,
"Only one trap per thread can be recorded at a moment!"
);
data.set(Some(trap_desc))
});
}
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn EnterScope(ptr: *const u8) -> *const u8 {
JMP_BUF.with(|buf| buf.replace(ptr))
}
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn GetScope() -> *const u8 {
JMP_BUF.with(|buf| buf.get())
}
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn LeaveScope(ptr: *const u8) {
RESET_GUARD_PAGE.with(|v| {
if v.get() {
reset_guard_page();
v.set(false);
}
});
JMP_BUF.with(|buf| buf.set(ptr))
}
#[cfg(target_os = "windows")]
fn reset_guard_page() {
extern "C" {
fn _resetstkoflw() -> winapi::ctypes::c_int;
}
// We need to restore guard page under stack to handle future stack overflows properly.
// https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/resetstkoflw?view=vs-2019
if unsafe { _resetstkoflw() } == 0 {
panic!("failed to restore stack guard page");
}
}
#[cfg(not(target_os = "windows"))]
fn reset_guard_page() {}
fn trap_message() -> String {
let trap_desc = RECORDED_TRAP
.with(|data| data.replace(None))
.expect("trap_message must be called after trap occurred");
format!(
"wasm trap: {}, source location: {}",
trap_code_to_expected_string(trap_desc.trap_code),
trap_desc.source_loc,
)
}
fn trap_code_to_expected_string(trap_code: ir::TrapCode) -> String {
use ir::TrapCode::*;
match trap_code {
StackOverflow => "call stack exhausted".to_string(),
HeapOutOfBounds => "out of bounds memory access".to_string(),
TableOutOfBounds => "undefined element".to_string(),
OutOfBounds => "out of bounds".to_string(), // Note: not covered by the test suite
IndirectCallToNull => "uninitialized element".to_string(),
BadSignature => "indirect call type mismatch".to_string(),
IntegerOverflow => "integer overflow".to_string(),
IntegerDivisionByZero => "integer divide by zero".to_string(),
BadConversionToInteger => "invalid conversion to integer".to_string(),
UnreachableCodeReached => "unreachable".to_string(),
Interrupt => "interrupt".to_string(), // Note: not covered by the test suite
User(x) => format!("user trap {}", x), // Note: not covered by the test suite
}
}
/// Call the wasm function pointed to by `callee`. `values_vec` points to
/// a buffer which holds the incoming arguments, and to which the outgoing
/// return values will be written.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_call_trampoline(
vmctx: *mut VMContext,
callee: *const VMFunctionBody,
values_vec: *mut u8,
) -> Result<(), String> {
if WasmtimeCallTrampoline(vmctx as *mut u8, callee, values_vec) == 0 {
Err(trap_message())
} else {
Ok(())
}
}
/// Call the wasm function pointed to by `callee`, which has no arguments or
/// return values.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_call(
vmctx: *mut VMContext,
callee: *const VMFunctionBody,
) -> Result<(), String> {
if WasmtimeCall(vmctx as *mut u8, callee) == 0 {
Err(trap_message())
} else {
Ok(())
}
}

@@ -0,0 +1,615 @@
//! This file declares `VMContext` and several related structs which contain
//! fields that compiled wasm code accesses directly.
use crate::instance::Instance;
use core::any::Any;
use core::{ptr, u32};
use wasmtime_environ::BuiltinFunctionIndex;
/// An imported function.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
/// A pointer to the imported function body.
pub body: *const VMFunctionBody,
/// A pointer to the `VMContext` that owns the function.
pub vmctx: *mut VMContext,
}
#[cfg(test)]
mod test_vmfunction_import {
use super::VMFunctionImport;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmfunction_import_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMFunctionImport>(),
usize::from(offsets.size_of_vmfunction_import())
);
assert_eq!(
offset_of!(VMFunctionImport, body),
usize::from(offsets.vmfunction_import_body())
);
assert_eq!(
offset_of!(VMFunctionImport, vmctx),
usize::from(offsets.vmfunction_import_vmctx())
);
}
}
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
#[repr(C)]
pub struct VMFunctionBody(u8);
#[cfg(test)]
mod test_vmfunction_body {
use super::VMFunctionBody;
use core::mem::size_of;
#[test]
fn check_vmfunction_body_offsets() {
assert_eq!(size_of::<VMFunctionBody>(), 1);
}
}
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
/// A pointer to the imported table description.
pub from: *mut VMTableDefinition,
/// A pointer to the `VMContext` that owns the table description.
pub vmctx: *mut VMContext,
}
#[cfg(test)]
mod test_vmtable_import {
use super::VMTableImport;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmtable_import_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMTableImport>(),
usize::from(offsets.size_of_vmtable_import())
);
assert_eq!(
offset_of!(VMTableImport, from),
usize::from(offsets.vmtable_import_from())
);
assert_eq!(
offset_of!(VMTableImport, vmctx),
usize::from(offsets.vmtable_import_vmctx())
);
}
}
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
/// A pointer to the imported memory description.
pub from: *mut VMMemoryDefinition,
/// A pointer to the `VMContext` that owns the memory description.
pub vmctx: *mut VMContext,
}
#[cfg(test)]
mod test_vmmemory_import {
use super::VMMemoryImport;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmmemory_import_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMMemoryImport>(),
usize::from(offsets.size_of_vmmemory_import())
);
assert_eq!(
offset_of!(VMMemoryImport, from),
usize::from(offsets.vmmemory_import_from())
);
assert_eq!(
offset_of!(VMMemoryImport, vmctx),
usize::from(offsets.vmmemory_import_vmctx())
);
}
}
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
/// A pointer to the imported global variable description.
pub from: *mut VMGlobalDefinition,
}
#[cfg(test)]
mod test_vmglobal_import {
use super::VMGlobalImport;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmglobal_import_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMGlobalImport>(),
usize::from(offsets.size_of_vmglobal_import())
);
assert_eq!(
offset_of!(VMGlobalImport, from),
usize::from(offsets.vmglobal_import_from())
);
}
}
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryDefinition {
/// The start address.
pub base: *mut u8,
/// The current logical size of this linear memory in bytes.
pub current_length: usize,
}
#[cfg(test)]
mod test_vmmemory_definition {
use super::VMMemoryDefinition;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmmemory_definition_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMMemoryDefinition>(),
usize::from(offsets.size_of_vmmemory_definition())
);
assert_eq!(
offset_of!(VMMemoryDefinition, base),
usize::from(offsets.vmmemory_definition_base())
);
assert_eq!(
offset_of!(VMMemoryDefinition, current_length),
usize::from(offsets.vmmemory_definition_current_length())
);
/* TODO: Assert that the size of `current_length` matches.
assert_eq!(
size_of::<VMMemoryDefinition::current_length>(),
usize::from(offsets.size_of_vmmemory_definition_current_length())
);
*/
}
}
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
/// Pointer to the table data.
pub base: *mut u8,
/// The current number of elements in the table.
pub current_elements: u32,
}
#[cfg(test)]
mod test_vmtable_definition {
use super::VMTableDefinition;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmtable_definition_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMTableDefinition>(),
usize::from(offsets.size_of_vmtable_definition())
);
assert_eq!(
offset_of!(VMTableDefinition, base),
usize::from(offsets.vmtable_definition_base())
);
assert_eq!(
offset_of!(VMTableDefinition, current_elements),
usize::from(offsets.vmtable_definition_current_elements())
);
}
}
/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug, Copy, Clone)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
storage: [u8; 16],
// If more elements are added here, remember to add offset_of tests below!
}
#[cfg(test)]
mod test_vmglobal_definition {
use super::VMGlobalDefinition;
use core::mem::{align_of, size_of};
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmglobal_definition_alignment() {
assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<[u8; 16]>());
}
#[test]
fn check_vmglobal_definition_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMGlobalDefinition>(),
usize::from(offsets.size_of_vmglobal_definition())
);
}
#[test]
fn check_vmglobal_begins_aligned() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
}
}
impl VMGlobalDefinition {
/// Construct a `VMGlobalDefinition`.
pub fn new() -> Self {
Self { storage: [0; 16] }
}
/// Return a reference to the value as an i32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i32(&self) -> &i32 {
&*(self.storage.as_ref().as_ptr() as *const i32)
}
/// Return a mutable reference to the value as an i32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut i32)
}
/// Return a reference to the value as a u32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u32(&self) -> &u32 {
&*(self.storage.as_ref().as_ptr() as *const u32)
}
/// Return a mutable reference to the value as a u32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u32)
}
/// Return a reference to the value as an i64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i64(&self) -> &i64 {
&*(self.storage.as_ref().as_ptr() as *const i64)
}
/// Return a mutable reference to the value as an i64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut i64)
}
/// Return a reference to the value as a u64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u64(&self) -> &u64 {
&*(self.storage.as_ref().as_ptr() as *const u64)
}
/// Return a mutable reference to the value as a u64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u64)
}
/// Return a reference to the value as an f32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32(&self) -> &f32 {
&*(self.storage.as_ref().as_ptr() as *const f32)
}
/// Return a mutable reference to the value as an f32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut f32)
}
/// Return a reference to the value as f32 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32_bits(&self) -> &u32 {
&*(self.storage.as_ref().as_ptr() as *const u32)
}
/// Return a mutable reference to the value as f32 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u32)
}
/// Return a reference to the value as an f64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64(&self) -> &f64 {
&*(self.storage.as_ref().as_ptr() as *const f64)
}
/// Return a mutable reference to the value as an f64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut f64)
}
/// Return a reference to the value as f64 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64_bits(&self) -> &u64 {
&*(self.storage.as_ref().as_ptr() as *const u64)
}
/// Return a mutable reference to the value as f64 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u64)
}
/// Return a reference to the value as a u128.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u128(&self) -> &u128 {
&*(self.storage.as_ref().as_ptr() as *const u128)
}
/// Return a mutable reference to the value as a u128.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u128_mut(&mut self) -> &mut u128 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u128)
}
/// Return a reference to the value as u128 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
&*(self.storage.as_ref().as_ptr() as *const [u8; 16])
}
/// Return a mutable reference to the value as u128 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut [u8; 16])
}
}
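
A small sketch of the typed accessors above; the raw-pointer casts are sound here because the storage is 16 bytes, 16-aligned, and zero-initialized.

let mut global = VMGlobalDefinition::new();
unsafe {
    *global.as_i64_mut() = -1;
    assert_eq!(*global.as_i64(), -1);
    // The same storage reinterpreted as raw bits.
    assert_eq!(*global.as_u64(), u64::max_value());
}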
/// An index into the shared signature registry, usable for checking signatures
/// at indirect calls.
#[repr(C)]
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)]
pub struct VMSharedSignatureIndex(u32);
#[cfg(test)]
mod test_vmshared_signature_index {
use super::VMSharedSignatureIndex;
use core::mem::size_of;
use wasmtime_environ::{Module, TargetSharedSignatureIndex, VMOffsets};
#[test]
fn check_vmshared_signature_index() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMSharedSignatureIndex>(),
usize::from(offsets.size_of_vmshared_signature_index())
);
}
#[test]
fn check_target_shared_signature_index() {
assert_eq!(
size_of::<VMSharedSignatureIndex>(),
size_of::<TargetSharedSignatureIndex>()
);
}
}
impl VMSharedSignatureIndex {
/// Create a new `VMSharedSignatureIndex`.
pub fn new(value: u32) -> Self {
Self(value)
}
}
impl Default for VMSharedSignatureIndex {
fn default() -> Self {
Self::new(u32::MAX)
}
}
/// The VM caller-checked "anyfunc" record, for caller-side signature checking.
/// It consists of the actual function pointer and a signature id to be checked
/// by the caller.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMCallerCheckedAnyfunc {
/// Function body.
pub func_ptr: *const VMFunctionBody,
/// Function signature id.
pub type_index: VMSharedSignatureIndex,
/// Function `VMContext`.
pub vmctx: *mut VMContext,
// If more elements are added here, remember to add offset_of tests below!
}
#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
use super::VMCallerCheckedAnyfunc;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmcaller_checked_anyfunc_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMCallerCheckedAnyfunc>(),
usize::from(offsets.size_of_vmcaller_checked_anyfunc())
);
assert_eq!(
offset_of!(VMCallerCheckedAnyfunc, func_ptr),
usize::from(offsets.vmcaller_checked_anyfunc_func_ptr())
);
assert_eq!(
offset_of!(VMCallerCheckedAnyfunc, type_index),
usize::from(offsets.vmcaller_checked_anyfunc_type_index())
);
assert_eq!(
offset_of!(VMCallerCheckedAnyfunc, vmctx),
usize::from(offsets.vmcaller_checked_anyfunc_vmctx())
);
}
}
impl Default for VMCallerCheckedAnyfunc {
fn default() -> Self {
Self {
func_ptr: ptr::null_mut(),
type_index: Default::default(),
vmctx: ptr::null_mut(),
}
}
}
/// An array that stores addresses of builtin functions. We translate code
/// to use indirect calls. This way, we don't have to patch the code.
#[repr(C)]
pub struct VMBuiltinFunctionsArray {
ptrs: [usize; Self::len()],
}
impl VMBuiltinFunctionsArray {
pub const fn len() -> usize {
BuiltinFunctionIndex::builtin_functions_total_number() as usize
}
pub fn initialized() -> Self {
use crate::libcalls::*;
let mut ptrs = [0; Self::len()];
ptrs[BuiltinFunctionIndex::get_memory32_grow_index().index() as usize] =
wasmtime_memory32_grow as usize;
ptrs[BuiltinFunctionIndex::get_imported_memory32_grow_index().index() as usize] =
wasmtime_imported_memory32_grow as usize;
ptrs[BuiltinFunctionIndex::get_memory32_size_index().index() as usize] =
wasmtime_memory32_size as usize;
ptrs[BuiltinFunctionIndex::get_imported_memory32_size_index().index() as usize] =
wasmtime_imported_memory32_size as usize;
Self { ptrs }
}
}
/// The storage for a WebAssembly invocation argument
///
/// TODO: These could be packed more densely, rather than using the same size for every type.
#[derive(Debug, Copy, Clone)]
#[repr(C, align(16))]
pub struct VMInvokeArgument([u8; 16]);
#[cfg(test)]
mod test_vm_invoke_argument {
use super::VMInvokeArgument;
use core::mem::{align_of, size_of};
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vm_invoke_argument_alignment() {
assert_eq!(align_of::<VMInvokeArgument>(), 16);
}
#[test]
fn check_vmglobal_definition_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMInvokeArgument>(),
usize::from(offsets.size_of_vmglobal_definition())
);
}
}
impl VMInvokeArgument {
/// Create a new invocation argument filled with zeroes
pub fn new() -> Self {
Self([0; 16])
}
}
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
///
/// TODO: We could move the globals into the `vmctx` allocation too.
#[derive(Debug)]
#[repr(C)]
pub struct VMContext {}
impl VMContext {
/// Return a mutable reference to the associated `Instance`.
///
/// This is unsafe because it doesn't work on just any `VMContext`, it must
/// be a `VMContext` allocated as part of an `Instance`.
#[allow(clippy::cast_ptr_alignment)]
pub(crate) unsafe fn instance(&mut self) -> &mut Instance {
&mut *((self as *mut Self as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance)
}
/// Return a mutable reference to the host state associated with this `Instance`.
///
/// This is unsafe because it doesn't work on just any `VMContext`, it must
/// be a `VMContext` allocated as part of an `Instance`.
pub unsafe fn host_state(&mut self) -> &mut dyn Any {
self.instance().host_state()
}
/// Lookup an export in the global exports namespace.
pub unsafe fn lookup_global_export(&mut self, field: &str) -> Option<crate::export::Export> {
self.instance().lookup_global_export(field)
}
}