Merge remote-tracking branch 'origin/main' into pch/wiggle_sync_shimming

Pat Hickey
2021-05-06 17:54:03 -07:00
142 changed files with 4443 additions and 1771 deletions

View File

@@ -283,14 +283,6 @@ WASMTIME_CONFIG_PROP(void, static_memory_guard_size, uint64_t)
*/
WASMTIME_CONFIG_PROP(void, dynamic_memory_guard_size, uint64_t)
/**
* \brief Configures the maximum number of instances that can be created.
*
* For more information see the Rust documentation at
* https://bytecodealliance.github.io/wasmtime/api/wasmtime/struct.Config.html#method.max_instances.
*/
WASMTIME_CONFIG_PROP(void, max_instances, size_t)
/**
* \brief Enables Wasmtime's cache and loads configuration from the specified
* path.
@@ -1000,9 +992,13 @@ WASM_API_EXTERN own wasmtime_error_t* wasmtime_module_serialize(
/**
* \brief Build a module from serialized data.
*
* This function does not take ownership of any of its arguments, but the
* returned error and module are owned by the caller.
*
* This function is not safe to receive arbitrary user input. See the Rust
* documentation for more information on what inputs are safe to pass in here
* (e.g. only that of #wasmtime_module_serialize)
*/
WASM_API_EXTERN own wasmtime_error_t *wasmtime_module_deserialize(
wasm_engine_t *engine,
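
A minimal Rust round trip over the API this header wraps (a sketch only, assuming the signatures used elsewhere in this diff): only bytes produced by serialization may be fed back in.

use wasmtime::{Engine, Module};

fn roundtrip(engine: &Engine, wat: &str) -> anyhow::Result<Module> {
    let module = Module::new(engine, wat)?;
    let bytes = module.serialize()?;
    // `deserialize` trusts its input, hence `unsafe`: passing anything other
    // than `serialize` output is not supported.
    unsafe { Module::deserialize(engine, &bytes) }
}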

View File

@@ -176,8 +176,3 @@ pub extern "C" fn wasmtime_config_static_memory_guard_size_set(c: &mut wasm_conf
pub extern "C" fn wasmtime_config_dynamic_memory_guard_size_set(c: &mut wasm_config_t, size: u64) {
c.config.dynamic_memory_guard_size(size);
}
#[no_mangle]
pub extern "C" fn wasmtime_config_max_instances_set(c: &mut wasm_config_t, limit: usize) {
c.config.max_instances(limit);
}

View File

@@ -31,13 +31,13 @@ impl wasm_memory_t {
pub extern "C" fn wasm_memory_new(
store: &wasm_store_t,
mt: &wasm_memorytype_t,
) -> Box<wasm_memory_t> {
let memory = Memory::new(&store.store, mt.ty().ty.clone());
Box::new(wasm_memory_t {
) -> Option<Box<wasm_memory_t>> {
let memory = Memory::new(&store.store, mt.ty().ty.clone()).ok()?;
Some(Box::new(wasm_memory_t {
ext: wasm_extern_t {
which: memory.into(),
},
})
}))
}
#[no_mangle]
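
The `Option<Box<wasm_memory_t>>` return above relies on Rust's guaranteed null-pointer optimization: `Option<Box<T>>` has the same ABI as `*mut T`, so `None` surfaces as NULL to C callers. A standalone sketch (hypothetical function, not wasmtime code):

#[no_mangle]
pub extern "C" fn example_checked_double(n: u32) -> Option<Box<u32>> {
    // `None` crosses the FFI boundary as a null pointer.
    n.checked_mul(2).map(Box::new)
}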

View File

@@ -185,10 +185,13 @@ pub extern "C" fn wasmtime_module_deserialize(
binary: &wasm_byte_vec_t,
ret: &mut *mut wasm_module_t,
) -> Option<Box<wasmtime_error_t>> {
handle_result(Module::new(&engine.engine, binary.as_slice()), |module| {
let module = Box::new(wasm_module_t::new(module));
*ret = Box::into_raw(module);
})
handle_result(
unsafe { Module::deserialize(&engine.engine, binary.as_slice()) },
|module| {
let module = Box::new(wasm_module_t::new(module));
*ret = Box::into_raw(module);
},
)
}
#[no_mangle]

View File

@@ -3,6 +3,7 @@
#![allow(clippy::cast_ptr_alignment)]
use anyhow::{bail, ensure, Error};
use object::endian::{BigEndian, Endian, Endianness, LittleEndian};
use object::{RelocationEncoding, RelocationKind};
use std::collections::HashMap;
@@ -18,13 +19,20 @@ pub fn create_gdbjit_image(
defined_funcs_offset: usize,
funcs: &[*const u8],
) -> Result<Vec<u8>, Error> {
ensure_supported_elf_format(&mut bytes)?;
let e = ensure_supported_elf_format(&mut bytes)?;
// patch relocs
relocate_dwarf_sections(&mut bytes, defined_funcs_offset, funcs)?;
// elf is still missing details...
convert_object_elf_to_loadable_file(&mut bytes, code_region);
match e {
Endianness::Little => {
convert_object_elf_to_loadable_file::<LittleEndian>(&mut bytes, code_region)
}
Endianness::Big => {
convert_object_elf_to_loadable_file::<BigEndian>(&mut bytes, code_region)
}
}
// let mut file = ::std::fs::File::create(::std::path::Path::new("test.o")).expect("file");
// ::std::io::Write::write_all(&mut file, &bytes).expect("write");
@@ -83,20 +91,36 @@ fn relocate_dwarf_sections(
Ok(())
}
fn ensure_supported_elf_format(bytes: &mut Vec<u8>) -> Result<(), Error> {
fn ensure_supported_elf_format(bytes: &mut Vec<u8>) -> Result<Endianness, Error> {
use object::elf::*;
use object::endian::LittleEndian;
use object::read::elf::*;
use object::Bytes;
use std::mem::size_of;
let e = LittleEndian;
let header: &FileHeader64<LittleEndian> =
unsafe { &*(bytes.as_mut_ptr() as *const FileHeader64<_>) };
ensure!(
header.e_ident.class == ELFCLASS64 && header.e_ident.data == ELFDATA2LSB,
"bits and endianess in .ELF",
);
let kind = match object::FileKind::parse(bytes) {
Ok(file) => file,
Err(err) => {
bail!("Failed to parse file: {}", err);
}
};
let header = match kind {
object::FileKind::Elf64 => {
match object::elf::FileHeader64::<Endianness>::parse(Bytes(bytes)) {
Ok(header) => header,
Err(err) => {
bail!("Unsupported ELF file: {}", err);
}
}
}
_ => {
bail!("only 64-bit ELF files currently supported")
}
};
let e = header.endian().unwrap();
match header.e_machine.get(e) {
EM_X86_64 => (),
EM_S390 => (),
machine => {
bail!("Unsupported ELF target machine: {:x}", machine);
}
@@ -106,23 +130,25 @@ fn ensure_supported_elf_format(bytes: &mut Vec<u8>) -> Result<(), Error> {
"program header table is empty"
);
let e_shentsize = header.e_shentsize.get(e);
ensure!(
e_shentsize as usize == size_of::<SectionHeader64<LittleEndian>>(),
"size of sh"
);
Ok(())
let req_shentsize = match e {
Endianness::Little => size_of::<SectionHeader64<LittleEndian>>(),
Endianness::Big => size_of::<SectionHeader64<BigEndian>>(),
};
ensure!(e_shentsize as usize == req_shentsize, "size of sh");
Ok(e)
}
fn convert_object_elf_to_loadable_file(bytes: &mut Vec<u8>, code_region: (*const u8, usize)) {
fn convert_object_elf_to_loadable_file<E: Endian>(
bytes: &mut Vec<u8>,
code_region: (*const u8, usize),
) {
use object::elf::*;
use object::endian::LittleEndian;
use std::ffi::CStr;
use std::mem::size_of;
use std::os::raw::c_char;
let e = LittleEndian;
let header: &FileHeader64<LittleEndian> =
unsafe { &*(bytes.as_mut_ptr() as *const FileHeader64<_>) };
let e = E::default();
let header: &FileHeader64<E> = unsafe { &*(bytes.as_mut_ptr() as *const FileHeader64<_>) };
let e_shentsize = header.e_shentsize.get(e);
let e_shoff = header.e_shoff.get(e);
@@ -130,7 +156,7 @@ fn convert_object_elf_to_loadable_file(bytes: &mut Vec<u8>, code_region: (*const
let mut shstrtab_off = 0;
for i in 0..e_shnum {
let off = e_shoff as isize + i as isize * e_shentsize as isize;
let section: &SectionHeader64<LittleEndian> =
let section: &SectionHeader64<E> =
unsafe { &*(bytes.as_ptr().offset(off) as *const SectionHeader64<_>) };
if section.sh_type.get(e) != SHT_STRTAB {
continue;
@@ -140,7 +166,7 @@ fn convert_object_elf_to_loadable_file(bytes: &mut Vec<u8>, code_region: (*const
let mut segment: Option<_> = None;
for i in 0..e_shnum {
let off = e_shoff as isize + i as isize * e_shentsize as isize;
let section: &mut SectionHeader64<LittleEndian> =
let section: &mut SectionHeader64<E> =
unsafe { &mut *(bytes.as_mut_ptr().offset(off) as *mut SectionHeader64<_>) };
if section.sh_type.get(e) != SHT_PROGBITS {
continue;
@@ -171,12 +197,12 @@ fn convert_object_elf_to_loadable_file(bytes: &mut Vec<u8>, code_region: (*const
// LLDB wants segment with virtual address set, placing them at the end of ELF.
let ph_off = bytes.len();
let e_phentsize = size_of::<ProgramHeader64<LittleEndian>>();
let e_phentsize = size_of::<ProgramHeader64<E>>();
let e_phnum = 1;
bytes.resize(ph_off + e_phentsize * e_phnum, 0);
if let Some((sh_offset, sh_size)) = segment {
let (v_offset, size) = code_region;
let program: &mut ProgramHeader64<LittleEndian> =
let program: &mut ProgramHeader64<E> =
unsafe { &mut *(bytes.as_ptr().add(ph_off) as *mut ProgramHeader64<_>) };
program.p_type.set(e, PT_LOAD);
program.p_offset.set(e, sh_offset);
@@ -189,7 +215,7 @@ fn convert_object_elf_to_loadable_file(bytes: &mut Vec<u8>, code_region: (*const
}
// It is somewhat loadable ELF file at this moment.
let header: &mut FileHeader64<LittleEndian> =
let header: &mut FileHeader64<E> =
unsafe { &mut *(bytes.as_mut_ptr() as *mut FileHeader64<_>) };
header.e_type.set(e, ET_DYN);
header.e_phoff.set(e, ph_off as u64);
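
The generic rewrite above leans on the object crate's endian-aware field wrappers: header fields are stored byte-order-neutrally and accessed through an `Endian` value, which is what makes `convert_object_elf_to_loadable_file::<E>` possible. A small sketch of the same pattern (assuming `U32Bytes` from `object::endian`):

use object::endian::{BigEndian, Endian, LittleEndian, U32Bytes};

// Same access pattern as `section.sh_type.get(e)` above.
fn read_field<E: Endian>(field: &U32Bytes<E>, e: E) -> u32 {
    field.get(e)
}

fn main() {
    let le = U32Bytes::new(LittleEndian, 0x1234_5678);
    let be = U32Bytes::new(BigEndian, 0x1234_5678);
    // Different in-memory byte order, same value on read.
    assert_eq!(read_field(&le, LittleEndian), read_field(&be, BigEndian));
}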

View File

@@ -512,24 +512,28 @@ where
}
};
}
// Find all landing pads by scanning the bytes; we do not care about
// false locations at this moment.
// This looks hacky, but it is fast and does not need to be exact.
for i in 0..buf.len() - 2 {
let op = buf[i];
if op == gimli::constants::DW_OP_bra.0 || op == gimli::constants::DW_OP_skip.0 {
// TODO fix for big-endian
let offset = i16::from_le_bytes([buf[i + 1], buf[i + 2]]);
let origin = i + 3;
// Discarding out-of-bounds jumps (also some of falsely detected ops)
if (offset >= 0 && offset as usize + origin <= buf.len())
|| (offset < 0 && -offset as usize <= origin)
{
let target = buf.len() as isize - origin as isize - offset as isize;
jump_targets.insert(target as u64, JumpTargetMarker::new());
if buf.len() > 2 {
for i in 0..buf.len() - 2 {
let op = buf[i];
if op == gimli::constants::DW_OP_bra.0 || op == gimli::constants::DW_OP_skip.0 {
// TODO fix for big-endian
let offset = i16::from_le_bytes([buf[i + 1], buf[i + 2]]);
let origin = i + 3;
// Discarding out-of-bounds jumps (also some of falsely detected ops)
if (offset >= 0 && offset as usize + origin <= buf.len())
|| (offset < 0 && -offset as usize <= origin)
{
let target = buf.len() as isize - origin as isize - offset as isize;
jump_targets.insert(target as u64, JumpTargetMarker::new());
}
}
}
}
while !pc.is_empty() {
let unread_bytes = pc.len().into_u64();
if let Some(marker) = jump_targets.get(&unread_bytes) {
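
A worked instance of the branch-target arithmetic above, on a hand-made buffer (illustrative only): `DW_OP_bra` (0x28) carries a little-endian `i16` operand, and the map key records how many bytes remain unread at the jump target.

fn main() {
    // DW_OP_bra with operand +2, then two bytes that the branch skips.
    let buf: &[u8] = &[0x28, 0x02, 0x00, 0xaa, 0xbb];
    let i = 0;
    let offset = i16::from_le_bytes([buf[i + 1], buf[i + 2]]) as isize;
    let origin = (i + 3) as isize;
    let target = buf.len() as isize - origin - offset;
    // The branch lands at the end of the buffer: zero bytes left unread.
    assert_eq!(target, 0);
}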

View File

@@ -10,6 +10,7 @@ use anyhow::{Context, Error};
use gimli::write;
use gimli::{AttributeValue, DebuggingInformationEntry, Unit};
use std::collections::HashSet;
use wasmtime_environ::ir::Endianness;
use wasmtime_environ::isa::TargetIsa;
use wasmtime_environ::wasm::DefinedFuncIndex;
use wasmtime_environ::{CompiledFunctions, ModuleMemoryOffset};
@@ -463,6 +464,19 @@ where
isa,
)?;
// Data in WebAssembly memory always uses little-endian byte order.
// If the native architecture is big-endian, we need to mark all
// base types used to refer to WebAssembly memory as little-endian
// using the DW_AT_endianity attribute, so that the debugger will
// be able to correctly access them.
if entry.tag() == gimli::DW_TAG_base_type && isa.endianness() == Endianness::Big {
let current_scope = comp_unit.get_mut(die_id);
current_scope.set(
gimli::DW_AT_endianity,
write::AttributeValue::Endianity(gimli::DW_END_little),
);
}
if entry.tag() == gimli::DW_TAG_subprogram && !current_scope_ranges.is_empty() {
append_vmctx_info(
comp_unit,

View File

@@ -2,6 +2,7 @@ pub use crate::transform::transform_dwarf;
use gimli::write::{Address, Dwarf, EndianVec, FrameTable, Result, Sections, Writer};
use gimli::{RunTimeEndian, SectionId};
use wasmtime_environ::entity::EntityRef;
use wasmtime_environ::ir::Endianness;
use wasmtime_environ::isa::{unwind::UnwindInfo, TargetIsa};
use wasmtime_environ::{CompiledFunctions, DebugInfoData, ModuleMemoryOffset};
@@ -26,10 +27,19 @@ pub struct DwarfSection {
}
fn emit_dwarf_sections(
isa: &dyn TargetIsa,
mut dwarf: Dwarf,
frames: Option<FrameTable>,
) -> anyhow::Result<Vec<DwarfSection>> {
let mut sections = Sections::new(WriterRelocate::default());
let endian = match isa.endianness() {
Endianness::Little => RunTimeEndian::Little,
Endianness::Big => RunTimeEndian::Big,
};
let writer = WriterRelocate {
relocs: Vec::new(),
writer: EndianVec::new(endian),
};
let mut sections = Sections::new(writer);
dwarf.write(&mut sections)?;
if let Some(frames) = frames {
frames.write_debug_frame(&mut sections.debug_frame)?;
@@ -54,15 +64,6 @@ pub struct WriterRelocate {
writer: EndianVec<RunTimeEndian>,
}
impl Default for WriterRelocate {
fn default() -> Self {
WriterRelocate {
relocs: Vec::new(),
writer: EndianVec::new(RunTimeEndian::Little),
}
}
}
impl Writer for WriterRelocate {
type Endian = RunTimeEndian;
@@ -156,6 +157,6 @@ pub fn emit_dwarf<'a>(
) -> anyhow::Result<Vec<DwarfSection>> {
let dwarf = transform_dwarf(isa, debuginfo_data, funcs, memory_offset)?;
let frame_table = create_frame_table(isa, funcs);
let sections = emit_dwarf_sections(dwarf, frame_table)?;
let sections = emit_dwarf_sections(isa, dwarf, frame_table)?;
Ok(sections)
}

View File

@@ -12,8 +12,6 @@ readme = "README.md"
edition = "2018"
[dependencies]
anyhow = "1.0"
region = "2.2.0"
cranelift-codegen = { path = "../../cranelift/codegen", version = "0.73.0", features = ["enable-serde"] }
cranelift-entity = { path = "../../cranelift/entity", version = "0.73.0", features = ["enable-serde"] }
cranelift-wasm = { path = "../../cranelift/wasm", version = "0.73.0", features = ["enable-serde"] }

View File

@@ -3,8 +3,8 @@
pub mod ir {
pub use cranelift_codegen::binemit::{Reloc, StackMap};
pub use cranelift_codegen::ir::{
types, AbiParam, ArgumentPurpose, JumpTableOffsets, LabelValueLoc, LibCall, Signature,
SourceLoc, StackSlots, TrapCode, Type, ValueLabel, ValueLoc,
types, AbiParam, ArgumentPurpose, Endianness, JumpTableOffsets, LabelValueLoc, LibCall,
Signature, SourceLoc, StackSlots, TrapCode, Type, ValueLabel, ValueLoc,
};
pub use cranelift_codegen::{ValueLabelsRanges, ValueLocRange};
}

View File

@@ -7,6 +7,7 @@ use cranelift_wasm::*;
use indexmap::IndexMap;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::sync::Arc;
/// Implementation styles for WebAssembly linear memory.
@@ -86,7 +87,7 @@ pub struct MemoryInitializer {
/// Optionally, a global variable giving a base index.
pub base: Option<GlobalIndex>,
/// The offset to add to the base.
pub offset: usize,
pub offset: u32,
/// The data to write into the linear memory.
pub data: Box<[u8]>,
}
@@ -168,7 +169,15 @@ impl MemoryInitialization {
// Perform a bounds check on the segment
// As this segment is referencing a defined memory without a global base, the last byte
// written to by the segment cannot exceed the memory's initial minimum size
if (initializer.offset + initializer.data.len())
let offset = usize::try_from(initializer.offset).unwrap();
let end = match offset.checked_add(initializer.data.len()) {
Some(end) => end,
None => {
out_of_bounds = true;
continue;
}
};
if end
> ((module.memory_plans[initializer.memory_index].memory.minimum
as usize)
* WASM_PAGE_SIZE)
@@ -178,8 +187,8 @@ impl MemoryInitialization {
}
let pages = &mut map[index];
let mut page_index = initializer.offset / WASM_PAGE_SIZE;
let mut page_offset = initializer.offset % WASM_PAGE_SIZE;
let mut page_index = offset / WASM_PAGE_SIZE;
let mut page_offset = offset % WASM_PAGE_SIZE;
let mut data_offset = 0;
let mut data_remaining = initializer.data.len();
@@ -268,7 +277,7 @@ pub struct TableInitializer {
/// Optionally, a global variable giving a base index.
pub base: Option<GlobalIndex>,
/// The offset to add to the base.
pub offset: usize,
pub offset: u32,
/// The values to write into the table elements.
pub elements: Box<[FuncIndex]>,
}
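
A self-contained sketch of the overflow-safe arithmetic introduced above (helper name hypothetical): the 32-bit offset is widened to `usize`, the segment's end is computed with `checked_add`, and the start is split into page coordinates.

use std::convert::TryFrom;

const WASM_PAGE_SIZE: usize = 0x10000;

// Returns (end, page_index, page_offset), or None if the end overflows.
fn segment_layout(offset: u32, data_len: usize) -> Option<(usize, usize, usize)> {
    let offset = usize::try_from(offset).unwrap();
    let end = offset.checked_add(data_len)?;
    Some((end, offset / WASM_PAGE_SIZE, offset % WASM_PAGE_SIZE))
}

fn main() {
    // A 100-byte segment at offset 65540 starts 4 bytes into page 1.
    assert_eq!(segment_layout(65540, 100), Some((65640, 1, 4)));
}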

View File

@@ -705,7 +705,7 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
&mut self,
table_index: TableIndex,
base: Option<GlobalIndex>,
offset: usize,
offset: u32,
elements: Box<[FuncIndex]>,
) -> WasmResult<()> {
for element in elements.iter() {
@@ -746,6 +746,13 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
Ok(())
}
fn declare_elements(&mut self, segments: Box<[FuncIndex]>) -> WasmResult<()> {
for element in segments.iter() {
self.flag_func_possibly_exported(*element);
}
Ok(())
}
fn reserve_function_bodies(&mut self, _count: u32, offset: u64) {
self.result.debuginfo.wasm_file.code_section_offset = offset;
}
@@ -794,7 +801,7 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
&mut self,
memory_index: MemoryIndex,
base: Option<GlobalIndex>,
offset: usize,
offset: u32,
data: &'data [u8],
) -> WasmResult<()> {
match &mut self.result.module.memory_initialization {

View File

@@ -6,7 +6,7 @@
// struct VMContext {
// interrupts: *const VMInterrupts,
// externref_activations_table: *mut VMExternRefActivationsTable,
// stack_map_registry: *mut StackMapRegistry,
// module_info_lookup: *const dyn ModuleInfoLookup,
// signature_ids: [VMSharedSignatureIndex; module.num_signature_ids],
// imported_functions: [VMFunctionImport; module.num_imported_functions],
// imported_tables: [VMTableImport; module.num_imported_tables],
@@ -77,7 +77,7 @@ pub struct VMOffsets {
// precalculated offsets of various member fields
interrupts: u32,
externref_activations_table: u32,
stack_map_registry: u32,
module_info_lookup: u32,
signature_ids: u32,
imported_functions: u32,
imported_tables: u32,
@@ -149,7 +149,7 @@ impl From<VMOffsetsFields> for VMOffsets {
num_defined_globals: fields.num_defined_globals,
interrupts: 0,
externref_activations_table: 0,
stack_map_registry: 0,
module_info_lookup: 0,
signature_ids: 0,
imported_functions: 0,
imported_tables: 0,
@@ -168,13 +168,13 @@ impl From<VMOffsetsFields> for VMOffsets {
.interrupts
.checked_add(u32::from(fields.pointer_size))
.unwrap();
ret.stack_map_registry = ret
ret.module_info_lookup = ret
.externref_activations_table
.checked_add(u32::from(fields.pointer_size))
.unwrap();
ret.signature_ids = ret
.stack_map_registry
.checked_add(u32::from(fields.pointer_size))
.module_info_lookup
.checked_add(u32::from(fields.pointer_size * 2))
.unwrap();
ret.imported_functions = ret
.signature_ids
@@ -507,10 +507,10 @@ impl VMOffsets {
self.externref_activations_table
}
/// The offset of the `*mut StackMapRegistry` member.
/// The offset of the `*const dyn ModuleInfoLookup` member.
#[inline]
pub fn vmctx_stack_map_registry(&self) -> u32 {
self.stack_map_registry
pub fn vmctx_module_info_lookup(&self) -> u32 {
self.module_info_lookup
}
/// The offset of the `signature_ids` array.
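
The `pointer_size * 2` in the offset math above is there because `*const dyn ModuleInfoLookup` is a fat pointer: one word for the data pointer plus one word for the vtable pointer. A quick check (trait redeclared locally for illustration):

use std::mem::size_of;

trait ModuleInfoLookup {}

fn main() {
    // A trait-object pointer is two words wide.
    assert_eq!(
        size_of::<*const dyn ModuleInfoLookup>(),
        2 * size_of::<*const u8>()
    );
}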

View File

@@ -0,0 +1,112 @@
// A WORD OF CAUTION
//
// This entire file basically needs to be kept in sync with itself. It's not
// really possible to modify just one bit of this file without understanding
// all the other bits. Documentation tries to reference various bits here and
// there but try to make sure to read over everything before tweaking things!
//
// Also at this time this file is heavily based off the x86_64 file, so you'll
// probably want to read that one as well.
#include "header.h"
// fn(top_of_stack(%r2): *mut u8)
HIDDEN(wasmtime_fiber_switch)
GLOBL(wasmtime_fiber_switch)
.p2align 2
TYPE(wasmtime_fiber_switch)
FUNCTION(wasmtime_fiber_switch):
// Save all callee-saved registers on the stack since we're assuming
// they're clobbered as a result of the stack switch.
stmg %r6, %r15, 48(%r15)
aghi %r15, -64
std %f8, 0(%r15)
std %f9, 8(%r15)
std %f10, 16(%r15)
std %f11, 24(%r15)
std %f12, 32(%r15)
std %f13, 40(%r15)
std %f14, 48(%r15)
std %f15, 56(%r15)
// Load our previously saved stack pointer to resume to, and save off our
// current stack pointer on where to come back to eventually.
lg %r1, -16(%r2)
stg %r15, -16(%r2)
// Switch to the new stack and restore all our callee-saved registers after
// the switch and return to our new stack.
ld %f8, 0(%r1)
ld %f9, 8(%r1)
ld %f10, 16(%r1)
ld %f11, 24(%r1)
ld %f12, 32(%r1)
ld %f13, 40(%r1)
ld %f14, 48(%r1)
ld %f15, 56(%r1)
lmg %r6, %r15, 112(%r1)
br %r14
SIZE(wasmtime_fiber_switch)
// fn(
//    top_of_stack(%r2): *mut u8,
//    entry_point(%r3): extern fn(*mut u8, *mut u8),
//    entry_arg0(%r4): *mut u8,
// )
HIDDEN(wasmtime_fiber_init)
GLOBL(wasmtime_fiber_init)
.p2align 2
TYPE(wasmtime_fiber_init)
FUNCTION(wasmtime_fiber_init):
larl %r1, FUNCTION(wasmtime_fiber_start)
stg %r1, -48(%r2) // wasmtime_fiber_start - restored into %r14
stg %r2, -112(%r2) // top_of_stack - restored into %r6
stg %r3, -104(%r2) // entry_point - restored into %r7
stg %r4, -96(%r2) // entry_arg0 - restored into %r8
aghi %r2, -160 // 160 bytes register save area
stg %r2, 120(%r2) // bottom of register save area - restored into %r15
// `wasmtime_fiber_switch` has a 64-byte stack.
aghi %r2, -64
stg %r2, 208(%r2)
br %r14
SIZE(wasmtime_fiber_init)
.p2align 2
TYPE(wasmtime_fiber_start)
FUNCTION(wasmtime_fiber_start):
.cfi_startproc simple
// See the x86_64 file for more commentary on what these CFI directives are
// doing. Like over there note that the relative offsets to registers here
// match the frame layout in `wasmtime_fiber_switch`.
.cfi_escape 0x0f, /* DW_CFA_def_cfa_expression */ \
7, /* the byte length of this expression */ \
0x7f, 0x90, 0x1, /* DW_OP_breg15 0x90 */ \
0x06, /* DW_OP_deref */ \
0x23, 0xe0, 0x1 /* DW_OP_plus_uconst 0xe0 */
.cfi_rel_offset 6, -112
.cfi_rel_offset 7, -104
.cfi_rel_offset 8, -96
.cfi_rel_offset 9, -88
.cfi_rel_offset 10, -80
.cfi_rel_offset 11, -72
.cfi_rel_offset 12, -64
.cfi_rel_offset 13, -56
.cfi_rel_offset 14, -48
.cfi_rel_offset 15, -40
// Load our two arguments prepared by `wasmtime_fiber_init`.
lgr %r2, %r8 // entry_arg0
lgr %r3, %r6 // top_of_stack
// ... and then we call the function! Note that this is a function call so
// our frame stays on the stack to backtrace through.
basr %r14, %r7 // entry_point
// .. technically we shouldn't get here, so just trap.
.word 0x0000
.cfi_endproc
SIZE(wasmtime_fiber_start)
FOOTER

View File

@@ -39,13 +39,6 @@ pub fn fuzz_default_config(strategy: wasmtime::Strategy) -> anyhow::Result<wasmt
.wasm_bulk_memory(true)
.wasm_reference_types(true)
.wasm_module_linking(true)
// The limits here are chosen based on the default "maximum type size"
// configured in wasm-smith, which is 1000. This means that instances
// are allowed to, for example, export up to 1000 memories. We bump that
// a little bit here to give us some slop.
.max_instances(1100)
.max_tables(1100)
.max_memories(1100)
.strategy(strategy)?;
Ok(config)
}

View File

@@ -44,6 +44,21 @@ fn log_wasm(wasm: &[u8]) {
}
}
fn create_store(engine: &Engine) -> Store {
Store::new_with_limits(
&engine,
StoreLimitsBuilder::new()
// The limits here are chosen based on the default "maximum type size"
// configured in wasm-smith, which is 1000. This means that instances
// are allowed to, for example, export up to 1000 memories. We bump that
// a little bit here to give us some slop.
.instances(1100)
.tables(1100)
.memories(1100)
.build(),
)
}
/// Methods of timing out execution of a WebAssembly module
#[derive(Debug)]
pub enum Timeout {
@@ -95,7 +110,7 @@ pub fn instantiate_with_config(
_ => false,
});
let engine = Engine::new(&config).unwrap();
let store = Store::new(&engine);
let store = create_store(&engine);
let mut timeout_state = SignalOnDrop::default();
match timeout {
@@ -203,7 +218,7 @@ pub fn differential_execution(
config.wasm_module_linking(false);
let engine = Engine::new(&config).unwrap();
let store = Store::new(&engine);
let store = create_store(&engine);
let module = Module::new(&engine, &wasm).unwrap();
@@ -348,7 +363,7 @@ pub fn make_api_calls(api: crate::generators::api::ApiCalls) {
ApiCall::StoreNew => {
log::trace!("creating store");
assert!(store.is_none());
store = Some(Store::new(engine.as_ref().unwrap()));
store = Some(create_store(engine.as_ref().unwrap()));
}
ApiCall::ModuleNew { id, wasm } => {
@@ -439,7 +454,7 @@ pub fn spectest(fuzz_config: crate::generators::Config, test: crate::generators:
config.wasm_reference_types(false);
config.wasm_bulk_memory(false);
config.wasm_module_linking(false);
let store = Store::new(&Engine::new(&config).unwrap());
let store = create_store(&Engine::new(&config).unwrap());
if fuzz_config.consume_fuel {
store.add_fuel(u64::max_value()).unwrap();
}
@@ -463,7 +478,7 @@ pub fn table_ops(
let mut config = fuzz_config.to_wasmtime();
config.wasm_reference_types(true);
let engine = Engine::new(&config).unwrap();
let store = Store::new(&engine);
let store = create_store(&engine);
if fuzz_config.consume_fuel {
store.add_fuel(u64::max_value()).unwrap();
}
@@ -578,7 +593,7 @@ pub fn differential_wasmi_execution(wasm: &[u8], config: &crate::generators::Con
let mut wasmtime_config = config.to_wasmtime();
wasmtime_config.cranelift_nan_canonicalization(true);
let wasmtime_engine = Engine::new(&wasmtime_config).unwrap();
let wasmtime_store = Store::new(&wasmtime_engine);
let wasmtime_store = create_store(&wasmtime_engine);
if config.consume_fuel {
wasmtime_store.add_fuel(u64::max_value()).unwrap();
}

View File

@@ -87,7 +87,7 @@ pub fn dummy_table(store: &Store, ty: TableType) -> Table {
/// Construct a dummy memory for the given memory type.
pub fn dummy_memory(store: &Store, ty: MemoryType) -> Memory {
Memory::new(store, ty)
Memory::new(store, ty).unwrap()
}
/// Construct a dummy instance for the given instance type.

View File

@@ -176,11 +176,13 @@ struct FinishedFunctions(PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>);
unsafe impl Send for FinishedFunctions {}
unsafe impl Sync for FinishedFunctions {}
/// Information about a function, such as trap information, address map,
/// and stack maps.
#[derive(Serialize, Deserialize, Clone)]
struct FunctionInfo {
traps: Vec<TrapInformation>,
address_map: FunctionAddressMap,
stack_maps: Vec<StackMapInformation>,
pub struct FunctionInfo {
pub traps: Vec<TrapInformation>,
pub address_map: FunctionAddressMap,
pub stack_maps: Vec<StackMapInformation>,
}
/// This is intended to mirror the type tables in `wasmtime_environ`, except that
@@ -362,11 +364,10 @@ impl CompiledModule {
}
/// Gets the function information for a given function index.
pub fn func_info(&self, index: DefinedFuncIndex) -> (&FunctionAddressMap, &[TrapInformation]) {
pub fn func_info(&self, index: DefinedFuncIndex) -> &FunctionInfo {
self.artifacts
.funcs
.get(index)
.map(|f| (&f.address_map, f.traps.as_ref()))
.expect("defined function should be present")
}

View File

@@ -111,6 +111,19 @@ fn apply_reloc(
);
write_unaligned(reloc_address as *mut u32, reloc_delta_u64 as u32);
},
#[cfg(target_pointer_width = "64")]
(RelocationKind::Relative, RelocationEncoding::S390xDbl, 32) => unsafe {
let reloc_address = body.add(offset as usize) as usize;
let reloc_addend = r.addend() as isize;
let reloc_delta_u64 = (target_func_address as u64)
.wrapping_sub(reloc_address as u64)
.wrapping_add(reloc_addend as u64);
assert!(
(reloc_delta_u64 as isize) >> 1 <= i32::max_value() as isize,
"relocation too large to fit in i32"
);
write_unaligned(reloc_address as *mut u32, (reloc_delta_u64 >> 1) as u32);
},
(RelocationKind::Elf(elf::R_AARCH64_CALL26), RelocationEncoding::Generic, 32) => unsafe {
let reloc_address = body.add(offset as usize) as usize;
let reloc_addend = r.addend() as isize;
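
A worked example of the halved ("Dbl") encoding in the new arm above, with made-up addresses: s390x PC-relative branch fields count halfwords, so the byte delta is shifted right by one before being stored in the 32-bit field.

fn main() {
    let target_func_address: u64 = 0x4000_2000;
    let reloc_address: u64 = 0x4000_1000;
    let reloc_addend: i64 = 2;
    let reloc_delta_u64 = target_func_address
        .wrapping_sub(reloc_address)
        .wrapping_add(reloc_addend as u64);
    // 0x1002 bytes == 0x801 halfwords.
    assert_eq!((reloc_delta_u64 >> 1) as u32, 0x801);
}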

View File

@@ -17,7 +17,7 @@ cranelift-codegen = { path = "../../cranelift/codegen", version = "0.73.0" }
derive_more = "0.99"
dynasm = "1.0.0"
dynasmrt = "1.0.0"
iter-enum = "0.2"
iter-enum = "1"
itertools = "0.10.0"
memoffset = "0.6.0"
more-asserts = "0.2.1"

View File

@@ -80,6 +80,7 @@ fn to_object_relocations<'a>(
RelocationEncoding::Generic,
32,
),
Reloc::S390xPCRel32Dbl => (RelocationKind::Relative, RelocationEncoding::S390xDbl, 32),
other => unimplemented!("Unimplemented relocation {:?}", other),
};
Some(ObjectRelocation {
@@ -102,6 +103,7 @@ fn to_object_architecture(
X86_64 => Architecture::X86_64,
Arm(_) => Architecture::Arm,
Aarch64(_) => Architecture::Aarch64,
S390x => Architecture::S390x,
architecture => {
anyhow::bail!("target architecture {:?} is unsupported", architecture,);
}

View File

@@ -241,6 +241,7 @@ impl State {
Architecture::X86_32(_) => elf::EM_386 as u32,
Architecture::Arm(_) => elf::EM_ARM as u32,
Architecture::Aarch64(_) => elf::EM_AARCH64 as u32,
Architecture::S390x => elf::EM_S390 as u32,
_ => unimplemented!("unrecognized architecture"),
}
}

View File

@@ -99,18 +99,16 @@
//! Examination of Deferred Reference Counting and Cycle Detection* by Quinane:
//! <https://openresearch-repository.anu.edu.au/bitstream/1885/42030/2/hon-thesis.pdf>
use std::alloc::Layout;
use std::any::Any;
use std::cell::{Cell, RefCell, UnsafeCell};
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::Deref;
use std::ptr::{self, NonNull};
use std::rc::Rc;
use wasmtime_environ::{ir::StackMap, StackMapInformation};
use std::{alloc::Layout, sync::Arc};
use wasmtime_environ::ir::StackMap;
/// An external reference to some opaque data.
///
@@ -596,10 +594,10 @@ impl VMExternRefActivationsTable {
pub unsafe fn insert_with_gc(
&self,
externref: VMExternRef,
stack_maps_registry: &StackMapRegistry,
module_info_lookup: &dyn ModuleInfoLookup,
) {
if let Err(externref) = self.try_insert(externref) {
self.gc_and_insert_slow(externref, stack_maps_registry);
self.gc_and_insert_slow(externref, module_info_lookup);
}
}
@@ -607,9 +605,9 @@ impl VMExternRefActivationsTable {
unsafe fn gc_and_insert_slow(
&self,
externref: VMExternRef,
stack_maps_registry: &StackMapRegistry,
module_info_lookup: &dyn ModuleInfoLookup,
) {
gc(stack_maps_registry, self);
gc(module_info_lookup, self);
// Might as well insert right into the hash set, rather than the bump
// chunk, since we are already on a slow path and we get de-duplication
@@ -743,182 +741,28 @@ impl VMExternRefActivationsTable {
}
}
/// A registry of stack maps for currently active Wasm modules.
#[derive(Default)]
pub struct StackMapRegistry {
inner: RefCell<StackMapRegistryInner>,
/// Used by the runtime to lookup information about a module given a
/// program counter value.
pub trait ModuleInfoLookup: 'static {
/// Lookup the module information from a program counter value.
fn lookup(&self, pc: usize) -> Option<Arc<dyn ModuleInfo>>;
}
#[derive(Default)]
struct StackMapRegistryInner {
/// A map from the highest pc in a module, to its stack maps.
///
/// For details, see the comment above `GlobalFrameInfo::ranges`.
ranges: BTreeMap<usize, ModuleStackMaps>,
/// Used by the runtime to query module information.
pub trait ModuleInfo {
/// Lookup the stack map at a program counter value.
fn lookup_stack_map(&self, pc: usize) -> Option<&StackMap>;
}
#[derive(Debug)]
struct ModuleStackMaps {
/// The range of PCs that this module covers. Different modules must always
/// have distinct ranges.
range: std::ops::Range<usize>,
pub(crate) struct EmptyModuleInfoLookup;
/// A map from a PC in this module (that is a GC safepoint) to its
/// associated stack map. If `None` then it means that the PC is the start
/// of a range which has no stack map.
pc_to_stack_map: Vec<(usize, Option<Rc<StackMap>>)>,
}
impl StackMapRegistry {
/// Register the stack maps for a given module.
///
/// The stack maps should be given as an iterator over a function's PC range
/// in memory (that is, where the JIT actually allocated and emitted the
/// function's code at), and the stack maps and code offsets within that
/// range for each of its GC safepoints.
pub fn register_stack_maps<'a>(
&self,
stack_maps: impl IntoIterator<Item = (std::ops::Range<usize>, &'a [StackMapInformation])>,
) {
let mut min = usize::max_value();
let mut max = 0;
let mut pc_to_stack_map = vec![];
let mut last_is_none_marker = true;
for (range, infos) in stack_maps {
let len = range.end - range.start;
min = std::cmp::min(min, range.start);
max = std::cmp::max(max, range.end);
// Add a marker between functions indicating that this function's pc
// starts with no stack map so when our binary search later on finds
// a pc between the start of the function and the function's first
// stack map it doesn't think the previous stack map is our stack
// map.
//
// We skip this if the previous entry pushed was also a `None`
// marker, in which case the starting pc already has no stack map.
// This is also skipped if the first `code_offset` is zero since
// what we'll push applies for the first pc anyway.
if !last_is_none_marker && (infos.is_empty() || infos[0].code_offset > 0) {
pc_to_stack_map.push((range.start, None));
last_is_none_marker = true;
}
for info in infos {
assert!((info.code_offset as usize) < len);
pc_to_stack_map.push((
range.start + (info.code_offset as usize),
Some(Rc::new(info.stack_map.clone())),
));
last_is_none_marker = false;
}
}
if pc_to_stack_map.is_empty() {
// Nothing to register.
return;
}
let module_stack_maps = ModuleStackMaps {
range: min..max,
pc_to_stack_map,
};
let mut inner = self.inner.borrow_mut();
// Assert that this chunk of ranges doesn't collide with any other known
// chunks.
if let Some((_, prev)) = inner.ranges.range(max..).next() {
assert!(prev.range.start > max);
}
if let Some((prev_end, _)) = inner.ranges.range(..=min).next_back() {
assert!(*prev_end < min);
}
let old = inner.ranges.insert(max, module_stack_maps);
assert!(old.is_none());
}
/// Lookup the stack map for the given PC, if any.
pub fn lookup_stack_map(&self, pc: usize) -> Option<Rc<StackMap>> {
let inner = self.inner.borrow();
let stack_maps = inner.module_stack_maps(pc)?;
// Do a binary search to find the stack map for the given PC.
//
// Because GC safepoints are technically only associated with a single
// PC, we should ideally only care about `Ok(index)` values returned
// from the binary search. However, safepoints are inserted right before
// calls, and there are two things that can disturb the PC/offset
// associated with the safepoint versus the PC we actually use to query
// for the stack map:
//
// 1. The `backtrace` crate gives us the PC in a frame that will be
// *returned to*, and where execution will continue from, rather than
// the PC of the call we are currently at. So we would need to
// disassemble one instruction backwards to query the actual PC for
// the stack map.
//
// TODO: One thing we *could* do to make this a little less error
// prone, would be to assert/check that the nearest GC safepoint
// found is within `max_encoded_size(any kind of call instruction)`
// our queried PC for the target architecture.
//
// 2. Cranelift's stack maps only handle the stack, not
// registers. However, some references that are arguments to a call
// may need to be in registers. In these cases, what Cranelift will
// do is:
//
// a. spill all the live references,
// b. insert a GC safepoint for those references,
// c. reload the references into registers, and finally
// d. make the call.
//
// Step (c) adds drift between the GC safepoint and the location of
// the call, which is where we actually walk the stack frame and
// collect its live references.
//
// Luckily, the spill stack slots for the live references are still
// up to date, so we can still find all the on-stack roots.
// Furthermore, we do not have a moving GC, so we don't need to worry
// whether the following code will reuse the references in registers
// (which would not have been updated to point to the moved objects)
// or reload from the stack slots (which would have been updated to
// point to the moved objects).
let index = match stack_maps
.pc_to_stack_map
.binary_search_by_key(&pc, |(pc, _stack_map)| *pc)
{
// Exact hit.
Ok(i) => i,
// `Err(0)` means that the associated stack map would have been the
// first element in the array if this pc had an associated stack
// map, but this pc does not have an associated stack map. This can
// only happen inside a Wasm frame if there are no live refs at this
// pc.
Err(0) => return None,
Err(n) => n - 1,
};
let stack_map = stack_maps.pc_to_stack_map[index].1.as_ref()?.clone();
Some(stack_map)
impl ModuleInfoLookup for EmptyModuleInfoLookup {
fn lookup(&self, _pc: usize) -> Option<Arc<dyn ModuleInfo>> {
None
}
}
impl StackMapRegistryInner {
fn module_stack_maps(&self, pc: usize) -> Option<&ModuleStackMaps> {
let (end, stack_maps) = self.ranges.range(pc..).next()?;
if pc < stack_maps.range.start || *end < pc {
None
} else {
Some(stack_maps)
}
}
}
pub(crate) const EMPTY_MODULE_LOOKUP: EmptyModuleInfoLookup = EmptyModuleInfoLookup;
#[derive(Debug, Default)]
struct DebugOnly<T> {
@@ -965,7 +809,7 @@ impl<T> std::ops::DerefMut for DebugOnly<T> {
/// Additionally, you must have registered the stack maps for every Wasm module
/// that has frames on the stack with the given `stack_maps_registry`.
pub unsafe fn gc(
stack_maps_registry: &StackMapRegistry,
module_info_lookup: &dyn ModuleInfoLookup,
externref_activations_table: &VMExternRefActivationsTable,
) {
// We borrow the precise stack roots `RefCell` for the whole duration of
@@ -1003,8 +847,7 @@ pub unsafe fn gc(
if cfg!(debug_assertions) {
// Assert that there aren't any Wasm frames on the stack.
backtrace::trace(|frame| {
let stack_map = stack_maps_registry.lookup_stack_map(frame.ip() as usize);
assert!(stack_map.is_none());
assert!(module_info_lookup.lookup(frame.ip() as usize).is_none());
true
});
}
@@ -1048,28 +891,30 @@ pub unsafe fn gc(
let pc = frame.ip() as usize;
let sp = frame.sp() as usize;
if let Some(stack_map) = stack_maps_registry.lookup_stack_map(pc) {
debug_assert!(sp != 0, "we should always get a valid SP for Wasm frames");
if let Some(module_info) = module_info_lookup.lookup(pc) {
if let Some(stack_map) = module_info.lookup_stack_map(pc) {
debug_assert!(sp != 0, "we should always get a valid SP for Wasm frames");
for i in 0..(stack_map.mapped_words() as usize) {
if stack_map.get_bit(i) {
// Stack maps have one bit per word in the frame, and the
// zero^th bit is the *lowest* addressed word in the frame,
// i.e. the closest to the SP. So to get the `i`^th word in
// this frame, we add `i * sizeof(word)` to the SP.
let ptr_to_ref = sp + i * mem::size_of::<usize>();
for i in 0..(stack_map.mapped_words() as usize) {
if stack_map.get_bit(i) {
// Stack maps have one bit per word in the frame, and the
// zero^th bit is the *lowest* addressed word in the frame,
// i.e. the closest to the SP. So to get the `i`^th word in
// this frame, we add `i * sizeof(word)` to the SP.
let ptr_to_ref = sp + i * mem::size_of::<usize>();
let r = std::ptr::read(ptr_to_ref as *const *mut VMExternData);
debug_assert!(
r.is_null() || activations_table_set.contains(&r),
"every on-stack externref inside a Wasm frame should \
have an entry in the VMExternRefActivationsTable"
);
if let Some(r) = NonNull::new(r) {
VMExternRefActivationsTable::insert_precise_stack_root(
&mut precise_stack_roots,
r,
let r = std::ptr::read(ptr_to_ref as *const *mut VMExternData);
debug_assert!(
r.is_null() || activations_table_set.contains(&r),
"every on-stack externref inside a Wasm frame should \
have an entry in the VMExternRefActivationsTable"
);
if let Some(r) = NonNull::new(r) {
VMExternRefActivationsTable::insert_precise_stack_root(
&mut precise_stack_roots,
r,
);
}
}
}
}
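
To make the new interface concrete, a minimal implementation sketch against the two traits above (struct and field layout hypothetical, reusing this file's `StackMap` and `Arc` imports): the embedder keeps a PC-range index, keyed by each module's end PC just as the removed registry was.

use std::collections::BTreeMap;
use std::ops::Range;

// Stand-in info object; a real one would answer `lookup_stack_map`.
struct NoStackMaps;

impl ModuleInfo for NoStackMaps {
    fn lookup_stack_map(&self, _pc: usize) -> Option<&StackMap> {
        None
    }
}

struct RangeLookup {
    ranges: BTreeMap<usize, (Range<usize>, Arc<dyn ModuleInfo>)>,
}

impl ModuleInfoLookup for RangeLookup {
    fn lookup(&self, pc: usize) -> Option<Arc<dyn ModuleInfo>> {
        let (_end, entry) = self.ranges.range(pc..).next()?;
        let (range, info) = entry;
        if range.contains(&pc) {
            Some(info.clone())
        } else {
            None
        }
    }
}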

View File

@@ -3,7 +3,7 @@
//! `InstanceHandle` is a reference-counting handle for an `Instance`.
use crate::export::Export;
use crate::externref::{StackMapRegistry, VMExternRefActivationsTable};
use crate::externref::{ModuleInfoLookup, VMExternRefActivationsTable};
use crate::memory::{Memory, RuntimeMemoryCreator};
use crate::table::{Table, TableElement};
use crate::traphandlers::Trap;
@@ -37,6 +37,52 @@ mod allocator;
pub use allocator::*;
/// Used by hosts to limit resource consumption of instances.
///
/// An instance can be created with a resource limiter so that hosts can take into account
/// non-WebAssembly resource usage to determine if a linear memory or table should grow.
pub trait ResourceLimiter {
/// Notifies the resource limiter that an instance's linear memory has been requested to grow.
///
/// * `current` is the current size of the linear memory in WebAssembly page units.
/// * `desired` is the desired size of the linear memory in WebAssembly page units.
/// * `maximum` is either the linear memory's maximum or a maximum from an instance allocator,
/// also in WebAssembly page units. A value of `None` indicates that the linear memory is
/// unbounded.
///
/// This function should return `true` to indicate that the growing operation is permitted or
/// `false` if not permitted. Returning `true` when a maximum has been exceeded will have no
/// effect as the linear memory will not grow.
fn memory_growing(&self, current: u32, desired: u32, maximum: Option<u32>) -> bool;
/// Notifies the resource limiter that an instance's table has been requested to grow.
///
/// * `current` is the current number of elements in the table.
/// * `desired` is the desired number of elements in the table.
/// * `maximum` is either the table's maximum or a maximum from an instance allocator.
/// A value of `None` indicates that the table is unbounded.
///
/// This function should return `true` to indicate that the growing operation is permitted or
/// `false` if not permitted. Returning `true` when a maximum has been exceeded will have no
/// effect as the table will not grow.
fn table_growing(&self, current: u32, desired: u32, maximum: Option<u32>) -> bool;
/// The maximum number of instances that can be created for a `Store`.
///
/// Module instantiation will fail if this limit is exceeded.
fn instances(&self) -> usize;
/// The maximum number of tables that can be created for a `Store`.
///
/// Module instantiation will fail if this limit is exceeded.
fn tables(&self) -> usize;
/// The maximum number of memories that can be created for a `Store`.
///
/// Module instantiation will fail if this limit is exceeded.
fn memories(&self) -> usize;
}
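
A minimal limiter sketch against this trait (struct and policy hypothetical): growth is allowed only up to the reported maximum, and the count methods return fixed caps, mirroring the `StoreLimitsBuilder` usage in the fuzzing changes above.

struct FixedLimits {
    max_instances: usize,
    max_tables: usize,
    max_memories: usize,
}

impl ResourceLimiter for FixedLimits {
    fn memory_growing(&self, _current: u32, desired: u32, maximum: Option<u32>) -> bool {
        // Permit growth unless it would exceed the reported maximum.
        maximum.map_or(true, |max| desired <= max)
    }

    fn table_growing(&self, _current: u32, desired: u32, maximum: Option<u32>) -> bool {
        maximum.map_or(true, |max| desired <= max)
    }

    fn instances(&self) -> usize {
        self.max_instances
    }

    fn tables(&self) -> usize {
        self.max_tables
    }

    fn memories(&self) -> usize {
        self.max_memories
    }
}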
/// Runtime representation of an instance value, which erases all `Instance`
/// information since instances are just a collection of values.
pub type RuntimeInstance = Rc<IndexMap<String, Export>>;
@@ -249,9 +295,9 @@ impl Instance {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_externref_activations_table()) }
}
/// Return a pointer to the `StackMapRegistry`.
pub fn stack_map_registry(&self) -> *mut *mut StackMapRegistry {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_stack_map_registry()) }
/// Return a pointer to the `ModuleInfoLookup`.
pub fn module_info_lookup(&self) -> *mut *const dyn ModuleInfoLookup {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_module_info_lookup()) }
}
/// Return a reference to the vmctx used by compiled wasm code.
@@ -378,11 +424,12 @@ impl Instance {
/// Returns `None` if memory can't be grown by the specified amount
/// of pages.
pub(crate) fn memory_grow(&self, memory_index: DefinedMemoryIndex, delta: u32) -> Option<u32> {
let result = self
let memory = self
.memories
.get(memory_index)
.unwrap_or_else(|| panic!("no memory for index {}", memory_index.index()))
.grow(delta);
.unwrap_or_else(|| panic!("no memory for index {}", memory_index.index()));
let result = unsafe { memory.grow(delta) };
// Keep current the VMContext pointers used by compiled wasm code.
self.set_memory(memory_index, self.memories[memory_index].vmmemory());
@@ -460,19 +507,18 @@ impl Instance {
delta: u32,
init_value: TableElement,
) -> Option<u32> {
unsafe {
let orig_size = self
.tables
.get(table_index)
.unwrap_or_else(|| panic!("no table for index {}", table_index.index()))
.grow(delta, init_value)?;
let table = self
.tables
.get(table_index)
.unwrap_or_else(|| panic!("no table for index {}", table_index.index()));
// Keep the `VMContext` pointers used by compiled Wasm code up to
// date.
self.set_table(table_index, self.tables[table_index].vmtable());
let result = unsafe { table.grow(delta, init_value) };
Some(orig_size)
}
// Keep the `VMContext` pointers used by compiled Wasm code up to
// date.
self.set_table(table_index, self.tables[table_index].vmtable());
result
}
pub(crate) fn defined_table_fill(
@@ -527,11 +573,11 @@ impl Instance {
return None;
}
Some(unsafe { &*self.anyfunc_ptr(index) })
unsafe { Some(&*self.vmctx_plus_offset(self.offsets.vmctx_anyfunc(index))) }
}
unsafe fn anyfunc_ptr(&self, index: FuncIndex) -> *mut VMCallerCheckedAnyfunc {
self.vmctx_plus_offset(self.offsets.vmctx_anyfunc(index))
unsafe fn anyfunc_base(&self) -> *mut VMCallerCheckedAnyfunc {
self.vmctx_plus_offset(self.offsets.vmctx_anyfuncs_begin())
}
fn find_passive_segment<'a, I, D, T>(
@@ -565,38 +611,56 @@ impl Instance {
src: u32,
len: u32,
) -> Result<(), Trap> {
// https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init
let table = self.get_table(table_index);
let elements = Self::find_passive_segment(
elem_index,
&self.module.passive_elements_map,
&self.module.passive_elements,
&self.dropped_elements,
);
self.table_init_segment(table_index, elements, dst, src, len)
}
if src
.checked_add(len)
.map_or(true, |n| n as usize > elements.len())
|| dst.checked_add(len).map_or(true, |m| m > table.size())
pub(crate) fn table_init_segment(
&self,
table_index: TableIndex,
elements: &[FuncIndex],
dst: u32,
src: u32,
len: u32,
) -> Result<(), Trap> {
// https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init
let table = self.get_table(table_index);
let elements = match elements
.get(usize::try_from(src).unwrap()..)
.and_then(|s| s.get(..usize::try_from(len).unwrap()))
{
return Err(Trap::wasm(ir::TrapCode::TableOutOfBounds));
Some(elements) => elements,
None => return Err(Trap::wasm(ir::TrapCode::TableOutOfBounds)),
};
match table.element_type() {
TableElementType::Func => unsafe {
let base = self.anyfunc_base();
table.init_funcs(
dst,
elements.iter().map(|idx| {
if *idx == FuncIndex::reserved_value() {
ptr::null_mut()
} else {
debug_assert!(idx.as_u32() < self.offsets.num_defined_functions);
base.add(usize::try_from(idx.as_u32()).unwrap())
}
}),
)?;
},
TableElementType::Val(_) => {
debug_assert!(elements.iter().all(|e| *e == FuncIndex::reserved_value()));
table.fill(dst, TableElement::ExternRef(None), len)?;
}
}
// TODO(#983): investigate replacing this get/set loop with a `memcpy`.
for (dst, src) in (dst..dst + len).zip(src..src + len) {
let elem = self
.get_caller_checked_anyfunc(elements[src as usize])
.map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| {
f as *const VMCallerCheckedAnyfunc as *mut _
});
table
.set(dst, TableElement::FuncRef(elem))
.expect("should never panic because we already did the bounds check above");
}
Ok(())
}
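
The double `get` above replaces the old arithmetic bounds check; a standalone sketch of the pattern (function name hypothetical): any out-of-range (src, len) pair yields `None` rather than overflowing or panicking.

use std::convert::TryFrom;

fn slice_segment<T>(elements: &[T], src: u32, len: u32) -> Option<&[T]> {
    elements
        .get(usize::try_from(src).ok()?..)
        .and_then(|s| s.get(..usize::try_from(len).ok()?))
}

fn main() {
    let xs = [10, 20, 30, 40];
    assert_eq!(slice_segment(&xs, 1, 2), Some(&xs[1..3]));
    // src + len runs past the end, so the caller traps instead.
    assert_eq!(slice_segment(&xs, 3, 2), None);
}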
@@ -727,16 +791,26 @@ impl Instance {
src: u32,
len: u32,
) -> Result<(), Trap> {
// https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init
let memory = self.get_memory(memory_index);
let data = Self::find_passive_segment(
data_index,
&self.module.passive_data_map,
&self.module.passive_data,
&self.dropped_data,
);
self.memory_init_segment(memory_index, &data, dst, src, len)
}
pub(crate) fn memory_init_segment(
&self,
memory_index: MemoryIndex,
data: &[u8],
dst: u32,
src: u32,
len: u32,
) -> Result<(), Trap> {
// https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init
let memory = self.get_memory(memory_index);
if src
.checked_add(len)
@@ -818,10 +892,6 @@ pub struct InstanceHandle {
}
impl InstanceHandle {
pub(crate) unsafe fn new(instance: *mut Instance) -> Self {
Self { instance }
}
/// Create a new `InstanceHandle` pointing at the instance
/// pointed to by the given `VMContext` pointer.
///

View File

@@ -1,8 +1,8 @@
use crate::externref::{StackMapRegistry, VMExternRefActivationsTable};
use crate::externref::{ModuleInfoLookup, VMExternRefActivationsTable, EMPTY_MODULE_LOOKUP};
use crate::imports::Imports;
use crate::instance::{Instance, InstanceHandle, RuntimeMemoryCreator};
use crate::instance::{Instance, InstanceHandle, ResourceLimiter, RuntimeMemoryCreator};
use crate::memory::{DefaultMemoryCreator, Memory};
use crate::table::{Table, TableElement};
use crate::table::Table;
use crate::traphandlers::Trap;
use crate::vmcontext::{
VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport,
@@ -15,13 +15,13 @@ use std::any::Any;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::ptr::{self, NonNull};
use std::rc::Rc;
use std::slice;
use std::sync::Arc;
use thiserror::Error;
use wasmtime_environ::entity::{packed_option::ReservedValue, EntityRef, EntitySet, PrimaryMap};
use wasmtime_environ::entity::{EntityRef, EntitySet, PrimaryMap};
use wasmtime_environ::wasm::{
DefinedFuncIndex, DefinedMemoryIndex, DefinedTableIndex, FuncIndex, GlobalInit, SignatureIndex,
TableElementType, WasmType,
DefinedFuncIndex, DefinedMemoryIndex, DefinedTableIndex, GlobalInit, SignatureIndex, WasmType,
};
use wasmtime_environ::{
ir, MemoryInitialization, MemoryInitializer, Module, ModuleType, TableInitializer, VMOffsets,
@@ -57,8 +57,11 @@ pub struct InstanceAllocationRequest<'a> {
/// The pointer to the reference activations table to use for the instance.
pub externref_activations_table: *mut VMExternRefActivationsTable,
/// The pointer to the stack map registry to use for the instance.
pub stack_map_registry: *mut StackMapRegistry,
/// The pointer to the module info lookup to use for the instance.
pub module_info_lookup: Option<*const dyn ModuleInfoLookup>,
/// The resource limiter to use for the instance.
pub limiter: Option<&'a Rc<dyn ResourceLimiter>>,
}
/// A link error while instantiating a module.
@@ -208,7 +211,7 @@ impl<'a> From<&'a PrimaryMap<SignatureIndex, VMSharedSignatureIndex>> for Shared
fn get_table_init_start(
init: &TableInitializer,
instance: &Instance,
) -> Result<usize, InstantiationError> {
) -> Result<u32, InstantiationError> {
match init.base {
Some(base) => {
let val = unsafe {
@@ -219,7 +222,7 @@ fn get_table_init_start(
}
};
init.offset.checked_add(val as usize).ok_or_else(|| {
init.offset.checked_add(val).ok_or_else(|| {
InstantiationError::Link(LinkError(
"element segment global base overflows".to_owned(),
))
@@ -233,6 +236,7 @@ fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError
for init in &instance.module.table_initializers {
let table = instance.get_table(init.table_index);
let start = get_table_init_start(init, instance)?;
let start = usize::try_from(start).unwrap();
let end = start.checked_add(init.elements.len());
match end {
@@ -252,34 +256,15 @@ fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError
fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
for init in &instance.module.table_initializers {
let table = instance.get_table(init.table_index);
let start = get_table_init_start(init, instance)?;
let end = start.checked_add(init.elements.len());
match end {
Some(end) if end <= table.size() as usize => {
for (i, func_idx) in init.elements.iter().enumerate() {
let item = match table.element_type() {
TableElementType::Func => instance
.get_caller_checked_anyfunc(*func_idx)
.map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| {
f as *const VMCallerCheckedAnyfunc as *mut VMCallerCheckedAnyfunc
})
.into(),
TableElementType::Val(_) => {
assert!(*func_idx == FuncIndex::reserved_value());
TableElement::ExternRef(None)
}
};
table.set(u32::try_from(start + i).unwrap(), item).unwrap();
}
}
_ => {
return Err(InstantiationError::Trap(Trap::wasm(
ir::TrapCode::TableOutOfBounds,
)))
}
}
instance
.table_init_segment(
init.table_index,
&init.elements,
get_table_init_start(init, instance)?,
0,
init.elements.len() as u32,
)
.map_err(InstantiationError::Trap)?;
}
Ok(())
@@ -288,7 +273,7 @@ fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
fn get_memory_init_start(
init: &MemoryInitializer,
instance: &Instance,
) -> Result<usize, InstantiationError> {
) -> Result<u32, InstantiationError> {
match init.base {
Some(base) => {
let val = unsafe {
@@ -299,7 +284,7 @@ fn get_memory_init_start(
}
};
init.offset.checked_add(val as usize).ok_or_else(|| {
init.offset.checked_add(val).ok_or_else(|| {
InstantiationError::Link(LinkError("data segment global base overflows".to_owned()))
})
}
@@ -307,24 +292,6 @@ fn get_memory_init_start(
}
}
unsafe fn get_memory_slice<'instance>(
init: &MemoryInitializer,
instance: &'instance Instance,
) -> &'instance mut [u8] {
let memory = if let Some(defined_memory_index) =
instance.module.defined_memory_index(init.memory_index)
{
instance.memory(defined_memory_index)
} else {
let import = instance.imported_memory(init.memory_index);
let foreign_instance = (&mut *(import).vmctx).instance();
let foreign_memory = &mut *(import).from;
let foreign_index = foreign_instance.memory_index(foreign_memory);
foreign_instance.memory(foreign_index)
};
&mut *ptr::slice_from_raw_parts_mut(memory.base, memory.current_length)
}
fn check_memory_init_bounds(
instance: &Instance,
initializers: &[MemoryInitializer],
@@ -332,6 +299,7 @@ fn check_memory_init_bounds(
for init in initializers {
let memory = instance.get_memory(init.memory_index);
let start = get_memory_init_start(init, instance)?;
let start = usize::try_from(start).unwrap();
let end = start.checked_add(init.data.len());
match end {
@@ -354,21 +322,15 @@ fn initialize_memories(
initializers: &[MemoryInitializer],
) -> Result<(), InstantiationError> {
for init in initializers {
let memory = instance.get_memory(init.memory_index);
let start = get_memory_init_start(init, instance)?;
let end = start.checked_add(init.data.len());
match end {
Some(end) if end <= memory.current_length => {
let mem_slice = unsafe { get_memory_slice(init, instance) };
mem_slice[start..end].copy_from_slice(&init.data);
}
_ => {
return Err(InstantiationError::Trap(Trap::wasm(
ir::TrapCode::HeapOutOfBounds,
)))
}
}
instance
.memory_init_segment(
init.memory_index,
&init.data,
get_memory_init_start(init, instance)?,
0,
init.data.len() as u32,
)
.map_err(InstantiationError::Trap)?;
}
Ok(())
@@ -447,7 +409,7 @@ unsafe fn initialize_vmcontext(instance: &Instance, req: InstanceAllocationReque
*instance.interrupts() = req.interrupts;
*instance.externref_activations_table() = req.externref_activations_table;
*instance.stack_map_registry() = req.stack_map_registry;
*instance.module_info_lookup() = req.module_info_lookup.unwrap_or(&EMPTY_MODULE_LOOKUP);
// Initialize shared signatures
let mut ptr = instance.signature_ids_ptr();
@@ -492,6 +454,7 @@ unsafe fn initialize_vmcontext(instance: &Instance, req: InstanceAllocationReque
);
// Initialize the functions
let mut base = instance.anyfunc_base();
for (index, sig) in instance.module.functions.iter() {
let type_index = req.shared_signatures.lookup(*sig);
@@ -506,13 +469,14 @@ unsafe fn initialize_vmcontext(instance: &Instance, req: InstanceAllocationReque
};
ptr::write(
instance.anyfunc_ptr(index),
base,
VMCallerCheckedAnyfunc {
func_ptr,
type_index,
vmctx,
},
);
base = base.add(1);
}
// Initialize the defined tables
@@ -590,19 +554,23 @@ impl OnDemandInstanceAllocator {
}
}
fn create_tables(module: &Module) -> PrimaryMap<DefinedTableIndex, Table> {
fn create_tables(
module: &Module,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<PrimaryMap<DefinedTableIndex, Table>, InstantiationError> {
let num_imports = module.num_imported_tables;
let mut tables: PrimaryMap<DefinedTableIndex, _> =
PrimaryMap::with_capacity(module.table_plans.len() - num_imports);
for table in &module.table_plans.values().as_slice()[num_imports..] {
tables.push(Table::new_dynamic(table));
tables.push(Table::new_dynamic(table, limiter).map_err(InstantiationError::Resource)?);
}
tables
Ok(tables)
}
fn create_memories(
&self,
module: &Module,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<PrimaryMap<DefinedMemoryIndex, Memory>, InstantiationError> {
let creator = self
.mem_creator
@@ -612,8 +580,10 @@ impl OnDemandInstanceAllocator {
let mut memories: PrimaryMap<DefinedMemoryIndex, _> =
PrimaryMap::with_capacity(module.memory_plans.len() - num_imports);
for plan in &module.memory_plans.values().as_slice()[num_imports..] {
memories
.push(Memory::new_dynamic(plan, creator).map_err(InstantiationError::Resource)?);
memories.push(
Memory::new_dynamic(plan, creator, limiter)
.map_err(InstantiationError::Resource)?,
);
}
Ok(memories)
}
@@ -633,8 +603,8 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
&self,
mut req: InstanceAllocationRequest,
) -> Result<InstanceHandle, InstantiationError> {
let memories = self.create_memories(&req.module)?;
let tables = Self::create_tables(&req.module);
let memories = self.create_memories(&req.module, req.limiter)?;
let tables = Self::create_tables(&req.module, req.limiter)?;
let host_state = std::mem::replace(&mut req.host_state, Box::new(()));
@@ -657,7 +627,9 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
alloc::handle_alloc_error(layout);
}
ptr::write(instance_ptr, instance);
InstanceHandle::new(instance_ptr)
InstanceHandle {
instance: instance_ptr,
}
};
initialize_vmcontext(handle.instance(), req);

View File

@@ -9,7 +9,7 @@
use super::{
initialize_instance, initialize_vmcontext, InstanceAllocationRequest, InstanceAllocator,
InstanceHandle, InstantiationError,
InstanceHandle, InstantiationError, ResourceLimiter,
};
use crate::{instance::Instance, Memory, Mmap, Table, VMContext};
use anyhow::{anyhow, bail, Context, Result};
@@ -18,6 +18,7 @@ use std::cell::RefCell;
use std::cmp::min;
use std::convert::TryFrom;
use std::mem;
use std::rc::Rc;
use std::sync::{Arc, Mutex};
use wasmtime_environ::{
entity::{EntitySet, PrimaryMap},
@@ -376,10 +377,45 @@ impl InstancePool {
}
}
unsafe fn setup_instance(
&self,
index: usize,
mut req: InstanceAllocationRequest,
) -> Result<InstanceHandle, InstantiationError> {
let instance = self.instance(index);
instance.module = req.module.clone();
instance.offsets = VMOffsets::new(
std::mem::size_of::<*const u8>() as u8,
instance.module.as_ref(),
);
instance.host_state = std::mem::replace(&mut req.host_state, Box::new(()));
Self::set_instance_memories(
instance,
self.memories.get(index),
self.memories.max_wasm_pages,
req.limiter,
)?;
Self::set_instance_tables(
instance,
self.tables.get(index),
self.tables.max_elements,
req.limiter,
)?;
initialize_vmcontext(instance, req);
Ok(InstanceHandle {
instance: instance as _,
})
}
fn allocate(
&self,
strategy: PoolingAllocationStrategy,
mut req: InstanceAllocationRequest,
req: InstanceAllocationRequest,
) -> Result<InstanceHandle, InstantiationError> {
let index = {
let mut free_list = self.free_list.lock().unwrap();
@@ -390,28 +426,15 @@ impl InstancePool {
free_list.swap_remove(free_index)
};
let host_state = std::mem::replace(&mut req.host_state, Box::new(()));
unsafe {
let instance = self.instance(index);
instance.module = req.module.clone();
instance.offsets = VMOffsets::new(
std::mem::size_of::<*const u8>() as u8,
instance.module.as_ref(),
);
instance.host_state = host_state;
Self::set_instance_memories(
instance,
self.memories.get(index),
self.memories.max_wasm_pages,
)?;
Self::set_instance_tables(instance, self.tables.get(index), self.tables.max_elements)?;
initialize_vmcontext(instance, req);
Ok(InstanceHandle::new(instance as _))
self.setup_instance(index, req).or_else(|e| {
// Deallocate the allocated instance on error
let instance = self.instance(index);
self.deallocate(&InstanceHandle {
instance: instance as _,
});
Err(e)
})
}
}
@@ -473,6 +496,7 @@ impl InstancePool {
instance: &mut Instance,
mut memories: impl Iterator<Item = *mut u8>,
max_pages: u32,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<(), InstantiationError> {
let module = instance.module.as_ref();
@@ -487,6 +511,7 @@ impl InstancePool {
memories.next().unwrap(),
max_pages,
commit_memory_pages,
limiter,
)
.map_err(InstantiationError::Resource)?,
);
@@ -503,6 +528,7 @@ impl InstancePool {
instance: &mut Instance,
mut tables: impl Iterator<Item = *mut u8>,
max_elements: u32,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<(), InstantiationError> {
let module = instance.module.as_ref();
@@ -514,9 +540,10 @@ impl InstancePool {
commit_table_pages(base, max_elements as usize * mem::size_of::<*mut u8>())
.map_err(InstantiationError::Resource)?;
instance
.tables
.push(Table::new_static(plan, base as _, max_elements));
instance.tables.push(
Table::new_static(plan, base as _, max_elements, limiter)
.map_err(InstantiationError::Resource)?,
);
}
let mut dropped_elements = instance.dropped_elements.borrow_mut();
@@ -1370,7 +1397,8 @@ mod test {
host_state: Box::new(()),
interrupts: std::ptr::null(),
externref_activations_table: std::ptr::null_mut(),
stack_map_registry: std::ptr::null_mut(),
module_info_lookup: None,
limiter: None,
},
)
.expect("allocation should succeed"),
@@ -1394,7 +1422,8 @@ mod test {
host_state: Box::new(()),
interrupts: std::ptr::null(),
externref_activations_table: std::ptr::null_mut(),
stack_map_registry: std::ptr::null_mut(),
module_info_lookup: None,
limiter: None,
},
) {
Err(InstantiationError::Limit(3)) => {}

View File

@@ -523,7 +523,8 @@ mod test {
host_state: Box::new(()),
interrupts: ptr::null(),
externref_activations_table: ptr::null_mut(),
stack_map_registry: ptr::null_mut(),
module_info_lookup: None,
limiter: None,
},
)
.expect("instance should allocate"),

View File

@@ -40,7 +40,7 @@ pub use crate::imports::Imports;
pub use crate::instance::{
InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstanceLimits,
InstantiationError, LinkError, ModuleLimits, OnDemandInstanceAllocator,
PoolingAllocationStrategy, PoolingInstanceAllocator, RuntimeInstance,
PoolingAllocationStrategy, PoolingInstanceAllocator, ResourceLimiter, RuntimeInstance,
};
pub use crate::jit_int::GdbJitImageRegistration;
pub use crate::memory::{Memory, RuntimeLinearMemory, RuntimeMemoryCreator};

View File

@@ -449,8 +449,8 @@ pub unsafe extern "C" fn wasmtime_activations_table_insert_with_gc(
let externref = VMExternRef::clone_from_raw(externref);
let instance = (&mut *vmctx).instance();
let activations_table = &**instance.externref_activations_table();
let registry = &**instance.stack_map_registry();
activations_table.insert_with_gc(externref, registry);
let module_info_lookup = &**instance.module_info_lookup();
activations_table.insert_with_gc(externref, module_info_lookup);
}
/// Perform a Wasm `global.get` for `externref` globals.
@@ -466,8 +466,8 @@ pub unsafe extern "C" fn wasmtime_externref_global_get(
Some(externref) => {
let raw = externref.as_raw();
let activations_table = &**instance.externref_activations_table();
let registry = &**instance.stack_map_registry();
activations_table.insert_with_gc(externref, registry);
let module_info_lookup = &**instance.module_info_lookup();
activations_table.insert_with_gc(externref, module_info_lookup);
raw
}
}

View File

@@ -4,12 +4,14 @@
use crate::mmap::Mmap;
use crate::vmcontext::VMMemoryDefinition;
use anyhow::Result;
use crate::ResourceLimiter;
use anyhow::{bail, Result};
use more_asserts::{assert_ge, assert_le};
use std::cell::{Cell, RefCell};
use std::cmp::min;
use std::convert::TryFrom;
use std::ptr;
use std::rc::Rc;
use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};
/// A memory allocator
@@ -33,6 +35,10 @@ pub trait RuntimeLinearMemory {
/// Returns the number of allocated wasm pages.
fn size(&self) -> u32;
/// Returns the maximum number of pages the memory can grow to.
///
/// Returns `None` if the memory is unbounded.
fn maximum(&self) -> Option<u32>;
/// Grow memory by the specified amount of wasm pages.
///
/// Returns `None` if memory can't be grown by the specified amount
@@ -105,6 +111,12 @@ impl RuntimeLinearMemory for MmapMemory {
self.mmap.borrow().size
}
/// Returns the maximum number of pages the memory can grow to.
///
/// Returns `None` if the memory is unbounded.
fn maximum(&self) -> Option<u32> {
self.maximum
}
/// Grow memory by the specified amount of wasm pages.
///
/// Returns `None` if memory can't be grown by the specified amount
@@ -189,12 +201,23 @@ enum MemoryStorage {
}
/// Represents an instantiation of a WebAssembly memory.
pub struct Memory(MemoryStorage);
pub struct Memory {
storage: MemoryStorage,
limiter: Option<Rc<dyn ResourceLimiter>>,
}
impl Memory {
/// Create a new dynamic (movable) memory instance for the specified plan.
pub fn new_dynamic(plan: &MemoryPlan, creator: &dyn RuntimeMemoryCreator) -> Result<Self> {
Ok(Self(MemoryStorage::Dynamic(creator.new_memory(plan)?)))
pub fn new_dynamic(
plan: &MemoryPlan,
creator: &dyn RuntimeMemoryCreator,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<Self> {
Self::new(
plan,
MemoryStorage::Dynamic(creator.new_memory(plan)?),
limiter,
)
}
/// Create a new static (immovable) memory instance for the specified plan.
@@ -203,32 +226,78 @@ impl Memory {
base: *mut u8,
maximum: u32,
make_accessible: fn(*mut u8, usize) -> Result<()>,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<Self> {
if plan.memory.minimum > 0 {
make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize)?;
}
Ok(Self(MemoryStorage::Static {
let storage = MemoryStorage::Static {
base,
size: Cell::new(plan.memory.minimum),
maximum: min(plan.memory.maximum.unwrap_or(maximum), maximum),
make_accessible,
#[cfg(all(feature = "uffd", target_os = "linux"))]
guard_page_faults: RefCell::new(Vec::new()),
}))
};
Self::new(plan, storage, limiter)
}
fn new(
plan: &MemoryPlan,
storage: MemoryStorage,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<Self> {
if let Some(limiter) = limiter {
if !limiter.memory_growing(0, plan.memory.minimum, plan.memory.maximum) {
bail!(
"memory minimum size of {} pages exceeds memory limits",
plan.memory.minimum
);
}
}
if let MemoryStorage::Static {
base,
make_accessible,
..
} = &storage
{
if plan.memory.minimum > 0 {
make_accessible(
*base,
plan.memory.minimum as usize * WASM_PAGE_SIZE as usize,
)?;
}
}
Ok(Self {
storage,
limiter: limiter.cloned(),
})
}
/// Returns the number of allocated wasm pages.
pub fn size(&self) -> u32 {
match &self.0 {
match &self.storage {
MemoryStorage::Static { size, .. } => size.get(),
MemoryStorage::Dynamic(mem) => mem.size(),
}
}
/// Returns the maximum number of pages the memory can grow to at runtime.
///
/// Returns `None` if the memory is unbounded.
///
/// The runtime maximum may not be equal to the maximum from the linear memory's
/// Wasm type when it is being constrained by an instance allocator.
pub fn maximum(&self) -> Option<u32> {
match &self.storage {
MemoryStorage::Static { maximum, .. } => Some(*maximum),
MemoryStorage::Dynamic(mem) => mem.maximum(),
}
}
/// Returns whether or not the underlying storage of the memory is "static".
pub(crate) fn is_static(&self) -> bool {
if let MemoryStorage::Static { .. } = &self.0 {
if let MemoryStorage::Static { .. } = &self.storage {
true
} else {
false
@@ -239,8 +308,30 @@ impl Memory {
///
/// Returns `None` if memory can't be grown by the specified amount
/// of wasm pages.
pub fn grow(&self, delta: u32) -> Option<u32> {
match &self.0 {
///
/// # Safety
///
/// Resizing the memory can reallocate the memory buffer for dynamic memories.
/// An instance's `VMContext` may have pointers to the memory's base and will
/// need to be fixed up after growing the memory.
///
/// Generally, prefer using `InstanceHandle::memory_grow`, which encapsulates
/// this unsafety.
pub unsafe fn grow(&self, delta: u32) -> Option<u32> {
let old_size = self.size();
if delta == 0 {
return Some(old_size);
}
let new_size = old_size.checked_add(delta)?;
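// Consult the limiter first: per the `ResourceLimiter` contract it is
// notified of the request even when `new_size` exceeds the maximum
// enforced below.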
if let Some(limiter) = &self.limiter {
if !limiter.memory_growing(old_size, new_size, self.maximum()) {
return None;
}
}
match &self.storage {
MemoryStorage::Static {
base,
size,
@@ -252,13 +343,6 @@ impl Memory {
#[cfg(all(feature = "uffd", target_os = "linux"))]
self.reset_guard_pages().ok()?;
let old_size = size.get();
if delta == 0 {
return Some(old_size);
}
let new_size = old_size.checked_add(delta)?;
if new_size > *maximum || new_size >= WASM_MAX_PAGES {
return None;
}
@@ -266,7 +350,7 @@ impl Memory {
let start = usize::try_from(old_size).unwrap() * WASM_PAGE_SIZE as usize;
let len = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
make_accessible(unsafe { base.add(start) }, len).ok()?;
make_accessible(base.add(start), len).ok()?;
size.set(new_size);
@@ -278,7 +362,7 @@ impl Memory {
/// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
pub fn vmmemory(&self) -> VMMemoryDefinition {
match &self.0 {
match &self.storage {
MemoryStorage::Static { base, size, .. } => VMMemoryDefinition {
base: *base,
current_length: size.get() as usize * WASM_PAGE_SIZE as usize,
@@ -299,7 +383,7 @@ impl Memory {
size: usize,
reset: fn(*mut u8, usize) -> Result<()>,
) {
match &self.0 {
match &self.storage {
MemoryStorage::Static {
guard_page_faults, ..
} => {
@@ -320,7 +404,7 @@ impl Memory {
/// This function will panic if called on a dynamic memory.
#[cfg(all(feature = "uffd", target_os = "linux"))]
pub(crate) fn reset_guard_pages(&self) -> Result<()> {
match &self.0 {
match &self.storage {
MemoryStorage::Static {
guard_page_faults, ..
} => {
@@ -345,13 +429,16 @@ impl Default for Memory {
unreachable!()
}
Self(MemoryStorage::Static {
base: ptr::null_mut(),
size: Cell::new(0),
maximum: 0,
make_accessible,
#[cfg(all(feature = "uffd", target_os = "linux"))]
guard_page_faults: RefCell::new(Vec::new()),
})
Self {
storage: MemoryStorage::Static {
base: ptr::null_mut(),
size: Cell::new(0),
maximum: 0,
make_accessible,
#[cfg(all(feature = "uffd", target_os = "linux"))]
guard_page_faults: RefCell::new(Vec::new()),
},
limiter: None,
}
}
}

View File

@@ -3,19 +3,21 @@
//! `Table` is to WebAssembly tables what `LinearMemory` is to WebAssembly linear memories.
use crate::vmcontext::{VMCallerCheckedAnyfunc, VMTableDefinition};
use crate::{Trap, VMExternRef};
use crate::{ResourceLimiter, Trap, VMExternRef};
use anyhow::{bail, Result};
use std::cell::{Cell, RefCell};
use std::cmp::min;
use std::convert::TryInto;
use std::convert::{TryFrom, TryInto};
use std::ops::Range;
use std::ptr;
use std::rc::Rc;
use wasmtime_environ::wasm::TableElementType;
use wasmtime_environ::{ir, TablePlan};
/// An element going into or coming out of a table.
///
/// Table elements are stored as pointers and are default-initialized with `ptr::null_mut`.
#[derive(Clone, Debug)]
#[derive(Clone)]
pub enum TableElement {
/// A `funcref`.
FuncRef(*mut VMCallerCheckedAnyfunc),
@@ -69,7 +71,7 @@ impl TableElement {
unsafe fn into_raw(self) -> *mut u8 {
match self {
Self::FuncRef(e) => e as _,
Self::ExternRef(e) => e.map(|e| e.into_raw()).unwrap_or(ptr::null_mut()),
Self::ExternRef(e) => e.map_or(ptr::null_mut(), |e| e.into_raw()),
}
}
}
@@ -92,7 +94,6 @@ impl From<VMExternRef> for TableElement {
}
}
#[derive(Debug)]
enum TableStorage {
Static {
data: *mut *mut u8,
@@ -108,38 +109,74 @@ enum TableStorage {
}
/// Represents an instance's table.
#[derive(Debug)]
pub struct Table(TableStorage);
pub struct Table {
storage: TableStorage,
limiter: Option<Rc<dyn ResourceLimiter>>,
}
impl Table {
/// Create a new dynamic (movable) table instance for the specified table plan.
pub fn new_dynamic(plan: &TablePlan) -> Self {
pub fn new_dynamic(
plan: &TablePlan,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<Self> {
let elements = RefCell::new(vec![ptr::null_mut(); plan.table.minimum as usize]);
let ty = plan.table.ty.clone();
let maximum = plan.table.maximum;
Self(TableStorage::Dynamic {
let storage = TableStorage::Dynamic {
elements,
ty,
maximum,
})
};
Self::new(plan, storage, limiter)
}
/// Create a new static (immovable) table instance for the specified table plan.
pub fn new_static(plan: &TablePlan, data: *mut *mut u8, maximum: u32) -> Self {
pub fn new_static(
plan: &TablePlan,
data: *mut *mut u8,
maximum: u32,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<Self> {
let size = Cell::new(plan.table.minimum);
let ty = plan.table.ty.clone();
let maximum = min(plan.table.maximum.unwrap_or(maximum), maximum);
Self(TableStorage::Static {
let storage = TableStorage::Static {
data,
size,
ty,
maximum,
};
Self::new(plan, storage, limiter)
}
fn new(
plan: &TablePlan,
storage: TableStorage,
limiter: Option<&Rc<dyn ResourceLimiter>>,
) -> Result<Self> {
if let Some(limiter) = limiter {
if !limiter.table_growing(0, plan.table.minimum, plan.table.maximum) {
bail!(
"table minimum size of {} elements exceeds table limits",
plan.table.minimum
);
}
}
Ok(Self {
storage,
limiter: limiter.cloned(),
})
}
/// Returns the type of the elements in this table.
pub fn element_type(&self) -> TableElementType {
match &self.0 {
match &self.storage {
TableStorage::Static { ty, .. } => *ty,
TableStorage::Dynamic { ty, .. } => *ty,
}
@@ -147,7 +184,7 @@ impl Table {
/// Returns whether or not the underlying storage of the table is "static".
pub(crate) fn is_static(&self) -> bool {
if let TableStorage::Static { .. } = &self.0 {
if let TableStorage::Static { .. } = &self.storage {
true
} else {
false
@@ -156,20 +193,51 @@ impl Table {
/// Returns the number of allocated elements.
pub fn size(&self) -> u32 {
match &self.0 {
match &self.storage {
TableStorage::Static { size, .. } => size.get(),
TableStorage::Dynamic { elements, .. } => elements.borrow().len().try_into().unwrap(),
}
}
/// Returns the maximum number of elements.
/// Returns the maximum number of elements at runtime.
///
/// Returns `None` if the table is unbounded.
///
/// The runtime maximum may not be equal to the maximum from the table's Wasm type
/// when it is being constrained by an instance allocator.
pub fn maximum(&self) -> Option<u32> {
match &self.0 {
match &self.storage {
TableStorage::Static { maximum, .. } => Some(*maximum),
TableStorage::Dynamic { maximum, .. } => maximum.clone(),
}
}
/// Fill `table[dst..]` with values from `items`.
///
/// Returns a trap error on out-of-bounds accesses.
pub fn init_funcs(
&self,
dst: u32,
items: impl ExactSizeIterator<Item = *mut VMCallerCheckedAnyfunc>,
) -> Result<(), Trap> {
assert!(self.element_type() == TableElementType::Func);
self.with_elements_mut(|elements| {
let elements = match elements
.get_mut(usize::try_from(dst).unwrap()..)
.and_then(|s| s.get_mut(..items.len()))
{
Some(elements) => elements,
None => return Err(Trap::wasm(ir::TrapCode::TableOutOfBounds)),
};
for (item, slot) in items.zip(elements) {
*slot = item as *mut u8;
}
Ok(())
})
}
/// Fill `table[dst..dst + len]` with `val`.
///
/// Returns a trap error on out-of-bounds accesses.
@@ -218,8 +286,14 @@ impl Table {
/// this unsafety.
pub unsafe fn grow(&self, delta: u32, init_value: TableElement) -> Option<u32> {
let old_size = self.size();
let new_size = old_size.checked_add(delta)?;
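// As with memory growth, the limiter is consulted before the maximum
// check so that it observes the request even when `new_size` exceeds
// the maximum.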
if let Some(limiter) = &self.limiter {
if !limiter.table_growing(old_size, new_size, self.maximum()) {
return None;
}
}
if let Some(max) = self.maximum() {
if new_size > max {
return None;
@@ -229,7 +303,7 @@ impl Table {
debug_assert!(self.type_matches(&init_value));
// First resize the storage and then fill with the init value
match &self.0 {
match &self.storage {
TableStorage::Static { size, .. } => {
size.set(new_size);
}
@@ -319,7 +393,7 @@ impl Table {
/// Return a `VMTableDefinition` for exposing the table to compiled wasm code.
pub fn vmtable(&self) -> VMTableDefinition {
match &self.0 {
match &self.storage {
TableStorage::Static { data, size, .. } => VMTableDefinition {
base: *data as _,
current_elements: size.get(),
@@ -346,7 +420,7 @@ impl Table {
where
F: FnOnce(&[*mut u8]) -> R,
{
match &self.0 {
match &self.storage {
TableStorage::Static { data, size, .. } => unsafe {
f(std::slice::from_raw_parts(*data, size.get() as usize))
},
@@ -361,7 +435,7 @@ impl Table {
where
F: FnOnce(&mut [*mut u8]) -> R,
{
match &self.0 {
match &self.storage {
TableStorage::Static { data, size, .. } => unsafe {
f(std::slice::from_raw_parts_mut(*data, size.get() as usize))
},
@@ -463,11 +537,14 @@ impl Drop for Table {
// The default table representation is an empty funcref table that cannot grow.
impl Default for Table {
fn default() -> Self {
Self(TableStorage::Static {
data: std::ptr::null_mut(),
size: Cell::new(0),
ty: TableElementType::Func,
maximum: 0,
})
Self {
storage: TableStorage::Static {
data: std::ptr::null_mut(),
size: Cell::new(0),
ty: TableElementType::Func,
maximum: 0,
},
limiter: None,
}
}
}

View File

@@ -58,13 +58,12 @@ static mut IS_WASM_PC: fn(usize) -> bool = |_| false;
/// program counter is the pc of an actual wasm trap or not. This is then used
/// to disambiguate faults that happen due to wasm and faults that happen due to
/// bugs in Rust or elsewhere.
pub fn init_traps(is_wasm_pc: fn(usize) -> bool) -> Result<(), Trap> {
pub fn init_traps(is_wasm_pc: fn(usize) -> bool) {
static INIT: Once = Once::new();
INIT.call_once(|| unsafe {
IS_WASM_PC = is_wasm_pc;
sys::platform_init();
});
sys::lazy_per_thread_init()
}
/// Raises a user-defined trap immediately.
@@ -256,7 +255,7 @@ impl<'a> CallThreadState<'a> {
}
fn with(self, closure: impl FnOnce(&CallThreadState) -> i32) -> Result<(), Trap> {
let ret = tls::set(&self, || closure(&self));
let ret = tls::set(&self, || closure(&self))?;
if ret != 0 {
return Ok(());
}
@@ -366,6 +365,7 @@ impl<T: Copy> Drop for ResetCell<'_, T> {
// the caller to the trap site.
mod tls {
use super::CallThreadState;
use crate::Trap;
use std::mem;
use std::ptr;
@@ -384,21 +384,38 @@ mod tls {
// these TLS values when the runtime may have crossed threads.
mod raw {
use super::CallThreadState;
use crate::Trap;
use std::cell::Cell;
use std::ptr;
pub type Ptr = *const CallThreadState<'static>;
thread_local!(static PTR: Cell<Ptr> = Cell::new(ptr::null()));
// The first entry here is the `Ptr` which is what's used as part of the
// public interface of this module. The second entry is a boolean which
// allows the runtime to perform per-thread initialization if necessary
// for handling traps (e.g. setting up ports on macOS and sigaltstack on
// Unix).
thread_local!(static PTR: Cell<(Ptr, bool)> = Cell::new((ptr::null(), false)));
#[inline(never)] // see module docs for why this is here
pub fn replace(val: Ptr) -> Ptr {
PTR.with(|p| p.replace(val))
pub fn replace(val: Ptr) -> Result<Ptr, Trap> {
PTR.with(|p| {
// When a new value is configured that means that we may be
// entering WebAssembly so check to see if this thread has
// performed per-thread initialization for traps.
let (prev, mut initialized) = p.get();
if !initialized {
super::super::sys::lazy_per_thread_init()?;
initialized = true;
}
p.set((val, initialized));
Ok(prev)
})
}
#[inline(never)] // see module docs for why this is here
pub fn get() -> Ptr {
PTR.with(|p| p.get())
PTR.with(|p| p.get().0)
}
}
@@ -412,7 +429,7 @@ mod tls {
///
/// This is not a safe operation since it's intended to only be used
/// with stack switching found with fibers and async wasmtime.
pub unsafe fn take() -> TlsRestore {
pub unsafe fn take() -> Result<TlsRestore, Trap> {
// Our tls pointer must be set at this time, and it must not be
// null. We need to restore the previous pointer since we're
// removing ourselves from the call-stack, and in the process we
@@ -421,8 +438,8 @@ mod tls {
let raw = raw::get();
assert!(!raw.is_null());
let prev = (*raw).prev.replace(ptr::null());
raw::replace(prev);
TlsRestore(raw)
raw::replace(prev)?;
Ok(TlsRestore(raw))
}
/// Restores a previous tls state back into this thread's TLS.
@@ -430,17 +447,12 @@ mod tls {
/// This is unsafe because it's intended to only be used within the
/// context of stack switching within wasmtime.
pub unsafe fn replace(self) -> Result<(), super::Trap> {
// When replacing to the previous value of TLS, we might have
// crossed a thread: make sure the trap-handling lazy initializer
// runs.
super::sys::lazy_per_thread_init()?;
// We need to configure our previous TLS pointer to whatever is in
// TLS at this time, and then we set the current state to ourselves.
let prev = raw::get();
assert!((*self.0).prev.get().is_null());
(*self.0).prev.set(prev);
raw::replace(self.0);
raw::replace(self.0)?;
Ok(())
}
}
@@ -448,13 +460,14 @@ mod tls {
/// Configures thread local state such that for the duration of the
/// execution of `closure` any call to `with` will yield `ptr`, unless this
/// is recursively called again.
pub fn set<R>(state: &CallThreadState<'_>, closure: impl FnOnce() -> R) -> R {
pub fn set<R>(state: &CallThreadState<'_>, closure: impl FnOnce() -> R) -> Result<R, Trap> {
struct Reset<'a, 'b>(&'a CallThreadState<'b>);
impl Drop for Reset<'_, '_> {
#[inline]
fn drop(&mut self) {
raw::replace(self.0.prev.replace(ptr::null()));
raw::replace(self.0.prev.replace(ptr::null()))
.expect("tls should be previously initialized");
}
}
@@ -464,10 +477,10 @@ mod tls {
let ptr = unsafe {
mem::transmute::<*const CallThreadState<'_>, *const CallThreadState<'static>>(state)
};
let prev = raw::replace(ptr);
let prev = raw::replace(ptr)?;
state.prev.set(prev);
let _reset = Reset(state);
closure()
Ok(closure())
}
/// Returns the last pointer configured with `set` above. Panics if `set`

View File

@@ -25,7 +25,7 @@
//! use a thread-local to store information about how to unwind. Additionally
//! this requires that the check of whether a pc is a wasm trap or not is a
//! global check rather than a per-thread check. This necessitates the existence
//! of `GlobalFrameInfo` in the `wasmtime` crate.
//! of `GlobalModuleRegistry` in the `wasmtime` crate.
//!
//! Otherwise this file heavily uses the `mach` Rust crate for type and
//! function declarations. Many bits and pieces are copied or translated from
@@ -42,7 +42,6 @@ use mach::message::*;
use mach::port::*;
use mach::thread_act::*;
use mach::traps::*;
use std::cell::Cell;
use std::mem;
use std::thread;
@@ -425,26 +424,16 @@ impl Drop for ClosePort {
/// task-level port which is where we'd expected things like breakpad/crashpad
/// exception handlers to get registered.
pub fn lazy_per_thread_init() -> Result<(), Trap> {
thread_local! {
static PORTS_SET: Cell<bool> = Cell::new(false);
unsafe {
assert!(WASMTIME_PORT != MACH_PORT_NULL);
let kret = thread_set_exception_ports(
MY_PORT.with(|p| p.0),
EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
WASMTIME_PORT,
EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
mach_addons::THREAD_STATE_NONE,
);
assert_eq!(kret, KERN_SUCCESS, "failed to set thread exception port");
}
PORTS_SET.with(|ports| {
if ports.replace(true) {
return;
}
unsafe {
assert!(WASMTIME_PORT != MACH_PORT_NULL);
let kret = thread_set_exception_ports(
MY_PORT.with(|p| p.0),
EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
WASMTIME_PORT,
EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
mach_addons::THREAD_STATE_NONE,
);
assert_eq!(kret, KERN_SUCCESS, "failed to set thread exception port");
}
});
Ok(())
}

View File

@@ -47,8 +47,8 @@ pub unsafe fn platform_init() {
// Handle `unreachable` instructions which execute `ud2` right now
register(&mut PREV_SIGILL, libc::SIGILL);
// x86 uses SIGFPE to report division by zero
if cfg!(target_arch = "x86") || cfg!(target_arch = "x86_64") {
// x86 and s390x use SIGFPE to report division by zero
if cfg!(target_arch = "x86") || cfg!(target_arch = "x86_64") || cfg!(target_arch = "s390x") {
register(&mut PREV_SIGFPE, libc::SIGFPE);
}
@@ -85,7 +85,7 @@ unsafe extern "C" fn trap_handler(
// Otherwise flag ourselves as handling a trap, do the trap
// handling, and reset our trap handling flag. Then we figure
// out what to do based on the result of the trap handling.
let pc = get_pc(context);
let pc = get_pc(context, signum);
let jmp_buf = info.jmp_buf_if_trap(pc, |handler| handler(signum, siginfo, context));
// Figure out what to do based on the result of this handling of
@@ -127,7 +127,7 @@ unsafe extern "C" fn trap_handler(
}
}
unsafe fn get_pc(cx: *mut libc::c_void) -> *const u8 {
unsafe fn get_pc(cx: *mut libc::c_void, _signum: libc::c_int) -> *const u8 {
cfg_if::cfg_if! {
if #[cfg(all(target_os = "linux", target_arch = "x86_64"))] {
let cx = &*(cx as *const libc::ucontext_t);
@@ -138,6 +138,23 @@ unsafe fn get_pc(cx: *mut libc::c_void) -> *const u8 {
} else if #[cfg(all(any(target_os = "linux", target_os = "android"), target_arch = "aarch64"))] {
let cx = &*(cx as *const libc::ucontext_t);
cx.uc_mcontext.pc as *const u8
} else if #[cfg(all(target_os = "linux", target_arch = "s390x"))] {
// On s390x, SIGILL and SIGFPE are delivered with the PSW address
// pointing *after* the faulting instruction, while SIGSEGV and
// SIGBUS are delivered with the PSW address pointing *to* the
// faulting instruction. To handle this, the code generator registers
// any trap that results in one of the "late" signals on the last byte
// of the instruction, and any trap that results in one of the "early"
// signals on the first byte of the instruction (as usual). This
// means we simply need to decrement the reported PSW address by
// one in the case of a "late" signal here to ensure we always
// correctly find the associated trap handler.
let trap_offset = match _signum {
libc::SIGILL | libc::SIGFPE => 1,
_ => 0,
};
let cx = &*(cx as *const libc::ucontext_t);
(cx.uc_mcontext.psw.addr - trap_offset) as *const u8
} else if #[cfg(all(target_os = "freebsd", target_arch = "x86_64"))] {
let cx = &*(cx as *const libc::ucontext_t);
cx.uc_mcontext.mc_rip as *const u8
@@ -154,41 +171,35 @@ unsafe fn get_pc(cx: *mut libc::c_void) -> *const u8 {
/// and registering our own alternate stack that is large enough and has a guard
/// page.
pub fn lazy_per_thread_init() -> Result<(), Trap> {
// This thread local is purely used to register a `Stack` to get deallocated
// when the thread exits. Otherwise this function is only ever called at
// most once per thread.
thread_local! {
/// Thread-local state is lazy-initialized on the first time it's used,
/// and dropped when the thread exits.
static TLS: RefCell<Tls> = RefCell::new(Tls::None);
static STACK: RefCell<Option<Stack>> = RefCell::new(None);
}
/// The size of the sigaltstack (not including the guard, which will be
/// added). Make this large enough to run our signal handlers.
const MIN_STACK_SIZE: usize = 16 * 4096;
enum Tls {
None,
Allocated {
mmap_ptr: *mut libc::c_void,
mmap_size: usize,
},
BigEnough,
struct Stack {
mmap_ptr: *mut libc::c_void,
mmap_size: usize,
}
return TLS.with(|slot| unsafe {
let mut slot = slot.borrow_mut();
match *slot {
Tls::None => {}
// already checked
_ => return Ok(()),
}
return STACK.with(|s| {
*s.borrow_mut() = unsafe { allocate_sigaltstack()? };
Ok(())
});
unsafe fn allocate_sigaltstack() -> Result<Option<Stack>, Trap> {
// Check to see if the existing sigaltstack, if it exists, is big
// enough. If so we don't need to allocate our own.
let mut old_stack = mem::zeroed();
let r = libc::sigaltstack(ptr::null(), &mut old_stack);
assert_eq!(r, 0, "learning about sigaltstack failed");
if old_stack.ss_flags & libc::SS_DISABLE == 0 && old_stack.ss_size >= MIN_STACK_SIZE {
*slot = Tls::BigEnough;
return Ok(());
return Ok(None);
}
// ... but failing that we need to allocate our own, so do all that
@@ -226,25 +237,17 @@ pub fn lazy_per_thread_init() -> Result<(), Trap> {
let r = libc::sigaltstack(&new_stack, ptr::null_mut());
assert_eq!(r, 0, "registering new sigaltstack failed");
*slot = Tls::Allocated {
Ok(Some(Stack {
mmap_ptr: ptr,
mmap_size: alloc_size,
};
Ok(())
});
}))
}
impl Drop for Tls {
impl Drop for Stack {
fn drop(&mut self) {
let (ptr, size) = match self {
Tls::Allocated {
mmap_ptr,
mmap_size,
} => (*mmap_ptr, *mmap_size),
_ => return,
};
unsafe {
// Deallocate the stack memory.
let r = libc::munmap(ptr, size);
let r = libc::munmap(self.mmap_ptr, self.mmap_size);
debug_assert_eq!(r, 0, "munmap failed during thread shutdown");
}
}

View File

@@ -15,10 +15,10 @@ include = ["src/**/*", "LICENSE" ]
wasi-common = { path = "../", version = "0.26.0" }
async-trait = "0.1"
anyhow = "1.0"
cap-std = "0.13.7"
cap-fs-ext = "0.13.7"
cap-time-ext = "0.13.7"
cap-rand = "0.13.2"
cap-std = "0.13.9"
cap-fs-ext = "0.13.9"
cap-time-ext = "0.13.9"
cap-rand = "0.13.9"
fs-set-times = "0.3.1"
unsafe-io = "0.6.5"
system-interface = { version = "0.6.3", features = ["cap_std_impls"] }

View File

@@ -1,4 +1,4 @@
use super::{guest_types, WasiCryptoCtx};
use super::guest_types;
use std::num::TryFromIntError;
use wasi_crypto::CryptoError;

View File

@@ -22,7 +22,7 @@ wasmtime-wasi = { path = "../wasi", version = "0.26.0" }
wiggle = { path = "../wiggle", version = "0.26.0" }
# These dependencies are necessary for the wasi-nn implementation:
openvino = "0.1.5"
openvino = { version = "0.3.1", features = ["runtime-linking"] }
thiserror = "1.0"
[build-dependencies]

View File

@@ -1,11 +1,9 @@
//! This build script:
//! - has the configuration necessary for the wiggle and witx macros.
use std::path::PathBuf;
fn main() {
// This is necessary for Wiggle/Witx macros.
let wasi_root = PathBuf::from("./spec").canonicalize().unwrap();
let cwd = std::env::current_dir().unwrap();
let wasi_root = cwd.join("spec");
println!("cargo:rustc-env=WASI_ROOT={}", wasi_root.display());
// Also automatically rebuild if the Witx files change

View File

@@ -2,7 +2,7 @@
//! wasi-nn API.
use crate::r#impl::UsageError;
use crate::witx::types::{Graph, GraphExecutionContext};
use openvino::InferenceError;
use openvino::{InferenceError, SetupError};
use std::cell::RefCell;
use std::collections::HashMap;
use std::hash::Hash;
@@ -14,8 +14,10 @@ use wiggle::GuestError;
pub enum WasiNnError {
#[error("guest error")]
GuestError(#[from] GuestError),
#[error("openvino error")]
OpenvinoError(#[from] InferenceError),
#[error("openvino inference error")]
OpenvinoInferenceError(#[from] InferenceError),
#[error("openvino setup error")]
OpenvinoSetupError(#[from] SetupError),
#[error("usage error")]
UsageError(#[from] UsageError),
}
@@ -74,7 +76,7 @@ impl ExecutionContext {
/// Capture the state necessary for calling into `openvino`.
pub struct Ctx {
pub(crate) core: openvino::Core,
pub(crate) core: Option<openvino::Core>,
pub(crate) graphs: Table<Graph, (openvino::CNNNetwork, openvino::ExecutableNetwork)>,
pub(crate) executions: Table<GraphExecutionContext, ExecutionContext>,
}
@@ -83,7 +85,7 @@ impl Ctx {
/// Make a new `WasiNnCtx` with the default settings.
pub fn new() -> WasiNnResult<Self> {
Ok(Self {
core: openvino::Core::new(None)?,
core: Option::default(),
graphs: Table::default(),
executions: Table::default(),
})

View File

@@ -12,6 +12,8 @@ use wiggle::GuestPtr;
#[derive(Debug, Error)]
pub enum UsageError {
#[error("Invalid context; has the load function been called?")]
InvalidContext,
#[error("Only OpenVINO's IR is currently supported, passed encoding: {0:?}")]
InvalidEncoding(GraphEncoding),
#[error("OpenVINO expects only two buffers (i.e. [ir, weights]), passed: {0}")]
@@ -34,9 +36,21 @@ impl<'a> WasiEphemeralNn for WasiNnCtx {
if encoding != GraphEncoding::Openvino {
return Err(UsageError::InvalidEncoding(encoding).into());
}
if builders.len() != 2 {
return Err(UsageError::InvalidNumberOfBuilders(builders.len()).into());
}
// Construct the context if none is present; this is done lazily (i.e. upon actually loading
// a model) because it may fail to find and load the OpenVINO libraries. The laziness limits
// the impact of such a failure to wasi-nn users only, not to all WASI users.
if self.ctx.borrow().core.is_none() {
self.ctx
.borrow_mut()
.core
.replace(openvino::Core::new(None)?);
}
let builders = builders.as_ptr();
let xml = builders.read()?.as_slice()?;
let weights = builders.add(1)?.read()?.as_slice()?;
@@ -44,11 +58,15 @@ impl<'a> WasiEphemeralNn for WasiNnCtx {
.ctx
.borrow_mut()
.core
.as_mut()
.ok_or(UsageError::InvalidContext)?
.read_network_from_buffer(&xml, &weights)?;
let executable_graph = self
.ctx
.borrow_mut()
.core
.as_mut()
.ok_or(UsageError::InvalidContext)?
.load_network(&graph, map_execution_target_to_string(target))?;
let id = self
.ctx
@@ -94,7 +112,7 @@ impl<'a> WasiEphemeralNn for WasiNnCtx {
.dimensions
.as_slice()?
.iter()
.map(|d| *d as u64)
.map(|d| *d as usize)
.collect::<Vec<_>>();
let precision = match tensor.type_ {
TensorType::F16 => Precision::FP16,

View File

@@ -14,7 +14,8 @@ impl<'a> types::UserErrorConversion for WasiNnCtx {
fn nn_errno_from_wasi_nn_error(&self, e: WasiNnError) -> Result<NnErrno, wiggle::Trap> {
eprintln!("Host error: {:?}", e);
match e {
WasiNnError::OpenvinoError(_) => unimplemented!(),
WasiNnError::OpenvinoSetupError(_) => unimplemented!(),
WasiNnError::OpenvinoInferenceError(_) => unimplemented!(),
WasiNnError::GuestError(_) => unimplemented!(),
WasiNnError::UsageError(_) => unimplemented!(),
}

View File

@@ -319,6 +319,10 @@ impl HostFuncMap {
fn async_required(&self) -> bool {
self.funcs.values().any(|f| f.1)
}
fn iter(&self) -> impl Iterator<Item = &HostFunc> {
self.funcs.values().map(|v| &*v.0)
}
}
macro_rules! generate_wrap_async_host_func {
@@ -379,9 +383,6 @@ pub struct Config {
pub(crate) max_wasm_stack: usize,
pub(crate) features: WasmFeatures,
pub(crate) wasm_backtrace_details_env_used: bool,
pub(crate) max_instances: usize,
pub(crate) max_tables: usize,
pub(crate) max_memories: usize,
#[cfg(feature = "async")]
pub(crate) async_stack_size: usize,
host_funcs: HostFuncMap,
@@ -418,9 +419,6 @@ impl Config {
max_wasm_stack: 1 << 20,
wasm_backtrace_details_env_used: false,
features: WasmFeatures::default(),
max_instances: 10_000,
max_tables: 10_000,
max_memories: 10_000,
#[cfg(feature = "async")]
async_stack_size: 2 << 20,
host_funcs: HostFuncMap::new(),
@@ -1192,39 +1190,6 @@ impl Config {
self
}
/// Configures the maximum number of instances which can be created within
/// this `Store`.
///
/// Instantiation will fail with an error if this limit is exceeded.
///
/// This value defaults to 10,000.
pub fn max_instances(&mut self, instances: usize) -> &mut Self {
self.max_instances = instances;
self
}
/// Configures the maximum number of tables which can be created within
/// this `Store`.
///
/// Instantiation will fail with an error if this limit is exceeded.
///
/// This value defaults to 10,000.
pub fn max_tables(&mut self, tables: usize) -> &mut Self {
self.max_tables = tables;
self
}
/// Configures the maximum number of memories which can be created within
/// this `Store`.
///
/// Instantiation will fail with an error if this limit is exceeded.
///
/// This value defaults to 10,000.
pub fn max_memories(&mut self, memories: usize) -> &mut Self {
self.max_memories = memories;
self
}
/// Defines a host function for the [`Config`] for the given callback.
///
/// Use [`Store::get_host_func`](crate::Store::get_host_func) to get a [`Func`](crate::Func) representing the function.
@@ -1318,6 +1283,10 @@ impl Config {
for_each_function_signature!(generate_wrap_async_host_func);
pub(crate) fn host_funcs(&self) -> impl Iterator<Item = &HostFunc> {
self.host_funcs.iter()
}
pub(crate) fn get_host_func(&self, module: &str, name: &str) -> Option<&HostFunc> {
self.host_funcs.get(module, name)
}

View File

@@ -1,10 +1,41 @@
use crate::signatures::{SignatureCollection, SignatureRegistry};
use crate::Config;
use anyhow::Result;
use std::collections::HashMap;
use std::sync::Arc;
#[cfg(feature = "cache")]
use wasmtime_cache::CacheConfig;
use wasmtime_jit::Compiler;
use wasmtime_runtime::{debug_builtins, InstanceAllocator};
use wasmtime_runtime::{debug_builtins, InstanceAllocator, InstanceHandle, VMCallerCheckedAnyfunc};
/// This is used as a Send+Sync wrapper around two data structures relating to
/// host functions defined on `Config`:
///
/// * `anyfuncs` - this stores a mapping between the host function instance and
/// a `VMCallerCheckedAnyfunc` that can be used as the function's value in Wasmtime's ABI.
/// The address of the anyfunc needs to be stable, thus the boxed value.
///
/// * `signatures` - this stores the collection of shared signatures registered for every
/// usable host function with this engine.
struct EngineHostFuncs {
anyfuncs: HashMap<InstanceHandle, Box<VMCallerCheckedAnyfunc>>,
signatures: SignatureCollection,
}
impl EngineHostFuncs {
fn new(registry: &SignatureRegistry) -> Self {
Self {
anyfuncs: HashMap::new(),
signatures: SignatureCollection::new(registry),
}
}
}
// This is safe to mark `Send` and `Sync` as the contents are read-only once
// the engine is constructed, and the host functions live with the config,
// which the engine keeps a strong reference to.
unsafe impl Send for EngineHostFuncs {}
unsafe impl Sync for EngineHostFuncs {}
/// An `Engine` which is a global context for compilation and management of wasm
/// modules.
@@ -37,6 +68,8 @@ struct EngineInner {
config: Config,
compiler: Compiler,
allocator: Box<dyn InstanceAllocator>,
signatures: SignatureRegistry,
host_funcs: EngineHostFuncs,
}
impl Engine {
@@ -46,11 +79,29 @@ impl Engine {
debug_builtins::ensure_exported();
config.validate()?;
let allocator = config.build_allocator()?;
let registry = SignatureRegistry::new();
let mut host_funcs = EngineHostFuncs::new(&registry);
// Register all the host function signatures with the collection
for func in config.host_funcs() {
let sig = host_funcs
.signatures
.register(func.ty.as_wasm_func_type(), func.trampoline);
// Cloning the instance handle is safe as host functions outlive the engine
host_funcs.anyfuncs.insert(
unsafe { func.instance.clone() },
Box::new(func.anyfunc(sig)),
);
}
Ok(Engine {
inner: Arc::new(EngineInner {
config: config.clone(),
compiler: config.build_compiler(allocator.as_ref()),
allocator,
signatures: registry,
host_funcs,
}),
})
}
@@ -79,6 +130,25 @@ impl Engine {
Arc::ptr_eq(&a.inner, &b.inner)
}
pub(crate) fn signatures(&self) -> &SignatureRegistry {
&self.inner.signatures
}
pub(crate) fn host_func_signatures(&self) -> &SignatureCollection {
&self.inner.host_funcs.signatures
}
pub(crate) fn host_func_anyfunc(
&self,
instance: &InstanceHandle,
) -> Option<&VMCallerCheckedAnyfunc> {
self.inner
.host_funcs
.anyfuncs
.get(instance)
.map(AsRef::as_ref)
}
/// Ahead-of-time (AOT) compiles a WebAssembly module.
///
/// The `bytes` provided must be in one of two formats:
@@ -90,8 +160,9 @@ impl Engine {
/// Note that the `wat` feature is enabled by default.
///
/// This method may be used to compile a module for use with a different target
/// host. The output of this method may be used with [`Module::new`](crate::Module::new)
/// on hosts compatible with the [`Config`] associated with this [`Engine`].
/// host. The output of this method may be used with
/// [`Module::deserialize`](crate::Module::deserialize) on hosts compatible
/// with the [`Config`] associated with this [`Engine`].
///
/// The output of this method is safe to send to another host machine for later
/// execution. As the output is already a compiled module, translation and code

View File

@@ -1,4 +1,4 @@
use crate::{sig_registry::SignatureRegistry, trampoline::StoreInstanceHandle};
use crate::trampoline::StoreInstanceHandle;
use crate::{Config, Extern, FuncType, Store, Trap, Val, ValType};
use anyhow::{bail, Context as _, Result};
use smallvec::{smallvec, SmallVec};
@@ -22,9 +22,9 @@ use wasmtime_runtime::{
/// This differs from `Func` in that it is not associated with a `Store`.
/// Host functions are associated with a `Config`.
pub(crate) struct HostFunc {
ty: FuncType,
instance: InstanceHandle,
trampoline: VMTrampoline,
pub ty: FuncType,
pub instance: InstanceHandle,
pub trampoline: VMTrampoline,
}
impl HostFunc {
@@ -73,6 +73,23 @@ impl HostFunc {
}
}
/// Gets a caller-checked anyfunc for this host function given a shared signature index.
///
/// The shared signature index must have been registered for the signature of
/// this host function.
pub fn anyfunc(&self, sig: VMSharedSignatureIndex) -> VMCallerCheckedAnyfunc {
let mut anyfunc = match self
.instance
.lookup_by_declaration(&EntityIndex::Function(FuncIndex::from_u32(0)))
{
wasmtime_runtime::Export::Function(f) => unsafe { f.anyfunc.as_ref() }.clone(),
_ => unreachable!(),
};
anyfunc.type_index = sig;
anyfunc
}
/// Converts a `HostFunc` to a `Func`.
///
/// # Safety
@@ -88,11 +105,11 @@ impl HostFunc {
};
let export = ExportFunction {
anyfunc: std::ptr::NonNull::new_unchecked(store.get_host_anyfunc(
&self.instance,
&self.ty,
self.trampoline,
)),
anyfunc: store
.engine()
.host_func_anyfunc(&self.instance)
.unwrap()
.into(),
};
Func {
@@ -408,13 +425,9 @@ impl Func {
Func::invoke(&store, &ty_clone, caller_vmctx, values_vec, &func)
});
let (instance, trampoline) = crate::trampoline::create_function(
&ty,
func,
store.engine().config(),
Some(&mut store.signatures().borrow_mut()),
)
.expect("failed to create function");
let (instance, trampoline) =
crate::trampoline::create_function(&ty, func, store.engine().config(), Some(store))
.expect("failed to create function");
let idx = EntityIndex::Function(FuncIndex::from_u32(0));
let (instance, export) = match instance.lookup_by_declaration(&idx) {
@@ -734,7 +747,7 @@ impl Func {
/// # }
/// ```
pub fn wrap<Params, Results>(store: &Store, func: impl IntoFunc<Params, Results>) -> Func {
let (_, instance, trampoline) = func.into_func(Some(&mut store.signatures().borrow_mut()));
let (_, instance, trampoline) = func.into_func(Some(store));
let (instance, export) = unsafe {
let idx = EntityIndex::Function(FuncIndex::from_u32(0));
@@ -759,35 +772,26 @@ impl Func {
/// Returns the underlying wasm type that this `Func` has.
pub fn ty(&self) -> FuncType {
// Signatures should always be registered in the store's registry of
// Signatures should always be registered in the engine's registry of
// shared signatures, so we should be able to unwrap safely here.
let signatures = self.instance.store.signatures().borrow();
let (wft, _) = signatures
.lookup_shared(self.sig_index())
.expect("signature should be registered");
// This is only called with `Export::Function`, and since it's coming
// from wasmtime_runtime itself we should support all the types coming
// out of it, so assert such here.
FuncType::from_wasm_func_type(&wft)
FuncType::from_wasm_func_type(
self.instance
.store
.engine()
.signatures()
.lookup_type(self.sig_index())
.expect("signature should be registered"),
)
}
/// Returns the number of parameters that this function takes.
pub fn param_arity(&self) -> usize {
let signatures = self.instance.store.signatures().borrow();
let (sig, _) = signatures
.lookup_shared(self.sig_index())
.expect("signature should be registered");
sig.params.len()
self.ty().params().len()
}
/// Returns the number of results this function produces.
pub fn result_arity(&self) -> usize {
let signatures = self.instance.store.signatures().borrow();
let (sig, _) = signatures
.lookup_shared(self.sig_index())
.expect("signature should be registered");
sig.returns.len()
self.ty().results().len()
}
/// Invokes this function with the `params` given, returning the results and
@@ -907,21 +911,12 @@ impl Func {
}
pub(crate) unsafe fn from_wasmtime_function(export: &ExportFunction, store: &Store) -> Self {
// Each function signature in a module should have a trampoline stored
// on that module as well, so unwrap the result here since otherwise
// it's a bug in wasmtime.
let anyfunc = export.anyfunc.as_ref();
let trampoline = store
.signatures()
.borrow()
.lookup_shared(anyfunc.type_index)
.expect("failed to retrieve trampoline from module")
.1;
Func {
instance: store.existing_vmctx(anyfunc.vmctx),
export: export.clone(),
trampoline,
trampoline: store.lookup_trampoline(&*anyfunc),
}
}
@@ -1542,10 +1537,7 @@ for_each_function_signature!(impl_host_abi);
/// as an implementation detail of this crate.
pub trait IntoFunc<Params, Results> {
#[doc(hidden)]
fn into_func(
self,
registry: Option<&mut SignatureRegistry>,
) -> (FuncType, InstanceHandle, VMTrampoline);
fn into_func(self, store: Option<&Store>) -> (FuncType, InstanceHandle, VMTrampoline);
}
/// A structure representing the *caller's* context when creating a function
@@ -1658,12 +1650,12 @@ macro_rules! impl_into_func {
$($args: WasmTy,)*
R: WasmRet,
{
fn into_func(self, registry: Option<&mut SignatureRegistry>) -> (FuncType, InstanceHandle, VMTrampoline) {
fn into_func(self, store: Option<&Store>) -> (FuncType, InstanceHandle, VMTrampoline) {
let f = move |_: Caller<'_>, $($args:$args),*| {
self($($args),*)
};
f.into_func(registry)
f.into_func(store)
}
}
@@ -1674,7 +1666,7 @@ macro_rules! impl_into_func {
$($args: WasmTy,)*
R: WasmRet,
{
fn into_func(self, registry: Option<&mut SignatureRegistry>) -> (FuncType, InstanceHandle, VMTrampoline) {
fn into_func(self, store: Option<&Store>) -> (FuncType, InstanceHandle, VMTrampoline) {
/// This shim is called by Wasm code, constructs a `Caller`,
/// calls the wrapped host function, and returns the translated
/// result back to Wasm.
@@ -1807,10 +1799,10 @@ macro_rules! impl_into_func {
let trampoline = host_trampoline::<$($args,)* R>;
// If not given a registry, use a default signature index that is guaranteed to trap
// if the function is called indirectly without first being associated with a store (a bug condition).
let shared_signature_id = registry
.map(|r| r.register(ty.as_wasm_func_type(), trampoline))
// If not given a store, use a default signature index that is guaranteed to trap
// if the function is called indirectly without first being associated with a store (a bug condition).
let shared_signature_id = store
.map(|s| s.signatures().borrow_mut().register(ty.as_wasm_func_type(), trampoline))
.unwrap_or(VMSharedSignatureIndex::default());
let instance = unsafe {

View File

@@ -207,7 +207,7 @@ unsafe impl WasmTy for Option<ExternRef> {
unsafe {
store
.externref_activations_table()
.insert_with_gc(x.inner, store.stack_map_registry());
.insert_with_gc(x.inner, store.module_info_lookup());
}
abi
} else {

View File

@@ -13,9 +13,9 @@ use wasmtime_environ::wasm::{
};
use wasmtime_environ::Initializer;
use wasmtime_runtime::{
Imports, InstanceAllocationRequest, InstantiationError, RuntimeInstance, StackMapRegistry,
VMContext, VMExternRefActivationsTable, VMFunctionBody, VMFunctionImport, VMGlobalImport,
VMMemoryImport, VMTableImport,
Imports, InstanceAllocationRequest, InstantiationError, RuntimeInstance, VMContext,
VMExternRefActivationsTable, VMFunctionBody, VMFunctionImport, VMGlobalImport, VMMemoryImport,
VMTableImport,
};
/// An instantiated WebAssembly module.
@@ -362,6 +362,7 @@ impl<'a> Instantiator<'a> {
let expected_ty =
self.cur.module.compiled_module().module().type_of(*index);
matching::MatchCx {
signatures: self.cur.module.signatures(),
types: self.cur.module.types(),
store: self.store,
}
@@ -505,29 +506,26 @@ impl<'a> Instantiator<'a> {
fn instantiate_raw(&self) -> Result<StoreInstanceHandle> {
let compiled_module = self.cur.module.compiled_module();
// Register the module just before instantiation to ensure we have a
// trampoline registered for every signature and to preserve the module's
// compiled JIT code within the `Store`.
self.store.register_module(&self.cur.module);
// Register the module just before instantiation to ensure we keep the module
// properly referenced while in use by the store.
self.store.modules().borrow_mut().register(&self.cur.module);
unsafe {
let engine = self.store.engine();
let allocator = engine.allocator();
let signatures = self.store.signatures().borrow();
let signatures = signatures.lookup_table(&self.cur.module);
let instance = allocator.allocate(InstanceAllocationRequest {
module: compiled_module.module().clone(),
finished_functions: compiled_module.finished_functions(),
imports: self.cur.build(),
shared_signatures: (&signatures).into(),
shared_signatures: self.cur.module.signatures().as_module_map().into(),
host_state: Box::new(()),
interrupts: self.store.interrupts(),
externref_activations_table: self.store.externref_activations_table()
as *const VMExternRefActivationsTable
as *mut _,
stack_map_registry: self.store.stack_map_registry() as *const StackMapRegistry
as *mut _,
module_info_lookup: Some(self.store.module_info_lookup()),
limiter: self.store.limiter().as_ref(),
})?;
// After we've created the `InstanceHandle` we still need to run

View File

@@ -282,13 +282,13 @@ mod func;
mod config;
mod engine;
mod externals;
mod frame_info;
mod instance;
mod limits;
mod linker;
mod memory;
mod module;
mod r#ref;
mod sig_registry;
mod signatures;
mod store;
mod trampoline;
mod trap;
@@ -298,12 +298,12 @@ mod values;
pub use crate::config::*;
pub use crate::engine::*;
pub use crate::externals::*;
pub use crate::frame_info::{FrameInfo, FrameSymbol};
pub use crate::func::*;
pub use crate::instance::Instance;
pub use crate::limits::*;
pub use crate::linker::*;
pub use crate::memory::*;
pub use crate::module::Module;
pub use crate::module::{FrameInfo, FrameSymbol, Module};
pub use crate::r#ref::ExternRef;
pub use crate::store::*;
pub use crate::trap::*;

View File

@@ -0,0 +1,208 @@
pub(crate) const DEFAULT_INSTANCE_LIMIT: usize = 10000;
pub(crate) const DEFAULT_TABLE_LIMIT: usize = 10000;
pub(crate) const DEFAULT_MEMORY_LIMIT: usize = 10000;
/// Used by hosts to limit resource consumption of instances at runtime.
///
/// [`Store::new_with_limits`](crate::Store::new_with_limits) can be used
/// with a resource limiter to take into account non-WebAssembly resource
/// usage to determine if a linear memory or table should be grown.
pub trait ResourceLimiter {
/// Notifies the resource limiter that an instance's linear memory has been requested to grow.
///
/// * `current` is the current size of the linear memory in WebAssembly page units.
/// * `desired` is the desired size of the linear memory in WebAssembly page units.
/// * `maximum` is either the linear memory's maximum or a maximum from an instance allocator,
/// also in WebAssembly page units. A value of `None` indicates that the linear memory is
/// unbounded.
///
/// This function should return `true` to indicate that the growing operation is permitted or
/// `false` if not permitted.
///
/// Note that this function will be called even when the desired size exceeds the given maximum.
///
/// Returning `true` when a maximum has been exceeded will have no effect as the linear memory
/// will not be grown.
fn memory_growing(&self, current: u32, desired: u32, maximum: Option<u32>) -> bool;
/// Notifies the resource limiter that an instance's table has been requested to grow.
///
/// * `current` is the current number of elements in the table.
/// * `desired` is the desired number of elements in the table.
/// * `maximum` is either the table's maximum or a maximum from an instance allocator.
/// A value of `None` indicates that the table is unbounded.
///
/// This function should return `true` to indicate that the growing operation is permitted or
/// `false` if not permitted.
///
/// Note that this function will be called even when the desired count exceeds the given maximum.
///
/// Returning `true` when a maximum has been exceeded will have no effect as the table will
/// not be grown.
fn table_growing(&self, current: u32, desired: u32, maximum: Option<u32>) -> bool;
/// The maximum number of instances that can be created for a [`Store`](crate::Store).
///
/// Module instantiation will fail if this limit is exceeded.
///
/// This value defaults to 10,000.
fn instances(&self) -> usize {
DEFAULT_INSTANCE_LIMIT
}
/// The maximum number of tables that can be created for a [`Store`](crate::Store).
///
/// Module instantiation will fail if this limit is exceeded.
///
/// This value defaults to 10,000.
fn tables(&self) -> usize {
DEFAULT_TABLE_LIMIT
}
/// The maximum number of linear memories that can be created for a [`Store`](crate::Store).
///
/// Module instantiation will fail if this limit is exceeded.
///
/// This value defaults to 10,000.
fn memories(&self) -> usize {
DEFAULT_MEMORY_LIMIT
}
}
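// Below is a minimal sketch of a host-defined limiter implementing the trait
// above; the `PageBudget` type and its `budget` field are illustrative only,
// not part of the API.
#[cfg(test)]
mod limiter_impl_sketch {
    use super::ResourceLimiter;

    /// Caps linear-memory growth at a fixed page budget.
    struct PageBudget {
        budget: u32,
    }

    impl ResourceLimiter for PageBudget {
        fn memory_growing(&self, _current: u32, desired: u32, maximum: Option<u32>) -> bool {
            // Permit growth only within the page budget; growth past the
            // memory's own `maximum` is rejected by the runtime regardless
            // of what is returned here.
            desired <= self.budget && maximum.map_or(true, |max| desired <= max)
        }

        fn table_growing(&self, _current: u32, _desired: u32, _maximum: Option<u32>) -> bool {
            // Tables are not constrained by this sketch.
            true
        }
        // `instances`, `tables`, and `memories` keep their 10,000 defaults.
    }

    #[test]
    fn budget_is_enforced() {
        let limiter = PageBudget { budget: 8 };
        assert!(limiter.memory_growing(0, 8, None));
        assert!(!limiter.memory_growing(0, 9, None));
    }
}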
pub(crate) struct ResourceLimiterProxy<T>(pub T);
impl<T: ResourceLimiter> wasmtime_runtime::ResourceLimiter for ResourceLimiterProxy<T> {
fn memory_growing(&self, current: u32, desired: u32, maximum: Option<u32>) -> bool {
self.0.memory_growing(current, desired, maximum)
}
fn table_growing(&self, current: u32, desired: u32, maximum: Option<u32>) -> bool {
self.0.table_growing(current, desired, maximum)
}
fn instances(&self) -> usize {
self.0.instances()
}
fn tables(&self) -> usize {
self.0.tables()
}
fn memories(&self) -> usize {
self.0.memories()
}
}
/// Used to build [`StoreLimits`].
pub struct StoreLimitsBuilder(StoreLimits);
impl StoreLimitsBuilder {
/// Creates a new [`StoreLimitsBuilder`].
pub fn new() -> Self {
Self(StoreLimits::default())
}
/// The maximum number of WebAssembly pages a linear memory can grow to.
///
/// Growing a linear memory beyond this limit will fail.
///
/// By default, linear memory pages will not be limited.
pub fn memory_pages(mut self, limit: u32) -> Self {
self.0.memory_pages = Some(limit);
self
}
/// The maximum number of elements in a table.
///
/// Growing a table beyond this limit will fail.
///
/// By default, table elements will not be limited.
pub fn table_elements(mut self, limit: u32) -> Self {
self.0.table_elements = Some(limit);
self
}
/// The maximum number of instances that can be created for a [`Store`](crate::Store).
///
/// Module instantiation will fail if this limit is exceeded.
///
/// This value defaults to 10,000.
pub fn instances(mut self, limit: usize) -> Self {
self.0.instances = limit;
self
}
/// The maximum number of tables that can be created for a [`Store`](crate::Store).
///
/// Module instantiation will fail if this limit is exceeded.
///
/// This value defaults to 10,000.
pub fn tables(mut self, tables: usize) -> Self {
self.0.tables = tables;
self
}
/// The maximum number of linear memories that can be created for a [`Store`](crate::Store).
///
/// Module instantiation will fail if this limit is exceeded.
///
/// This value defaults to 10,000.
pub fn memories(mut self, memories: usize) -> Self {
self.0.memories = memories;
self
}
/// Consumes this builder and returns the [`StoreLimits`].
pub fn build(self) -> StoreLimits {
self.0
}
}
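// A minimal usage sketch for the builder (illustrative): it assumes the
// `Store::new_with_limits` constructor referenced in the trait docs above
// accepts any `ResourceLimiter`, such as the `StoreLimits` built here, and
// that `Engine::default()` is available.
#[cfg(test)]
mod builder_usage_sketch {
    use super::StoreLimitsBuilder;
    use crate::{Engine, Store};

    #[test]
    fn build_and_attach_limits() {
        let engine = Engine::default();
        // Cap each linear memory at 16 pages (1 MiB) and each table at
        // 1,000 elements; the count limits keep their 10,000 defaults.
        let limits = StoreLimitsBuilder::new()
            .memory_pages(16)
            .table_elements(1_000)
            .build();
        let store = Store::new_with_limits(&engine, limits);
        // Memory and table growth beyond these limits in `store` will fail.
        drop(store);
    }
}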
/// Provides limits for a [`Store`](crate::Store).
pub struct StoreLimits {
memory_pages: Option<u32>,
table_elements: Option<u32>,
instances: usize,
tables: usize,
memories: usize,
}
impl Default for StoreLimits {
fn default() -> Self {
Self {
memory_pages: None,
table_elements: None,
instances: DEFAULT_INSTANCE_LIMIT,
tables: DEFAULT_TABLE_LIMIT,
memories: DEFAULT_MEMORY_LIMIT,
}
}
}
impl ResourceLimiter for StoreLimits {
fn memory_growing(&self, _current: u32, desired: u32, _maximum: Option<u32>) -> bool {
match self.memory_pages {
Some(limit) if desired > limit => false,
_ => true,
}
}
fn table_growing(&self, _current: u32, desired: u32, _maximum: Option<u32>) -> bool {
match self.table_elements {
Some(limit) if desired > limit => false,
_ => true,
}
}
fn instances(&self) -> usize {
self.instances
}
fn tables(&self) -> usize {
self.tables
}
fn memories(&self) -> usize {
self.memories
}
}
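// A short check of the default limiter's semantics as implemented above
// (illustrative test only).
#[cfg(test)]
mod store_limits_sketch {
    use super::{ResourceLimiter, StoreLimitsBuilder};

    #[test]
    fn page_limit_vetoes_growth() {
        let limits = StoreLimitsBuilder::new().memory_pages(10).build();
        // Growing to 10 pages is allowed; 11 exceeds the configured limit.
        assert!(limits.memory_growing(0, 10, None));
        assert!(!limits.memory_growing(0, 11, None));
        // With no table limit configured, any table growth is permitted.
        assert!(limits.table_growing(0, u32::MAX, None));
    }
}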

View File

@@ -262,7 +262,7 @@ impl Memory {
/// let store = Store::new(&engine);
///
/// let memory_ty = MemoryType::new(Limits::new(1, None));
/// let memory = Memory::new(&store, memory_ty);
/// let memory = Memory::new(&store, memory_ty)?;
///
/// let module = Module::new(&engine, "(module (memory (import \"\" \"\") 1))")?;
/// let instance = Instance::new(&store, &module, &[memory.into()])?;
@@ -270,13 +270,12 @@ impl Memory {
/// # Ok(())
/// # }
/// ```
pub fn new(store: &Store, ty: MemoryType) -> Memory {
let (instance, wasmtime_export) =
generate_memory_export(store, &ty).expect("generated memory");
Memory {
pub fn new(store: &Store, ty: MemoryType) -> Result<Memory> {
let (instance, wasmtime_export) = generate_memory_export(store, &ty)?;
Ok(Memory {
instance,
wasmtime_export,
}
})
}
/// Returns the underlying type of this memory.
@@ -454,7 +453,7 @@ impl Memory {
.memory_index(unsafe { &*self.wasmtime_export.definition });
self.instance
.memory_grow(index, delta)
.ok_or_else(|| anyhow!("failed to grow memory"))
.ok_or_else(|| anyhow!("failed to grow memory by `{}`", delta))
}
pub(crate) unsafe fn from_wasmtime_memory(
@@ -500,6 +499,10 @@ pub unsafe trait LinearMemory {
/// Returns the number of allocated wasm pages.
fn size(&self) -> u32;
/// Returns the maximum number of pages the memory can grow to.
/// Returns `None` if the memory is unbounded.
fn maximum(&self) -> Option<u32>;
/// Grow memory by the specified number of wasm pages.
///
/// Returns `None` if memory can't be grown by the specified amount
@@ -568,7 +571,7 @@ mod tests {
.dynamic_memory_guard_size(0);
let store = Store::new(&Engine::new(&cfg).unwrap());
let ty = MemoryType::new(Limits::new(1, None));
let mem = Memory::new(&store, ty);
let mem = Memory::new(&store, ty).unwrap();
assert_eq!(mem.wasmtime_export.memory.offset_guard_size, 0);
match mem.wasmtime_export.memory.style {
wasmtime_environ::MemoryStyle::Dynamic => {}

View File

@@ -1,4 +1,7 @@
use crate::types::{ExportType, ExternType, ImportType};
use crate::{
signatures::SignatureCollection,
types::{ExportType, ExternType, ImportType},
};
use crate::{Engine, ModuleType};
use anyhow::{bail, Context, Result};
use std::fs;
@@ -11,8 +14,10 @@ use wasmtime_environ::entity::PrimaryMap;
use wasmtime_environ::wasm::ModuleIndex;
use wasmtime_jit::{CompilationArtifacts, CompiledModule, TypeTables};
mod registry;
mod serialization;
pub use registry::{FrameInfo, FrameSymbol, GlobalModuleRegistry, ModuleRegistry};
pub use serialization::SerializedModule;
/// A compiled WebAssembly module, ready to be instantiated.
@@ -102,6 +107,8 @@ struct ModuleInner {
/// Type information of this module and all `artifact_upvars` compiled
/// modules.
types: Arc<TypeTables>,
/// Registered shared signature for the module.
signatures: Arc<SignatureCollection>,
}
impl Module {
@@ -114,9 +121,6 @@ impl Module {
/// This is only supported when the `wat` feature of this crate is enabled.
/// If this is supplied then the text format will be parsed before validation.
/// Note that the `wat` feature is enabled by default.
/// * A module serialized with [`Module::serialize`].
/// * A module compiled with [`Engine::precompile_module`] or the
/// `wasmtime compile` command.
///
/// The data for the wasm module must be loaded in-memory if it's present
/// elsewhere, for example on disk. This requires that the entire binary is
@@ -175,11 +179,6 @@ impl Module {
/// ```
pub fn new(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Module> {
let bytes = bytes.as_ref();
if let Some(module) = SerializedModule::from_bytes(bytes)? {
return module.into_module(engine);
}
#[cfg(feature = "wat")]
let bytes = wat::parse_bytes(bytes)?;
Self::from_binary(engine, &bytes)
@@ -251,10 +250,10 @@ impl Module {
/// data.
///
/// This is similar to [`Module::new`] except that it requires that the
/// `binary` input is a WebAssembly binary or a compiled module, the
/// text format is not supported by this function. It's generally
/// recommended to use [`Module::new`], but if it's required to not
/// support the text format this function can be used instead.
/// `binary` input is a WebAssembly binary, the text format is not supported
/// by this function. It's generally recommended to use [`Module::new`], but
/// if it's required to not support the text format this function can be
/// used instead.
///
/// # Examples
///
@@ -279,10 +278,6 @@ impl Module {
/// # }
/// ```
pub fn from_binary(engine: &Engine, binary: &[u8]) -> Result<Module> {
if let Some(module) = SerializedModule::from_bytes(binary)? {
return module.into_module(engine);
}
// Check to see that the config's target matches the host
let target = engine.config().isa_flags.triple();
if *target != target_lexicon::Triple::host() {
@@ -313,25 +308,138 @@ impl Module {
}
};
let mut modules = CompiledModule::from_artifacts_list(
let modules = CompiledModule::from_artifacts_list(
artifacts,
engine.compiler().isa(),
&*engine.config().profiler,
)?;
Self::from_parts(engine, modules, main_module, Arc::new(types), &[])
}
/// Deserializes an in-memory compiled module previously created with
/// [`Module::serialize`] or [`Engine::precompile_module`].
///
/// This function will deserialize the binary blobs emitted by
/// [`Module::serialize`] and [`Engine::precompile_module`] back into an
/// in-memory [`Module`] that's ready to be instantiated.
///
/// # Unsafety
///
/// This function is marked as `unsafe` because, if fed invalid input or
/// used improperly, it could lead to memory safety vulnerabilities. This
/// method should not, for example, be exposed to arbitrary user input.
///
/// The structure of the binary blob read here is only lightly validated
/// internally in `wasmtime`. This is intended to be an efficient
/// "rehydration" for a [`Module`] which has very few runtime checks beyond
/// deserialization. Arbitrary input could, for example, replace valid
/// compiled code with any other valid compiled code, meaning this function
/// could trivially be used to execute arbitrary code.
///
/// For these reasons this function is `unsafe`. This function is only
/// designed to receive the previous input from [`Module::serialize`] and
/// [`Engine::precompile_module`]. If the exact output of those functions
/// (unmodified) is passed to this function then calls to this function can
/// be considered safe. It is the caller's responsibility to provide the
/// guarantee that only previously-serialized bytes are being passed in
/// here.
///
/// Note that this function is designed to be safe when receiving output from
/// *any* compiled version of `wasmtime` itself. This means that it is safe
/// to feed output from older versions of Wasmtime into this function, in
/// addition to newer versions of wasmtime (from the future!). These inputs
/// will deterministically and safely produce an `Err`. This function only
/// successfully accepts inputs from the same version of `wasmtime`, but the
/// safety guarantee only applies to externally-defined blobs of bytes, not
/// those defined by any version of wasmtime. (This means that if you cache
/// blobs across versions of wasmtime, you are guaranteed that future
/// versions of wasmtime will safely reject old cache entries.)
pub unsafe fn deserialize(engine: &Engine, bytes: impl AsRef<[u8]>) -> Result<Module> {
let module = SerializedModule::from_bytes(bytes.as_ref())?;
module.into_module(engine)
}
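A sketch of the intended round trip, where the `unsafe` contract holds because the bytes come straight from `Module::serialize` in the same process (assuming `serialize` returns the serialized bytes as implied by the docs above):

use anyhow::Result;
use wasmtime::{Engine, Module};

fn roundtrip() -> Result<()> {
    let engine = Engine::default();
    let module = Module::new(&engine, "(module)")?;
    let bytes = module.serialize()?;
    // Safe per the contract documented above: `bytes` is the unmodified
    // output of `Module::serialize` from this same version of wasmtime.
    let _module = unsafe { Module::deserialize(&engine, &bytes)? };
    Ok(())
}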
fn from_parts(
engine: &Engine,
mut modules: Vec<Arc<CompiledModule>>,
main_module: usize,
types: Arc<TypeTables>,
module_upvars: &[serialization::SerializedModuleUpvar],
) -> Result<Self> {
// Validate the module can be used with the current allocator
engine.allocator().validate(modules[main_module].module())?;
let signatures = Arc::new(SignatureCollection::new_for_module(
engine.signatures(),
&types.wasm_signatures,
modules.iter().flat_map(|m| m.trampolines().iter().cloned()),
));
let module = modules.remove(main_module);
// Validate the module can be used with the current allocator
engine.allocator().validate(module.module())?;
let module_upvars = module_upvars
.iter()
.map(|m| {
mk(
engine,
&modules,
&types,
m.index,
&m.artifact_upvars,
&m.module_upvars,
&signatures,
)
})
.collect::<Result<Vec<_>>>()?;
Ok(Module {
return Ok(Self {
inner: Arc::new(ModuleInner {
engine: engine.clone(),
types,
module,
types: Arc::new(types),
artifact_upvars: modules,
module_upvars: Vec::new(),
module_upvars,
signatures,
}),
})
});
fn mk(
engine: &Engine,
artifacts: &[Arc<CompiledModule>],
types: &Arc<TypeTables>,
module_index: usize,
artifact_upvars: &[usize],
module_upvars: &[serialization::SerializedModuleUpvar],
signatures: &Arc<SignatureCollection>,
) -> Result<Module> {
Ok(Module {
inner: Arc::new(ModuleInner {
engine: engine.clone(),
types: types.clone(),
module: artifacts[module_index].clone(),
artifact_upvars: artifact_upvars
.iter()
.map(|i| artifacts[*i].clone())
.collect(),
module_upvars: module_upvars
.into_iter()
.map(|m| {
mk(
engine,
artifacts,
types,
m.index,
&m.artifact_upvars,
&m.module_upvars,
signatures,
)
})
.collect::<Result<Vec<_>>>()?,
signatures: signatures.clone(),
}),
})
}
}
/// Validates `binary` input data as a WebAssembly binary given the
@@ -416,8 +524,8 @@ impl Module {
) -> Module {
Module {
inner: Arc::new(ModuleInner {
types: self.types().clone(),
engine: self.engine().clone(),
types: self.inner.types.clone(),
engine: self.inner.engine.clone(),
module: self.inner.artifact_upvars[artifact_index].clone(),
artifact_upvars: artifact_upvars
.iter()
@@ -432,6 +540,7 @@ impl Module {
wasmtime_environ::ModuleUpvar::Local(i) => modules[i].clone(),
})
.collect(),
signatures: self.inner.signatures.clone(),
}),
}
}
@@ -448,6 +557,10 @@ impl Module {
&self.inner.types
}
pub(crate) fn signatures(&self) -> &Arc<SignatureCollection> {
&self.inner.signatures
}
/// Looks up the module upvar value at the `index` specified.
///
/// Note that this panics if `index` is out of bounds since this should

View File

@@ -1,35 +1,38 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use std::sync::Mutex;
use wasmtime_environ::entity::EntityRef;
use wasmtime_environ::ir;
use wasmtime_environ::wasm::DefinedFuncIndex;
use wasmtime_environ::{FunctionAddressMap, TrapInformation};
use wasmtime_jit::CompiledModule;
//! Implements a registry of modules for a store.
/// This is a structure that lives within a `Store` and retains information
/// about all modules registered with the `Store` via instantiation.
///
/// "frame information" here refers to things like determining whether a
/// program counter is a wasm program counter, and additionally mapping program
/// counters to wasm filenames, modules, line numbers, etc. This store of
/// information lives as long as a `Store` lives since modules are never
/// unloaded today.
#[derive(Default)]
pub struct StoreFrameInfo {
/// An internal map that keeps track of backtrace frame information for
/// each module.
///
/// This map is morally a map of ranges to a map of information for that
/// module. Each module is expected to reside in a disjoint section of
/// contiguous memory. No modules can overlap.
///
/// The key of this map is the highest address in the module and the value
/// is the module's information, which also contains the start address.
ranges: BTreeMap<usize, ModuleFrameInfo>,
use crate::{signatures::SignatureCollection, Module};
use std::{
collections::BTreeMap,
sync::{Arc, Mutex},
};
use wasmtime_environ::{
entity::EntityRef,
ir::{self, StackMap},
wasm::DefinedFuncIndex,
FunctionAddressMap, TrapInformation,
};
use wasmtime_jit::CompiledModule;
use wasmtime_runtime::{ModuleInfo, VMCallerCheckedAnyfunc, VMTrampoline};
lazy_static::lazy_static! {
static ref GLOBAL_MODULES: Mutex<GlobalModuleRegistry> = Default::default();
}
impl StoreFrameInfo {
fn func_by_pc(module: &CompiledModule, pc: usize) -> Option<(DefinedFuncIndex, u32)> {
let (index, start, _) = module.func_by_pc(pc)?;
Some((index, (pc - start) as u32))
}
/// Used for registering modules with a store.
///
/// The map is from the ending (exclusive) address for the module code to
/// the registered module.
///
/// The `BTreeMap` is used to quickly locate a module based on a program counter value.
#[derive(Default)]
pub struct ModuleRegistry(BTreeMap<usize, Arc<RegisteredModule>>);
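A self-contained sketch of the end-keyed range lookup this map enables (standard-library types only; the real registry stores `Arc<RegisteredModule>` values rather than start addresses):

use std::collections::BTreeMap;

// Key: last pc covered by a module; value: that module's start pc.
fn lookup(ranges: &BTreeMap<usize, usize>, pc: usize) -> Option<usize> {
    // Find the first entry whose end is >= pc, then confirm pc is in range.
    let (&end, &start) = ranges.range(pc..).next()?;
    if pc < start || end < pc {
        return None;
    }
    Some(start)
}

fn main() {
    let mut ranges = BTreeMap::new();
    ranges.insert(0x1fff, 0x1000); // a module covering 0x1000..=0x1fff
    assert_eq!(lookup(&ranges, 0x1234), Some(0x1000));
    assert_eq!(lookup(&ranges, 0x2345), None);
}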
impl ModuleRegistry {
/// Fetches frame information about a program counter in a backtrace.
///
/// Returns an object if this `pc` is known to some previously registered
@@ -48,8 +51,14 @@ impl StoreFrameInfo {
self.module(pc)?.lookup_trap_info(pc)
}
fn module(&self, pc: usize) -> Option<&ModuleFrameInfo> {
let (end, info) = self.ranges.range(pc..).next()?;
/// Fetches information about a registered module given a program counter value.
pub fn lookup_module(&self, pc: usize) -> Option<Arc<dyn ModuleInfo>> {
self.module(pc)
.map(|m| -> Arc<dyn ModuleInfo> { m.clone() })
}
fn module(&self, pc: usize) -> Option<&Arc<RegisteredModule>> {
let (end, info) = self.0.range(pc..).next()?;
if pc < info.start || *end < pc {
return None;
}
@@ -57,12 +66,13 @@ impl StoreFrameInfo {
Some(info)
}
/// Registers a new compiled module's frame information.
pub fn register(&mut self, module: &Arc<CompiledModule>) {
let (start, end) = module.code().range();
/// Registers a new module with the registry.
pub fn register(&mut self, module: &Module) {
let compiled_module = module.compiled_module();
let (start, end) = compiled_module.code().range();
// Ignore modules with no code or finished functions
if start == end || module.finished_functions().is_empty() {
if start == end || compiled_module.finished_functions().is_empty() {
return;
}
@@ -70,44 +80,58 @@ impl StoreFrameInfo {
// may be a valid PC value
let end = end - 1;
// Ensure the module isn't already present in the registry
// This is expected when a module is instantiated multiple times in the same store
if let Some(m) = self.0.get(&end) {
assert_eq!(m.start, start);
return;
}
// Assert that this module's code doesn't collide with any other registered modules
if let Some((_, prev)) = self.ranges.range(end..).next() {
if let Some((_, prev)) = self.0.range(end..).next() {
assert!(prev.start > end);
}
if let Some((prev_end, _)) = self.ranges.range(..=start).next_back() {
if let Some((prev_end, _)) = self.0.range(..=start).next_back() {
assert!(*prev_end < start);
}
let prev = self.ranges.insert(
let prev = self.0.insert(
end,
ModuleFrameInfo {
Arc::new(RegisteredModule {
start,
module: module.clone(),
},
module: compiled_module.clone(),
signatures: module.signatures().clone(),
}),
);
assert!(prev.is_none());
GLOBAL_INFO.lock().unwrap().register(start, end, module);
GLOBAL_MODULES.lock().unwrap().register(start, end, module);
}
/// Looks up a trampoline from an anyfunc.
pub fn lookup_trampoline(&self, anyfunc: &VMCallerCheckedAnyfunc) -> Option<VMTrampoline> {
let module = self.module(anyfunc.func_ptr.as_ptr() as usize)?;
module.signatures.trampoline(anyfunc.type_index)
}
}
impl Drop for StoreFrameInfo {
impl Drop for ModuleRegistry {
fn drop(&mut self) {
let mut info = GLOBAL_INFO.lock().unwrap();
for end in self.ranges.keys() {
let mut info = GLOBAL_MODULES.lock().unwrap();
for end in self.0.keys() {
info.unregister(*end);
}
}
}
/// Represents a module's frame information.
#[derive(Clone)]
pub struct ModuleFrameInfo {
struct RegisteredModule {
start: usize,
module: Arc<CompiledModule>,
signatures: Arc<SignatureCollection>,
}
impl ModuleFrameInfo {
impl RegisteredModule {
/// Determines if the related module has unparsed debug information.
pub fn has_unparsed_debuginfo(&self) -> bool {
self.module.has_unparsed_debuginfo()
@@ -118,9 +142,9 @@ impl ModuleFrameInfo {
/// Returns an object if this `pc` is known to this module, or returns `None`
/// if no information can be found.
pub fn lookup_frame_info(&self, pc: usize) -> Option<FrameInfo> {
let (index, offset) = self.func(pc)?;
let (addr_map, _) = self.module.func_info(index);
let pos = Self::instr_pos(offset, addr_map);
let (index, offset) = func_by_pc(&self.module, pc)?;
let info = self.module.func_info(index);
let pos = Self::instr_pos(offset, &info.address_map);
// In debug mode for now assert that we found a mapping for `pc` within
// the function, because otherwise something is buggy along the way and
@@ -129,8 +153,8 @@ impl ModuleFrameInfo {
debug_assert!(pos.is_some(), "failed to find instruction for {:x}", pc);
let instr = match pos {
Some(pos) => addr_map.instructions[pos].srcloc,
None => addr_map.start_srcloc,
Some(pos) => info.address_map.instructions[pos].srcloc,
None => info.address_map.start_srcloc,
};
// Use our wasm-relative pc to symbolize this frame. If there's a
@@ -173,24 +197,20 @@ impl ModuleFrameInfo {
func_index: index.index() as u32,
func_name: module.func_names.get(&index).cloned(),
instr,
func_start: addr_map.start_srcloc,
func_start: info.address_map.start_srcloc,
symbols,
})
}
/// Fetches trap information about a program counter in a backtrace.
pub fn lookup_trap_info(&self, pc: usize) -> Option<&TrapInformation> {
let (index, offset) = self.func(pc)?;
let (_, traps) = self.module.func_info(index);
let idx = traps
let (index, offset) = func_by_pc(&self.module, pc)?;
let info = self.module.func_info(index);
let idx = info
.traps
.binary_search_by_key(&offset, |info| info.code_offset)
.ok()?;
Some(&traps[idx])
}
fn func(&self, pc: usize) -> Option<(DefinedFuncIndex, u32)> {
let (index, start, _) = self.module.func_by_pc(pc)?;
Some((index, (pc - start) as u32))
Some(&info.traps[idx])
}
fn instr_pos(offset: u32, addr_map: &FunctionAddressMap) -> Option<usize> {
@@ -214,56 +234,117 @@ impl ModuleFrameInfo {
}
}
/// This is the dual of `StoreFrameInfo` and is stored globally (as the name
/// implies) rather than simply in one `Store`.
impl ModuleInfo for RegisteredModule {
fn lookup_stack_map(&self, pc: usize) -> Option<&StackMap> {
let (index, offset) = func_by_pc(&self.module, pc)?;
let info = self.module.func_info(index);
// Do a binary search to find the stack map for the given offset.
//
// Because GC safepoints are technically only associated with a single
// PC, we should ideally only care about `Ok(index)` values returned
// from the binary search. However, safepoints are inserted right before
// calls, and there are two things that can disturb the PC/offset
// associated with the safepoint versus the PC we actually use to query
// for the stack map:
//
// 1. The `backtrace` crate gives us the PC in a frame that will be
// *returned to*, and where execution will continue from, rather than
// the PC of the call we are currently at. So we would need to
// disassemble one instruction backwards to query the actual PC for
// the stack map.
//
// TODO: One thing we *could* do to make this a little less error
// prone would be to assert/check that the nearest GC safepoint
// found is within `max_encoded_size(any kind of call instruction)`
// of our queried PC for the target architecture.
//
// 2. Cranelift's stack maps only handle the stack, not
// registers. However, some references that are arguments to a call
// may need to be in registers. In these cases, what Cranelift will
// do is:
//
// a. spill all the live references,
// b. insert a GC safepoint for those references,
// c. reload the references into registers, and finally
// d. make the call.
//
// Step (c) adds drift between the GC safepoint and the location of
// the call, which is where we actually walk the stack frame and
// collect its live references.
//
// Luckily, the spill stack slots for the live references are still
// up to date, so we can still find all the on-stack roots.
// Furthermore, we do not have a moving GC, so we don't need to worry
// whether the following code will reuse the references in registers
// (which would not have been updated to point to the moved objects)
// or reload from the stack slots (which would have been updated to
// point to the moved objects).
let index = match info
.stack_maps
.binary_search_by_key(&offset, |i| i.code_offset)
{
// Exact hit.
Ok(i) => i,
// `Err(0)` means that the associated stack map would have been the
// first element in the array if this pc had an associated stack
// map, but this pc does not have an associated stack map. This can
// only happen inside a Wasm frame if there are no live refs at this
// pc.
Err(0) => return None,
Err(i) => i - 1,
};
Some(&info.stack_maps[index].stack_map)
}
}
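For reference, a tiny standalone illustration of the `binary_search` `Err` cases relied on above:

fn main() {
    let offsets = [4u32, 8, 16];
    assert_eq!(offsets.binary_search(&8), Ok(1));   // exact hit
    assert_eq!(offsets.binary_search(&2), Err(0));  // before the first entry: no stack map
    assert_eq!(offsets.binary_search(&10), Err(2)); // nearest prior entry is 2 - 1 = 1
}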
// Counterpart to `RegisteredModule`, but stored in the global registry.
struct GlobalRegisteredModule {
start: usize,
module: Arc<CompiledModule>,
/// Note that modules can be instantiated in many stores, so the purpose of
/// this field is to keep track of how many stores have registered a
/// module. Information is only removed from the global registry when this
/// reference count reaches 0.
references: usize,
}
/// This is the global module registry that stores information for all modules
/// that are currently in use by any `Store`.
///
/// The purpose of this map is to let signal handlers determine whether a
/// program counter is a wasm trap or not. Specifically, macOS has no
/// contextual information about the thread available, hence the necessity
/// for global state rather than thread-local state.
///
/// This is similar to `StoreFrameInfo` except that it has less information and
/// supports removal. Any time anything is registered with a `StoreFrameInfo`
/// it is also automatically registered with the singleton global frame
/// information. When a `StoreFrameInfo` is destroyed then all of its entries
/// are removed from the global frame information.
/// This is similar to `ModuleRegistry` except that it has less information and
/// supports removal. Any time anything is registered with a `ModuleRegistry`
/// it is also automatically registered with the singleton global module
/// registry. When a `ModuleRegistry` is destroyed then all of its entries
/// are removed from the global module registry.
#[derive(Default)]
pub struct GlobalFrameInfo {
// The map here behaves the same way as `StoreFrameInfo`.
ranges: BTreeMap<usize, GlobalModuleFrameInfo>,
}
pub struct GlobalModuleRegistry(BTreeMap<usize, GlobalRegisteredModule>);
/// This is the equivalent of `ModuleFrameInfo` except it keeps a reference count.
struct GlobalModuleFrameInfo {
module: ModuleFrameInfo,
/// Note that modules can be instantiated in many stores, so the purpose of
/// this field is to keep track of how many stores have registered a
/// module. Information is only removed from the global store when this
/// reference count reaches 0.
references: usize,
}
lazy_static::lazy_static! {
static ref GLOBAL_INFO: Mutex<GlobalFrameInfo> = Default::default();
}
impl GlobalFrameInfo {
impl GlobalModuleRegistry {
/// Returns whether the `pc`, according to globally registered information,
/// is a wasm trap or not.
pub(crate) fn is_wasm_pc(pc: usize) -> bool {
let info = GLOBAL_INFO.lock().unwrap();
let modules = GLOBAL_MODULES.lock().unwrap();
match info.ranges.range(pc..).next() {
Some((end, info)) => {
if pc < info.module.start || *end < pc {
match modules.0.range(pc..).next() {
Some((end, entry)) => {
if pc < entry.start || *end < pc {
return false;
}
match info.module.func(pc) {
match func_by_pc(&entry.module, pc) {
Some((index, offset)) => {
let (addr_map, _) = info.module.module.func_info(index);
ModuleFrameInfo::instr_pos(offset, addr_map).is_some()
let info = entry.module.func_info(index);
RegisteredModule::instr_pos(offset, &info.address_map).is_some()
}
None => false,
}
@@ -274,32 +355,27 @@ impl GlobalFrameInfo {
/// Registers a new region of code, described by `(start, end)` and with
/// the given function information, with the global information.
fn register(&mut self, start: usize, end: usize, module: &Arc<CompiledModule>) {
let info = self
.ranges
.entry(end)
.or_insert_with(|| GlobalModuleFrameInfo {
module: ModuleFrameInfo {
start,
module: module.clone(),
},
references: 0,
});
fn register(&mut self, start: usize, end: usize, module: &Module) {
let info = self.0.entry(end).or_insert_with(|| GlobalRegisteredModule {
start,
module: module.compiled_module().clone(),
references: 0,
});
// Note that ideally we'd debug_assert that the information previously
// stored, if any, matches the `functions` we were given, but for now we
// just do some simple checks and hope it's the same.
assert_eq!(info.module.start, start);
assert_eq!(info.start, start);
info.references += 1;
}
/// Unregisters a region of code (keyed by the `end` address) from this
/// Unregisters a region of code (keyed by the `end` address) from the
/// global information.
fn unregister(&mut self, end: usize) {
let info = self.ranges.get_mut(&end).unwrap();
let info = self.0.get_mut(&end).unwrap();
info.references -= 1;
if info.references == 0 {
self.ranges.remove(&end);
self.0.remove(&end);
}
}
}
@@ -321,19 +397,6 @@ pub struct FrameInfo {
symbols: Vec<FrameSymbol>,
}
/// Debug information for a symbol that is attached to a [`FrameInfo`].
///
/// When DWARF debug information is present in a wasm file then this structure
/// can be found on a [`FrameInfo`] and can be used to learn about filenames,
/// line numbers, etc, which are the origin of a function in a stack trace.
#[derive(Debug)]
pub struct FrameSymbol {
name: Option<String>,
file: Option<String>,
line: Option<u32>,
column: Option<u32>,
}
impl FrameInfo {
/// Returns the WebAssembly function index for this frame.
///
@@ -405,6 +468,19 @@ impl FrameInfo {
}
}
/// Debug information for a symbol that is attached to a [`FrameInfo`].
///
/// When DWARF debug information is present in a wasm file then this structure
/// can be found on a [`FrameInfo`] and can be used to learn about filenames,
/// line numbers, etc, which are the origin of a function in a stack trace.
#[derive(Debug)]
pub struct FrameSymbol {
name: Option<String>,
file: Option<String>,
line: Option<u32>,
column: Option<u32>,
}
impl FrameSymbol {
/// Returns the function name associated with this symbol.
///
@@ -463,7 +539,7 @@ fn test_frame_info() -> Result<(), anyhow::Error> {
)?;
// Create an instance to ensure the frame information is registered.
Instance::new(&store, &module, &[])?;
let info = store.frame_info().borrow();
let modules = store.modules().borrow();
for (i, alloc) in module.compiled_module().finished_functions() {
let (start, end) = unsafe {
let ptr = (**alloc).as_ptr();
@@ -471,7 +547,7 @@ fn test_frame_info() -> Result<(), anyhow::Error> {
(ptr as usize, ptr as usize + len)
};
for pc in start..end {
let (frame, _) = info.lookup_frame_info(pc).unwrap();
let (frame, _) = modules.lookup_frame_info(pc).unwrap();
assert!(frame.func_index() == i.as_u32());
}
}

View File

@@ -1,6 +1,5 @@
//! Implements module serialization.
use super::ModuleInner;
use crate::{Engine, Module, OptLevel};
use anyhow::{anyhow, bail, Context, Result};
use bincode::Options;
@@ -10,8 +9,7 @@ use std::fmt;
use std::str::FromStr;
use std::sync::Arc;
use std::{collections::HashMap, fmt::Display};
use wasmtime_environ::Tunables;
use wasmtime_environ::{isa::TargetIsa, settings};
use wasmtime_environ::{isa::TargetIsa, settings, Tunables};
use wasmtime_jit::{
CompilationArtifacts, CompilationStrategy, CompiledModule, Compiler, TypeTables,
};
@@ -123,55 +121,44 @@ impl From<settings::OptLevel> for OptLevel {
}
}
/// A small helper struct which defines how modules are serialized.
/// A small helper struct for serialized module upvars.
#[derive(Serialize, Deserialize)]
struct SerializedModuleData<'a> {
/// All compiled artifacts needed by this module, where the last entry in
/// this list is the artifacts for the module itself.
artifacts: Vec<MyCow<'a, CompilationArtifacts>>,
pub struct SerializedModuleUpvar {
/// The module's index into the compilation artifact.
pub index: usize,
/// Indexes into the list of all compilation artifacts for this module.
pub artifact_upvars: Vec<usize>,
/// Closed-over module values that are also needed for this module.
modules: Vec<SerializedModuleData<'a>>,
/// The index into the list of type tables that are used for this module's
/// type tables.
type_tables: usize,
pub module_upvars: Vec<SerializedModuleUpvar>,
}
impl<'a> SerializedModuleData<'a> {
pub fn new(module: &'a Module) -> (Self, Vec<MyCow<'a, TypeTables>>) {
let mut pushed = HashMap::new();
let mut tables = Vec::new();
return (module_data(module, &mut pushed, &mut tables), tables);
impl SerializedModuleUpvar {
pub fn new(module: &Module, artifacts: &[Arc<CompiledModule>]) -> Self {
// TODO: improve upon the linear searches in the artifact list
let index = artifacts
.iter()
.position(|a| Arc::as_ptr(a) == Arc::as_ptr(&module.inner.module))
.expect("module should be in artifacts list");
fn module_data<'a>(
module: &'a Module,
type_tables_pushed: &mut HashMap<usize, usize>,
type_tables: &mut Vec<MyCow<'a, TypeTables>>,
) -> SerializedModuleData<'a> {
// Deduplicate `Arc<TypeTables>` using our two parameters to ensure we
// serialize type tables as little as possible.
let ptr = Arc::as_ptr(module.types());
let type_tables_idx = *type_tables_pushed.entry(ptr as usize).or_insert_with(|| {
type_tables.push(MyCow::Borrowed(module.types()));
type_tables.len() - 1
});
SerializedModuleData {
artifacts: module
.inner
.artifact_upvars
.iter()
.map(|i| MyCow::Borrowed(i.compilation_artifacts()))
.chain(Some(MyCow::Borrowed(
module.compiled_module().compilation_artifacts(),
)))
.collect(),
modules: module
.inner
.module_upvars
.iter()
.map(|i| module_data(i, type_tables_pushed, type_tables))
.collect(),
type_tables: type_tables_idx,
}
SerializedModuleUpvar {
index,
artifact_upvars: module
.inner
.artifact_upvars
.iter()
.map(|m| {
artifacts
.iter()
.position(|a| Arc::as_ptr(a) == Arc::as_ptr(m))
.expect("artifact should be in artifacts list")
})
.collect(),
module_upvars: module
.inner
.module_upvars
.iter()
.map(|m| SerializedModuleUpvar::new(m, artifacts))
.collect(),
}
}
}
@@ -212,14 +199,36 @@ pub struct SerializedModule<'a> {
strategy: CompilationStrategy,
tunables: Tunables,
features: WasmFeatures,
data: SerializedModuleData<'a>,
tables: Vec<MyCow<'a, TypeTables>>,
artifacts: Vec<MyCow<'a, CompilationArtifacts>>,
module_upvars: Vec<SerializedModuleUpvar>,
types: MyCow<'a, TypeTables>,
}
impl<'a> SerializedModule<'a> {
pub fn new(module: &'a Module) -> Self {
let (data, tables) = SerializedModuleData::new(module);
Self::with_data(module.engine().compiler(), data, tables)
let compiler = module.engine().compiler();
let artifacts = module
.inner
.artifact_upvars
.iter()
.map(|m| MyCow::Borrowed(m.compilation_artifacts()))
.chain(Some(MyCow::Borrowed(
module.inner.module.compilation_artifacts(),
)))
.collect::<Vec<_>>();
let module_upvars = module
.inner
.module_upvars
.iter()
.map(|m| SerializedModuleUpvar::new(m, &module.inner.artifact_upvars))
.collect::<Vec<_>>();
Self::with_data(
compiler,
artifacts,
module_upvars,
MyCow::Borrowed(module.types()),
)
}
pub fn from_artifacts(
@@ -229,19 +238,17 @@ impl<'a> SerializedModule<'a> {
) -> Self {
Self::with_data(
compiler,
SerializedModuleData {
artifacts: artifacts.iter().map(MyCow::Borrowed).collect(),
modules: Vec::new(),
type_tables: 0,
},
vec![MyCow::Borrowed(types)],
artifacts.iter().map(MyCow::Borrowed).collect(),
Vec::new(),
MyCow::Borrowed(types),
)
}
fn with_data(
compiler: &Compiler,
data: SerializedModuleData<'a>,
tables: Vec<MyCow<'a, TypeTables>>,
artifacts: Vec<MyCow<'a, CompilationArtifacts>>,
module_upvars: Vec<SerializedModuleUpvar>,
types: MyCow<'a, TypeTables>,
) -> Self {
let isa = compiler.isa();
@@ -260,8 +267,9 @@ impl<'a> SerializedModule<'a> {
strategy: compiler.strategy(),
tunables: compiler.tunables().clone(),
features: compiler.features().into(),
data,
tables,
artifacts,
module_upvars,
types,
}
}
@@ -276,47 +284,26 @@ impl<'a> SerializedModule<'a> {
self.check_tunables(compiler)?;
self.check_features(compiler)?;
let types = self
.tables
.into_iter()
.map(|t| Arc::new(t.unwrap_owned()))
.collect::<Vec<_>>();
let module = mk(engine, &types, self.data)?;
let modules = CompiledModule::from_artifacts_list(
self.artifacts
.into_iter()
.map(|i| i.unwrap_owned())
.collect(),
engine.compiler().isa(),
&*engine.config().profiler,
)?;
// Validate the module can be used with the current allocator
engine.allocator().validate(module.inner.module.module())?;
assert!(!modules.is_empty());
return Ok(module);
let main_module = modules.len() - 1;
fn mk(
engine: &Engine,
types: &Vec<Arc<TypeTables>>,
data: SerializedModuleData<'_>,
) -> Result<Module> {
let mut artifacts = CompiledModule::from_artifacts_list(
data.artifacts
.into_iter()
.map(|i| i.unwrap_owned())
.collect(),
engine.compiler().isa(),
&*engine.config().profiler,
)?;
let inner = ModuleInner {
engine: engine.clone(),
types: types[data.type_tables].clone(),
module: artifacts.pop().unwrap(),
artifact_upvars: artifacts,
module_upvars: data
.modules
.into_iter()
.map(|m| mk(engine, types, m))
.collect::<Result<Vec<_>>>()?,
};
Ok(Module {
inner: Arc::new(inner),
})
}
Module::from_parts(
engine,
modules,
main_module,
Arc::new(self.types.unwrap_owned()),
&self.module_upvars,
)
}
pub fn to_bytes(&self) -> Result<Vec<u8>> {
@@ -342,9 +329,9 @@ impl<'a> SerializedModule<'a> {
Ok(bytes)
}
pub fn from_bytes(bytes: &[u8]) -> Result<Option<Self>> {
pub fn from_bytes(bytes: &[u8]) -> Result<Self> {
if !bytes.starts_with(HEADER) {
return Ok(None);
bail!("bytes are not a compatible serialized wasmtime module");
}
let bytes = &bytes[HEADER.len()..];
@@ -366,11 +353,9 @@ impl<'a> SerializedModule<'a> {
);
}
Ok(Some(
bincode_options()
.deserialize::<SerializedModule<'_>>(&bytes[1 + version_len..])
.context("deserialize compilation artifacts")?,
))
Ok(bincode_options()
.deserialize::<SerializedModule<'_>>(&bytes[1 + version_len..])
.context("deserialize compilation artifacts")?)
}
fn check_triple(&self, isa: &dyn TargetIsa) -> Result<()> {

View File

@@ -1,155 +0,0 @@
//! Implement a registry of function signatures, for fast indirect call
//! signature checking.
use crate::Module;
use std::collections::{hash_map, HashMap};
use std::convert::TryFrom;
use wasmtime_environ::entity::PrimaryMap;
use wasmtime_environ::wasm::{SignatureIndex, WasmFuncType};
use wasmtime_runtime::{VMSharedSignatureIndex, VMTrampoline};
/// WebAssembly requires that the caller and callee signatures in an indirect
/// call must match. To implement this efficiently, keep a registry of all
/// signatures, shared by all instances, so that call sites can just do an
/// index comparison.
#[derive(Debug, Default)]
pub struct SignatureRegistry {
// Map from a wasm actual function type to the index that it is assigned,
// shared amongst all wasm modules.
wasm2index: HashMap<WasmFuncType, VMSharedSignatureIndex>,
// Map of all known wasm function signatures in this registry. This is
// keyed by `VMSharedSignatureIndex` above.
index_map: Vec<Entry>,
}
#[derive(Debug)]
struct Entry {
// The WebAssembly type signature, using wasm types.
wasm: WasmFuncType,
// The native trampoline used to invoke this type signature from `Func`.
// Note that the code memory for this trampoline is not owned by this
// type, but instead it's expected to be owned by the store that this
// registry lives within.
trampoline: Option<VMTrampoline>,
}
impl SignatureRegistry {
/// Registers all signatures within a module into this registry all at once.
///
/// This will also internally register trampolines compiled in the module.
pub fn register_module(&mut self, module: &Module) {
// Register a unique index for all types in this module, even if they
// don't have a trampoline.
let signatures = &module.types().wasm_signatures;
for ty in module.compiled_module().module().types.values() {
if let wasmtime_environ::ModuleType::Function(index) = ty {
self.register_one(&signatures[*index], None);
}
}
// Once we've got a shared index for all types used then also fill in
// any trampolines that the module has compiled as well.
for (index, trampoline) in module.compiled_module().trampolines() {
let shared = self.wasm2index[&signatures[*index]];
let entry = &mut self.index_map[shared.bits() as usize];
if entry.trampoline.is_none() {
entry.trampoline = Some(*trampoline);
}
}
}
/// Register a signature and return its unique index.
pub fn register(
&mut self,
wasm: &WasmFuncType,
trampoline: VMTrampoline,
) -> VMSharedSignatureIndex {
self.register_one(wasm, Some(trampoline))
}
fn register_one(
&mut self,
wasm: &WasmFuncType,
trampoline: Option<VMTrampoline>,
) -> VMSharedSignatureIndex {
let len = self.wasm2index.len();
match self.wasm2index.entry(wasm.clone()) {
hash_map::Entry::Occupied(entry) => {
let ret = *entry.get();
let entry = &mut self.index_map[ret.bits() as usize];
// If the entry does not previously have a trampoline, then
// overwrite it with whatever was specified by this function.
if entry.trampoline.is_none() {
entry.trampoline = trampoline;
}
ret
}
hash_map::Entry::Vacant(entry) => {
// Keep `signature_hash` len under 2**32 -- VMSharedSignatureIndex::new(std::u32::MAX)
// is reserved for VMSharedSignatureIndex::default().
assert!(
len < std::u32::MAX as usize,
"Invariant check: signature_hash.len() < std::u32::MAX"
);
debug_assert_eq!(len, self.index_map.len());
let index = VMSharedSignatureIndex::new(u32::try_from(len).unwrap());
self.index_map.push(Entry {
wasm: wasm.clone(),
trampoline,
});
entry.insert(index);
index
}
}
}
/// Looks up a shared index from the wasm signature itself.
pub fn lookup(&self, wasm: &WasmFuncType) -> Option<VMSharedSignatureIndex> {
self.wasm2index.get(wasm).cloned()
}
/// Builds a lookup table for a module from the possible module's signature
/// indices to the shared signature index within this registry.
pub fn lookup_table(
&self,
module: &Module,
) -> PrimaryMap<SignatureIndex, VMSharedSignatureIndex> {
// For module-linking using modules this builds up a map that is
// too large. This builds up a map for everything in `TypeTables` but
// that's all the types for all modules in a whole module linking graph,
// which our `module` may not be using.
//
// For all non-module-linking-using modules, though, this is not an
// issue. This is optimizing for the non-module-linking case right now
// and it seems like module linking will likely change to the point that
// this will no longer be an issue in the future.
let signatures = &module.types().wasm_signatures;
let mut map = PrimaryMap::with_capacity(signatures.len());
for wasm in signatures.values() {
map.push(
self.wasm2index
.get(wasm)
.cloned()
.unwrap_or(VMSharedSignatureIndex::new(u32::MAX)),
);
}
map
}
/// Looks up information known about a shared signature index.
///
/// Note that for this operation to be semantically correct the `idx` must
/// have previously come from a call to `register` of this same object.
pub fn lookup_shared(
&self,
idx: VMSharedSignatureIndex,
) -> Option<(&WasmFuncType, VMTrampoline)> {
let (wasm, trampoline) = self
.index_map
.get(idx.bits() as usize)
.map(|e| (&e.wasm, e.trampoline))?;
Some((wasm, trampoline?))
}
}

View File

@@ -0,0 +1,262 @@
//! Implement a registry of function signatures, for fast indirect call
//! signature checking.
use std::{
collections::{hash_map::Entry, HashMap},
sync::RwLock,
};
use std::{convert::TryFrom, sync::Arc};
use wasmtime_environ::entity::PrimaryMap;
use wasmtime_environ::wasm::{SignatureIndex, WasmFuncType};
use wasmtime_runtime::{VMSharedSignatureIndex, VMTrampoline};
/// Represents a collection of shared signatures.
///
/// This is used to register shared signatures with a shared signature registry.
///
/// The collection will unregister any contained signatures with the registry
/// when dropped.
#[derive(Debug)]
pub struct SignatureCollection {
registry: Arc<RwLock<SignatureRegistryInner>>,
signatures: PrimaryMap<SignatureIndex, VMSharedSignatureIndex>,
trampolines: HashMap<VMSharedSignatureIndex, (usize, VMTrampoline)>,
}
impl SignatureCollection {
/// Creates a new, empty signature collection given a signature registry.
pub fn new(registry: &SignatureRegistry) -> Self {
Self {
registry: registry.0.clone(),
signatures: PrimaryMap::new(),
trampolines: HashMap::new(),
}
}
/// Creates a signature collection for a module given the module's signatures
/// and trampolines.
pub fn new_for_module(
registry: &SignatureRegistry,
signatures: &PrimaryMap<SignatureIndex, WasmFuncType>,
trampolines: impl Iterator<Item = (SignatureIndex, VMTrampoline)>,
) -> Self {
let (signatures, trampolines) = registry
.0
.write()
.unwrap()
.register_for_module(signatures, trampolines);
Self {
registry: registry.0.clone(),
signatures,
trampolines,
}
}
/// Treats the signature collection as a map from a module signature index to
/// registered shared signature indexes.
///
/// This is used for looking up module shared signature indexes during module
/// instantiation.
pub fn as_module_map(&self) -> &PrimaryMap<SignatureIndex, VMSharedSignatureIndex> {
&self.signatures
}
/// Gets the shared signature index given a module signature index.
pub fn shared_signature(&self, index: SignatureIndex) -> Option<VMSharedSignatureIndex> {
self.signatures.get(index).copied()
}
/// Gets a trampoline for a registered signature.
pub fn trampoline(&self, index: VMSharedSignatureIndex) -> Option<VMTrampoline> {
self.trampolines
.get(&index)
.map(|(_, trampoline)| *trampoline)
}
/// Registers a single function with the collection.
///
/// Returns the shared signature index for the function.
pub fn register(
&mut self,
ty: &WasmFuncType,
trampoline: VMTrampoline,
) -> VMSharedSignatureIndex {
let index = self.registry.write().unwrap().register(ty);
let entry = match self.trampolines.entry(index) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => e.insert((0, trampoline)),
};
// Increment the ref count
entry.0 += 1;
index
}
}
impl Drop for SignatureCollection {
fn drop(&mut self) {
if !self.signatures.is_empty() || !self.trampolines.is_empty() {
self.registry.write().unwrap().unregister_signatures(self);
}
}
}
#[derive(Debug)]
struct RegistryEntry {
references: usize,
ty: WasmFuncType,
}
#[derive(Debug, Default)]
struct SignatureRegistryInner {
map: HashMap<WasmFuncType, VMSharedSignatureIndex>,
entries: Vec<Option<RegistryEntry>>,
free: Vec<VMSharedSignatureIndex>,
}
impl SignatureRegistryInner {
fn register_for_module(
&mut self,
signatures: &PrimaryMap<SignatureIndex, WasmFuncType>,
trampolines: impl Iterator<Item = (SignatureIndex, VMTrampoline)>,
) -> (
PrimaryMap<SignatureIndex, VMSharedSignatureIndex>,
HashMap<VMSharedSignatureIndex, (usize, VMTrampoline)>,
) {
let mut sigs = PrimaryMap::default();
let mut map = HashMap::default();
for (_, ty) in signatures.iter() {
sigs.push(self.register(ty));
}
for (index, trampoline) in trampolines {
map.insert(sigs[index], (1, trampoline));
}
(sigs, map)
}
fn register(&mut self, ty: &WasmFuncType) -> VMSharedSignatureIndex {
let len = self.map.len();
let index = match self.map.entry(ty.clone()) {
Entry::Occupied(e) => *e.get(),
Entry::Vacant(e) => {
let (index, entry) = match self.free.pop() {
Some(index) => (index, &mut self.entries[index.bits() as usize]),
None => {
// Keep `index_map` len under 2**32 -- VMSharedSignatureIndex::new(std::u32::MAX)
// is reserved for VMSharedSignatureIndex::default().
assert!(
len < std::u32::MAX as usize,
"Invariant check: index_map.len() < std::u32::MAX"
);
debug_assert_eq!(len, self.entries.len());
let index = VMSharedSignatureIndex::new(u32::try_from(len).unwrap());
self.entries.push(None);
(index, self.entries.last_mut().unwrap())
}
};
// The entry should be missing for one just allocated or
// taken from the free list
assert!(entry.is_none());
*entry = Some(RegistryEntry {
references: 0,
ty: ty.clone(),
});
*e.insert(index)
}
};
self.entries[index.bits() as usize]
.as_mut()
.unwrap()
.references += 1;
index
}
fn unregister_signatures(&mut self, collection: &SignatureCollection) {
// If the collection has a populated signatures map, use it to deregister;
// it always maps 1:1 from entry to registration.
if !collection.signatures.is_empty() {
for (_, index) in collection.signatures.iter() {
self.unregister_entry(*index, 1);
}
} else {
// Otherwise, use the trampolines map, which has reference counts related
// to the stored index
for (index, (count, _)) in collection.trampolines.iter() {
self.unregister_entry(*index, *count);
}
}
}
fn unregister_entry(&mut self, index: VMSharedSignatureIndex, count: usize) {
let removed = {
let entry = self.entries[index.bits() as usize].as_mut().unwrap();
debug_assert!(entry.references >= count);
entry.references -= count;
if entry.references == 0 {
self.map.remove(&entry.ty);
self.free.push(index);
true
} else {
false
}
};
if removed {
self.entries[index.bits() as usize] = None;
}
}
}
// `SignatureRegistryInner` implements `Drop` in debug builds to assert that
// all signatures have been unregistered from the registry.
#[cfg(debug_assertions)]
impl Drop for SignatureRegistryInner {
fn drop(&mut self) {
assert!(
self.map.is_empty() && self.free.len() == self.entries.len(),
"signature registry not empty"
);
}
}
/// Implements a shared signature registry.
///
/// WebAssembly requires that the caller and callee signatures in an indirect
/// call must match. To implement this efficiently, keep a registry of all
/// signatures, shared by all instances, so that call sites can just do an
/// index comparison.
#[derive(Debug)]
pub struct SignatureRegistry(Arc<RwLock<SignatureRegistryInner>>);
impl SignatureRegistry {
/// Creates a new shared signature registry.
pub fn new() -> Self {
Self(Arc::new(RwLock::new(SignatureRegistryInner::default())))
}
/// Looks up a function type from a shared signature index.
pub fn lookup_type(&self, index: VMSharedSignatureIndex) -> Option<WasmFuncType> {
self.0
.read()
.unwrap()
.entries
.get(index.bits() as usize)
.and_then(|e| e.as_ref().map(|e| &e.ty).cloned())
}
}
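The registry above interns each function type once, hands out a compact index, reference-counts it, and recycles slots through a free list. A simplified sketch of the same pattern with standard-library types (the names here are illustrative, not the crate's API):

use std::collections::HashMap;

#[derive(Default)]
struct Interner {
    map: HashMap<String, u32>,             // type -> index
    entries: Vec<Option<(String, usize)>>, // index -> (type, refcount)
    free: Vec<u32>,                        // recycled indices
}

impl Interner {
    fn register(&mut self, ty: &str) -> u32 {
        if let Some(&index) = self.map.get(ty) {
            self.entries[index as usize].as_mut().unwrap().1 += 1;
            return index;
        }
        // Reuse a freed slot if one exists, otherwise allocate a new one.
        let index = match self.free.pop() {
            Some(index) => index,
            None => {
                self.entries.push(None);
                (self.entries.len() - 1) as u32
            }
        };
        self.entries[index as usize] = Some((ty.to_string(), 1));
        self.map.insert(ty.to_string(), index);
        index
    }

    fn unregister(&mut self, index: u32) {
        let slot = self.entries[index as usize].as_mut().unwrap();
        slot.1 -= 1;
        if slot.1 == 0 {
            // Last reference: drop the interned type and recycle the slot.
            let (ty, _) = self.entries[index as usize].take().unwrap();
            self.map.remove(&ty);
            self.free.push(index);
        }
    }
}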

View File

@@ -1,12 +1,12 @@
use crate::frame_info;
use crate::frame_info::StoreFrameInfo;
use crate::sig_registry::SignatureRegistry;
use crate::trampoline::StoreInstanceHandle;
use crate::{Engine, Func, FuncType, Module, Trap};
use crate::{
module::ModuleRegistry, signatures::SignatureCollection, trampoline::StoreInstanceHandle,
Engine, Func, Module, ResourceLimiter, ResourceLimiterProxy, Trap, DEFAULT_INSTANCE_LIMIT,
DEFAULT_MEMORY_LIMIT, DEFAULT_TABLE_LIMIT,
};
use anyhow::{bail, Result};
use std::any::{Any, TypeId};
use std::cell::{Cell, RefCell};
use std::collections::{hash_map::Entry, HashMap, HashSet};
use std::collections::{hash_map::Entry, HashMap};
use std::convert::TryFrom;
use std::fmt;
use std::future::Future;
@@ -16,12 +16,10 @@ use std::ptr;
use std::rc::Rc;
use std::sync::Arc;
use std::task::{Context, Poll};
use wasmtime_environ::wasm;
use wasmtime_jit::{CompiledModule, ModuleCode};
use wasmtime_runtime::{
Export, InstanceAllocator, InstanceHandle, OnDemandInstanceAllocator, SignalHandler,
StackMapRegistry, TrapInfo, VMCallerCheckedAnyfunc, VMContext, VMExternRef,
VMExternRefActivationsTable, VMInterrupts, VMTrampoline,
InstanceAllocator, InstanceHandle, ModuleInfo, OnDemandInstanceAllocator, SignalHandler,
TrapInfo, VMCallerCheckedAnyfunc, VMContext, VMExternRef, VMExternRefActivationsTable,
VMInterrupts, VMTrampoline,
};
/// Used to associate instances with the store.
@@ -72,20 +70,13 @@ pub struct Store {
pub(crate) struct StoreInner {
engine: Engine,
/// The map of all host functions registered with this store's signature registry
host_funcs: RefCell<HashMap<InstanceHandle, Box<VMCallerCheckedAnyfunc>>>,
interrupts: Arc<VMInterrupts>,
signatures: RefCell<SignatureRegistry>,
instances: RefCell<Vec<StoreInstance>>,
signal_handler: RefCell<Option<Box<SignalHandler<'static>>>>,
externref_activations_table: VMExternRefActivationsTable,
stack_map_registry: StackMapRegistry,
/// Information about JIT code which allows us to test if a program counter
/// is in JIT code, lookup trap information, etc.
frame_info: RefCell<StoreFrameInfo>,
/// Set of all compiled modules that we're holding a strong reference to
/// the module's code for. This includes JIT functions, trampolines, etc.
modules: RefCell<HashSet<ArcModuleCode>>,
modules: RefCell<ModuleRegistry>,
// The signatures and trampolines for `Func` objects
signatures: RefCell<SignatureCollection>,
// Numbers of resources instantiated in this store.
instance_count: Cell<usize>,
memory_count: Cell<usize>,
@@ -99,6 +90,7 @@ pub(crate) struct StoreInner {
current_poll_cx: Cell<*mut Context<'static>>,
out_of_gas_behavior: Cell<OutOfGas>,
context_values: RefCell<HashMap<TypeId, Box<dyn Any>>>,
limiter: Option<Rc<dyn wasmtime_runtime::ResourceLimiter>>,
}
#[derive(Copy, Clone)]
@@ -130,28 +122,57 @@ impl Hash for HostInfoKey {
}
impl Store {
/// Creates a new store to be associated with the given [`Engine`].
pub fn new(engine: &Engine) -> Store {
// Ensure that wasmtime_runtime's signal handlers are configured. Note
// that at the `Store` level it means we should perform this
// once-per-thread. Platforms like Unix, however, only require this
// once-per-program. In any case this is safe to call many times and
// each one that's not relevant just won't do anything.
wasmtime_runtime::init_traps(frame_info::GlobalFrameInfo::is_wasm_pc)
.expect("failed to initialize trap handling");
/// Creates a new [`Store`] to be associated with the given [`Engine`].
///
/// The created [`Store`] will place no additional limits on the size of linear
/// memories or tables at runtime. Linear memories and tables will be allowed to
/// grow to any upper limit specified in their definitions.
///
/// The store will limit the number of instances, linear memories, and tables created to 10,000 each.
///
/// Use [`Store::new_with_limits`] with a [`StoreLimitsBuilder`](crate::StoreLimitsBuilder) to
/// specify different limits for the store.
pub fn new(engine: &Engine) -> Self {
Self::new_(engine, None)
}
Store {
/// Creates a new [`Store`] to be associated with the given [`Engine`] and using the supplied
/// resource limiter.
///
/// A [`ResourceLimiter`] can be implemented by hosts to control the size of WebAssembly
/// linear memories and tables when a request is made to grow them.
///
/// [`StoreLimitsBuilder`](crate::StoreLimitsBuilder) can be used to create a
/// [`StoreLimits`](crate::StoreLimits) that implements [`ResourceLimiter`] using
/// static limit values.
///
/// # Example
///
/// ```rust
/// # use wasmtime::{Engine, Store, StoreLimitsBuilder};
/// // Place a limit on linear memories so they cannot grow beyond 1 MiB
/// let engine = Engine::default();
/// let store = Store::new_with_limits(&engine, StoreLimitsBuilder::new().memory_pages(16).build());
/// ```
pub fn new_with_limits(engine: &Engine, limiter: impl ResourceLimiter + 'static) -> Self {
Self::new_(engine, Some(Rc::new(ResourceLimiterProxy(limiter))))
}
fn new_(engine: &Engine, limiter: Option<Rc<dyn wasmtime_runtime::ResourceLimiter>>) -> Self {
// Ensure that wasmtime_runtime's signal handlers are configured. This
// is the per-program initialization required for handling traps, such
// as configuring signals, vectored exception handlers, etc.
wasmtime_runtime::init_traps(crate::module::GlobalModuleRegistry::is_wasm_pc);
Self {
inner: Rc::new(StoreInner {
engine: engine.clone(),
host_funcs: RefCell::new(HashMap::new()),
interrupts: Arc::new(Default::default()),
signatures: RefCell::new(Default::default()),
instances: RefCell::new(Vec::new()),
signal_handler: RefCell::new(None),
externref_activations_table: VMExternRefActivationsTable::new(),
stack_map_registry: StackMapRegistry::default(),
frame_info: Default::default(),
modules: Default::default(),
modules: RefCell::new(ModuleRegistry::default()),
signatures: RefCell::new(SignatureCollection::new(engine.signatures())),
instance_count: Default::default(),
memory_count: Default::default(),
table_count: Default::default(),
@@ -162,6 +183,7 @@ impl Store {
current_poll_cx: Cell::new(ptr::null_mut()),
out_of_gas_behavior: Cell::new(OutOfGas::Trap),
context_values: RefCell::new(HashMap::new()),
limiter,
}),
}
}
@@ -181,35 +203,6 @@ impl Store {
})
}
pub(crate) fn get_host_anyfunc(
&self,
instance: &InstanceHandle,
ty: &FuncType,
trampoline: VMTrampoline,
) -> *mut VMCallerCheckedAnyfunc {
let mut funcs = self.inner.host_funcs.borrow_mut();
let anyfunc = funcs.entry(unsafe { instance.clone() }).or_insert_with(|| {
let mut anyfunc = match instance
.lookup_by_declaration(&wasm::EntityIndex::Function(wasm::FuncIndex::from_u32(0)))
{
Export::Function(f) => unsafe { f.anyfunc.as_ref() }.clone(),
_ => unreachable!(),
};
// Register the function with this store's signature registry
anyfunc.type_index = self
.inner
.signatures
.borrow_mut()
.register(ty.as_wasm_func_type(), trampoline);
Box::new(anyfunc)
});
&mut **anyfunc
}
/// Returns the [`Engine`] that this store is associated with.
#[inline]
pub fn engine(&self) -> &Engine {
@@ -244,69 +237,39 @@ impl Store {
}
}
pub(crate) fn signatures(&self) -> &RefCell<SignatureRegistry> {
pub(crate) fn limiter(&self) -> &Option<Rc<dyn wasmtime_runtime::ResourceLimiter>> {
&self.inner.limiter
}
pub(crate) fn signatures(&self) -> &RefCell<SignatureCollection> {
&self.inner.signatures
}
pub(crate) fn register_module(&self, module: &Module) {
// With a module being instantiated into this `Store` we need to
// preserve its jit-code. References to this module's code and
// trampolines are not owning-references so it's our responsibility to
// keep it all alive within the `Store`.
//
// If this module is already present in the store then we skip all
// further registration steps.
let first = self
pub(crate) fn lookup_trampoline(&self, anyfunc: &VMCallerCheckedAnyfunc) -> VMTrampoline {
// First, look for the trampoline among the store's own signatures (from `Func`).
if let Some(trampoline) = self
.inner
.modules
.borrow_mut()
.insert(ArcModuleCode(module.compiled_module().code().clone()));
if !first {
return;
.signatures
.borrow()
.trampoline(anyfunc.type_index)
{
return trampoline;
}
// All modules register their JIT code in a store for two reasons
// currently:
//
// * First we only catch signals/traps if the program counter falls
// within the jit code of an instantiated wasm module. This ensures
// we don't catch accidental Rust/host segfaults.
//
// * Second when generating a backtrace we'll use this mapping to
// only generate wasm frames for instruction pointers that fall
// within jit code.
// Next, look for the trampoline among the registered modules
if let Some(trampoline) = self.inner.modules.borrow().lookup_trampoline(anyfunc) {
return trampoline;
}
// Lastly, check with the engine (for `HostFunc`)
self.inner
.frame_info
.borrow_mut()
.register(module.compiled_module());
// We need to know about all the stack maps of all instantiated modules
// so when performing a GC we know about all wasm frames that we find
// on the stack.
self.register_stack_maps(module.compiled_module());
// Signatures are loaded into our `SignatureRegistry` here
// once-per-module (and once-per-signature). This allows us to create
// a `Func` wrapper for any function in the module, which requires that
// we know about the signature and trampoline for all instances.
self.signatures().borrow_mut().register_module(module);
}
fn register_stack_maps(&self, module: &CompiledModule) {
self.stack_map_registry()
.register_stack_maps(module.stack_maps().map(|(func, stack_maps)| unsafe {
let ptr = (*func).as_ptr();
let len = (*func).len();
let start = ptr as usize;
let end = ptr as usize + len;
let range = start..end;
(range, stack_maps)
}));
.engine
.host_func_signatures()
.trampoline(anyfunc.type_index)
.expect("trampoline missing")
}
pub(crate) fn bump_resource_counts(&self, module: &Module) -> Result<()> {
let config = self.engine().config();
fn bump(slot: &Cell<usize>, max: usize, amt: usize, desc: &str) -> Result<()> {
let new = slot.get().saturating_add(amt);
if new > max {
@@ -323,20 +286,11 @@ impl Store {
let module = module.env_module();
let memories = module.memory_plans.len() - module.num_imported_memories;
let tables = module.table_plans.len() - module.num_imported_tables;
let (max_instances, max_memories, max_tables) = self.limits();
bump(
&self.inner.instance_count,
config.max_instances,
1,
"instance",
)?;
bump(
&self.inner.memory_count,
config.max_memories,
memories,
"memory",
)?;
bump(&self.inner.table_count, config.max_tables, tables, "table")?;
bump(&self.inner.instance_count, max_instances, 1, "instance")?;
bump(&self.inner.memory_count, max_memories, memories, "memory")?;
bump(&self.inner.table_count, max_tables, tables, "table")?;
Ok(())
}
@@ -363,7 +317,7 @@ impl Store {
.borrow()
.iter()
.any(|i| i.handle.vmctx_ptr() == handle.vmctx_ptr())
|| self.inner.host_funcs.borrow().get(&handle).is_some()
|| self.inner.engine.host_func_anyfunc(&handle).is_some()
);
StoreInstanceHandle {
store: self.clone(),
@@ -490,24 +444,21 @@ impl Store {
}
#[inline]
pub(crate) fn stack_map_registry(&self) -> &StackMapRegistry {
&self.inner.stack_map_registry
pub(crate) fn modules(&self) -> &RefCell<ModuleRegistry> {
&self.inner.modules
}
pub(crate) fn frame_info(&self) -> &RefCell<StoreFrameInfo> {
&self.inner.frame_info
#[inline]
pub(crate) fn module_info_lookup(&self) -> &dyn wasmtime_runtime::ModuleInfoLookup {
self.inner.as_ref()
}
/// Perform garbage collection of `ExternRef`s.
pub fn gc(&self) {
// For this crate's API, we ensure that `set_stack_canary` invariants
// are upheld for all host-->Wasm calls, and we register every module
// used with this store in `self.inner.stack_map_registry`.
// are upheld for all host-->Wasm calls.
unsafe {
wasmtime_runtime::gc(
&self.inner.stack_map_registry,
&self.inner.externref_activations_table,
);
wasmtime_runtime::gc(self.inner.as_ref(), &self.inner.externref_activations_table);
}
}
@@ -700,7 +651,8 @@ impl Store {
}
unsafe {
let before = wasmtime_runtime::TlsRestore::take();
let before = wasmtime_runtime::TlsRestore::take()
.map_err(|e| Trap::from_runtime(self, e))?;
let res = (*suspend).suspend(());
before.replace().map_err(|e| Trap::from_runtime(self, e))?;
res?;
@@ -896,6 +848,18 @@ impl Store {
Err(trap) => unsafe { wasmtime_runtime::raise_user_trap(trap.into()) },
}
}
fn limits(&self) -> (usize, usize, usize) {
self.inner
.limiter
.as_ref()
.map(|l| (l.instances(), l.memories(), l.tables()))
.unwrap_or((
DEFAULT_INSTANCE_LIMIT,
DEFAULT_MEMORY_LIMIT,
DEFAULT_TABLE_LIMIT,
))
}
}
unsafe impl TrapInfo for Store {
@@ -968,6 +932,12 @@ impl Drop for StoreInner {
}
}
impl wasmtime_runtime::ModuleInfoLookup for StoreInner {
fn lookup(&self, pc: usize) -> Option<Arc<dyn ModuleInfo>> {
self.modules.borrow().lookup_module(pc)
}
}
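
The `lookup` implementation above delegates to a registry keyed by code ranges. One common way to back such a pc-to-module query is a `BTreeMap` keyed by each range's end address; a sketch under that assumption, with simplified stand-in types:

use std::collections::BTreeMap;
use std::ops::{Bound, Range};
use std::sync::Arc;

struct ModuleInfo {
    code: Range<usize>,
}

struct Registry {
    // Keyed by the exclusive end of each module's code range; ranges
    // are assumed non-overlapping.
    by_end: BTreeMap<usize, Arc<ModuleInfo>>,
}

impl Registry {
    fn lookup_module(&self, pc: usize) -> Option<Arc<ModuleInfo>> {
        // The first module whose range ends after `pc` is the only
        // candidate; confirm `pc` actually falls inside it.
        let (_, info) = self
            .by_end
            .range((Bound::Excluded(pc), Bound::Unbounded))
            .next()?;
        if info.code.contains(&pc) {
            Some(info.clone())
        } else {
            None
        }
    }
}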
/// A threadsafe handle used to interrupt instances executing within a
/// particular `Store`.
///
@@ -996,24 +966,6 @@ impl InterruptHandle {
}
}
// Wrapper struct to implement hash/equality based on the pointer value of the
// `Arc` in question.
struct ArcModuleCode(Arc<ModuleCode>);
impl PartialEq for ArcModuleCode {
fn eq(&self, other: &ArcModuleCode) -> bool {
Arc::ptr_eq(&self.0, &other.0)
}
}
impl Eq for ArcModuleCode {}
impl Hash for ArcModuleCode {
fn hash<H: Hasher>(&self, hasher: &mut H) {
Arc::as_ptr(&self.0).hash(hasher)
}
}
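
The removed wrapper compared `Arc`s by pointer identity rather than by value, so a module's code blob registered once per allocation. A self-contained illustration of that trick:

use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::sync::Arc;

struct ByPtr<T>(Arc<T>);

impl<T> PartialEq for ByPtr<T> {
    fn eq(&self, other: &Self) -> bool {
        // Equal only when both handles point at the same allocation.
        Arc::ptr_eq(&self.0, &other.0)
    }
}
impl<T> Eq for ByPtr<T> {}
impl<T> Hash for ByPtr<T> {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        // Hash the allocation's address, consistent with `eq`.
        Arc::as_ptr(&self.0).hash(hasher)
    }
}

fn main() {
    let a = Arc::new(5u32);
    let mut set = HashSet::new();
    // The same allocation dedupes; a new allocation with an equal
    // value does not.
    assert!(set.insert(ByPtr(a.clone())));
    assert!(!set.insert(ByPtr(a)));
    assert!(set.insert(ByPtr(Arc::new(5u32))));
}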
struct Reset<'a, T: Copy>(&'a Cell<T>, T);
impl<T: Copy> Drop for Reset<'_, T> {

View File

@@ -19,8 +19,8 @@ use std::sync::Arc;
use wasmtime_environ::{entity::PrimaryMap, wasm, Module};
use wasmtime_runtime::{
Imports, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
OnDemandInstanceAllocator, StackMapRegistry, VMExternRefActivationsTable, VMFunctionBody,
VMFunctionImport, VMSharedSignatureIndex,
OnDemandInstanceAllocator, VMExternRefActivationsTable, VMFunctionBody, VMFunctionImport,
VMSharedSignatureIndex,
};
/// A wrapper around `wasmtime_runtime::InstanceHandle` which pairs it with the
@@ -77,7 +77,8 @@ fn create_handle(
externref_activations_table: store.externref_activations_table()
as *const VMExternRefActivationsTable
as *mut _,
stack_map_registry: store.stack_map_registry() as *const StackMapRegistry as *mut _,
module_info_lookup: Some(store.module_info_lookup()),
limiter: store.limiter().as_ref(),
},
)?;

View File

@@ -1,51 +0,0 @@
//! Support for calling an imported function.
use crate::trampoline::StoreInstanceHandle;
use crate::Store;
use anyhow::Result;
use std::any::Any;
use std::sync::Arc;
use wasmtime_environ::entity::PrimaryMap;
use wasmtime_environ::wasm::DefinedFuncIndex;
use wasmtime_environ::Module;
use wasmtime_runtime::{
Imports, InstanceAllocationRequest, InstanceAllocator, StackMapRegistry,
VMExternRefActivationsTable, VMFunctionBody, VMFunctionImport, VMSharedSignatureIndex,
};
pub(crate) fn create_handle(
module: Module,
store: &Store,
finished_functions: PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,
host_state: Box<dyn Any>,
func_imports: &[VMFunctionImport],
shared_signature_id: Option<VMSharedSignatureIndex>,
) -> Result<StoreInstanceHandle> {
let mut imports = Imports::default();
imports.functions = func_imports;
let module = Arc::new(module);
unsafe {
// Use the default allocator when creating handles associated with host objects
// The configured instance allocator should only be used when creating module instances
// as we don't want host objects to count towards instance limits.
let handle = store
.engine()
.config()
.default_instance_allocator
.allocate(InstanceAllocationRequest {
module: module.clone(),
finished_functions: &finished_functions,
imports,
shared_signatures: shared_signature_id.into(),
host_state,
interrupts: store.interrupts(),
externref_activations_table: store.externref_activations_table()
as *const VMExternRefActivationsTable
as *mut _,
stack_map_registry: store.stack_map_registry() as *const StackMapRegistry as *mut _,
})?;
Ok(store.add_instance(handle, true))
}
}

View File

@@ -1,6 +1,6 @@
//! Support for calling an imported function.
use crate::{sig_registry::SignatureRegistry, Config, FuncType, Trap};
use crate::{Config, FuncType, Store, Trap};
use anyhow::Result;
use std::any::Any;
use std::cmp;
@@ -262,15 +262,19 @@ pub fn create_function(
ft: &FuncType,
func: Box<dyn Fn(*mut VMContext, *mut u128) -> Result<(), Trap>>,
config: &Config,
registry: Option<&mut SignatureRegistry>,
store: Option<&Store>,
) -> Result<(InstanceHandle, VMTrampoline)> {
let (module, finished_functions, trampoline, trampoline_state) =
create_function_trampoline(config, ft, func)?;
// If there is no signature registry, use the default signature index which is
// If there is no store, use the default signature index which is
// guaranteed to trap if there is ever an indirect call on the function (should not happen)
let shared_signature_id = registry
.map(|r| r.register(ft.as_wasm_func_type(), trampoline))
let shared_signature_id = store
.map(|s| {
s.signatures()
.borrow_mut()
.register(ft.as_wasm_func_type(), trampoline)
})
.unwrap_or(VMSharedSignatureIndex::default());
unsafe {
@@ -283,7 +287,8 @@ pub fn create_function(
host_state: Box::new(trampoline_state),
interrupts: std::ptr::null(),
externref_activations_table: std::ptr::null_mut(),
stack_map_registry: std::ptr::null_mut(),
module_info_lookup: None,
limiter: None,
})?,
trampoline,
))
@@ -315,7 +320,8 @@ pub unsafe fn create_raw_function(
host_state,
interrupts: std::ptr::null(),
externref_activations_table: std::ptr::null_mut(),
stack_map_registry: std::ptr::null_mut(),
module_info_lookup: None,
limiter: None,
})?,
)
}

View File

@@ -37,6 +37,10 @@ impl RuntimeLinearMemory for LinearMemoryProxy {
self.mem.size()
}
fn maximum(&self) -> Option<u32> {
self.mem.maximum()
}
fn grow(&self, delta: u32) -> Option<u32> {
self.mem.grow(delta)
}

View File

@@ -161,7 +161,7 @@ impl Trap {
maybe_interrupted,
} => {
let mut code = store
.frame_info()
.modules()
.borrow()
.lookup_trap_info(pc)
.map(|info| info.trap_code)
@@ -239,7 +239,7 @@ impl Trap {
// (the call instruction) so we subtract one as the lookup.
let pc_to_lookup = if Some(pc) == trap_pc { pc } else { pc - 1 };
if let Some((info, has_unparsed_debuginfo)) =
store.frame_info().borrow().lookup_frame_info(pc_to_lookup)
store.modules().borrow().lookup_frame_info(pc_to_lookup)
{
wasm_trace.push(info);
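
A return address points at the instruction after the `call`, so symbolizing with `pc - 1` keeps the lookup inside the calling function; only the trapping frame itself uses `pc` directly. The same decision in miniature:

// Pick the address to symbolize for one frame of a backtrace.
fn lookup_pc(pc: usize, trap_pc: Option<usize>) -> usize {
    if Some(pc) == trap_pc {
        // The faulting frame: `pc` is the trapping instruction itself.
        pc
    } else {
        // A caller frame: step back past the return address.
        pc - 1
    }
}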

View File

@@ -204,8 +204,7 @@ impl ExternType {
) -> ExternType {
match ty {
EntityType::Function(idx) => {
let sig = &types.wasm_signatures[*idx];
FuncType::from_wasm_func_type(sig).into()
FuncType::from_wasm_func_type(types.wasm_signatures[*idx].clone()).into()
}
EntityType::Global(ty) => GlobalType::from_wasmtime_global(ty).into(),
EntityType::Memory(ty) => MemoryType::from_wasmtime_memory(ty).into(),
@@ -298,8 +297,8 @@ impl FuncType {
&self.sig
}
pub(crate) fn from_wasm_func_type(sig: &wasm::WasmFuncType) -> FuncType {
FuncType { sig: sig.clone() }
pub(crate) fn from_wasm_func_type(sig: wasm::WasmFuncType) -> FuncType {
Self { sig }
}
}

View File

@@ -1,4 +1,4 @@
use crate::{Extern, Store};
use crate::{signatures::SignatureCollection, Extern, Store};
use anyhow::{bail, Context, Result};
use wasmtime_environ::wasm::{
EntityType, Global, InstanceTypeIndex, Memory, ModuleTypeIndex, SignatureIndex, Table,
@@ -6,6 +6,7 @@ use wasmtime_environ::wasm::{
use wasmtime_jit::TypeTables;
pub struct MatchCx<'a> {
pub signatures: &'a SignatureCollection,
pub types: &'a TypeTables,
pub store: &'a Store,
}
@@ -70,12 +71,7 @@ impl MatchCx<'_> {
}
pub fn func(&self, expected: SignatureIndex, actual: &crate::Func) -> Result<()> {
let matches = match self
.store
.signatures()
.borrow()
.lookup(&self.types.wasm_signatures[expected])
{
let matches = match self.signatures.shared_signature(expected) {
Some(idx) => actual.sig_index() == idx,
// If our expected signature isn't registered, then there's no way
// that `actual` can match it.
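
The match above leans on signature interning: two function types are equal iff they were registered to the same shared index, and an expected type that was never registered can match nothing. A sketch of that logic with hypothetical stand-in names:

use std::collections::HashMap;

#[derive(PartialEq, Eq, Hash, Clone)]
struct WasmFuncType(Vec<u8>); // stand-in for params/results

#[derive(PartialEq, Eq, Clone, Copy)]
struct SharedIndex(u32);

struct SignatureCollection(HashMap<WasmFuncType, SharedIndex>);

impl SignatureCollection {
    fn shared_signature(&self, ty: &WasmFuncType) -> Option<SharedIndex> {
        self.0.get(ty).copied()
    }
}

fn func_matches(collection: &SignatureCollection, expected: &WasmFuncType, actual: SharedIndex) -> bool {
    match collection.shared_signature(expected) {
        Some(idx) => actual == idx,
        // Expected signature never registered: nothing can match it.
        None => false,
    }
}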
@@ -114,15 +110,19 @@ impl MatchCx<'_> {
let module = actual.compiled_module().module();
self.imports_match(
expected,
actual.signatures(),
actual.types(),
module.imports().map(|(name, field, ty)| {
assert!(field.is_none()); // should be true if module linking is enabled
(name, ty)
}),
)?;
self.exports_match(expected_sig.exports, actual.types(), |name| {
module.exports.get(name).map(|idx| module.type_of(*idx))
})?;
self.exports_match(
expected_sig.exports,
actual.signatures(),
actual.types(),
|name| module.exports.get(name).map(|idx| module.type_of(*idx)),
)?;
Ok(())
}
@@ -133,6 +133,7 @@ impl MatchCx<'_> {
fn imports_match<'a>(
&self,
expected: ModuleTypeIndex,
actual_signatures: &SignatureCollection,
actual_types: &TypeTables,
actual_imports: impl Iterator<Item = (&'a str, EntityType)>,
) -> Result<()> {
@@ -146,10 +147,11 @@ impl MatchCx<'_> {
None => bail!("expected type doesn't import {:?}", name),
};
MatchCx {
signatures: actual_signatures,
types: actual_types,
store: self.store,
}
.extern_ty_matches(&actual_ty, expected_ty, self.types)
.extern_ty_matches(&actual_ty, expected_ty, self.signatures, self.types)
.with_context(|| format!("module import {:?} incompatible", name))?;
}
Ok(())
@@ -160,6 +162,7 @@ impl MatchCx<'_> {
fn exports_match(
&self,
expected: InstanceTypeIndex,
actual_signatures: &SignatureCollection,
actual_types: &TypeTables,
lookup: impl Fn(&str) -> Option<EntityType>,
) -> Result<()> {
@@ -169,7 +172,7 @@ impl MatchCx<'_> {
for (name, expected) in self.types.instance_signatures[expected].exports.iter() {
match lookup(name) {
Some(ty) => self
.extern_ty_matches(expected, &ty, actual_types)
.extern_ty_matches(expected, &ty, actual_signatures, actual_types)
.with_context(|| format!("export {:?} incompatible", name))?,
None => bail!("failed to find export {:?}", name),
}
@@ -183,6 +186,7 @@ impl MatchCx<'_> {
&self,
expected: &EntityType,
actual_ty: &EntityType,
actual_signatures: &SignatureCollection,
actual_types: &TypeTables,
) -> Result<()> {
let actual_desc = match actual_ty {
@@ -221,7 +225,7 @@ impl MatchCx<'_> {
EntityType::Instance(expected) => match actual_ty {
EntityType::Instance(actual) => {
let sig = &actual_types.instance_signatures[*actual];
self.exports_match(*expected, actual_types, |name| {
self.exports_match(*expected, actual_signatures, actual_types, |name| {
sig.exports.get(name).cloned()
})?;
Ok(())
@@ -237,15 +241,19 @@ impl MatchCx<'_> {
self.imports_match(
*expected,
actual_signatures,
actual_types,
actual_module_sig
.imports
.iter()
.map(|(module, ty)| (module.as_str(), ty.clone())),
)?;
self.exports_match(expected_module_sig.exports, actual_types, |name| {
actual_instance_sig.exports.get(name).cloned()
})?;
self.exports_match(
expected_module_sig.exports,
actual_signatures,
actual_types,
|name| actual_instance_sig.exports.get(name).cloned(),
)?;
Ok(())
}
_ => bail!("expected module, but found {}", actual_desc),

View File

@@ -98,7 +98,7 @@ impl Val {
let externref_ptr = x.inner.as_raw();
store
.externref_activations_table()
.insert_with_gc(x.inner, store.stack_map_registry());
.insert_with_gc(x.inner, store.module_info_lookup());
ptr::write(p as *mut *mut u8, externref_ptr)
}
Val::FuncRef(f) => ptr::write(

View File

@@ -39,7 +39,7 @@ pub fn link_spectest(linker: &mut Linker) -> Result<()> {
linker.define("spectest", "table", table)?;
let ty = MemoryType::new(Limits::new(1, Some(2)));
let memory = Memory::new(linker.store(), ty);
let memory = Memory::new(linker.store(), ty)?;
linker.define("spectest", "memory", memory)?;
Ok(())
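
With `Memory::new` now returning a `Result`, host code propagates allocation failures with `?` instead of panicking. A minimal usage sketch against the post-change API, assuming the fallible signature introduced here:

use anyhow::Result;
use wasmtime::{Engine, Limits, Memory, MemoryType, Store};

fn make_spectest_memory() -> Result<Memory> {
    let store = Store::new(&Engine::default());
    let ty = MemoryType::new(Limits::new(1, Some(2)));
    // Creation can fail (e.g. the initial pages can't be reserved), so
    // propagate the error rather than unwrapping.
    let memory = Memory::new(&store, ty)?;
    Ok(memory)
}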