Implement lazy funcref table and anyfunc initialization. (#3733)
During instance initialization, we build two sorts of arrays eagerly:
- We create an "anyfunc" (a `VMCallerCheckedAnyfunc`) for every function
in an instance.
- We initialize every element of a funcref table with an initializer to
a pointer to one of these anyfuncs.
Most instances will not touch (via call_indirect or table.get) all
funcref table elements. And most anyfuncs will never be referenced,
because most functions are never placed in tables or used with
`ref.func`. Thus, both of these initialization tasks are quite wasteful.
Profiling shows that a significant fraction of the remaining
instance-initialization time after our other recent optimizations is
going into these two tasks.
This PR implements two basic ideas:
- The anyfunc array can be lazily initialized as long as we retain the
information needed to do so. For now, in this PR, we just recreate the
anyfunc whenever a pointer is taken to it, because doing so is fast
enough; in the future we could keep some state to know whether the
anyfunc has been written yet and skip this work if redundant.
This technique allows us to leave the anyfunc array as uninitialized
memory, which can be a significant savings. Filling it with
initialized anyfuncs is very expensive, but even zeroing it is
expensive: e.g. in a large module, it can be >500KB.
- A funcref table can be lazily initialized as long as we retain a link
to its corresponding instance and function index for each element. A
zero in a table element means "uninitialized", and a slowpath does the
initialization.
Funcref tables are a little tricky because funcrefs can be null. We need
to distinguish "element was initially non-null, but user stored explicit
null later" from "element never touched" (ie the lazy init should not
blow away an explicitly stored null). We solve this by stealing the LSB
from every funcref (anyfunc pointer): when the LSB is set, the funcref
is initialized and we don't hit the lazy-init slowpath. We insert the
bit on storing to the table and mask it off after loading.
We do have to set up a precomputed array of `FuncIndex`s for the table
in order for this to work. We do this as part of the module compilation.
This PR also refactors the way that the runtime crate gains access to
information computed during module compilation.
Performance effect measured with in-tree benches/instantiation.rs, using
SpiderMonkey built for WASI, and with memfd enabled:
```
BEFORE:
sequential/default/spidermonkey.wasm
time: [68.569 us 68.696 us 68.856 us]
sequential/pooling/spidermonkey.wasm
time: [69.406 us 69.435 us 69.465 us]
parallel/default/spidermonkey.wasm: with 1 background thread
time: [69.444 us 69.470 us 69.497 us]
parallel/default/spidermonkey.wasm: with 16 background threads
time: [183.72 us 184.31 us 184.89 us]
parallel/pooling/spidermonkey.wasm: with 1 background thread
time: [69.018 us 69.070 us 69.136 us]
parallel/pooling/spidermonkey.wasm: with 16 background threads
time: [326.81 us 337.32 us 347.01 us]
WITH THIS PR:
sequential/default/spidermonkey.wasm
time: [6.7821 us 6.8096 us 6.8397 us]
change: [-90.245% -90.193% -90.142%] (p = 0.00 < 0.05)
Performance has improved.
sequential/pooling/spidermonkey.wasm
time: [3.0410 us 3.0558 us 3.0724 us]
change: [-95.566% -95.552% -95.537%] (p = 0.00 < 0.05)
Performance has improved.
parallel/default/spidermonkey.wasm: with 1 background thread
time: [7.2643 us 7.2689 us 7.2735 us]
change: [-89.541% -89.533% -89.525%] (p = 0.00 < 0.05)
Performance has improved.
parallel/default/spidermonkey.wasm: with 16 background threads
time: [147.36 us 148.99 us 150.74 us]
change: [-18.997% -18.081% -17.285%] (p = 0.00 < 0.05)
Performance has improved.
parallel/pooling/spidermonkey.wasm: with 1 background thread
time: [3.1009 us 3.1021 us 3.1033 us]
change: [-95.517% -95.511% -95.506%] (p = 0.00 < 0.05)
Performance has improved.
parallel/pooling/spidermonkey.wasm: with 16 background threads
time: [49.449 us 50.475 us 51.540 us]
change: [-85.423% -84.964% -84.465%] (p = 0.00 < 0.05)
Performance has improved.
```
So an improvement of something like 80-95% for a very large module (7420
functions in its one funcref table, 31928 functions total).
This commit is contained in:
@@ -612,7 +612,7 @@ pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
|
||||
bitcast_arguments(args, &types, builder);
|
||||
|
||||
let call = environ.translate_call_indirect(
|
||||
builder.cursor(),
|
||||
builder,
|
||||
TableIndex::from_u32(*table_index),
|
||||
table,
|
||||
TypeIndex::from_u32(*index),
|
||||
|
||||
@@ -404,7 +404,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
|
||||
|
||||
fn translate_call_indirect(
|
||||
&mut self,
|
||||
mut pos: FuncCursor,
|
||||
builder: &mut FunctionBuilder,
|
||||
_table_index: TableIndex,
|
||||
_table: ir::Table,
|
||||
_sig_index: TypeIndex,
|
||||
@@ -413,7 +413,7 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
|
||||
call_args: &[ir::Value],
|
||||
) -> WasmResult<ir::Inst> {
|
||||
// Pass the current function's vmctx parameter on to the callee.
|
||||
let vmctx = pos
|
||||
let vmctx = builder
|
||||
.func
|
||||
.special_param(ir::ArgumentPurpose::VMContext)
|
||||
.expect("Missing vmctx parameter");
|
||||
@@ -423,22 +423,22 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
|
||||
// TODO: Generate bounds checking code.
|
||||
let ptr = self.pointer_type();
|
||||
let callee_offset = if ptr == I32 {
|
||||
pos.ins().imul_imm(callee, 4)
|
||||
builder.ins().imul_imm(callee, 4)
|
||||
} else {
|
||||
let ext = pos.ins().uextend(I64, callee);
|
||||
pos.ins().imul_imm(ext, 4)
|
||||
let ext = builder.ins().uextend(I64, callee);
|
||||
builder.ins().imul_imm(ext, 4)
|
||||
};
|
||||
let mflags = ir::MemFlags::trusted();
|
||||
let func_ptr = pos.ins().load(ptr, mflags, callee_offset, 0);
|
||||
let func_ptr = builder.ins().load(ptr, mflags, callee_offset, 0);
|
||||
|
||||
// Build a value list for the indirect call instruction containing the callee, call_args,
|
||||
// and the vmctx parameter.
|
||||
let mut args = ir::ValueList::default();
|
||||
args.push(func_ptr, &mut pos.func.dfg.value_lists);
|
||||
args.extend(call_args.iter().cloned(), &mut pos.func.dfg.value_lists);
|
||||
args.push(vmctx, &mut pos.func.dfg.value_lists);
|
||||
args.push(func_ptr, &mut builder.func.dfg.value_lists);
|
||||
args.extend(call_args.iter().cloned(), &mut builder.func.dfg.value_lists);
|
||||
args.push(vmctx, &mut builder.func.dfg.value_lists);
|
||||
|
||||
Ok(pos
|
||||
Ok(builder
|
||||
.ins()
|
||||
.CallIndirect(ir::Opcode::CallIndirect, INVALID, sig_ref, args)
|
||||
.0)
|
||||
|
||||
@@ -219,7 +219,7 @@ pub trait FuncEnvironment: TargetEnvironment {
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(clippy::too_many_arguments))]
|
||||
fn translate_call_indirect(
|
||||
&mut self,
|
||||
pos: FuncCursor,
|
||||
builder: &mut FunctionBuilder,
|
||||
table_index: TableIndex,
|
||||
table: ir::Table,
|
||||
sig_index: TypeIndex,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use cranelift_codegen::cursor::FuncCursor;
|
||||
use cranelift_codegen::ir;
|
||||
use cranelift_codegen::ir::condcodes::*;
|
||||
use cranelift_codegen::ir::immediates::{Offset32, Uimm64};
|
||||
use cranelift_codegen::ir::immediates::{Imm64, Offset32, Uimm64};
|
||||
use cranelift_codegen::ir::types::*;
|
||||
use cranelift_codegen::ir::{AbiParam, ArgumentPurpose, Function, InstBuilder, Signature};
|
||||
use cranelift_codegen::isa::{self, TargetFrontendConfig, TargetIsa};
|
||||
@@ -19,6 +19,7 @@ use wasmtime_environ::{
|
||||
BuiltinFunctionIndex, MemoryPlan, MemoryStyle, Module, ModuleTranslation, TableStyle, Tunables,
|
||||
TypeTables, VMOffsets, INTERRUPTED, WASM_PAGE_SIZE,
|
||||
};
|
||||
use wasmtime_environ::{FUNCREF_INIT_BIT, FUNCREF_MASK};
|
||||
|
||||
/// Compute an `ir::ExternalName` for a given wasm function index.
|
||||
pub fn get_func_name(func_index: FuncIndex) -> ir::ExternalName {
|
||||
@@ -750,6 +751,59 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
|
||||
pos.ins().uextend(I64, val)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_or_init_funcref_table_elem(
|
||||
&mut self,
|
||||
builder: &mut FunctionBuilder,
|
||||
table_index: TableIndex,
|
||||
table: ir::Table,
|
||||
index: ir::Value,
|
||||
) -> ir::Value {
|
||||
let pointer_type = self.pointer_type();
|
||||
|
||||
// To support lazy initialization of table
|
||||
// contents, we check for a null entry here, and
|
||||
// if null, we take a slow-path that invokes a
|
||||
// libcall.
|
||||
let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0);
|
||||
let value = builder
|
||||
.ins()
|
||||
.load(pointer_type, ir::MemFlags::trusted(), table_entry_addr, 0);
|
||||
// Mask off the "initialized bit". See documentation on
|
||||
// FUNCREF_INIT_BIT in crates/environ/src/ref_bits.rs for more
|
||||
// details.
|
||||
let value_masked = builder
|
||||
.ins()
|
||||
.band_imm(value, Imm64::from(FUNCREF_MASK as i64));
|
||||
|
||||
let null_block = builder.create_block();
|
||||
let continuation_block = builder.create_block();
|
||||
let result_param = builder.append_block_param(continuation_block, pointer_type);
|
||||
builder.set_cold_block(null_block);
|
||||
|
||||
builder.ins().brz(value, null_block, &[]);
|
||||
builder.ins().jump(continuation_block, &[value_masked]);
|
||||
builder.seal_block(null_block);
|
||||
|
||||
builder.switch_to_block(null_block);
|
||||
let table_index = builder.ins().iconst(I32, table_index.index() as i64);
|
||||
let builtin_idx = BuiltinFunctionIndex::table_get_lazy_init_funcref();
|
||||
let builtin_sig = self
|
||||
.builtin_function_signatures
|
||||
.table_get_lazy_init_funcref(builder.func);
|
||||
let (vmctx, builtin_addr) =
|
||||
self.translate_load_builtin_function_address(&mut builder.cursor(), builtin_idx);
|
||||
let call_inst =
|
||||
builder
|
||||
.ins()
|
||||
.call_indirect(builtin_sig, builtin_addr, &[vmctx, table_index, index]);
|
||||
let returned_entry = builder.func.dfg.inst_results(call_inst)[0];
|
||||
builder.ins().jump(continuation_block, &[returned_entry]);
|
||||
builder.seal_block(continuation_block);
|
||||
|
||||
builder.switch_to_block(continuation_block);
|
||||
result_param
|
||||
}
|
||||
}
|
||||
|
||||
impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environment> {
|
||||
@@ -886,13 +940,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
|
||||
match plan.table.wasm_ty {
|
||||
WasmType::FuncRef => match plan.style {
|
||||
TableStyle::CallerChecksSignature => {
|
||||
let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0);
|
||||
Ok(builder.ins().load(
|
||||
pointer_type,
|
||||
ir::MemFlags::trusted(),
|
||||
table_entry_addr,
|
||||
0,
|
||||
))
|
||||
Ok(self.get_or_init_funcref_table_elem(builder, table_index, table, index))
|
||||
}
|
||||
},
|
||||
WasmType::ExternRef => {
|
||||
@@ -1033,9 +1081,18 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
|
||||
WasmType::FuncRef => match plan.style {
|
||||
TableStyle::CallerChecksSignature => {
|
||||
let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0);
|
||||
builder
|
||||
// Set the "initialized bit". See doc-comment on
|
||||
// `FUNCREF_INIT_BIT` in
|
||||
// crates/environ/src/ref_bits.rs for details.
|
||||
let value_with_init_bit = builder
|
||||
.ins()
|
||||
.store(ir::MemFlags::trusted(), value, table_entry_addr, 0);
|
||||
.bor_imm(value, Imm64::from(FUNCREF_INIT_BIT as i64));
|
||||
builder.ins().store(
|
||||
ir::MemFlags::trusted(),
|
||||
value_with_init_bit,
|
||||
table_entry_addr,
|
||||
0,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
},
|
||||
@@ -1253,10 +1310,16 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
|
||||
mut pos: cranelift_codegen::cursor::FuncCursor<'_>,
|
||||
func_index: FuncIndex,
|
||||
) -> WasmResult<ir::Value> {
|
||||
let vmctx = self.vmctx(&mut pos.func);
|
||||
let vmctx = pos.ins().global_value(self.pointer_type(), vmctx);
|
||||
let offset = self.offsets.vmctx_anyfunc(func_index);
|
||||
Ok(pos.ins().iadd_imm(vmctx, i64::from(offset)))
|
||||
let func_index = pos.ins().iconst(I32, func_index.as_u32() as i64);
|
||||
let builtin_index = BuiltinFunctionIndex::ref_func();
|
||||
let builtin_sig = self.builtin_function_signatures.ref_func(&mut pos.func);
|
||||
let (vmctx, builtin_addr) =
|
||||
self.translate_load_builtin_function_address(&mut pos, builtin_index);
|
||||
|
||||
let call_inst = pos
|
||||
.ins()
|
||||
.call_indirect(builtin_sig, builtin_addr, &[vmctx, func_index]);
|
||||
Ok(pos.func.dfg.first_result(call_inst))
|
||||
}
|
||||
|
||||
fn translate_custom_global_get(
|
||||
@@ -1459,7 +1522,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
|
||||
|
||||
fn translate_call_indirect(
|
||||
&mut self,
|
||||
mut pos: FuncCursor<'_>,
|
||||
builder: &mut FunctionBuilder,
|
||||
table_index: TableIndex,
|
||||
table: ir::Table,
|
||||
ty_index: TypeIndex,
|
||||
@@ -1469,21 +1532,17 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
|
||||
) -> WasmResult<ir::Inst> {
|
||||
let pointer_type = self.pointer_type();
|
||||
|
||||
let table_entry_addr = pos.ins().table_addr(pointer_type, table, callee, 0);
|
||||
|
||||
// Dereference the table entry to get the pointer to the
|
||||
// `VMCallerCheckedAnyfunc`.
|
||||
let anyfunc_ptr =
|
||||
pos.ins()
|
||||
.load(pointer_type, ir::MemFlags::trusted(), table_entry_addr, 0);
|
||||
// Get the anyfunc pointer (the funcref) from the table.
|
||||
let anyfunc_ptr = self.get_or_init_funcref_table_elem(builder, table_index, table, callee);
|
||||
|
||||
// Check for whether the table element is null, and trap if so.
|
||||
pos.ins()
|
||||
builder
|
||||
.ins()
|
||||
.trapz(anyfunc_ptr, ir::TrapCode::IndirectCallToNull);
|
||||
|
||||
// Dereference anyfunc pointer to get the function address.
|
||||
let mem_flags = ir::MemFlags::trusted();
|
||||
let func_addr = pos.ins().load(
|
||||
let func_addr = builder.ins().load(
|
||||
pointer_type,
|
||||
mem_flags,
|
||||
anyfunc_ptr,
|
||||
@@ -1495,19 +1554,19 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
|
||||
TableStyle::CallerChecksSignature => {
|
||||
let sig_id_size = self.offsets.size_of_vmshared_signature_index();
|
||||
let sig_id_type = Type::int(u16::from(sig_id_size) * 8).unwrap();
|
||||
let vmctx = self.vmctx(pos.func);
|
||||
let base = pos.ins().global_value(pointer_type, vmctx);
|
||||
let vmctx = self.vmctx(builder.func);
|
||||
let base = builder.ins().global_value(pointer_type, vmctx);
|
||||
let offset =
|
||||
i32::try_from(self.offsets.vmctx_vmshared_signature_id(ty_index)).unwrap();
|
||||
|
||||
// Load the caller ID.
|
||||
let mut mem_flags = ir::MemFlags::trusted();
|
||||
mem_flags.set_readonly();
|
||||
let caller_sig_id = pos.ins().load(sig_id_type, mem_flags, base, offset);
|
||||
let caller_sig_id = builder.ins().load(sig_id_type, mem_flags, base, offset);
|
||||
|
||||
// Load the callee ID.
|
||||
let mem_flags = ir::MemFlags::trusted();
|
||||
let callee_sig_id = pos.ins().load(
|
||||
let callee_sig_id = builder.ins().load(
|
||||
sig_id_type,
|
||||
mem_flags,
|
||||
anyfunc_ptr,
|
||||
@@ -1515,16 +1574,21 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
|
||||
);
|
||||
|
||||
// Check that they match.
|
||||
let cmp = pos.ins().icmp(IntCC::Equal, callee_sig_id, caller_sig_id);
|
||||
pos.ins().trapz(cmp, ir::TrapCode::BadSignature);
|
||||
let cmp = builder
|
||||
.ins()
|
||||
.icmp(IntCC::Equal, callee_sig_id, caller_sig_id);
|
||||
builder.ins().trapz(cmp, ir::TrapCode::BadSignature);
|
||||
}
|
||||
}
|
||||
|
||||
let mut real_call_args = Vec::with_capacity(call_args.len() + 2);
|
||||
let caller_vmctx = pos.func.special_param(ArgumentPurpose::VMContext).unwrap();
|
||||
let caller_vmctx = builder
|
||||
.func
|
||||
.special_param(ArgumentPurpose::VMContext)
|
||||
.unwrap();
|
||||
|
||||
// First append the callee vmctx address.
|
||||
let vmctx = pos.ins().load(
|
||||
let vmctx = builder.ins().load(
|
||||
pointer_type,
|
||||
mem_flags,
|
||||
anyfunc_ptr,
|
||||
@@ -1536,7 +1600,9 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
|
||||
// Then append the regular call arguments.
|
||||
real_call_args.extend_from_slice(call_args);
|
||||
|
||||
Ok(pos.ins().call_indirect(sig_ref, func_addr, &real_call_args))
|
||||
Ok(builder
|
||||
.ins()
|
||||
.call_indirect(sig_ref, func_addr, &real_call_args))
|
||||
}
|
||||
|
||||
fn translate_call(
|
||||
|
||||
@@ -18,8 +18,12 @@ macro_rules! foreach_builtin_function {
|
||||
memory_fill(vmctx, i32, i64, i32, i64) -> ();
|
||||
/// Returns an index for wasm's `memory.init` instruction.
|
||||
memory_init(vmctx, i32, i32, i64, i32, i32) -> ();
|
||||
/// Returns a value for wasm's `ref.func` instruction.
|
||||
ref_func(vmctx, i32) -> (pointer);
|
||||
/// Returns an index for wasm's `data.drop` instruction.
|
||||
data_drop(vmctx, i32) -> ();
|
||||
/// Returns a table entry after lazily initializing it.
|
||||
table_get_lazy_init_funcref(vmctx, i32, i32) -> (pointer);
|
||||
/// Returns an index for Wasm's `table.grow` instruction for `funcref`s.
|
||||
table_grow_funcref(vmctx, i32, i32, pointer) -> (i32);
|
||||
/// Returns an index for Wasm's `table.grow` instruction for `externref`s.
|
||||
|
||||
@@ -29,6 +29,7 @@ mod compilation;
|
||||
mod module;
|
||||
mod module_environ;
|
||||
pub mod obj;
|
||||
mod ref_bits;
|
||||
mod stack_map;
|
||||
mod trap_encoding;
|
||||
mod tunables;
|
||||
@@ -39,6 +40,7 @@ pub use crate::builtin::*;
|
||||
pub use crate::compilation::*;
|
||||
pub use crate::module::*;
|
||||
pub use crate::module_environ::*;
|
||||
pub use crate::ref_bits::*;
|
||||
pub use crate::stack_map::StackMap;
|
||||
pub use crate::trap_encoding::*;
|
||||
pub use crate::tunables::Tunables;
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
//! Data structures for representing decoded wasm modules.
|
||||
|
||||
use crate::{EntityRef, ModuleTranslation, PrimaryMap, Tunables, WASM_PAGE_SIZE};
|
||||
use crate::{ModuleTranslation, PrimaryMap, Tunables, WASM_PAGE_SIZE};
|
||||
use cranelift_entity::{packed_option::ReservedValue, EntityRef};
|
||||
use indexmap::IndexMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::BTreeMap;
|
||||
@@ -259,6 +260,92 @@ impl ModuleTranslation<'_> {
|
||||
}
|
||||
self.module.memory_initialization = MemoryInitialization::Paged { map };
|
||||
}
|
||||
|
||||
/// Attempts to convert the module's table initializers to
|
||||
/// FuncTable form where possible. This enables lazy table
|
||||
/// initialization later by providing a one-to-one map of initial
|
||||
/// table values, without having to parse all segments.
|
||||
pub fn try_func_table_init(&mut self) {
|
||||
// This should be large enough to support very large Wasm
|
||||
// modules with huge funcref tables, but small enough to avoid
|
||||
// OOMs or DoS on truly sparse tables.
|
||||
const MAX_FUNC_TABLE_SIZE: u32 = 1024 * 1024;
|
||||
|
||||
let segments = match &self.module.table_initialization {
|
||||
TableInitialization::Segments { segments } => segments,
|
||||
TableInitialization::FuncTable { .. } => {
|
||||
// Already done!
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// Build the table arrays per-table.
|
||||
let mut tables = PrimaryMap::with_capacity(self.module.table_plans.len());
|
||||
// Keep the "leftovers" for eager init.
|
||||
let mut leftovers = vec![];
|
||||
|
||||
for segment in segments {
|
||||
// Skip imported tables: we can't provide a preconstructed
|
||||
// table for them, because their values depend on the
|
||||
// imported table overlaid with whatever segments we have.
|
||||
if self
|
||||
.module
|
||||
.defined_table_index(segment.table_index)
|
||||
.is_none()
|
||||
{
|
||||
leftovers.push(segment.clone());
|
||||
continue;
|
||||
}
|
||||
|
||||
// If this is not a funcref table, then we can't support a
|
||||
// pre-computed table of function indices.
|
||||
if self.module.table_plans[segment.table_index].table.wasm_ty != WasmType::FuncRef {
|
||||
leftovers.push(segment.clone());
|
||||
continue;
|
||||
}
|
||||
|
||||
// If the base of this segment is dynamic, then we can't
|
||||
// include it in the statically-built array of initial
|
||||
// contents.
|
||||
if segment.base.is_some() {
|
||||
leftovers.push(segment.clone());
|
||||
continue;
|
||||
}
|
||||
|
||||
// Get the end of this segment. If out-of-bounds, or too
|
||||
// large for our dense table representation, then skip the
|
||||
// segment.
|
||||
let top = match segment.offset.checked_add(segment.elements.len() as u32) {
|
||||
Some(top) => top,
|
||||
None => {
|
||||
leftovers.push(segment.clone());
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let table_size = self.module.table_plans[segment.table_index].table.minimum;
|
||||
if top > table_size || top > MAX_FUNC_TABLE_SIZE {
|
||||
leftovers.push(segment.clone());
|
||||
continue;
|
||||
}
|
||||
|
||||
// We can now incorporate this segment into the initializers array.
|
||||
while tables.len() <= segment.table_index.index() {
|
||||
tables.push(vec![]);
|
||||
}
|
||||
let elements = &mut tables[segment.table_index];
|
||||
if elements.is_empty() {
|
||||
elements.resize(table_size as usize, FuncIndex::reserved_value());
|
||||
}
|
||||
|
||||
let dst = &mut elements[(segment.offset as usize)..(top as usize)];
|
||||
dst.copy_from_slice(&segment.elements[..]);
|
||||
}
|
||||
|
||||
self.module.table_initialization = TableInitialization::FuncTable {
|
||||
tables,
|
||||
segments: leftovers,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MemoryInitialization {
|
||||
@@ -460,7 +547,7 @@ impl TablePlan {
|
||||
}
|
||||
}
|
||||
|
||||
/// A WebAssembly table initializer.
|
||||
/// A WebAssembly table initializer segment.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct TableInitializer {
|
||||
/// The index of a table to initialize.
|
||||
@@ -473,6 +560,56 @@ pub struct TableInitializer {
|
||||
pub elements: Box<[FuncIndex]>,
|
||||
}
|
||||
|
||||
/// Table initialization data for all tables in the module.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub enum TableInitialization {
|
||||
/// "Segment" mode: table initializer segments, possibly with
|
||||
/// dynamic bases, possibly applying to an imported memory.
|
||||
///
|
||||
/// Every kind of table initialization is supported by the
|
||||
/// Segments mode.
|
||||
Segments {
|
||||
/// The segment initializers. All apply to the table for which
|
||||
/// this TableInitialization is specified.
|
||||
segments: Vec<TableInitializer>,
|
||||
},
|
||||
|
||||
/// "FuncTable" mode: a single array per table, with a function
|
||||
/// index or null per slot. This is only possible to provide for a
|
||||
/// given table when it is defined by the module itself, and can
|
||||
/// only include data from initializer segments that have
|
||||
/// statically-knowable bases (i.e., not dependent on global
|
||||
/// values).
|
||||
///
|
||||
/// Any segments that are not compatible with this mode are held
|
||||
/// in the `segments` array of "leftover segments", which are
|
||||
/// still processed eagerly.
|
||||
///
|
||||
/// This mode facilitates lazy initialization of the tables. It is
|
||||
/// thus "nice to have", but not necessary for correctness.
|
||||
FuncTable {
|
||||
/// For each table, an array of function indices (or
|
||||
/// FuncIndex::reserved_value(), meaning no initialized value,
|
||||
/// hence null by default). Array elements correspond
|
||||
/// one-to-one to table elements; i.e., `elements[i]` is the
|
||||
/// initial value for `table[i]`.
|
||||
tables: PrimaryMap<TableIndex, Vec<FuncIndex>>,
|
||||
|
||||
/// Leftover segments that need to be processed eagerly on
|
||||
/// instantiation. These either apply to an imported table (so
|
||||
/// we can't pre-build a full image of the table from this
|
||||
/// overlay) or have dynamically (at instantiation time)
|
||||
/// determined bases.
|
||||
segments: Vec<TableInitializer>,
|
||||
},
|
||||
}
|
||||
|
||||
impl Default for TableInitialization {
|
||||
fn default() -> Self {
|
||||
TableInitialization::Segments { segments: vec![] }
|
||||
}
|
||||
}
|
||||
|
||||
/// Different types that can appear in a module.
|
||||
///
|
||||
/// Note that each of these variants are intended to index further into a
|
||||
@@ -512,8 +649,8 @@ pub struct Module {
|
||||
/// The module "start" function, if present.
|
||||
pub start_func: Option<FuncIndex>,
|
||||
|
||||
/// WebAssembly table initializers.
|
||||
pub table_initializers: Vec<TableInitializer>,
|
||||
/// WebAssembly table initialization data, per table.
|
||||
pub table_initialization: TableInitialization,
|
||||
|
||||
/// WebAssembly linear memory initializer.
|
||||
pub memory_initialization: MemoryInitialization,
|
||||
|
||||
@@ -5,8 +5,8 @@ use crate::module::{
|
||||
use crate::{
|
||||
DataIndex, DefinedFuncIndex, ElemIndex, EntityIndex, EntityType, FuncIndex, Global,
|
||||
GlobalIndex, GlobalInit, InstanceIndex, InstanceTypeIndex, MemoryIndex, ModuleIndex,
|
||||
ModuleTypeIndex, PrimaryMap, SignatureIndex, TableIndex, Tunables, TypeIndex, WasmError,
|
||||
WasmFuncType, WasmResult,
|
||||
ModuleTypeIndex, PrimaryMap, SignatureIndex, TableIndex, TableInitialization, Tunables,
|
||||
TypeIndex, WasmError, WasmFuncType, WasmResult,
|
||||
};
|
||||
use cranelift_entity::packed_option::ReservedValue;
|
||||
use std::borrow::Cow;
|
||||
@@ -512,9 +512,6 @@ impl<'data> ModuleEnvironment<'data> {
|
||||
Payload::ElementSection(elements) => {
|
||||
validator.element_section(&elements)?;
|
||||
|
||||
let cnt = usize::try_from(elements.get_count()).unwrap();
|
||||
self.result.module.table_initializers.reserve_exact(cnt);
|
||||
|
||||
for (index, entry) in elements.into_iter().enumerate() {
|
||||
let wasmparser::Element {
|
||||
kind,
|
||||
@@ -527,7 +524,7 @@ impl<'data> ModuleEnvironment<'data> {
|
||||
// entries listed in this segment. Note that it's not
|
||||
// possible to create anything other than a `ref.null
|
||||
// extern` for externref segments, so those just get
|
||||
// translate to the reserved value of `FuncIndex`.
|
||||
// translated to the reserved value of `FuncIndex`.
|
||||
let items_reader = items.get_items_reader()?;
|
||||
let mut elements =
|
||||
Vec::with_capacity(usize::try_from(items_reader.get_count()).unwrap());
|
||||
@@ -576,15 +573,18 @@ impl<'data> ModuleEnvironment<'data> {
|
||||
)));
|
||||
}
|
||||
};
|
||||
self.result
|
||||
.module
|
||||
.table_initializers
|
||||
.push(TableInitializer {
|
||||
table_index,
|
||||
base,
|
||||
offset,
|
||||
elements: elements.into(),
|
||||
});
|
||||
|
||||
let table_segments = match &mut self.result.module.table_initialization
|
||||
{
|
||||
TableInitialization::Segments { segments } => segments,
|
||||
TableInitialization::FuncTable { .. } => unreachable!(),
|
||||
};
|
||||
table_segments.push(TableInitializer {
|
||||
table_index,
|
||||
base,
|
||||
offset,
|
||||
elements: elements.into(),
|
||||
});
|
||||
}
|
||||
|
||||
ElementKind::Passive => {
|
||||
|
||||
36
crates/environ/src/ref_bits.rs
Normal file
36
crates/environ/src/ref_bits.rs
Normal file
@@ -0,0 +1,36 @@
|
||||
//! Definitions for bits in the in-memory / in-table representation of references.
|
||||
|
||||
/// An "initialized bit" in a funcref table.
|
||||
///
|
||||
/// We lazily initialize tables of funcrefs, and this mechanism
|
||||
/// requires us to interpret zero as "uninitialized", triggering a
|
||||
/// slowpath on table read to possibly initialize the element. (This
|
||||
/// has to be *zero* because that is the only value we can cheaply
|
||||
/// initialize, e.g. with newly mmap'd memory.)
|
||||
///
|
||||
/// However, the user can also store a null reference into a table. We
|
||||
/// have to interpret this as "actually null", and not "lazily
|
||||
/// initialize to the original funcref that this slot had".
|
||||
///
|
||||
/// To do so, we rewrite nulls into the "initialized null" value. Note
|
||||
/// that this should *only exist inside the table*: whenever we load a
|
||||
/// value out of a table, we immediately mask off the low bit that
|
||||
/// contains the initialized-null flag. Conversely, when we store into
|
||||
/// a table, we have to translate a true null into an "initialized
|
||||
/// null".
|
||||
///
|
||||
/// We can generalize a bit in order to simplify the table-set logic: we
|
||||
/// can set the LSB of *all* explicitly stored values to 1 in order to
|
||||
/// note that they are indeed explicitly stored. We then mask off this
|
||||
/// bit every time we load.
|
||||
///
|
||||
/// Note that we take care to set this bit and mask it off when
|
||||
/// accessing tables directly in fastpaths in generated code as well.
|
||||
pub const FUNCREF_INIT_BIT: usize = 1;
|
||||
|
||||
/// The mask we apply to all refs loaded from funcref tables.
|
||||
///
|
||||
/// This allows us to use the LSB as an "initialized flag" (see above)
|
||||
/// to distinguish from an uninitialized element in a
|
||||
/// lazily-initialized funcref table.
|
||||
pub const FUNCREF_MASK: usize = !FUNCREF_INIT_BIT;
|
||||
@@ -11,7 +11,7 @@ use crate::vmcontext::{
|
||||
VMCallerCheckedAnyfunc, VMContext, VMFunctionImport, VMGlobalDefinition, VMGlobalImport,
|
||||
VMInterrupts, VMMemoryDefinition, VMMemoryImport, VMTableDefinition, VMTableImport,
|
||||
};
|
||||
use crate::{CompiledModuleId, ExportFunction, ExportGlobal, ExportMemory, ExportTable, Store};
|
||||
use crate::{ExportFunction, ExportGlobal, ExportMemory, ExportTable, ModuleRuntimeInfo, Store};
|
||||
use anyhow::Error;
|
||||
use memoffset::offset_of;
|
||||
use more_asserts::assert_lt;
|
||||
@@ -24,6 +24,7 @@ use std::ptr::NonNull;
|
||||
use std::sync::atomic::AtomicU64;
|
||||
use std::sync::Arc;
|
||||
use std::{mem, ptr, slice};
|
||||
use wasmtime_environ::TableInitialization;
|
||||
use wasmtime_environ::{
|
||||
packed_option::ReservedValue, DataIndex, DefinedGlobalIndex, DefinedMemoryIndex,
|
||||
DefinedTableIndex, ElemIndex, EntityIndex, EntityRef, EntitySet, FuncIndex, GlobalIndex,
|
||||
@@ -51,11 +52,12 @@ pub use allocator::*;
|
||||
/// values, whether or not they were created on the host or through a module.
|
||||
#[repr(C)] // ensure that the vmctx field is last.
|
||||
pub(crate) struct Instance {
|
||||
/// The `Module` this `Instance` was instantiated from.
|
||||
module: Arc<Module>,
|
||||
|
||||
/// The unique ID for the `Module` this `Instance` was instantiated from.
|
||||
unique_id: Option<CompiledModuleId>,
|
||||
/// The runtime info (corresponding to the "compiled module"
|
||||
/// abstraction in higher layers) that is retained and needed for
|
||||
/// lazy initialization. This provides access to the underlying
|
||||
/// Wasm module entities, the compiled JIT code, metadata about
|
||||
/// functions, lazy initialization state, etc.
|
||||
runtime_info: Arc<dyn ModuleRuntimeInfo>,
|
||||
|
||||
/// Offsets in the `vmctx` region, precomputed from the `module` above.
|
||||
offsets: VMOffsets<HostPtr>,
|
||||
@@ -80,12 +82,6 @@ pub(crate) struct Instance {
|
||||
/// If the index is present in the set, the segment has been dropped.
|
||||
dropped_data: EntitySet<DataIndex>,
|
||||
|
||||
/// A slice pointing to all data that is referenced by this instance. This
|
||||
/// data is managed externally so this is effectively an unsafe reference,
|
||||
/// and this does not live for the `'static` lifetime so the API boundaries
|
||||
/// here are careful to never hand out static references.
|
||||
wasm_data: &'static [u8],
|
||||
|
||||
/// Hosts can store arbitrary per-instance information here.
|
||||
///
|
||||
/// Most of the time from Wasmtime this is `Box::new(())`, a noop
|
||||
@@ -102,23 +98,23 @@ pub(crate) struct Instance {
|
||||
impl Instance {
|
||||
/// Helper for allocators; not a public API.
|
||||
pub(crate) fn create_raw(
|
||||
module: &Arc<Module>,
|
||||
unique_id: Option<CompiledModuleId>,
|
||||
wasm_data: &'static [u8],
|
||||
runtime_info: Arc<dyn ModuleRuntimeInfo>,
|
||||
memories: PrimaryMap<DefinedMemoryIndex, Memory>,
|
||||
tables: PrimaryMap<DefinedTableIndex, Table>,
|
||||
host_state: Box<dyn Any + Send + Sync>,
|
||||
) -> Instance {
|
||||
let module = runtime_info.module();
|
||||
let offsets = VMOffsets::new(HostPtr, &module);
|
||||
let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
|
||||
let dropped_data = EntitySet::with_capacity(module.passive_data_map.len());
|
||||
Instance {
|
||||
module: module.clone(),
|
||||
unique_id,
|
||||
offsets: VMOffsets::new(HostPtr, &module),
|
||||
runtime_info,
|
||||
offsets,
|
||||
memories,
|
||||
tables,
|
||||
dropped_elements: EntitySet::with_capacity(module.passive_elements.len()),
|
||||
dropped_data: EntitySet::with_capacity(module.passive_data_map.len()),
|
||||
dropped_elements,
|
||||
dropped_data,
|
||||
host_state,
|
||||
wasm_data,
|
||||
vmctx: VMContext {
|
||||
_marker: std::marker::PhantomPinned,
|
||||
},
|
||||
@@ -134,7 +130,7 @@ impl Instance {
|
||||
}
|
||||
|
||||
pub(crate) fn module(&self) -> &Arc<Module> {
|
||||
&self.module
|
||||
self.runtime_info.module()
|
||||
}
|
||||
|
||||
/// Return the indexed `VMFunctionImport`.
|
||||
@@ -177,7 +173,7 @@ impl Instance {
|
||||
|
||||
/// Get a locally defined or imported memory.
|
||||
pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
|
||||
if let Some(defined_index) = self.module.defined_memory_index(index) {
|
||||
if let Some(defined_index) = self.module().defined_memory_index(index) {
|
||||
self.memory(defined_index)
|
||||
} else {
|
||||
let import = self.imported_memory(index);
|
||||
@@ -220,7 +216,7 @@ impl Instance {
|
||||
&self,
|
||||
index: GlobalIndex,
|
||||
) -> *mut VMGlobalDefinition {
|
||||
if let Some(index) = self.module.defined_global_index(index) {
|
||||
if let Some(index) = self.module().defined_global_index(index) {
|
||||
self.global_ptr(index)
|
||||
} else {
|
||||
self.imported_global(index).from
|
||||
@@ -276,7 +272,7 @@ impl Instance {
|
||||
}
|
||||
|
||||
/// Lookup an export with the given export declaration.
|
||||
pub fn lookup_by_declaration(&self, export: &EntityIndex) -> Export {
|
||||
pub fn lookup_by_declaration(&mut self, export: &EntityIndex) -> Export {
|
||||
match export {
|
||||
EntityIndex::Function(index) => {
|
||||
let anyfunc = self.get_caller_checked_anyfunc(*index).unwrap();
|
||||
@@ -286,7 +282,7 @@ impl Instance {
|
||||
}
|
||||
EntityIndex::Table(index) => {
|
||||
let (definition, vmctx) =
|
||||
if let Some(def_index) = self.module.defined_table_index(*index) {
|
||||
if let Some(def_index) = self.module().defined_table_index(*index) {
|
||||
(self.table_ptr(def_index), self.vmctx_ptr())
|
||||
} else {
|
||||
let import = self.imported_table(*index);
|
||||
@@ -295,13 +291,13 @@ impl Instance {
|
||||
ExportTable {
|
||||
definition,
|
||||
vmctx,
|
||||
table: self.module.table_plans[*index].clone(),
|
||||
table: self.module().table_plans[*index].clone(),
|
||||
}
|
||||
.into()
|
||||
}
|
||||
EntityIndex::Memory(index) => {
|
||||
let (definition, vmctx) =
|
||||
if let Some(def_index) = self.module.defined_memory_index(*index) {
|
||||
if let Some(def_index) = self.module().defined_memory_index(*index) {
|
||||
(self.memory_ptr(def_index), self.vmctx_ptr())
|
||||
} else {
|
||||
let import = self.imported_memory(*index);
|
||||
@@ -310,18 +306,18 @@ impl Instance {
|
||||
ExportMemory {
|
||||
definition,
|
||||
vmctx,
|
||||
memory: self.module.memory_plans[*index].clone(),
|
||||
memory: self.module().memory_plans[*index].clone(),
|
||||
}
|
||||
.into()
|
||||
}
|
||||
EntityIndex::Global(index) => ExportGlobal {
|
||||
definition: if let Some(def_index) = self.module.defined_global_index(*index) {
|
||||
definition: if let Some(def_index) = self.module().defined_global_index(*index) {
|
||||
self.global_ptr(def_index)
|
||||
} else {
|
||||
self.imported_global(*index).from
|
||||
},
|
||||
vmctx: self.vmctx_ptr(),
|
||||
global: self.module.globals[*index],
|
||||
global: self.module().globals[*index],
|
||||
}
|
||||
.into(),
|
||||
|
||||
@@ -337,7 +333,7 @@ impl Instance {
|
||||
/// are export names, and the values are export declarations which can be
|
||||
/// resolved `lookup_by_declaration`.
|
||||
pub fn exports(&self) -> indexmap::map::Iter<String, EntityIndex> {
|
||||
self.module.exports.iter()
|
||||
self.module().exports.iter()
|
||||
}
|
||||
|
||||
/// Return a reference to the custom state attached to this instance.
|
||||
@@ -388,7 +384,7 @@ impl Instance {
|
||||
index: MemoryIndex,
|
||||
delta: u64,
|
||||
) -> Result<Option<usize>, Error> {
|
||||
let (idx, instance) = if let Some(idx) = self.module.defined_memory_index(index) {
|
||||
let (idx, instance) = if let Some(idx) = self.module().defined_memory_index(index) {
|
||||
(idx, self)
|
||||
} else {
|
||||
let import = self.imported_memory(index);
|
||||
@@ -462,6 +458,42 @@ impl Instance {
|
||||
Layout::from_size_align(size, align).unwrap()
|
||||
}
|
||||
|
||||
/// Construct a new VMCallerCheckedAnyfunc for the given function
|
||||
/// (imported or defined in this module) and store into the given
|
||||
/// location. Used during lazy initialization.
|
||||
///
|
||||
/// Note that our current lazy-init scheme actually calls this every
|
||||
/// time the anyfunc pointer is fetched; this turns out to be better
|
||||
/// than tracking state related to whether it's been initialized
|
||||
/// before, because resetting that state on (re)instantiation is
|
||||
/// very expensive if there are many anyfuncs.
|
||||
fn construct_anyfunc(&mut self, index: FuncIndex, into: *mut VMCallerCheckedAnyfunc) {
|
||||
let sig = self.module().functions[index];
|
||||
let type_index = self.runtime_info.signature(sig);
|
||||
|
||||
let (func_ptr, vmctx) = if let Some(def_index) = self.module().defined_func_index(index) {
|
||||
(
|
||||
(self.runtime_info.image_base()
|
||||
+ self.runtime_info.function_info(def_index).start as usize)
|
||||
as *mut _,
|
||||
self.vmctx_ptr(),
|
||||
)
|
||||
} else {
|
||||
let import = self.imported_function(index);
|
||||
(import.body.as_ptr(), import.vmctx)
|
||||
};
|
||||
|
||||
// Safety: we have a `&mut self`, so we have exclusive access
|
||||
// to this Instance.
|
||||
unsafe {
|
||||
*into = VMCallerCheckedAnyfunc {
|
||||
vmctx,
|
||||
type_index,
|
||||
func_ptr: NonNull::new(func_ptr).expect("Non-null function pointer"),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a `&VMCallerCheckedAnyfunc` for the given `FuncIndex`.
|
||||
///
|
||||
/// Returns `None` if the index is the reserved index value.
|
||||
@@ -469,18 +501,46 @@ impl Instance {
|
||||
/// The returned reference is a stable reference that won't be moved and can
|
||||
/// be passed into JIT code.
|
||||
pub(crate) fn get_caller_checked_anyfunc(
|
||||
&self,
|
||||
&mut self,
|
||||
index: FuncIndex,
|
||||
) -> Option<&VMCallerCheckedAnyfunc> {
|
||||
) -> Option<*mut VMCallerCheckedAnyfunc> {
|
||||
if index == FuncIndex::reserved_value() {
|
||||
return None;
|
||||
}
|
||||
|
||||
unsafe { Some(&*self.vmctx_plus_offset(self.offsets.vmctx_anyfunc(index))) }
|
||||
}
|
||||
// Safety: we have a `&mut self`, so we have exclusive access
|
||||
// to this Instance.
|
||||
unsafe {
|
||||
// For now, we eagerly initialize an anyfunc struct in-place
|
||||
// whenever asked for a reference to it. This is mostly
|
||||
// fine, because in practice each anyfunc is unlikely to be
|
||||
// requested more than a few times: once-ish for funcref
|
||||
// tables used for call_indirect (the usual compilation
|
||||
// strategy places each function in the table at most once),
|
||||
// and once or a few times when fetching exports via API.
|
||||
// Note that for any case driven by table accesses, the lazy
|
||||
// table init behaves like a higher-level cache layer that
|
||||
// protects this initialization from happening multiple
|
||||
// times, via that particular table at least.
|
||||
//
|
||||
// When `ref.func` becomes more commonly used or if we
|
||||
// otherwise see a use-case where this becomes a hotpath,
|
||||
// we can reconsider by using some state to track
|
||||
// "uninitialized" explicitly, for example by zeroing the
|
||||
// anyfuncs (perhaps together with other
|
||||
// zeroed-at-instantiate-time state) or using a separate
|
||||
// is-initialized bitmap.
|
||||
//
|
||||
// We arrived at this design because zeroing memory is
|
||||
// expensive, so it's better for instantiation performance
|
||||
// if we don't have to track "is-initialized" state at
|
||||
// all!
|
||||
let anyfunc: *mut VMCallerCheckedAnyfunc =
|
||||
self.vmctx_plus_offset::<VMCallerCheckedAnyfunc>(self.offsets.vmctx_anyfunc(index));
|
||||
self.construct_anyfunc(index, anyfunc);
|
||||
|
||||
unsafe fn anyfunc_base(&self) -> *mut VMCallerCheckedAnyfunc {
|
||||
self.vmctx_plus_offset(self.offsets.vmctx_anyfuncs_begin())
|
||||
Some(anyfunc)
|
||||
}
|
||||
}
|
||||
|
||||
/// The `table.init` operation: initializes a portion of a table with a
|
||||
@@ -501,7 +561,7 @@ impl Instance {
|
||||
// TODO: this `clone()` shouldn't be necessary but is used for now to
|
||||
// inform `rustc` that the lifetime of the elements here are
|
||||
// disconnected from the lifetime of `self`.
|
||||
let module = self.module.clone();
|
||||
let module = self.module().clone();
|
||||
|
||||
let elements = match module.passive_elements_map.get(&elem_index) {
|
||||
Some(index) if !self.dropped_elements.contains(elem_index) => {
|
||||
@@ -533,20 +593,15 @@ impl Instance {
|
||||
};
|
||||
|
||||
match table.element_type() {
|
||||
TableElementType::Func => unsafe {
|
||||
let base = self.anyfunc_base();
|
||||
TableElementType::Func => {
|
||||
table.init_funcs(
|
||||
dst,
|
||||
elements.iter().map(|idx| {
|
||||
if *idx == FuncIndex::reserved_value() {
|
||||
ptr::null_mut()
|
||||
} else {
|
||||
debug_assert!(idx.as_u32() < self.offsets.num_defined_functions);
|
||||
base.add(usize::try_from(idx.as_u32()).unwrap())
|
||||
}
|
||||
self.get_caller_checked_anyfunc(*idx)
|
||||
.unwrap_or(std::ptr::null_mut())
|
||||
}),
|
||||
)?;
|
||||
},
|
||||
}
|
||||
|
||||
TableElementType::Extern => {
|
||||
debug_assert!(elements.iter().all(|e| *e == FuncIndex::reserved_value()));
|
||||
@@ -657,7 +712,7 @@ impl Instance {
|
||||
src: u32,
|
||||
len: u32,
|
||||
) -> Result<(), Trap> {
|
||||
let range = match self.module.passive_data_map.get(&data_index).cloned() {
|
||||
let range = match self.module().passive_data_map.get(&data_index).cloned() {
|
||||
Some(range) if !self.dropped_data.contains(data_index) => range,
|
||||
_ => 0..0,
|
||||
};
|
||||
@@ -665,7 +720,7 @@ impl Instance {
|
||||
}
|
||||
|
||||
pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
|
||||
&self.wasm_data[range.start as usize..range.end as usize]
|
||||
&self.runtime_info.wasm_data()[range.start as usize..range.end as usize]
|
||||
}
|
||||
|
||||
pub(crate) fn memory_init_segment(
|
||||
@@ -703,6 +758,74 @@ impl Instance {
|
||||
// dropping a non-passive segment is a no-op (not a trap).
|
||||
}
|
||||
|
||||
/// Get a table by index regardless of whether it is locally-defined
|
||||
/// or an imported, foreign table. Ensure that the given range of
|
||||
/// elements in the table is lazily initialized. We define this
|
||||
/// operation all-in-one for safety, to ensure the lazy-init
|
||||
/// happens.
|
||||
///
|
||||
/// Takes an `Iterator` for the index-range to lazy-initialize,
|
||||
/// for flexibility. This can be a range, single item, or empty
|
||||
/// sequence, for example. The iterator should return indices in
|
||||
/// increasing order, so that the break-at-out-of-bounds behavior
|
||||
/// works correctly.
|
||||
pub(crate) fn get_table_with_lazy_init(
|
||||
&mut self,
|
||||
table_index: TableIndex,
|
||||
range: impl Iterator<Item = u32>,
|
||||
) -> *mut Table {
|
||||
let (idx, instance) = self.get_defined_table_index_and_instance(table_index);
|
||||
let elt_ty = instance.tables[idx].element_type();
|
||||
|
||||
if elt_ty == TableElementType::Func {
|
||||
for i in range {
|
||||
let value = match instance.tables[idx].get(i) {
|
||||
Some(value) => value,
|
||||
None => {
|
||||
// Out-of-bounds; caller will handle by likely
|
||||
// throwing a trap. No work to do to lazy-init
|
||||
// beyond the end.
|
||||
break;
|
||||
}
|
||||
};
|
||||
if value.is_uninit() {
|
||||
let table_init = match &instance.module().table_initialization {
|
||||
// We unfortunately can't borrow `tables`
|
||||
// outside the loop because we need to call
|
||||
// `get_caller_checked_anyfunc` (a `&mut`
|
||||
// method) below; so unwrap it dynamically
|
||||
// here.
|
||||
TableInitialization::FuncTable { tables, .. } => tables,
|
||||
_ => break,
|
||||
}
|
||||
.get(table_index);
|
||||
|
||||
// The TableInitialization::FuncTable elements table may
|
||||
// be smaller than the current size of the table: it
|
||||
// always matches the initial table size, if present. We
|
||||
// want to iterate up through the end of the accessed
|
||||
// index range so that we set an "initialized null" even
|
||||
// if there is no initializer. We do a checked `get()` on
|
||||
// the initializer table below and unwrap to a null if
|
||||
// we're past its end.
|
||||
let func_index =
|
||||
table_init.and_then(|indices| indices.get(i as usize).cloned());
|
||||
let anyfunc = func_index
|
||||
.and_then(|func_index| instance.get_caller_checked_anyfunc(func_index))
|
||||
.unwrap_or(std::ptr::null_mut());
|
||||
|
||||
let value = TableElement::FuncRef(anyfunc);
|
||||
|
||||
instance.tables[idx]
|
||||
.set(i, value)
|
||||
.expect("Table type should match and index should be in-bounds");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ptr::addr_of_mut!(instance.tables[idx])
|
||||
}
|
||||
|
||||
/// Get a table by index regardless of whether it is locally-defined or an
|
||||
/// imported, foreign table.
|
||||
pub(crate) fn get_table(&mut self, table_index: TableIndex) -> *mut Table {
|
||||
@@ -719,7 +842,7 @@ impl Instance {
|
||||
&mut self,
|
||||
index: TableIndex,
|
||||
) -> (DefinedTableIndex, &mut Instance) {
|
||||
if let Some(defined_table_index) = self.module.defined_table_index(index) {
|
||||
if let Some(defined_table_index) = self.module().defined_table_index(index) {
|
||||
(defined_table_index, self)
|
||||
} else {
|
||||
let import = self.imported_table(index);
|
||||
@@ -733,8 +856,8 @@ impl Instance {
|
||||
}
|
||||
|
||||
fn drop_globals(&mut self) {
|
||||
for (idx, global) in self.module.globals.iter() {
|
||||
let idx = match self.module.defined_global_index(idx) {
|
||||
for (idx, global) in self.module().globals.iter() {
|
||||
let idx = match self.module().defined_global_index(idx) {
|
||||
Some(idx) => idx,
|
||||
None => continue,
|
||||
};
|
||||
@@ -804,8 +927,8 @@ impl InstanceHandle {
|
||||
}
|
||||
|
||||
/// Lookup an export with the given export declaration.
|
||||
pub fn lookup_by_declaration(&self, export: &EntityIndex) -> Export {
|
||||
self.instance().lookup_by_declaration(export)
|
||||
pub fn lookup_by_declaration(&mut self, export: &EntityIndex) -> Export {
|
||||
self.instance_mut().lookup_by_declaration(export)
|
||||
}
|
||||
|
||||
/// Return an iterator over the exports of this instance.
|
||||
@@ -842,6 +965,17 @@ impl InstanceHandle {
|
||||
self.instance_mut().get_defined_table(index)
|
||||
}
|
||||
|
||||
/// Get a table defined locally within this module, lazily
|
||||
/// initializing the given range first.
|
||||
pub fn get_defined_table_with_lazy_init(
|
||||
&mut self,
|
||||
index: DefinedTableIndex,
|
||||
range: impl Iterator<Item = u32>,
|
||||
) -> *mut Table {
|
||||
let index = self.instance().module().table_index(index);
|
||||
self.instance_mut().get_table_with_lazy_init(index, range)
|
||||
}
|
||||
|
||||
/// Return a reference to the contained `Instance`.
|
||||
#[inline]
|
||||
pub(crate) fn instance(&self) -> &Instance {
|
||||
|
||||
@@ -6,20 +6,20 @@ use crate::traphandlers::Trap;
|
||||
use crate::vmcontext::{
|
||||
VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMGlobalDefinition, VMSharedSignatureIndex,
|
||||
};
|
||||
use crate::ModuleMemFds;
|
||||
use crate::{CompiledModuleId, Store};
|
||||
use crate::ModuleRuntimeInfo;
|
||||
use crate::Store;
|
||||
use anyhow::Result;
|
||||
use std::alloc;
|
||||
use std::any::Any;
|
||||
use std::convert::TryFrom;
|
||||
use std::ptr::{self, NonNull};
|
||||
use std::ptr;
|
||||
use std::slice;
|
||||
use std::sync::Arc;
|
||||
use thiserror::Error;
|
||||
use wasmtime_environ::{
|
||||
DefinedFuncIndex, DefinedMemoryIndex, DefinedTableIndex, EntityRef, FunctionInfo, GlobalInit,
|
||||
InitMemory, MemoryInitialization, MemoryInitializer, Module, ModuleType, PrimaryMap,
|
||||
SignatureIndex, TableInitializer, TrapCode, WasmType, WASM_PAGE_SIZE,
|
||||
DefinedMemoryIndex, DefinedTableIndex, EntityRef, GlobalInit, InitMemory, MemoryInitialization,
|
||||
MemoryInitializer, Module, ModuleType, PrimaryMap, TableInitialization, TableInitializer,
|
||||
TrapCode, WasmType, WASM_PAGE_SIZE,
|
||||
};
|
||||
|
||||
#[cfg(feature = "pooling-allocator")]
|
||||
@@ -32,28 +32,16 @@ pub use self::pooling::{
|
||||
|
||||
/// Represents a request for a new runtime instance.
|
||||
pub struct InstanceAllocationRequest<'a> {
|
||||
/// The module being instantiated.
|
||||
pub module: &'a Arc<Module>,
|
||||
|
||||
/// The unique ID of the module being allocated within this engine.
|
||||
pub unique_id: Option<CompiledModuleId>,
|
||||
|
||||
/// The base address of where JIT functions are located.
|
||||
pub image_base: usize,
|
||||
|
||||
/// If using MemFD-based memories, the backing MemFDs.
|
||||
pub memfds: Option<&'a Arc<ModuleMemFds>>,
|
||||
|
||||
/// Descriptors about each compiled function, such as the offset from
|
||||
/// `image_base`.
|
||||
pub functions: &'a PrimaryMap<DefinedFuncIndex, FunctionInfo>,
|
||||
/// The info related to the compiled version of this module,
|
||||
/// needed for instantiation: function metadata, JIT code
|
||||
/// addresses, precomputed images for lazy memory and table
|
||||
/// initialization, and the like. This Arc is cloned and held for
|
||||
/// the lifetime of the instance.
|
||||
pub runtime_info: &'a Arc<dyn ModuleRuntimeInfo>,
|
||||
|
||||
/// The imports to use for the instantiation.
|
||||
pub imports: Imports<'a>,
|
||||
|
||||
/// Translation from `SignatureIndex` to `VMSharedSignatureIndex`
|
||||
pub shared_signatures: SharedSignatures<'a>,
|
||||
|
||||
/// The host state to associate with the instance.
|
||||
pub host_state: Box<dyn Any + Send + Sync>,
|
||||
|
||||
@@ -72,16 +60,6 @@ pub struct InstanceAllocationRequest<'a> {
|
||||
/// We use a number of `PhantomPinned` declarations to indicate this to the
|
||||
/// compiler. More info on this in `wasmtime/src/store.rs`
|
||||
pub store: StorePtr,
|
||||
|
||||
/// A list of all wasm data that can be referenced by the module that
|
||||
/// will be allocated. The `Module` given here has active/passive data
|
||||
/// segments that are specified as relative indices into this list of bytes.
|
||||
///
|
||||
/// Note that this is an unsafe pointer. The pointer is expected to live for
|
||||
/// the entire duration of the instance at this time. It's the
|
||||
/// responsibility of the callee when allocating to ensure that this data
|
||||
/// outlives the instance.
|
||||
pub wasm_data: *const [u8],
|
||||
}
|
||||
|
||||
/// A pointer to a Store. This Option<*mut dyn Store> is wrapped in a struct
|
||||
@@ -218,46 +196,6 @@ pub unsafe trait InstanceAllocator: Send + Sync {
|
||||
unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack);
|
||||
}
|
||||
|
||||
pub enum SharedSignatures<'a> {
|
||||
/// Used for instantiating user-defined modules
|
||||
Table(&'a PrimaryMap<SignatureIndex, VMSharedSignatureIndex>),
|
||||
/// Used for instance creation that has only a single function
|
||||
Always(VMSharedSignatureIndex),
|
||||
/// Used for instance creation that has no functions
|
||||
None,
|
||||
}
|
||||
|
||||
impl SharedSignatures<'_> {
|
||||
fn lookup(&self, index: SignatureIndex) -> VMSharedSignatureIndex {
|
||||
match self {
|
||||
SharedSignatures::Table(table) => table[index],
|
||||
SharedSignatures::Always(index) => *index,
|
||||
SharedSignatures::None => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<VMSharedSignatureIndex> for SharedSignatures<'a> {
|
||||
fn from(val: VMSharedSignatureIndex) -> SharedSignatures<'a> {
|
||||
SharedSignatures::Always(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<Option<VMSharedSignatureIndex>> for SharedSignatures<'a> {
|
||||
fn from(val: Option<VMSharedSignatureIndex>) -> SharedSignatures<'a> {
|
||||
match val {
|
||||
Some(idx) => SharedSignatures::Always(idx),
|
||||
None => SharedSignatures::None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a PrimaryMap<SignatureIndex, VMSharedSignatureIndex>> for SharedSignatures<'a> {
|
||||
fn from(val: &'a PrimaryMap<SignatureIndex, VMSharedSignatureIndex>) -> SharedSignatures<'a> {
|
||||
SharedSignatures::Table(val)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_table_init_start(
|
||||
init: &TableInitializer,
|
||||
instance: &Instance,
|
||||
@@ -265,7 +203,7 @@ fn get_table_init_start(
|
||||
match init.base {
|
||||
Some(base) => {
|
||||
let val = unsafe {
|
||||
if let Some(def_index) = instance.module.defined_global_index(base) {
|
||||
if let Some(def_index) = instance.module().defined_global_index(base) {
|
||||
*instance.global(def_index).as_u32()
|
||||
} else {
|
||||
*(*instance.imported_global(base).from).as_u32()
|
||||
@@ -286,20 +224,25 @@ fn check_table_init_bounds(
|
||||
instance: &mut Instance,
|
||||
module: &Module,
|
||||
) -> Result<(), InstantiationError> {
|
||||
for init in &module.table_initializers {
|
||||
let table = unsafe { &*instance.get_table(init.table_index) };
|
||||
let start = get_table_init_start(init, instance)?;
|
||||
let start = usize::try_from(start).unwrap();
|
||||
let end = start.checked_add(init.elements.len());
|
||||
match &module.table_initialization {
|
||||
TableInitialization::FuncTable { segments, .. }
|
||||
| TableInitialization::Segments { segments } => {
|
||||
for segment in segments {
|
||||
let table = unsafe { &*instance.get_table(segment.table_index) };
|
||||
let start = get_table_init_start(segment, instance)?;
|
||||
let start = usize::try_from(start).unwrap();
|
||||
let end = start.checked_add(segment.elements.len());
|
||||
|
||||
match end {
|
||||
Some(end) if end <= table.size() as usize => {
|
||||
// Initializer is in bounds
|
||||
}
|
||||
_ => {
|
||||
return Err(InstantiationError::Link(LinkError(
|
||||
"table out of bounds: elements segment does not fit".to_owned(),
|
||||
)))
|
||||
match end {
|
||||
Some(end) if end <= table.size() as usize => {
|
||||
// Initializer is in bounds
|
||||
}
|
||||
_ => {
|
||||
return Err(InstantiationError::Link(LinkError(
|
||||
"table out of bounds: elements segment does not fit".to_owned(),
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -308,16 +251,28 @@ fn check_table_init_bounds(
|
||||
}
|
||||
|
||||
fn initialize_tables(instance: &mut Instance, module: &Module) -> Result<(), InstantiationError> {
|
||||
for init in &module.table_initializers {
|
||||
instance
|
||||
.table_init_segment(
|
||||
init.table_index,
|
||||
&init.elements,
|
||||
get_table_init_start(init, instance)?,
|
||||
0,
|
||||
init.elements.len() as u32,
|
||||
)
|
||||
.map_err(InstantiationError::Trap)?;
|
||||
// Note: if the module's table initializer state is in
|
||||
// FuncTable mode, we will lazily initialize tables based on
|
||||
// any statically-precomputed image of FuncIndexes, but there
|
||||
// may still be "leftover segments" that could not be
|
||||
// incorporated. So we have a unified handler here that
|
||||
// iterates over all segments (Segments mode) or leftover
|
||||
// segments (FuncTable mode) to initialize.
|
||||
match &module.table_initialization {
|
||||
TableInitialization::FuncTable { segments, .. }
|
||||
| TableInitialization::Segments { segments } => {
|
||||
for segment in segments {
|
||||
instance
|
||||
.table_init_segment(
|
||||
segment.table_index,
|
||||
&segment.elements,
|
||||
get_table_init_start(segment, instance)?,
|
||||
0,
|
||||
segment.elements.len() as u32,
|
||||
)
|
||||
.map_err(InstantiationError::Trap)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -329,11 +284,11 @@ fn get_memory_init_start(
|
||||
) -> Result<u64, InstantiationError> {
|
||||
match init.base {
|
||||
Some(base) => {
|
||||
let mem64 = instance.module.memory_plans[init.memory_index]
|
||||
let mem64 = instance.module().memory_plans[init.memory_index]
|
||||
.memory
|
||||
.memory64;
|
||||
let val = unsafe {
|
||||
let global = if let Some(def_index) = instance.module.defined_global_index(base) {
|
||||
let global = if let Some(def_index) = instance.module().defined_global_index(base) {
|
||||
instance.global(def_index)
|
||||
} else {
|
||||
&*instance.imported_global(base).from
|
||||
@@ -386,7 +341,7 @@ fn initialize_memories(instance: &mut Instance, module: &Module) -> Result<(), I
|
||||
// Loads the `global` value and returns it as a `u64`, but sign-extends
|
||||
// 32-bit globals which can be used as the base for 32-bit memories.
|
||||
let get_global_as_u64 = &|global| unsafe {
|
||||
let def = if let Some(def_index) = instance.module.defined_global_index(global) {
|
||||
let def = if let Some(def_index) = instance.module().defined_global_index(global) {
|
||||
instance.global(def_index)
|
||||
} else {
|
||||
&*instance.imported_global(global).from
|
||||
@@ -441,7 +396,7 @@ fn initialize_memories(instance: &mut Instance, module: &Module) -> Result<(), I
|
||||
fn check_init_bounds(instance: &mut Instance, module: &Module) -> Result<(), InstantiationError> {
|
||||
check_table_init_bounds(instance, module)?;
|
||||
|
||||
match &instance.module.memory_initialization {
|
||||
match &instance.module().memory_initialization {
|
||||
MemoryInitialization::Segmented(initializers) => {
|
||||
check_memory_init_bounds(instance, initializers)?;
|
||||
}
|
||||
@@ -474,6 +429,11 @@ fn initialize_instance(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Initialize the VMContext data associated with an Instance.
|
||||
///
|
||||
/// The `VMContext` memory is assumed to be uninitialized; any field
|
||||
/// that we need in a certain state will be explicitly written by this
|
||||
/// function.
|
||||
unsafe fn initialize_vmcontext(instance: &mut Instance, req: InstanceAllocationRequest) {
|
||||
if let Some(store) = req.store.as_raw() {
|
||||
*instance.interrupts() = (*store).vminterrupts();
|
||||
@@ -482,13 +442,13 @@ unsafe fn initialize_vmcontext(instance: &mut Instance, req: InstanceAllocationR
|
||||
instance.set_store(store);
|
||||
}
|
||||
|
||||
let module = &instance.module;
|
||||
let module = req.runtime_info.module();
|
||||
|
||||
// Initialize shared signatures
|
||||
let mut ptr = instance.vmctx_plus_offset(instance.offsets.vmctx_signature_ids_begin());
|
||||
for sig in module.types.values() {
|
||||
*ptr = match sig {
|
||||
ModuleType::Function(sig) => req.shared_signatures.lookup(*sig),
|
||||
ModuleType::Function(sig) => req.runtime_info.signature(*sig),
|
||||
_ => VMSharedSignatureIndex::new(u32::max_value()),
|
||||
};
|
||||
ptr = ptr.add(1);
|
||||
@@ -524,32 +484,11 @@ unsafe fn initialize_vmcontext(instance: &mut Instance, req: InstanceAllocationR
|
||||
req.imports.globals.len(),
|
||||
);
|
||||
|
||||
// Initialize the functions
|
||||
let mut base = instance.anyfunc_base();
|
||||
for (index, sig) in instance.module.functions.iter() {
|
||||
let type_index = req.shared_signatures.lookup(*sig);
|
||||
|
||||
let (func_ptr, vmctx) = if let Some(def_index) = instance.module.defined_func_index(index) {
|
||||
(
|
||||
NonNull::new((req.image_base + req.functions[def_index].start as usize) as *mut _)
|
||||
.unwrap(),
|
||||
instance.vmctx_ptr(),
|
||||
)
|
||||
} else {
|
||||
let import = instance.imported_function(index);
|
||||
(import.body, import.vmctx)
|
||||
};
|
||||
|
||||
ptr::write(
|
||||
base,
|
||||
VMCallerCheckedAnyfunc {
|
||||
func_ptr,
|
||||
type_index,
|
||||
vmctx,
|
||||
},
|
||||
);
|
||||
base = base.add(1);
|
||||
}
|
||||
// N.B.: there is no need to initialize the anyfuncs array because
|
||||
// we eagerly construct each element in it whenever asked for a
|
||||
// reference to that element. In other words, there is no state
|
||||
// needed to track the lazy-init, so we don't need to initialize
|
||||
// any state now.
|
||||
|
||||
// Initialize the defined tables
|
||||
let mut ptr = instance.vmctx_plus_offset(instance.offsets.vmctx_tables_begin());
|
||||
@@ -569,11 +508,13 @@ unsafe fn initialize_vmcontext(instance: &mut Instance, req: InstanceAllocationR
|
||||
}
|
||||
|
||||
// Initialize the defined globals
|
||||
initialize_vmcontext_globals(instance);
|
||||
initialize_vmcontext_globals(instance, module);
|
||||
}
|
||||
|
||||
unsafe fn initialize_vmcontext_globals(instance: &Instance) {
|
||||
let module = &instance.module;
|
||||
unsafe fn initialize_vmcontext_globals(
|
||||
instance: &mut Instance,
|
||||
module: &Arc<wasmtime_environ::Module>,
|
||||
) {
|
||||
let num_imports = module.num_imported_globals;
|
||||
for (index, global) in module.globals.iter().skip(num_imports) {
|
||||
let def_index = module.defined_global_index(index).unwrap();
|
||||
@@ -637,13 +578,14 @@ impl OnDemandInstanceAllocator {
|
||||
}
|
||||
|
||||
fn create_tables(
|
||||
module: &Module,
|
||||
store: &mut StorePtr,
|
||||
runtime_info: &Arc<dyn ModuleRuntimeInfo>,
|
||||
) -> Result<PrimaryMap<DefinedTableIndex, Table>, InstantiationError> {
|
||||
let module = runtime_info.module();
|
||||
let num_imports = module.num_imported_tables;
|
||||
let mut tables: PrimaryMap<DefinedTableIndex, _> =
|
||||
PrimaryMap::with_capacity(module.table_plans.len() - num_imports);
|
||||
for table in &module.table_plans.values().as_slice()[num_imports..] {
|
||||
for (_, table) in module.table_plans.iter().skip(num_imports) {
|
||||
tables.push(
|
||||
Table::new_dynamic(table, unsafe {
|
||||
store
|
||||
@@ -658,10 +600,10 @@ impl OnDemandInstanceAllocator {
|
||||
|
||||
fn create_memories(
|
||||
&self,
|
||||
module: &Module,
|
||||
store: &mut StorePtr,
|
||||
memfds: Option<&Arc<ModuleMemFds>>,
|
||||
runtime_info: &Arc<dyn ModuleRuntimeInfo>,
|
||||
) -> Result<PrimaryMap<DefinedMemoryIndex, Memory>, InstantiationError> {
|
||||
let module = runtime_info.module();
|
||||
let creator = self
|
||||
.mem_creator
|
||||
.as_deref()
|
||||
@@ -674,7 +616,9 @@ impl OnDemandInstanceAllocator {
|
||||
let defined_memory_idx = module
|
||||
.defined_memory_index(memory_idx)
|
||||
.expect("Skipped imports, should never be None");
|
||||
let memfd_image = memfds.and_then(|memfds| memfds.get_memory_image(defined_memory_idx));
|
||||
let memfd_image = runtime_info
|
||||
.memfd_image(defined_memory_idx)
|
||||
.map_err(|err| InstantiationError::Resource(err.into()))?;
|
||||
|
||||
memories.push(
|
||||
Memory::new_dynamic(
|
||||
@@ -709,20 +653,14 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
|
||||
&self,
|
||||
mut req: InstanceAllocationRequest,
|
||||
) -> Result<InstanceHandle, InstantiationError> {
|
||||
let memories = self.create_memories(&req.module, &mut req.store, req.memfds)?;
|
||||
let tables = Self::create_tables(&req.module, &mut req.store)?;
|
||||
let memories = self.create_memories(&mut req.store, &req.runtime_info)?;
|
||||
let tables = Self::create_tables(&mut req.store, &req.runtime_info)?;
|
||||
|
||||
let host_state = std::mem::replace(&mut req.host_state, Box::new(()));
|
||||
|
||||
let mut handle = {
|
||||
let instance = Instance::create_raw(
|
||||
&req.module,
|
||||
req.unique_id,
|
||||
&*req.wasm_data,
|
||||
memories,
|
||||
tables,
|
||||
host_state,
|
||||
);
|
||||
let instance =
|
||||
Instance::create_raw(req.runtime_info.clone(), memories, tables, host_state);
|
||||
let layout = instance.alloc_layout();
|
||||
let instance_ptr = alloc::alloc(layout) as *mut Instance;
|
||||
if instance_ptr.is_null() {
|
||||
|
||||
@@ -11,8 +11,8 @@ use super::{
|
||||
initialize_instance, initialize_vmcontext, InstanceAllocationRequest, InstanceAllocator,
|
||||
InstanceHandle, InstantiationError,
|
||||
};
|
||||
use crate::MemFdSlot;
|
||||
use crate::{instance::Instance, Memory, Mmap, ModuleMemFds, Table};
|
||||
use crate::{instance::Instance, Memory, Mmap, Table};
|
||||
use crate::{MemFdSlot, ModuleRuntimeInfo};
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use libc::c_void;
|
||||
use std::convert::TryFrom;
|
||||
@@ -350,20 +350,18 @@ impl InstancePool {
|
||||
) -> Result<InstanceHandle, InstantiationError> {
|
||||
let host_state = std::mem::replace(&mut req.host_state, Box::new(()));
|
||||
let instance_data = Instance::create_raw(
|
||||
&req.module,
|
||||
req.unique_id,
|
||||
&*req.wasm_data,
|
||||
req.runtime_info.clone(),
|
||||
PrimaryMap::default(),
|
||||
PrimaryMap::default(),
|
||||
host_state,
|
||||
);
|
||||
|
||||
let instance = self.instance(index);
|
||||
|
||||
// Instances are uninitialized memory at first; we need to
|
||||
// write an empty but initialized `Instance` struct into the
|
||||
// chosen slot before we do anything else with it. (This is
|
||||
// paired with a `drop_in_place` in deallocate below.)
|
||||
let instance = self.instance(index);
|
||||
|
||||
std::ptr::write(instance as _, instance_data);
|
||||
|
||||
// set_instance_memories and _tables will need the store before we can completely
|
||||
@@ -376,8 +374,8 @@ impl InstancePool {
|
||||
index,
|
||||
instance,
|
||||
&self.memories,
|
||||
req.memfds,
|
||||
self.memories.max_wasm_pages,
|
||||
&req.runtime_info,
|
||||
)?;
|
||||
|
||||
Self::set_instance_tables(
|
||||
@@ -402,7 +400,7 @@ impl InstancePool {
|
||||
if alloc.is_empty() {
|
||||
return Err(InstantiationError::Limit(self.max_instances as u32));
|
||||
}
|
||||
alloc.alloc(req.unique_id).index()
|
||||
alloc.alloc(req.runtime_info.unique_id()).index()
|
||||
};
|
||||
|
||||
unsafe {
|
||||
@@ -504,10 +502,10 @@ impl InstancePool {
|
||||
instance_idx: usize,
|
||||
instance: &mut Instance,
|
||||
memories: &MemoryPool,
|
||||
maybe_memfds: Option<&Arc<ModuleMemFds>>,
|
||||
max_pages: u64,
|
||||
runtime_info: &Arc<dyn ModuleRuntimeInfo>,
|
||||
) -> Result<(), InstantiationError> {
|
||||
let module = instance.module.as_ref();
|
||||
let module = instance.runtime_info.module();
|
||||
|
||||
assert!(instance.memories.is_empty());
|
||||
|
||||
@@ -527,8 +525,10 @@ impl InstancePool {
|
||||
)
|
||||
};
|
||||
|
||||
if let Some(memfds) = maybe_memfds {
|
||||
let image = memfds.get_memory_image(defined_index);
|
||||
if let Some(image) = runtime_info
|
||||
.memfd_image(defined_index)
|
||||
.map_err(|err| InstantiationError::Resource(err.into()))?
|
||||
{
|
||||
let mut slot = memories.take_memfd_slot(instance_idx, defined_index);
|
||||
let initial_size = plan.memory.minimum * WASM_PAGE_SIZE as u64;
|
||||
|
||||
@@ -545,7 +545,7 @@ impl InstancePool {
|
||||
// the process to continue, because we never perform a
|
||||
// mmap that would leave an open space for someone
|
||||
// else to come in and map something.
|
||||
slot.instantiate(initial_size as usize, image)
|
||||
slot.instantiate(initial_size as usize, Some(image))
|
||||
.map_err(|e| InstantiationError::Resource(e.into()))?;
|
||||
|
||||
instance.memories.push(
|
||||
@@ -574,11 +574,11 @@ impl InstancePool {
|
||||
mut tables: impl Iterator<Item = *mut usize>,
|
||||
max_elements: u32,
|
||||
) -> Result<(), InstantiationError> {
|
||||
let module = instance.module.as_ref();
|
||||
let module = instance.runtime_info.module();
|
||||
|
||||
assert!(instance.tables.is_empty());
|
||||
|
||||
for plan in (&module.table_plans.values().as_slice()[module.num_imported_tables..]).iter() {
|
||||
for (_, plan) in module.table_plans.iter().skip(module.num_imported_tables) {
|
||||
let base = tables.next().unwrap();
|
||||
|
||||
commit_table_pages(
|
||||
@@ -1130,10 +1130,10 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::{Imports, StorePtr, VMSharedSignatureIndex};
|
||||
use crate::{CompiledModuleId, Imports, MemoryMemFd, StorePtr, VMSharedSignatureIndex};
|
||||
use wasmtime_environ::{
|
||||
EntityRef, Global, GlobalInit, Memory, MemoryPlan, ModuleType, SignatureIndex, Table,
|
||||
TablePlan, TableStyle, WasmType,
|
||||
DefinedFuncIndex, DefinedMemoryIndex, EntityRef, FunctionInfo, Global, GlobalInit, Memory,
|
||||
MemoryPlan, ModuleType, SignatureIndex, Table, TablePlan, TableStyle, WasmType,
|
||||
};
|
||||
|
||||
#[test]
|
||||
@@ -1422,6 +1422,42 @@ mod test {
|
||||
);
|
||||
}
|
||||
|
||||
pub(crate) fn empty_runtime_info(
|
||||
module: Arc<wasmtime_environ::Module>,
|
||||
) -> Arc<dyn ModuleRuntimeInfo> {
|
||||
struct RuntimeInfo(Arc<wasmtime_environ::Module>);
|
||||
|
||||
impl ModuleRuntimeInfo for RuntimeInfo {
|
||||
fn module(&self) -> &Arc<wasmtime_environ::Module> {
|
||||
&self.0
|
||||
}
|
||||
fn image_base(&self) -> usize {
|
||||
0
|
||||
}
|
||||
fn function_info(&self, _: DefinedFuncIndex) -> &FunctionInfo {
|
||||
unimplemented!()
|
||||
}
|
||||
fn signature(&self, _: SignatureIndex) -> VMSharedSignatureIndex {
|
||||
unimplemented!()
|
||||
}
|
||||
fn memfd_image(
|
||||
&self,
|
||||
_: DefinedMemoryIndex,
|
||||
) -> anyhow::Result<Option<&Arc<MemoryMemFd>>> {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn unique_id(&self) -> Option<CompiledModuleId> {
|
||||
None
|
||||
}
|
||||
fn wasm_data(&self) -> &[u8] {
|
||||
&[]
|
||||
}
|
||||
}
|
||||
|
||||
Arc::new(RuntimeInfo(module))
|
||||
}
|
||||
|
||||
#[cfg(target_pointer_width = "64")]
|
||||
#[test]
|
||||
fn test_instance_pool() -> Result<()> {
|
||||
@@ -1462,27 +1498,20 @@ mod test {
|
||||
|
||||
let mut handles = Vec::new();
|
||||
let module = Arc::new(Module::default());
|
||||
let functions = &PrimaryMap::new();
|
||||
|
||||
for _ in (0..3).rev() {
|
||||
handles.push(
|
||||
instances
|
||||
.allocate(InstanceAllocationRequest {
|
||||
module: &module,
|
||||
unique_id: None,
|
||||
image_base: 0,
|
||||
functions,
|
||||
runtime_info: &empty_runtime_info(module.clone()),
|
||||
imports: Imports {
|
||||
functions: &[],
|
||||
tables: &[],
|
||||
memories: &[],
|
||||
globals: &[],
|
||||
},
|
||||
shared_signatures: VMSharedSignatureIndex::default().into(),
|
||||
host_state: Box::new(()),
|
||||
store: StorePtr::empty(),
|
||||
wasm_data: &[],
|
||||
memfds: None,
|
||||
})
|
||||
.expect("allocation should succeed"),
|
||||
);
|
||||
@@ -1494,21 +1523,15 @@ mod test {
|
||||
);
|
||||
|
||||
match instances.allocate(InstanceAllocationRequest {
|
||||
module: &module,
|
||||
unique_id: None,
|
||||
functions,
|
||||
image_base: 0,
|
||||
runtime_info: &empty_runtime_info(module),
|
||||
imports: Imports {
|
||||
functions: &[],
|
||||
tables: &[],
|
||||
memories: &[],
|
||||
globals: &[],
|
||||
},
|
||||
shared_signatures: VMSharedSignatureIndex::default().into(),
|
||||
host_state: Box::new(()),
|
||||
store: StorePtr::empty(),
|
||||
wasm_data: &[],
|
||||
memfds: None,
|
||||
}) {
|
||||
Err(InstantiationError::Limit(3)) => {}
|
||||
_ => panic!("unexpected error"),
|
||||
|
||||
@@ -262,7 +262,9 @@ unsafe fn initialize_wasm_page(
|
||||
page_index: usize,
|
||||
) -> Result<()> {
|
||||
// Check for paged initialization and copy the page if present in the initialization data
|
||||
if let MemoryInitialization::Paged { map, .. } = &instance.module.memory_initialization {
|
||||
if let MemoryInitialization::Paged { map, .. } =
|
||||
&instance.runtime_info.module().memory_initialization
|
||||
{
|
||||
let memory_index = instance.module().memory_index(memory_index);
|
||||
let pages = &map[memory_index];
|
||||
|
||||
@@ -437,11 +439,11 @@ mod test {
|
||||
use super::*;
|
||||
use crate::{
|
||||
Imports, InstanceAllocationRequest, InstanceLimits, ModuleLimits,
|
||||
PoolingAllocationStrategy, Store, StorePtr, VMSharedSignatureIndex,
|
||||
PoolingAllocationStrategy, Store, StorePtr,
|
||||
};
|
||||
use std::sync::atomic::AtomicU64;
|
||||
use std::sync::Arc;
|
||||
use wasmtime_environ::{Memory, MemoryPlan, MemoryStyle, Module, PrimaryMap, Tunables};
|
||||
use wasmtime_environ::{Memory, MemoryPlan, MemoryStyle, Module, Tunables};
|
||||
|
||||
#[cfg(target_pointer_width = "64")]
|
||||
#[test]
|
||||
@@ -573,28 +575,21 @@ mod test {
|
||||
|
||||
let mut handles = Vec::new();
|
||||
let module = Arc::new(module);
|
||||
let functions = &PrimaryMap::new();
|
||||
|
||||
// Allocate the maximum number of instances with the maximum number of memories
|
||||
for _ in 0..instances.max_instances {
|
||||
handles.push(
|
||||
instances
|
||||
.allocate(InstanceAllocationRequest {
|
||||
module: &module,
|
||||
memfds: None,
|
||||
unique_id: None,
|
||||
image_base: 0,
|
||||
functions,
|
||||
runtime_info: &super::super::test::empty_runtime_info(module.clone()),
|
||||
imports: Imports {
|
||||
functions: &[],
|
||||
tables: &[],
|
||||
memories: &[],
|
||||
globals: &[],
|
||||
},
|
||||
shared_signatures: VMSharedSignatureIndex::default().into(),
|
||||
host_state: Box::new(()),
|
||||
store: StorePtr::new(&mut mock_store),
|
||||
wasm_data: &[],
|
||||
})
|
||||
.expect("instance should allocate"),
|
||||
);
|
||||
|
||||
@@ -22,8 +22,13 @@
|
||||
#![cfg_attr(not(memfd), allow(unused_variables, unreachable_code))]
|
||||
|
||||
use std::sync::atomic::AtomicU64;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Error;
|
||||
use wasmtime_environ::DefinedFuncIndex;
|
||||
use wasmtime_environ::DefinedMemoryIndex;
|
||||
use wasmtime_environ::FunctionInfo;
|
||||
use wasmtime_environ::SignatureIndex;
|
||||
|
||||
mod export;
|
||||
mod externref;
|
||||
@@ -145,3 +150,42 @@ pub unsafe trait Store {
|
||||
/// completely semantically transparent. Returns the new deadline.
|
||||
fn new_epoch(&mut self) -> Result<u64, Error>;
|
||||
}
|
||||
|
||||
/// Functionality required by this crate for a particular module. This
|
||||
/// is chiefly needed for lazy initialization of various bits of
|
||||
/// instance state.
|
||||
///
|
||||
/// When an instance is created, it holds an Arc<dyn ModuleRuntimeInfo>
|
||||
/// so that it can get to signatures, metadata on functions, memfd and
|
||||
/// funcref-table images, etc. All of these things are ordinarily known
|
||||
/// by the higher-level layers of Wasmtime. Specifically, the main
|
||||
/// implementation of this trait is provided by
|
||||
/// `wasmtime::module::ModuleInner`. Since the runtime crate sits at
|
||||
/// the bottom of the dependence DAG though, we don't know or care about
|
||||
/// that; we just need some implementor of this trait for each
|
||||
/// allocation request.
|
||||
pub trait ModuleRuntimeInfo: Send + Sync + 'static {
|
||||
/// The underlying Module.
|
||||
fn module(&self) -> &Arc<wasmtime_environ::Module>;
|
||||
|
||||
/// The signatures.
|
||||
fn signature(&self, index: SignatureIndex) -> VMSharedSignatureIndex;
|
||||
|
||||
/// The base address of where JIT functions are located.
|
||||
fn image_base(&self) -> usize;
|
||||
|
||||
/// Descriptors about each compiled function, such as the offset from
|
||||
/// `image_base`.
|
||||
fn function_info(&self, func_index: DefinedFuncIndex) -> &FunctionInfo;
|
||||
|
||||
/// memfd images, if any, for this module.
|
||||
fn memfd_image(&self, memory: DefinedMemoryIndex) -> anyhow::Result<Option<&Arc<MemoryMemFd>>>;
|
||||
|
||||
/// A unique ID for this particular module. This can be used to
|
||||
/// allow for fastpaths to optimize a "re-instantiate the same
|
||||
/// module again" case.
|
||||
fn unique_id(&self) -> Option<CompiledModuleId>;
|
||||
|
||||
/// A slice pointing to all data that is referenced by this instance.
|
||||
fn wasm_data(&self) -> &[u8];
|
||||
}
|
||||
|
||||
@@ -64,7 +64,9 @@ use crate::vmcontext::{VMCallerCheckedAnyfunc, VMContext};
|
||||
use backtrace::Backtrace;
|
||||
use std::mem;
|
||||
use std::ptr::{self, NonNull};
|
||||
use wasmtime_environ::{DataIndex, ElemIndex, GlobalIndex, MemoryIndex, TableIndex, TrapCode};
|
||||
use wasmtime_environ::{
|
||||
DataIndex, ElemIndex, FuncIndex, GlobalIndex, MemoryIndex, TableIndex, TrapCode,
|
||||
};
|
||||
|
||||
const TOINT_32: f32 = 1.0 / f32::EPSILON;
|
||||
const TOINT_64: f64 = 1.0 / f64::EPSILON;
|
||||
@@ -293,7 +295,9 @@ pub unsafe extern "C" fn table_copy(
|
||||
let src_table_index = TableIndex::from_u32(src_table_index);
|
||||
let instance = (*vmctx).instance_mut();
|
||||
let dst_table = instance.get_table(dst_table_index);
|
||||
let src_table = instance.get_table(src_table_index);
|
||||
// Lazy-initialize the whole range in the source table first.
|
||||
let src_range = src..(src.checked_add(len).unwrap_or(u32::MAX));
|
||||
let src_table = instance.get_table_with_lazy_init(src_table_index, src_range);
|
||||
Table::copy(dst_table, src_table, dst, src, len)
|
||||
};
|
||||
if let Err(trap) = result {
|
||||
@@ -386,6 +390,15 @@ pub unsafe extern "C" fn memory_init(
|
||||
}
|
||||
}
|
||||
|
||||
/// Implementation of `ref.func`.
|
||||
pub unsafe extern "C" fn ref_func(vmctx: *mut VMContext, func_index: u32) -> *mut u8 {
|
||||
let instance = (*vmctx).instance_mut();
|
||||
let anyfunc = instance
|
||||
.get_caller_checked_anyfunc(FuncIndex::from_u32(func_index))
|
||||
.expect("ref_func: caller_checked_anyfunc should always be available for given func index");
|
||||
anyfunc as *mut _
|
||||
}
|
||||
|
||||
/// Implementation of `data.drop`.
|
||||
pub unsafe extern "C" fn data_drop(vmctx: *mut VMContext, data_index: u32) {
|
||||
let data_index = DataIndex::from_u32(data_index);
|
||||
@@ -393,6 +406,22 @@ pub unsafe extern "C" fn data_drop(vmctx: *mut VMContext, data_index: u32) {
|
||||
instance.data_drop(data_index)
|
||||
}
|
||||
|
||||
/// Returns a table entry after lazily initializing it.
|
||||
pub unsafe extern "C" fn table_get_lazy_init_funcref(
|
||||
vmctx: *mut VMContext,
|
||||
table_index: u32,
|
||||
index: u32,
|
||||
) -> *mut u8 {
|
||||
let instance = (*vmctx).instance_mut();
|
||||
let table_index = TableIndex::from_u32(table_index);
|
||||
let table = instance.get_table_with_lazy_init(table_index, std::iter::once(index));
|
||||
let elem = (*table)
|
||||
.get(index)
|
||||
.expect("table access already bounds-checked");
|
||||
|
||||
elem.into_ref_asserting_initialized() as *mut _
|
||||
}
|
||||
|
||||
/// Drop a `VMExternRef`.
|
||||
pub unsafe extern "C" fn drop_externref(externref: *mut u8) {
|
||||
let externref = externref as *mut crate::externref::VMExternData;
|
||||
|
||||
@@ -22,10 +22,8 @@ pub struct ModuleMemFds {
|
||||
const MAX_MEMFD_IMAGE_SIZE: usize = 1024 * 1024 * 1024; // limit to 1GiB.
|
||||
|
||||
impl ModuleMemFds {
|
||||
pub(crate) fn get_memory_image(
|
||||
&self,
|
||||
defined_index: DefinedMemoryIndex,
|
||||
) -> Option<&Arc<MemoryMemFd>> {
|
||||
/// Get the MemoryMemFd for a given memory.
|
||||
pub fn get_memory_image(&self, defined_index: DefinedMemoryIndex) -> Option<&Arc<MemoryMemFd>> {
|
||||
self.memories[defined_index].as_ref()
|
||||
}
|
||||
}
|
||||
@@ -66,7 +64,7 @@ impl ModuleMemFds {
|
||||
/// Create a new `ModuleMemFds` for the given module. This can be
|
||||
/// passed in as part of a `InstanceAllocationRequest` to speed up
|
||||
/// instantiation and execution by using memfd-backed memories.
|
||||
pub fn new(module: &Module, wasm_data: &[u8]) -> Result<Option<Arc<ModuleMemFds>>> {
|
||||
pub fn new(module: &Module, wasm_data: &[u8]) -> Result<Option<ModuleMemFds>> {
|
||||
let page_size = region::page::size() as u64;
|
||||
let page_align = |x: u64| x & !(page_size - 1);
|
||||
let page_align_up = |x: u64| page_align(x + page_size - 1);
|
||||
@@ -198,7 +196,7 @@ impl ModuleMemFds {
|
||||
assert_eq!(idx, defined_memory);
|
||||
}
|
||||
|
||||
Ok(Some(Arc::new(ModuleMemFds { memories })))
|
||||
Ok(Some(ModuleMemFds { memories }))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -399,7 +397,7 @@ impl MemFdSlot {
|
||||
|
||||
// The initial memory image, if given. If not, we just get a
|
||||
// memory filled with zeroes.
|
||||
if let Some(image) = maybe_image {
|
||||
if let Some(image) = maybe_image.as_ref() {
|
||||
assert!(image.offset.checked_add(image.len).unwrap() <= initial_size_bytes);
|
||||
if image.len > 0 {
|
||||
unsafe {
|
||||
|
||||
@@ -19,12 +19,12 @@ impl ModuleMemFds {
|
||||
/// Construct a new set of memfd images. This variant is used
|
||||
/// when memfd support is not included; it always returns no
|
||||
/// images.
|
||||
pub fn new(_: &Module, _: &[u8]) -> Result<Option<Arc<ModuleMemFds>>> {
|
||||
pub fn new(_: &Module, _: &[u8]) -> Result<Option<ModuleMemFds>> {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
/// Get the memfd image for a particular memory.
|
||||
pub(crate) fn get_memory_image(&self, _: DefinedMemoryIndex) -> Option<&Arc<MemoryMemFd>> {
|
||||
pub fn get_memory_image(&self, _: DefinedMemoryIndex) -> Option<&Arc<MemoryMemFd>> {
|
||||
// Should be unreachable because the `Self` type is
|
||||
// uninhabitable.
|
||||
match *self {}
|
||||
|
||||
@@ -8,7 +8,7 @@ use anyhow::{bail, format_err, Error, Result};
|
||||
use std::convert::{TryFrom, TryInto};
|
||||
use std::ops::Range;
|
||||
use std::ptr;
|
||||
use wasmtime_environ::{TablePlan, TrapCode, WasmType};
|
||||
use wasmtime_environ::{TablePlan, TrapCode, WasmType, FUNCREF_INIT_BIT, FUNCREF_MASK};
|
||||
|
||||
/// An element going into or coming out of a table.
|
||||
///
|
||||
@@ -19,6 +19,11 @@ pub enum TableElement {
|
||||
FuncRef(*mut VMCallerCheckedAnyfunc),
|
||||
/// An `exrernref`.
|
||||
ExternRef(Option<VMExternRef>),
|
||||
/// An uninitialized funcref value. This should never be exposed
|
||||
/// beyond the `wasmtime` crate boundary; the upper-level code
|
||||
/// (which has access to the info needed for lazy initialization)
|
||||
/// will replace it when fetched.
|
||||
UninitFunc,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq)]
|
||||
@@ -33,41 +38,43 @@ unsafe impl Send for TableElement where VMExternRef: Send {}
|
||||
unsafe impl Sync for TableElement where VMExternRef: Sync {}
|
||||
|
||||
impl TableElement {
|
||||
/// Consumes the given raw pointer into a table element.
|
||||
/// Consumes the given raw table element value into a table element.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// This is unsafe as it will *not* clone any externref, leaving the reference count unchanged.
|
||||
///
|
||||
/// This should only be used if the raw pointer is no longer in use.
|
||||
unsafe fn from_raw(ty: TableElementType, ptr: usize) -> Self {
|
||||
match ty {
|
||||
TableElementType::Func => Self::FuncRef(ptr as _),
|
||||
TableElementType::Extern => Self::ExternRef(if ptr == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(VMExternRef::from_raw(ptr as *mut u8))
|
||||
}),
|
||||
unsafe fn from_table_value(ty: TableElementType, ptr: usize) -> Self {
|
||||
match (ty, ptr) {
|
||||
(TableElementType::Func, 0) => Self::UninitFunc,
|
||||
(TableElementType::Func, ptr) => Self::FuncRef((ptr & FUNCREF_MASK) as _),
|
||||
(TableElementType::Extern, 0) => Self::ExternRef(None),
|
||||
(TableElementType::Extern, ptr) => {
|
||||
Self::ExternRef(Some(VMExternRef::from_raw(ptr as *mut u8)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Clones a table element from the underlying raw pointer.
|
||||
/// Clones a table element from the underlying table element.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// This is unsafe as it will clone any externref, incrementing the reference count.
|
||||
unsafe fn clone_from_raw(ty: TableElementType, ptr: usize) -> Self {
|
||||
match ty {
|
||||
TableElementType::Func => Self::FuncRef(ptr as _),
|
||||
TableElementType::Extern => Self::ExternRef(if ptr == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(VMExternRef::clone_from_raw(ptr as *mut u8))
|
||||
}),
|
||||
unsafe fn clone_from_table_value(ty: TableElementType, ptr: usize) -> Self {
|
||||
match (ty, ptr) {
|
||||
(TableElementType::Func, 0) => Self::UninitFunc,
|
||||
(TableElementType::Func, ptr) => Self::FuncRef((ptr & FUNCREF_MASK) as _),
|
||||
(TableElementType::Extern, 0) => Self::ExternRef(None),
|
||||
(TableElementType::Extern, ptr) => {
|
||||
Self::ExternRef(Some(VMExternRef::clone_from_raw(ptr as *mut u8)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Consumes a table element into a raw pointer.
|
||||
/// Consumes a table element into a raw table element value. This
|
||||
/// includes any tag bits or other storage details that we
|
||||
/// maintain in the table slot.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
@@ -75,12 +82,41 @@ impl TableElement {
|
||||
/// the reference count.
|
||||
///
|
||||
/// Use `from_raw` to properly drop any table elements stored as raw pointers.
|
||||
unsafe fn into_raw(self) -> usize {
|
||||
unsafe fn into_table_value(self) -> usize {
|
||||
match self {
|
||||
Self::FuncRef(e) => e as _,
|
||||
Self::UninitFunc => 0,
|
||||
Self::FuncRef(e) => (e as usize) | FUNCREF_INIT_BIT,
|
||||
Self::ExternRef(e) => e.map_or(0, |e| e.into_raw() as usize),
|
||||
}
|
||||
}
|
||||
|
||||
/// Consumes a table element into a pointer/reference, as it
|
||||
/// exists outside the table itself. This strips off any tag bits
|
||||
/// or other information that only lives inside the table.
|
||||
///
|
||||
/// Can only be done to an initialized table element; lazy init
|
||||
/// must occur first. (In other words, lazy values do not survive
|
||||
/// beyond the table, as every table read path initializes them.)
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The same warnings as for `into_table_values()` apply.
|
||||
pub(crate) unsafe fn into_ref_asserting_initialized(self) -> usize {
|
||||
match self {
|
||||
Self::FuncRef(e) => (e as usize),
|
||||
Self::ExternRef(e) => e.map_or(0, |e| e.into_raw() as usize),
|
||||
Self::UninitFunc => panic!("Uninitialized table element value outside of table slot"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Indicates whether this value is the "uninitialized element"
|
||||
/// value.
|
||||
pub(crate) fn is_uninit(&self) -> bool {
|
||||
match self {
|
||||
Self::UninitFunc => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<*mut VMCallerCheckedAnyfunc> for TableElement {
|
||||
@@ -334,7 +370,7 @@ impl Table {
|
||||
pub fn get(&self, index: u32) -> Option<TableElement> {
|
||||
self.elements()
|
||||
.get(index as usize)
|
||||
.map(|p| unsafe { TableElement::clone_from_raw(self.element_type(), *p) })
|
||||
.map(|p| unsafe { TableElement::clone_from_table_value(self.element_type(), *p) })
|
||||
}
|
||||
|
||||
/// Set reference to the specified element.
|
||||
@@ -436,10 +472,10 @@ impl Table {
|
||||
fn set_raw(ty: TableElementType, elem: &mut usize, val: TableElement) {
|
||||
unsafe {
|
||||
let old = *elem;
|
||||
*elem = val.into_raw();
|
||||
*elem = val.into_table_value();
|
||||
|
||||
// Drop the old element
|
||||
let _ = TableElement::from_raw(ty, old);
|
||||
let _ = TableElement::from_table_value(ty, old);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -465,7 +501,7 @@ impl Table {
|
||||
let dst = dst_table.elements_mut();
|
||||
let src = src_table.elements();
|
||||
for (s, d) in src_range.zip(dst_range) {
|
||||
let elem = unsafe { TableElement::clone_from_raw(ty, src[s]) };
|
||||
let elem = unsafe { TableElement::clone_from_table_value(ty, src[s]) };
|
||||
Self::set_raw(ty, &mut dst[d], elem);
|
||||
}
|
||||
}
|
||||
@@ -485,12 +521,12 @@ impl Table {
|
||||
// ranges
|
||||
if dst_range.start <= src_range.start {
|
||||
for (s, d) in src_range.zip(dst_range) {
|
||||
let elem = unsafe { TableElement::clone_from_raw(ty, dst[s]) };
|
||||
let elem = unsafe { TableElement::clone_from_table_value(ty, dst[s]) };
|
||||
Self::set_raw(ty, &mut dst[d], elem);
|
||||
}
|
||||
} else {
|
||||
for (s, d) in src_range.rev().zip(dst_range.rev()) {
|
||||
let elem = unsafe { TableElement::clone_from_raw(ty, dst[s]) };
|
||||
let elem = unsafe { TableElement::clone_from_table_value(ty, dst[s]) };
|
||||
Self::set_raw(ty, &mut dst[d], elem);
|
||||
}
|
||||
}
|
||||
@@ -510,7 +546,7 @@ impl Drop for Table {
|
||||
|
||||
// Properly drop any table elements stored in the table
|
||||
for element in self.elements() {
|
||||
drop(unsafe { TableElement::from_raw(ty, *element) });
|
||||
drop(unsafe { TableElement::from_table_value(ty, *element) });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -477,7 +477,7 @@ impl Table {
|
||||
let init = init.into_table_element(store, ty.element())?;
|
||||
unsafe {
|
||||
let table = Table::from_wasmtime_table(wasmtime_export, store);
|
||||
(*table.wasmtime_table(store))
|
||||
(*table.wasmtime_table(store, std::iter::empty()))
|
||||
.fill(0, init, ty.minimum())
|
||||
.map_err(Trap::from_runtime)?;
|
||||
|
||||
@@ -497,12 +497,16 @@ impl Table {
|
||||
TableType::from_wasmtime_table(ty)
|
||||
}
|
||||
|
||||
fn wasmtime_table(&self, store: &mut StoreOpaque) -> *mut runtime::Table {
|
||||
fn wasmtime_table(
|
||||
&self,
|
||||
store: &mut StoreOpaque,
|
||||
lazy_init_range: impl Iterator<Item = u32>,
|
||||
) -> *mut runtime::Table {
|
||||
unsafe {
|
||||
let export = &store[self.0];
|
||||
let mut handle = InstanceHandle::from_vmctx(export.vmctx);
|
||||
let idx = handle.table_index(&*export.definition);
|
||||
handle.get_defined_table(idx)
|
||||
handle.get_defined_table_with_lazy_init(idx, lazy_init_range)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -515,7 +519,7 @@ impl Table {
|
||||
/// Panics if `store` does not own this table.
|
||||
pub fn get(&self, mut store: impl AsContextMut, index: u32) -> Option<Val> {
|
||||
let store = store.as_context_mut().0;
|
||||
let table = self.wasmtime_table(store);
|
||||
let table = self.wasmtime_table(store, std::iter::once(index));
|
||||
unsafe {
|
||||
match (*table).get(index)? {
|
||||
runtime::TableElement::FuncRef(f) => {
|
||||
@@ -526,6 +530,9 @@ impl Table {
|
||||
runtime::TableElement::ExternRef(Some(x)) => {
|
||||
Some(Val::ExternRef(Some(ExternRef { inner: x })))
|
||||
}
|
||||
runtime::TableElement::UninitFunc => {
|
||||
unreachable!("lazy init above should have converted UninitFunc")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -545,7 +552,7 @@ impl Table {
|
||||
let store = store.as_context_mut().0;
|
||||
let ty = self.ty(&store).element().clone();
|
||||
let val = val.into_table_element(store, ty)?;
|
||||
let table = self.wasmtime_table(store);
|
||||
let table = self.wasmtime_table(store, std::iter::empty());
|
||||
unsafe {
|
||||
(*table)
|
||||
.set(index, val)
|
||||
@@ -591,7 +598,7 @@ impl Table {
|
||||
let store = store.as_context_mut().0;
|
||||
let ty = self.ty(&store).element().clone();
|
||||
let init = init.into_table_element(store, ty)?;
|
||||
let table = self.wasmtime_table(store);
|
||||
let table = self.wasmtime_table(store, std::iter::empty());
|
||||
unsafe {
|
||||
match (*table).grow(delta, init, store)? {
|
||||
Some(size) => {
|
||||
@@ -656,10 +663,11 @@ impl Table {
|
||||
bail!("tables do not have the same element type");
|
||||
}
|
||||
|
||||
let dst = dst_table.wasmtime_table(store);
|
||||
let src = src_table.wasmtime_table(store);
|
||||
let dst_table = dst_table.wasmtime_table(store, std::iter::empty());
|
||||
let src_range = src_index..(src_index.checked_add(len).unwrap_or(u32::MAX));
|
||||
let src_table = src_table.wasmtime_table(store, src_range);
|
||||
unsafe {
|
||||
runtime::Table::copy(dst, src, dst_index, src_index, len)
|
||||
runtime::Table::copy(dst_table, src_table, dst_index, src_index, len)
|
||||
.map_err(Trap::from_runtime)?;
|
||||
}
|
||||
Ok(())
|
||||
@@ -686,7 +694,7 @@ impl Table {
|
||||
let ty = self.ty(&store).element().clone();
|
||||
let val = val.into_table_element(store, ty)?;
|
||||
|
||||
let table = self.wasmtime_table(store);
|
||||
let table = self.wasmtime_table(store, std::iter::empty());
|
||||
unsafe {
|
||||
(*table).fill(dst, val, len).map_err(Trap::from_runtime)?;
|
||||
}
|
||||
|
||||
@@ -2060,7 +2060,7 @@ impl HostFunc {
|
||||
|
||||
/// Requires that this function's signature is already registered within
|
||||
/// `Engine`. This happens automatically during the above two constructors.
|
||||
fn _new(engine: &Engine, instance: InstanceHandle, trampoline: VMTrampoline) -> Self {
|
||||
fn _new(engine: &Engine, mut instance: InstanceHandle, trampoline: VMTrampoline) -> Self {
|
||||
let idx = EntityIndex::Function(FuncIndex::from_u32(0));
|
||||
let export = match instance.lookup_by_declaration(&idx) {
|
||||
wasmtime_runtime::Export::Function(f) => f,
|
||||
|
||||
@@ -328,13 +328,16 @@ impl Instance {
|
||||
// Instantiated instances will lazily fill in exports, so we process
|
||||
// all that lazy logic here.
|
||||
InstanceData::Instantiated { id, exports, .. } => {
|
||||
let instance = store.instance(*id);
|
||||
let (i, _, index) = instance.module().exports.get_full(name)?;
|
||||
let id = *id;
|
||||
let instance = store.instance(id);
|
||||
let (i, _, &index) = instance.module().exports.get_full(name)?;
|
||||
if let Some(export) = &exports[i] {
|
||||
return Some(export.clone());
|
||||
}
|
||||
|
||||
let instance = store.instance_mut(id); // reborrow the &mut Instancehandle
|
||||
let item = unsafe {
|
||||
Extern::from_wasmtime_export(instance.lookup_by_declaration(index), store)
|
||||
Extern::from_wasmtime_export(instance.lookup_by_declaration(&index), store)
|
||||
};
|
||||
let exports = match &mut store[self.0] {
|
||||
InstanceData::Instantiated { exports, .. } => exports,
|
||||
@@ -690,9 +693,6 @@ impl<'a> Instantiator<'a> {
|
||||
// properly referenced while in use by the store.
|
||||
store.modules_mut().register(&self.cur.module);
|
||||
|
||||
// Initialize any memfd images now.
|
||||
let memfds = self.cur.module.memfds()?;
|
||||
|
||||
unsafe {
|
||||
// The first thing we do is issue an instance allocation request
|
||||
// to the instance allocator. This, on success, will give us an
|
||||
@@ -704,21 +704,16 @@ impl<'a> Instantiator<'a> {
|
||||
// this instance, so we determine what the ID is and then assert
|
||||
// it's the same later when we do actually insert it.
|
||||
let instance_to_be = store.store_data().next_id::<InstanceData>();
|
||||
|
||||
let mut instance_handle =
|
||||
store
|
||||
.engine()
|
||||
.allocator()
|
||||
.allocate(InstanceAllocationRequest {
|
||||
module: compiled_module.module(),
|
||||
unique_id: Some(compiled_module.unique_id()),
|
||||
memfds,
|
||||
image_base: compiled_module.code().as_ptr() as usize,
|
||||
functions: compiled_module.functions(),
|
||||
runtime_info: &self.cur.module.runtime_info(),
|
||||
imports: self.cur.build(),
|
||||
shared_signatures: self.cur.module.signatures().as_module_map().into(),
|
||||
host_state: Box::new(Instance(instance_to_be)),
|
||||
store: StorePtr::new(store.traitobj()),
|
||||
wasm_data: compiled_module.wasm_data(),
|
||||
})?;
|
||||
|
||||
// The instance still has lots of setup, for example
|
||||
@@ -821,7 +816,7 @@ impl<'a> Instantiator<'a> {
|
||||
};
|
||||
// If a start function is present, invoke it. Make sure we use all the
|
||||
// trap-handling configuration in `store` as well.
|
||||
let instance = store.0.instance(id);
|
||||
let instance = store.0.instance_mut(id);
|
||||
let f = match instance.lookup_by_declaration(&EntityIndex::Function(start)) {
|
||||
wasmtime_runtime::Export::Function(f) => f,
|
||||
_ => unreachable!(), // valid modules shouldn't hit this
|
||||
|
||||
@@ -10,9 +10,12 @@ use std::mem;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use wasmparser::{Parser, ValidPayload, Validator};
|
||||
use wasmtime_environ::{ModuleEnvironment, ModuleIndex, PrimaryMap};
|
||||
use wasmtime_environ::{
|
||||
DefinedFuncIndex, DefinedMemoryIndex, FunctionInfo, ModuleEnvironment, ModuleIndex, PrimaryMap,
|
||||
SignatureIndex,
|
||||
};
|
||||
use wasmtime_jit::{CompiledModule, CompiledModuleInfo, MmapVec, TypeTables};
|
||||
use wasmtime_runtime::ModuleMemFds;
|
||||
use wasmtime_runtime::{CompiledModuleId, MemoryMemFd, ModuleMemFds, VMSharedSignatureIndex};
|
||||
|
||||
mod registry;
|
||||
mod serialization;
|
||||
@@ -110,10 +113,10 @@ struct ModuleInner {
|
||||
/// Registered shared signature for the module.
|
||||
signatures: Arc<SignatureCollection>,
|
||||
/// A set of memfd images for memories, if any. Note that module
|
||||
/// instantiation (hence the need for lazy init) may happen for the
|
||||
/// same module concurrently in multiple Stores, so we use a
|
||||
/// instantiation (hence the need for lazy init) may happen for
|
||||
/// the same module concurrently in multiple Stores, so we use a
|
||||
/// OnceCell.
|
||||
memfds: OnceCell<Option<Arc<ModuleMemFds>>>,
|
||||
memfds: OnceCell<Option<ModuleMemFds>>,
|
||||
}
|
||||
|
||||
impl Module {
|
||||
@@ -421,6 +424,11 @@ impl Module {
|
||||
translation.try_paged_init();
|
||||
}
|
||||
|
||||
// Attempt to convert table initializer segments to
|
||||
// FuncTable representation where possible, to enable
|
||||
// table lazy init.
|
||||
translation.try_func_table_init();
|
||||
|
||||
let (mmap, info) =
|
||||
wasmtime_jit::finish_compile(translation, obj, funcs, trampolines, tunables)?;
|
||||
Ok((mmap, Some(info)))
|
||||
@@ -723,19 +731,6 @@ impl Module {
|
||||
&self.inner.signatures
|
||||
}
|
||||
|
||||
pub(crate) fn memfds(&self) -> Result<Option<&Arc<ModuleMemFds>>> {
|
||||
if !self.engine().config().memfd {
|
||||
return Ok(None);
|
||||
}
|
||||
Ok(self
|
||||
.inner
|
||||
.memfds
|
||||
.get_or_try_init(|| {
|
||||
ModuleMemFds::new(self.inner.module.module(), self.inner.module.wasm_data())
|
||||
})?
|
||||
.as_ref())
|
||||
}
|
||||
|
||||
/// Looks up the module upvar value at the `index` specified.
|
||||
///
|
||||
/// Note that this panics if `index` is out of bounds since this should
|
||||
@@ -953,6 +948,14 @@ impl Module {
|
||||
pub fn engine(&self) -> &Engine {
|
||||
&self.inner.engine
|
||||
}
|
||||
|
||||
/// Returns the `ModuleInner` cast as `ModuleRuntimeInfo` for use
|
||||
/// by the runtime.
|
||||
pub(crate) fn runtime_info(&self) -> Arc<dyn wasmtime_runtime::ModuleRuntimeInfo> {
|
||||
// N.B.: this needs to return a clone because we cannot
|
||||
// statically cast the &Arc<ModuleInner> to &Arc<dyn Trait...>.
|
||||
self.inner.clone()
|
||||
}
|
||||
}
|
||||
|
||||
fn _assert_send_sync() {
|
||||
@@ -987,3 +990,131 @@ impl std::hash::Hash for HashedEngineCompileEnv<'_> {
|
||||
env!("CARGO_PKG_VERSION").hash(hasher);
|
||||
}
|
||||
}
|
||||
|
||||
impl wasmtime_runtime::ModuleRuntimeInfo for ModuleInner {
|
||||
fn module(&self) -> &Arc<wasmtime_environ::Module> {
|
||||
self.module.module()
|
||||
}
|
||||
|
||||
fn signature(&self, index: SignatureIndex) -> VMSharedSignatureIndex {
|
||||
self.signatures.as_module_map()[index]
|
||||
}
|
||||
|
||||
fn image_base(&self) -> usize {
|
||||
self.module.code().as_ptr() as usize
|
||||
}
|
||||
|
||||
fn function_info(&self, index: DefinedFuncIndex) -> &FunctionInfo {
|
||||
self.module.func_info(index)
|
||||
}
|
||||
|
||||
fn memfd_image(&self, memory: DefinedMemoryIndex) -> Result<Option<&Arc<MemoryMemFd>>> {
|
||||
if !self.engine.config().memfd {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let memfds = self
|
||||
.memfds
|
||||
.get_or_try_init(|| ModuleMemFds::new(self.module.module(), self.module.wasm_data()))?;
|
||||
Ok(memfds
|
||||
.as_ref()
|
||||
.and_then(|memfds| memfds.get_memory_image(memory)))
|
||||
}
|
||||
|
||||
fn unique_id(&self) -> Option<CompiledModuleId> {
|
||||
Some(self.module.unique_id())
|
||||
}
|
||||
|
||||
fn wasm_data(&self) -> &[u8] {
|
||||
self.module.wasm_data()
|
||||
}
|
||||
}
|
||||
|
||||
/// A barebones implementation of ModuleRuntimeInfo that is useful for
|
||||
/// cases where a purpose-built environ::Module is used and a full
|
||||
/// CompiledModule does not exist (for example, for tests or for the
|
||||
/// default-callee instance).
|
||||
pub(crate) struct BareModuleInfo {
|
||||
module: Arc<wasmtime_environ::Module>,
|
||||
image_base: usize,
|
||||
one_signature: Option<(SignatureIndex, VMSharedSignatureIndex)>,
|
||||
function_info: PrimaryMap<DefinedFuncIndex, FunctionInfo>,
|
||||
}
|
||||
|
||||
impl BareModuleInfo {
|
||||
pub(crate) fn empty(module: Arc<wasmtime_environ::Module>) -> Self {
|
||||
BareModuleInfo {
|
||||
module,
|
||||
image_base: 0,
|
||||
one_signature: None,
|
||||
function_info: PrimaryMap::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn maybe_imported_func(
|
||||
module: Arc<wasmtime_environ::Module>,
|
||||
one_signature: Option<(SignatureIndex, VMSharedSignatureIndex)>,
|
||||
) -> Self {
|
||||
BareModuleInfo {
|
||||
module,
|
||||
image_base: 0,
|
||||
one_signature,
|
||||
function_info: PrimaryMap::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn one_func(
|
||||
module: Arc<wasmtime_environ::Module>,
|
||||
image_base: usize,
|
||||
info: FunctionInfo,
|
||||
signature_id: SignatureIndex,
|
||||
signature: VMSharedSignatureIndex,
|
||||
) -> Self {
|
||||
let mut function_info = PrimaryMap::with_capacity(1);
|
||||
function_info.push(info);
|
||||
BareModuleInfo {
|
||||
module,
|
||||
image_base,
|
||||
function_info,
|
||||
one_signature: Some((signature_id, signature)),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn into_traitobj(self) -> Arc<dyn wasmtime_runtime::ModuleRuntimeInfo> {
|
||||
Arc::new(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl wasmtime_runtime::ModuleRuntimeInfo for BareModuleInfo {
|
||||
fn module(&self) -> &Arc<wasmtime_environ::Module> {
|
||||
&self.module
|
||||
}
|
||||
|
||||
fn signature(&self, index: SignatureIndex) -> VMSharedSignatureIndex {
|
||||
let (signature_id, signature) = self
|
||||
.one_signature
|
||||
.expect("Signature for one function should be present if queried");
|
||||
assert_eq!(index, signature_id);
|
||||
signature
|
||||
}
|
||||
|
||||
fn image_base(&self) -> usize {
|
||||
self.image_base
|
||||
}
|
||||
|
||||
fn function_info(&self, index: DefinedFuncIndex) -> &FunctionInfo {
|
||||
&self.function_info[index]
|
||||
}
|
||||
|
||||
fn memfd_image(&self, _memory: DefinedMemoryIndex) -> Result<Option<&Arc<MemoryMemFd>>> {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn unique_id(&self) -> Option<CompiledModuleId> {
|
||||
None
|
||||
}
|
||||
|
||||
fn wasm_data(&self) -> &[u8] {
|
||||
&[]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -76,6 +76,7 @@
|
||||
//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
|
||||
//! `wasmtime`, must uphold for the public interface to be safe.
|
||||
|
||||
use crate::module::BareModuleInfo;
|
||||
use crate::{module::ModuleRegistry, Engine, Module, Trap, Val, ValRaw};
|
||||
use anyhow::{bail, Result};
|
||||
use std::cell::UnsafeCell;
|
||||
@@ -409,7 +410,6 @@ impl<T> Store<T> {
|
||||
/// tables created to 10,000. This can be overridden with the
|
||||
/// [`Store::limiter`] configuration method.
|
||||
pub fn new(engine: &Engine, data: T) -> Self {
|
||||
let functions = &Default::default();
|
||||
// Wasmtime uses the callee argument to host functions to learn about
|
||||
// the original pointer to the `Store` itself, allowing it to
|
||||
// reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
|
||||
@@ -419,18 +419,13 @@ impl<T> Store<T> {
|
||||
// is never null.
|
||||
let default_callee = unsafe {
|
||||
let module = Arc::new(wasmtime_environ::Module::default());
|
||||
let shim = BareModuleInfo::empty(module).into_traitobj();
|
||||
OnDemandInstanceAllocator::default()
|
||||
.allocate(InstanceAllocationRequest {
|
||||
host_state: Box::new(()),
|
||||
image_base: 0,
|
||||
functions,
|
||||
shared_signatures: None.into(),
|
||||
imports: Default::default(),
|
||||
module: &module,
|
||||
unique_id: None,
|
||||
memfds: None,
|
||||
store: StorePtr::empty(),
|
||||
wasm_data: &[],
|
||||
runtime_info: &shim,
|
||||
})
|
||||
.expect("failed to allocate default callee")
|
||||
};
|
||||
|
||||
@@ -11,12 +11,13 @@ pub use self::func::*;
|
||||
use self::global::create_global;
|
||||
use self::memory::create_memory;
|
||||
use self::table::create_table;
|
||||
use crate::module::BareModuleInfo;
|
||||
use crate::store::{InstanceId, StoreOpaque};
|
||||
use crate::{GlobalType, MemoryType, TableType, Val};
|
||||
use anyhow::Result;
|
||||
use std::any::Any;
|
||||
use std::sync::Arc;
|
||||
use wasmtime_environ::{EntityIndex, GlobalIndex, MemoryIndex, Module, TableIndex};
|
||||
use wasmtime_environ::{EntityIndex, GlobalIndex, MemoryIndex, Module, SignatureIndex, TableIndex};
|
||||
use wasmtime_runtime::{
|
||||
Imports, InstanceAllocationRequest, InstanceAllocator, OnDemandInstanceAllocator, StorePtr,
|
||||
VMFunctionImport, VMSharedSignatureIndex,
|
||||
@@ -27,11 +28,10 @@ fn create_handle(
|
||||
store: &mut StoreOpaque,
|
||||
host_state: Box<dyn Any + Send + Sync>,
|
||||
func_imports: &[VMFunctionImport],
|
||||
shared_signature_id: Option<VMSharedSignatureIndex>,
|
||||
one_signature: Option<(SignatureIndex, VMSharedSignatureIndex)>,
|
||||
) -> Result<InstanceId> {
|
||||
let mut imports = Imports::default();
|
||||
imports.functions = func_imports;
|
||||
let functions = &Default::default();
|
||||
|
||||
unsafe {
|
||||
let config = store.engine().config();
|
||||
@@ -39,18 +39,14 @@ fn create_handle(
|
||||
// The configured instance allocator should only be used when creating module instances
|
||||
// as we don't want host objects to count towards instance limits.
|
||||
let module = Arc::new(module);
|
||||
let runtime_info =
|
||||
&BareModuleInfo::maybe_imported_func(module, one_signature).into_traitobj();
|
||||
let handle = OnDemandInstanceAllocator::new(config.mem_creator.clone(), 0).allocate(
|
||||
InstanceAllocationRequest {
|
||||
module: &module,
|
||||
unique_id: None,
|
||||
memfds: None,
|
||||
functions,
|
||||
image_base: 0,
|
||||
imports,
|
||||
shared_signatures: shared_signature_id.into(),
|
||||
host_state,
|
||||
store: StorePtr::new(store.traitobj()),
|
||||
wasm_data: &[],
|
||||
runtime_info,
|
||||
},
|
||||
)?;
|
||||
|
||||
@@ -65,7 +61,7 @@ pub fn generate_global_export(
|
||||
) -> Result<wasmtime_runtime::ExportGlobal> {
|
||||
let instance = create_global(store, gt, val)?;
|
||||
let idx = EntityIndex::Global(GlobalIndex::from_u32(0));
|
||||
match store.instance(instance).lookup_by_declaration(&idx) {
|
||||
match store.instance_mut(instance).lookup_by_declaration(&idx) {
|
||||
wasmtime_runtime::Export::Global(g) => Ok(g),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
@@ -77,7 +73,7 @@ pub fn generate_memory_export(
|
||||
) -> Result<wasmtime_runtime::ExportMemory> {
|
||||
let instance = create_memory(store, m)?;
|
||||
let idx = EntityIndex::Memory(MemoryIndex::from_u32(0));
|
||||
match store.instance(instance).lookup_by_declaration(&idx) {
|
||||
match store.instance_mut(instance).lookup_by_declaration(&idx) {
|
||||
wasmtime_runtime::Export::Memory(m) => Ok(m),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
@@ -89,7 +85,7 @@ pub fn generate_table_export(
|
||||
) -> Result<wasmtime_runtime::ExportTable> {
|
||||
let instance = create_table(store, t)?;
|
||||
let idx = EntityIndex::Table(TableIndex::from_u32(0));
|
||||
match store.instance(instance).lookup_by_declaration(&idx) {
|
||||
match store.instance_mut(instance).lookup_by_declaration(&idx) {
|
||||
wasmtime_runtime::Export::Table(t) => Ok(t),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
//! Support for a calling of an imported function.
|
||||
|
||||
use crate::module::BareModuleInfo;
|
||||
use crate::{Engine, FuncType, Trap, ValRaw};
|
||||
use anyhow::Result;
|
||||
use std::any::Any;
|
||||
use std::panic::{self, AssertUnwindSafe};
|
||||
use std::sync::Arc;
|
||||
use wasmtime_environ::{EntityIndex, Module, ModuleType, PrimaryMap, SignatureIndex};
|
||||
use wasmtime_environ::{EntityIndex, FunctionInfo, Module, ModuleType, SignatureIndex};
|
||||
use wasmtime_jit::{CodeMemory, MmapVec, ProfilingAgent};
|
||||
use wasmtime_runtime::{
|
||||
Imports, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
|
||||
@@ -148,8 +149,6 @@ pub unsafe fn create_raw_function(
|
||||
host_state: Box<dyn Any + Send + Sync>,
|
||||
) -> Result<InstanceHandle> {
|
||||
let mut module = Module::new();
|
||||
let mut functions = PrimaryMap::new();
|
||||
functions.push(Default::default());
|
||||
|
||||
let sig_id = SignatureIndex::from_u32(u32::max_value() - 1);
|
||||
module.types.push(ModuleType::Function(sig_id));
|
||||
@@ -159,18 +158,21 @@ pub unsafe fn create_raw_function(
|
||||
.insert(String::new(), EntityIndex::Function(func_id));
|
||||
let module = Arc::new(module);
|
||||
|
||||
let runtime_info = &BareModuleInfo::one_func(
|
||||
module.clone(),
|
||||
(*func).as_ptr() as usize,
|
||||
FunctionInfo::default(),
|
||||
sig_id,
|
||||
sig,
|
||||
)
|
||||
.into_traitobj();
|
||||
|
||||
Ok(
|
||||
OnDemandInstanceAllocator::default().allocate(InstanceAllocationRequest {
|
||||
module: &module,
|
||||
unique_id: None,
|
||||
memfds: None,
|
||||
functions: &functions,
|
||||
image_base: (*func).as_ptr() as usize,
|
||||
imports: Imports::default(),
|
||||
shared_signatures: sig.into(),
|
||||
host_state,
|
||||
store: StorePtr::empty(),
|
||||
wasm_data: &[],
|
||||
runtime_info,
|
||||
})?,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ pub fn create_global(store: &mut StoreOpaque, gt: &GlobalType, val: Val) -> Resu
|
||||
let mut module = Module::new();
|
||||
let mut func_imports = Vec::new();
|
||||
let mut externref_init = None;
|
||||
let mut shared_signature_id = None;
|
||||
let mut one_signature = None;
|
||||
|
||||
let global = Global {
|
||||
wasm_ty: gt.content().to_wasm_type(),
|
||||
@@ -37,8 +37,8 @@ pub fn create_global(store: &mut StoreOpaque, gt: &GlobalType, val: Val) -> Resu
|
||||
// our global with a `ref.func` to grab that imported function.
|
||||
let f = f.caller_checked_anyfunc(store);
|
||||
let f = unsafe { f.as_ref() };
|
||||
shared_signature_id = Some(f.type_index);
|
||||
let sig_id = SignatureIndex::from_u32(u32::max_value() - 1);
|
||||
one_signature = Some((sig_id, f.type_index));
|
||||
module.types.push(ModuleType::Function(sig_id));
|
||||
let func_index = module.functions.push(sig_id);
|
||||
module.num_imported_funcs = 1;
|
||||
@@ -64,16 +64,10 @@ pub fn create_global(store: &mut StoreOpaque, gt: &GlobalType, val: Val) -> Resu
|
||||
module
|
||||
.exports
|
||||
.insert(String::new(), EntityIndex::Global(global_id));
|
||||
let id = create_handle(
|
||||
module,
|
||||
store,
|
||||
Box::new(()),
|
||||
&func_imports,
|
||||
shared_signature_id,
|
||||
)?;
|
||||
let id = create_handle(module, store, Box::new(()), &func_imports, one_signature)?;
|
||||
|
||||
if let Some(x) = externref_init {
|
||||
let instance = store.instance(id);
|
||||
let instance = store.instance_mut(id);
|
||||
match instance.lookup_by_declaration(&EntityIndex::Global(global_id)) {
|
||||
wasmtime_runtime::Export::Global(g) => unsafe {
|
||||
*(*g.definition).as_externref_mut() = Some(x.inner);
|
||||
|
||||
Reference in New Issue
Block a user