wasmtime: Implement table.get and table.set
These instructions have fast, inline JIT paths for the common cases, and only
call out to host VM functions for the slow paths. This required some changes to
`cranelift-wasm`'s `FuncEnvironment`: instead of taking a `FuncCursor` to insert
an instruction sequence within the current basic block,
`FuncEnvironment::translate_table_{get,set}` now take a `&mut FunctionBuilder`
so that they can create whole new basic blocks. This is necessary for
implementing GC read/write barriers that involve branching (e.g. checking for
null, or whether a store buffer is at capacity).
Furthermore, it required that the `load`, `load_complex`, and `store`
instructions handle loading and storing through `r{32,64}` addresses rather
than just `i{32,64}` addresses. This involved making the `r{32,64}` types
acceptable instantiations of the `iAddr` type variable, plus adding a few new
instruction encodings.
Part of #929
Part of #929
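For context, the practical difference is that a `FuncCursor` can only append
straight-line code at one point, while a `FunctionBuilder` can mint new basic
blocks, branch between them, and seal them. A minimal, self-contained sketch of
that capability (assuming the block-based cranelift 0.65-era API used in this
commit, where conditional control flow is `brnz`/`jump`; later releases replaced
these with `brif`):

```rust
use cranelift_codegen::ir::{types, AbiParam, ExternalName, Function, InstBuilder, Signature};
use cranelift_codegen::isa::CallConv;
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};

fn main() {
    // fn(i32) -> i32: returns 1 if the argument is non-zero, else 0.
    let mut sig = Signature::new(CallConv::SystemV);
    sig.params.push(AbiParam::new(types::I32));
    sig.returns.push(AbiParam::new(types::I32));
    let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);

    let mut fb_ctx = FunctionBuilderContext::new();
    let mut builder = FunctionBuilder::new(&mut func, &mut fb_ctx);

    // A `FuncCursor` could not do this: create whole new blocks, exactly what
    // branching GC barriers need.
    let entry = builder.create_block();
    let non_zero = builder.create_block();
    let join = builder.create_block();
    builder.append_block_params_for_function_params(entry);
    builder.append_block_param(join, types::I32);

    builder.switch_to_block(entry);
    let arg = builder.block_params(entry)[0];
    let zero = builder.ins().iconst(types::I32, 0);
    builder.ins().brnz(arg, non_zero, &[]);
    builder.ins().jump(join, &[zero]);

    builder.switch_to_block(non_zero);
    let one = builder.ins().iconst(types::I32, 1);
    builder.ins().jump(join, &[one]);

    builder.switch_to_block(join);
    let result = builder.block_params(join)[0];
    builder.ins().return_(&[result]);

    builder.seal_all_blocks();
    builder.finalize();
    println!("{}", func.display(None));
}
```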
@@ -15,6 +15,7 @@ edition = "2018"
 anyhow = "1.0"
 cranelift-codegen = { path = "../../cranelift/codegen", version = "0.65.0", features = ["enable-serde"] }
 cranelift-entity = { path = "../../cranelift/entity", version = "0.65.0", features = ["enable-serde"] }
+cranelift-frontend = { path = "../../cranelift/frontend", version = "0.65.0" }
 cranelift-wasm = { path = "../../cranelift/wasm", version = "0.65.0", features = ["enable-serde"] }
 wasmparser = "0.58.0"
 lightbeam = { path = "../lightbeam", optional = true, version = "0.18.0" }
@@ -9,6 +9,7 @@ use cranelift_codegen::ir::types::*;
 use cranelift_codegen::ir::{AbiParam, ArgumentPurpose, Function, InstBuilder, Signature};
 use cranelift_codegen::isa::{self, TargetFrontendConfig};
 use cranelift_entity::EntityRef;
+use cranelift_frontend::FunctionBuilder;
 use cranelift_wasm::{
     self, FuncIndex, GlobalIndex, GlobalVariable, MemoryIndex, SignatureIndex, TableIndex,
     TargetEnvironment, WasmError, WasmResult, WasmType,
@@ -169,6 +170,11 @@ declare_builtin_functions! {
     table_grow_funcref(vmctx, i32, i32, pointer) -> (i32);
     /// Returns an index for Wasm's `table.grow` instruction for `externref`s.
     table_grow_externref(vmctx, i32, i32, reference) -> (i32);
+    /// Returns an index to drop a `VMExternRef`.
+    drop_externref(pointer) -> ();
+    /// Returns an index to do a GC and then insert a `VMExternRef` into the
+    /// `VMExternRefActivationsTable`.
+    activations_table_insert_with_gc(vmctx, reference) -> ();
 }

 impl BuiltinFunctionIndex {
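These two declarations give the new libcalls stable indices. On the host side
they correspond to plain `extern "C"` functions, defined in the libcalls hunk
further down in this diff. A hedged sketch of the correspondence, with the
`reference` and `pointer` parameters lowering to raw pointers at the boundary
(`VMContext` left opaque; the type aliases are illustrative, not wasmtime APIs):

```rust
// Host-side shapes matching the declarations above; the real definitions are
// `wasmtime_drop_externref` and `wasmtime_activations_table_insert_with_gc`.
#[repr(C)]
pub struct VMContext {
    _opaque: [u8; 0],
}

// `drop_externref(pointer) -> ()`
pub type DropExternRef = unsafe extern "C" fn(externref: *mut u8);

// `activations_table_insert_with_gc(vmctx, reference) -> ()`
pub type InsertWithGc = unsafe extern "C" fn(vmctx: *mut VMContext, externref: *mut u8);
```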
@@ -392,6 +398,40 @@ impl<'module_environment> FuncEnvironment<'module_environment> {

         (base, func_addr)
     }
+
+    /// Generate code to increment or decrement the given `externref`'s
+    /// reference count.
+    ///
+    /// The new reference count is returned.
+    fn mutate_extenref_ref_count(
+        &mut self,
+        builder: &mut FunctionBuilder,
+        externref: ir::Value,
+        delta: i64,
+    ) -> ir::Value {
+        debug_assert!(delta == -1 || delta == 1);
+
+        let pointer_type = self.pointer_type();
+        let ref_count_offset = ir::immediates::Offset32::new(
+            i32::try_from(VMOffsets::vm_extern_data_ref_count()).unwrap(),
+        );
+
+        let old_ref_count = builder.ins().load(
+            pointer_type,
+            ir::MemFlags::trusted(),
+            externref,
+            ref_count_offset,
+        );
+        let new_ref_count = builder.ins().iadd_imm(old_ref_count, delta);
+        builder.ins().store(
+            ir::MemFlags::trusted(),
+            new_ref_count,
+            externref,
+            ref_count_offset,
+        );
+
+        new_ref_count
+    }
 }

 // TODO: This is necessary as if Lightbeam used `FuncEnvironment` directly it would cause
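A note on the helper above: the count it pokes lives at a fixed offset inside
the `VMExternData` header (made `pub(crate)` later in this diff), and the JIT
mutates it with a plain load/add/store. A self-contained model of that
invariant (the real header is reached through raw pointers, not a `Cell`;
`VMExternDataModel` and `mutate_ref_count` are illustrative names):

```rust
use std::cell::Cell;

// Stand-in for the `VMExternData` header; in wasmtime the count sits at
// `VMOffsets::vm_extern_data_ref_count()` from the `externref` pointer.
#[repr(C)]
struct VMExternDataModel {
    ref_count: Cell<i64>,
}

// The same load -> `iadd_imm` -> store sequence that
// `mutate_extenref_ref_count` emits as CLIF.
fn mutate_ref_count(data: &VMExternDataModel, delta: i64) -> i64 {
    debug_assert!(delta == -1 || delta == 1);
    let new = data.ref_count.get() + delta;
    data.ref_count.set(new);
    new
}

fn main() {
    let d = VMExternDataModel { ref_count: Cell::new(1) };
    assert_eq!(mutate_ref_count(&d, 1), 2);
    assert_eq!(mutate_ref_count(&d, -1), 1);
}
```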
@@ -593,9 +633,10 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
             readonly: false,
         });

-        let element_size = match self.module.table_plans[index].style {
-            TableStyle::CallerChecksSignature => u64::from(self.pointer_type().bytes()),
-        };
+        let element_size = u64::from(
+            self.reference_type(self.module.table_plans[index].table.wasm_ty)
+                .bytes(),
+        );

         Ok(func.create_table(ir::TableData {
             base_gv,
@@ -646,27 +687,325 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m

     fn translate_table_get(
         &mut self,
-        _: cranelift_codegen::cursor::FuncCursor<'_>,
-        _: TableIndex,
-        _: ir::Table,
-        _: ir::Value,
+        builder: &mut FunctionBuilder,
+        table_index: TableIndex,
+        table: ir::Table,
+        index: ir::Value,
     ) -> WasmResult<ir::Value> {
-        Err(WasmError::Unsupported(
-            "the `table.get` instruction is not supported yet".into(),
-        ))
+        let pointer_type = self.pointer_type();
+
+        let plan = &self.module.table_plans[table_index];
+        match plan.table.wasm_ty {
+            WasmType::FuncRef => match plan.style {
+                TableStyle::CallerChecksSignature => {
+                    let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0);
+                    Ok(builder.ins().load(
+                        pointer_type,
+                        ir::MemFlags::trusted(),
+                        table_entry_addr,
+                        0,
+                    ))
+                }
+            },
+            WasmType::ExternRef => {
+                // Our read barrier for `externref` tables is roughly equivalent
+                // to the following pseudocode:
+                //
+                // ```
+                // let elem = table[index]
+                // if elem is not null:
+                //     let (next, end) = VMExternRefActivationsTable bump region
+                //     if next != end:
+                //         elem.ref_count += 1
+                //         *next = elem
+                //         next += 1
+                //     else:
+                //         call activations_table_insert_with_gc(elem)
+                // return elem
+                // ```
+                //
+                // This ensures that all `externref`s coming out of tables and
+                // onto the stack are safely held alive by the
+                // `VMExternRefActivationsTable`.
+
+                let reference_type = self.reference_type(WasmType::ExternRef);
+
+                let continue_block = builder.create_block();
+                let non_null_elem_block = builder.create_block();
+                let gc_block = builder.create_block();
+                let no_gc_block = builder.create_block();
+                let current_block = builder.current_block().unwrap();
+                builder
+                    .func
+                    .layout
+                    .insert_block_after(non_null_elem_block, current_block);
+                builder
+                    .func
+                    .layout
+                    .insert_block_after(no_gc_block, non_null_elem_block);
+                builder
+                    .func
+                    .layout
+                    .insert_block_after(gc_block, no_gc_block);
+                builder
+                    .func
+                    .layout
+                    .insert_block_after(continue_block, gc_block);
+
+                // Load the table element.
+                let elem_addr = builder.ins().table_addr(pointer_type, table, index, 0);
+                let elem =
+                    builder
+                        .ins()
+                        .load(reference_type, ir::MemFlags::trusted(), elem_addr, 0);
+
+                let elem_is_null = builder.ins().is_null(elem);
+                builder.ins().brnz(elem_is_null, continue_block, &[]);
+                builder.ins().jump(non_null_elem_block, &[]);
+
+                // Load the `VMExternRefActivationsTable::next` bump finger and
+                // the `VMExternRefActivationsTable::end` bump boundary.
+                builder.switch_to_block(non_null_elem_block);
+                let vmctx = self.vmctx(&mut builder.func);
+                let vmctx = builder.ins().global_value(pointer_type, vmctx);
+                let activations_table = builder.ins().load(
+                    pointer_type,
+                    ir::MemFlags::trusted(),
+                    vmctx,
+                    i32::try_from(self.offsets.vmctx_externref_activations_table()).unwrap(),
+                );
+                let next = builder.ins().load(
+                    pointer_type,
+                    ir::MemFlags::trusted(),
+                    activations_table,
+                    i32::try_from(self.offsets.vm_extern_ref_activation_table_next()).unwrap(),
+                );
+                let end = builder.ins().load(
+                    pointer_type,
+                    ir::MemFlags::trusted(),
+                    activations_table,
+                    i32::try_from(self.offsets.vm_extern_ref_activation_table_end()).unwrap(),
+                );
+
+                // If `next == end`, then we are at full capacity. Call a
+                // builtin to do a GC and insert this reference into the
+                // just-swept table for us.
+                let at_capacity = builder.ins().icmp(ir::condcodes::IntCC::Equal, next, end);
+                builder.ins().brnz(at_capacity, gc_block, &[]);
+                builder.ins().jump(no_gc_block, &[]);
+                builder.switch_to_block(gc_block);
+                let builtin_idx = BuiltinFunctionIndex::activations_table_insert_with_gc();
+                let builtin_sig = self
+                    .builtin_function_signatures
+                    .activations_table_insert_with_gc(builder.func);
+                let (vmctx, builtin_addr) = self
+                    .translate_load_builtin_function_address(&mut builder.cursor(), builtin_idx);
+                builder
+                    .ins()
+                    .call_indirect(builtin_sig, builtin_addr, &[vmctx, elem]);
+                builder.ins().jump(continue_block, &[]);
+
+                // If `next != end`, then:
+                //
+                // * increment this reference's ref count,
+                // * store the reference into the bump table at `*next`,
+                // * and finally increment the `next` bump finger.
+                builder.switch_to_block(no_gc_block);
+                self.mutate_extenref_ref_count(builder, elem, 1);
+                builder.ins().store(ir::MemFlags::trusted(), elem, next, 0);
+
+                let new_next = builder
+                    .ins()
+                    .iadd_imm(next, i64::from(reference_type.bytes()));
+                builder.ins().store(
+                    ir::MemFlags::trusted(),
+                    new_next,
+                    activations_table,
+                    i32::try_from(self.offsets.vm_extern_ref_activation_table_next()).unwrap(),
+                );
+
+                builder.ins().jump(continue_block, &[]);
+                builder.switch_to_block(continue_block);
+
+                builder.seal_block(non_null_elem_block);
+                builder.seal_block(gc_block);
+                builder.seal_block(no_gc_block);
+                builder.seal_block(continue_block);
+
+                Ok(elem)
+            }
+            ty => Err(WasmError::Unsupported(format!(
+                "unsupported table type for `table.get` instruction: {:?}",
+                ty
+            ))),
+        }
     }

     fn translate_table_set(
         &mut self,
-        _: cranelift_codegen::cursor::FuncCursor<'_>,
-        _: TableIndex,
-        _: ir::Table,
-        _: ir::Value,
-        _: ir::Value,
+        builder: &mut FunctionBuilder,
+        table_index: TableIndex,
+        table: ir::Table,
+        value: ir::Value,
+        index: ir::Value,
     ) -> WasmResult<()> {
-        Err(WasmError::Unsupported(
-            "the `table.set` instruction is not supported yet".into(),
-        ))
+        let pointer_type = self.pointer_type();
+
+        let plan = &self.module.table_plans[table_index];
+        match plan.table.wasm_ty {
+            WasmType::FuncRef => match plan.style {
+                TableStyle::CallerChecksSignature => {
+                    let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0);
+                    builder
+                        .ins()
+                        .store(ir::MemFlags::trusted(), value, table_entry_addr, 0);
+                    Ok(())
+                }
+            },
+            WasmType::ExternRef => {
+                // Our write barrier for `externref`s being copied out of the
+                // stack and into a table is roughly equivalent to the following
+                // pseudocode:
+                //
+                // ```
+                // if value != null:
+                //     value.ref_count += 1
+                // let current_elem = table[index]
+                // table[index] = value
+                // if current_elem != null:
+                //     current_elem.ref_count -= 1
+                //     if current_elem.ref_count == 0:
+                //         call drop_externref(current_elem)
+                // ```
+                //
+                // This write barrier is responsible for ensuring that:
+                //
+                // 1. The value's ref count is incremented now that the table
+                //    is holding onto it. This is required for memory safety.
+                //
+                // 2. The old table element, if any, has its ref count
+                //    decremented, and that the wrapped data is dropped if the
+                //    ref count reaches zero. This is not required for memory
+                //    safety, but is required to avoid leaks. Furthermore, the
+                //    destructor might GC or touch this table, so we must only
+                //    drop the old table element *after* we've replaced it with
+                //    the new `value`!
+
+                let current_block = builder.current_block().unwrap();
+                let inc_ref_count_block = builder.create_block();
+                builder
+                    .func
+                    .layout
+                    .insert_block_after(inc_ref_count_block, current_block);
+                let check_current_elem_block = builder.create_block();
+                builder
+                    .func
+                    .layout
+                    .insert_block_after(check_current_elem_block, inc_ref_count_block);
+                let dec_ref_count_block = builder.create_block();
+                builder
+                    .func
+                    .layout
+                    .insert_block_after(dec_ref_count_block, check_current_elem_block);
+                let drop_block = builder.create_block();
+                builder
+                    .func
+                    .layout
+                    .insert_block_after(drop_block, dec_ref_count_block);
+                let continue_block = builder.create_block();
+                builder
+                    .func
+                    .layout
+                    .insert_block_after(continue_block, drop_block);
+
+                // Calculate the table address of the current element and do
+                // bounds checks. This is the first thing we do, because we
+                // don't want to modify any ref counts if this `table.set` is
+                // going to trap.
+                let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0);
+
+                // If value is not null, increment `value`'s ref count.
+                //
+                // This has to come *before* decrementing the current table
+                // element's ref count, because it might reach ref count == zero,
+                // causing us to deallocate the current table element. However,
+                // if `value` *is* the current table element (and therefore this
+                // whole `table.set` is a no-op), then we would incorrectly
+                // deallocate `value` and leave it in the table, leading to use
+                // after free.
+                let value_is_null = builder.ins().is_null(value);
+                builder
+                    .ins()
+                    .brnz(value_is_null, check_current_elem_block, &[]);
+                builder.ins().jump(inc_ref_count_block, &[]);
+                builder.switch_to_block(inc_ref_count_block);
+                self.mutate_extenref_ref_count(builder, value, 1);
+                builder.ins().jump(check_current_elem_block, &[]);
+
+                // Grab the current element from the table, and store the new
+                // `value` into the table.
+                //
+                // Note that we load the current element as a pointer, not a
+                // reference. This is so that if we call out-of-line to run its
+                // destructor, and its destructor triggers GC, this reference is
+                // not recorded in the stack map (which would lead to the GC
+                // saving a reference to a deallocated object, and then using it
+                // after it's been freed).
+                builder.switch_to_block(check_current_elem_block);
+                let current_elem =
+                    builder
+                        .ins()
+                        .load(pointer_type, ir::MemFlags::trusted(), table_entry_addr, 0);
+                builder
+                    .ins()
+                    .store(ir::MemFlags::trusted(), value, table_entry_addr, 0);
+
+                // If the current element is non-null, decrement its reference
+                // count. And if its reference count has reached zero, then make
+                // an out-of-line call to deallocate it.
+                let current_elem_is_null =
+                    builder
+                        .ins()
+                        .icmp_imm(ir::condcodes::IntCC::Equal, current_elem, 0);
+                builder
+                    .ins()
+                    .brz(current_elem_is_null, dec_ref_count_block, &[]);
+                builder.ins().jump(continue_block, &[]);
+
+                builder.switch_to_block(dec_ref_count_block);
+                let ref_count = self.mutate_extenref_ref_count(builder, current_elem, -1);
+                builder.ins().brz(ref_count, drop_block, &[]);
+                builder.ins().jump(continue_block, &[]);
+
+                // Call the `drop_externref` builtin to (you guessed it) drop
+                // the `externref`.
+                builder.switch_to_block(drop_block);
+                let builtin_idx = BuiltinFunctionIndex::drop_externref();
+                let builtin_sig = self
+                    .builtin_function_signatures
+                    .drop_externref(builder.func);
+                let (_vmctx, builtin_addr) = self
+                    .translate_load_builtin_function_address(&mut builder.cursor(), builtin_idx);
+                builder
+                    .ins()
+                    .call_indirect(builtin_sig, builtin_addr, &[current_elem]);
+                builder.ins().jump(continue_block, &[]);
+
+                builder.switch_to_block(continue_block);
+
+                builder.seal_block(inc_ref_count_block);
+                builder.seal_block(check_current_elem_block);
+                builder.seal_block(dec_ref_count_block);
+                builder.seal_block(drop_block);
+                builder.seal_block(continue_block);
+
+                Ok(())
+            }
+            ty => Err(WasmError::Unsupported(format!(
+                "unsupported table type for `table.set` instruction: {:?}",
+                ty
+            ))),
+        }
     }

     fn translate_table_fill(
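The ordering that the long comment in `translate_table_set` insists on can be
seen in miniature in host Rust. A self-contained model of the write barrier
(hedged: `Handle`, `inc`, `dec`, and `table_set` are illustrative stand-ins,
with a shared `Cell` playing the role of the `VMExternData` ref count that the
JIT mutates in place):

```rust
use std::cell::Cell;
use std::rc::Rc;

// `Handle` stands in for a raw `VMExternRef` pointer; clones alias the same
// count, just as copies of the same raw pointer would (payload elided).
#[derive(Clone)]
struct Handle(Rc<Cell<i64>>);

fn inc(h: &Handle) {
    h.0.set(h.0.get() + 1);
}

fn dec(h: &Handle) -> i64 {
    let n = h.0.get() - 1;
    h.0.set(n);
    n // when this reaches zero, the JIT branches to the out-of-line drop
}

// The `table.set` write barrier, step for step.
fn table_set(table: &mut [Option<Handle>], index: usize, value: Option<Handle>) {
    // 1. Bump the incoming value's count *first*. If `value` is already the
    //    element in the slot, decrementing first could "free" it while it is
    //    still reachable through the table: a use-after-free.
    if let Some(v) = &value {
        inc(v);
    }
    // 2. Swap the slot before releasing anything, so a destructor that runs
    //    below sees the table already updated.
    let current = std::mem::replace(&mut table[index], value);
    // 3. Only now release the old element.
    if let Some(old) = current {
        if dec(&old) == 0 {
            // the `drop_externref` libcall would deallocate here
        }
    }
}

fn main() {
    let count = Rc::new(Cell::new(1)); // one count: held by the table's slot
    let mut table = vec![Some(Handle(count.clone()))];
    // Self-assignment must be a no-op, not a free.
    let same = Handle(count.clone());
    table_set(&mut table, 0, Some(same));
    assert_eq!(count.get(), 1); // reversed ordering would have hit zero
}
```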
@@ -168,7 +168,7 @@ use wasmtime_environ::{ir::Stackmap, StackMapInformation};
 pub struct VMExternRef(NonNull<VMExternData>);

 #[repr(C)]
-struct VMExternData {
+pub(crate) struct VMExternData {
     // Implicit, dynamically-sized member that always precedes a
     // `VMExternData`.
     //
@@ -237,7 +237,7 @@ impl VMExternData {
     }

     /// Drop the inner value and then free this `VMExternData` heap allocation.
-    unsafe fn drop_and_dealloc(mut data: NonNull<VMExternData>) {
+    pub(crate) unsafe fn drop_and_dealloc(mut data: NonNull<VMExternData>) {
         // Note: we introduce a block scope so that we drop the live
         // reference to the data before we free the heap allocation it
         // resides within after this block.
@@ -614,17 +614,39 @@ impl VMExternRefActivationsTable {
         }
     }

-    fn insert_precise_stack_root(&self, root: NonNull<VMExternData>) {
-        let mut precise_stack_roots = self.precise_stack_roots.borrow_mut();
+    fn insert_precise_stack_root(
+        precise_stack_roots: &mut HashSet<VMExternRefWithTraits>,
+        root: NonNull<VMExternData>,
+    ) {
         let root = unsafe { VMExternRef::clone_from_raw(root.as_ptr() as *mut _) };
         precise_stack_roots.insert(VMExternRefWithTraits(root));
     }

     /// Sweep the bump allocation table after we've discovered our precise stack
     /// roots.
-    fn sweep(&self) {
+    fn sweep(&self, precise_stack_roots: &mut HashSet<VMExternRefWithTraits>) {
+        // Swap out the over-approximated set so we can distinguish between the
+        // over-approximation before we started sweeping, and any new elements
+        // we might insert into the table because of re-entering Wasm via an
+        // `externref`'s destructor. The new elements must be kept alive for
+        // memory safety, but we keep this set around because we likely want to
+        // reuse its allocation/capacity for the new `precise_stack_roots` in
+        // the next GC cycle.
+        let mut old_over_approximated = mem::replace(
+            &mut *self.over_approximated_stack_roots.borrow_mut(),
+            Default::default(),
+        );
+
+        // Sweep our bump chunk.
+        //
+        // Just in case an `externref` destructor calls back into Wasm, passing
+        // more `externref`s into that Wasm, which requires the `externref`s to
+        // be inserted into this `VMExternRefActivationsTable`, make sure `next
+        // == end` so that they go into the over-approximation hash set.
+        let num_filled = self.num_filled_in_bump_chunk();
+        unsafe {
+            *self.next.get() = self.end;
+        }
         for slot in self.chunk.iter().take(num_filled) {
             unsafe {
                 *slot.get() = None;
@@ -637,22 +659,35 @@ impl VMExternRefActivationsTable {
             "after sweeping the bump chunk, all slots should be `None`"
         );

-        // Reset our `next` bump allocation finger.
+        // Reset our `next` finger to the start of the bump allocation chunk.
        unsafe {
            let next = self.chunk.as_ptr() as *mut TableElem;
            debug_assert!(!next.is_null());
            *self.next.get() = NonNull::new_unchecked(next);
        }

-        // The current `precise_roots` becomes our new over-approximated set for
-        // the next GC cycle.
-        let mut precise_roots = self.precise_stack_roots.borrow_mut();
+        // The current `precise_stack_roots` becomes our new over-approximated
+        // set for the next GC cycle.
        let mut over_approximated = self.over_approximated_stack_roots.borrow_mut();
-        mem::swap(&mut *precise_roots, &mut *over_approximated);
+        mem::swap(&mut *precise_stack_roots, &mut *over_approximated);

-        // And finally, the new `precise_roots` should be cleared and remain
-        // empty until the next GC cycle.
-        precise_roots.clear();
+        // And finally, the new `precise_stack_roots` should be cleared and
+        // remain empty until the next GC cycle.
+        //
+        // However, if an `externref` destructor re-entered Wasm with more
+        // `externref`s, then the temp over-approximated set we were using
+        // during sweeping (now `precise_stack_roots`) is not empty, and we need
+        // to keep its references alive in our new over-approximated set.
+        over_approximated.extend(precise_stack_roots.drain());
+
+        // If we didn't re-enter Wasm during destructors (likely),
+        // `precise_stack_roots` has zero capacity, and the old
+        // over-approximated has a bunch of capacity. Reuse whichever set has
+        // most capacity.
+        if old_over_approximated.capacity() > precise_stack_roots.capacity() {
+            old_over_approximated.clear();
+            *precise_stack_roots = old_over_approximated;
+        }
    }

    /// Set the stack canary around a call into Wasm.
@@ -944,6 +979,20 @@ pub unsafe fn gc(
     stack_maps_registry: &StackMapRegistry,
     externref_activations_table: &VMExternRefActivationsTable,
 ) {
+    // We borrow the precise stack roots `RefCell` for the whole duration of
+    // GC. Whether it is dynamically borrowed serves as a flag for detecting
+    // re-entrancy into GC. Re-entrancy can occur if we do a GC, drop an
+    // `externref`, and that `externref`'s destructor then triggers another
+    // GC. Whenever we detect re-entrancy, we return and give the first,
+    // outermost GC call priority.
+    let mut precise_stack_roots = match externref_activations_table
+        .precise_stack_roots
+        .try_borrow_mut()
+    {
+        Err(_) => return,
+        Ok(roots) => roots,
+    };
+
     log::debug!("start GC");

     debug_assert!({
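The `try_borrow_mut` here doubles as a re-entrancy flag: the outermost GC holds
the borrow for its entire duration, so a nested GC (triggered from inside an
`externref` destructor) fails the borrow and bails out immediately. In
miniature (illustrative names; a `Vec<u32>` stands in for the real root set):

```rust
use std::cell::RefCell;

fn gc(precise_stack_roots: &RefCell<Vec<u32>>) {
    let mut roots = match precise_stack_roots.try_borrow_mut() {
        // Already borrowed: we are inside an outer GC, so bail and let it
        // finish the job.
        Err(_) => return,
        Ok(roots) => roots,
    };
    // ... trace stack maps into `roots`, then sweep ...
    roots.clear();
}

fn main() {
    let roots = RefCell::new(vec![1, 2, 3]);
    gc(&roots); // outermost call proceeds
    assert!(roots.borrow().is_empty());
}
```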
@@ -952,7 +1001,6 @@ pub unsafe fn gc(
        // into the activations table's bump-allocated space at the
        // end. Therefore, it should always be empty upon entering this
        // function.
-        let precise_stack_roots = externref_activations_table.precise_stack_roots.borrow();
        precise_stack_roots.is_empty()
    });
@@ -971,7 +1019,7 @@ pub unsafe fn gc(
                true
            });
        }
-        externref_activations_table.sweep();
+        externref_activations_table.sweep(&mut precise_stack_roots);
        log::debug!("end GC");
        return;
    }
@@ -1029,7 +1077,10 @@ pub unsafe fn gc(
                have an entry in the VMExternRefActivationsTable"
            );
            if let Some(r) = NonNull::new(r) {
-                externref_activations_table.insert_precise_stack_root(r);
+                VMExternRefActivationsTable::insert_precise_stack_root(
+                    &mut precise_stack_roots,
+                    r,
+                );
            }
        }
    }
@@ -1056,11 +1107,10 @@ pub unsafe fn gc(
    // would free those missing roots while they are still in use, leading to
    // use-after-free.
    if found_canary {
-        externref_activations_table.sweep();
+        externref_activations_table.sweep(&mut precise_stack_roots);
    } else {
        log::warn!("did not find stack canary; skipping GC sweep");
-        let mut roots = externref_activations_table.precise_stack_roots.borrow_mut();
-        roots.clear();
+        precise_stack_roots.clear();
    }

    log::debug!("end GC");
@@ -60,6 +60,7 @@ use crate::externref::VMExternRef;
 use crate::table::Table;
 use crate::traphandlers::raise_lib_trap;
 use crate::vmcontext::{VMCallerCheckedAnyfunc, VMContext};
+use std::ptr::NonNull;
 use wasmtime_environ::wasm::{
     DataIndex, DefinedMemoryIndex, ElemIndex, MemoryIndex, TableElementType, TableIndex,
 };
@@ -409,3 +410,23 @@ pub unsafe extern "C" fn wasmtime_data_drop(vmctx: *mut VMContext, data_index: u
     let instance = (&mut *vmctx).instance();
     instance.data_drop(data_index)
 }
+
+/// Drop a `VMExternRef`.
+pub unsafe extern "C" fn wasmtime_drop_externref(externref: *mut u8) {
+    let externref = externref as *mut crate::externref::VMExternData;
+    let externref = NonNull::new(externref).unwrap();
+    crate::externref::VMExternData::drop_and_dealloc(externref);
+}
+
+/// Do a GC and insert the given `externref` into the
+/// `VMExternRefActivationsTable`.
+pub unsafe extern "C" fn wasmtime_activations_table_insert_with_gc(
+    vmctx: *mut VMContext,
+    externref: *mut u8,
+) {
+    let externref = VMExternRef::clone_from_raw(externref);
+    let instance = (&mut *vmctx).instance();
+    let activations_table = &**instance.externref_activations_table();
+    let registry = &**instance.stack_map_registry();
+    activations_table.insert_with_gc(externref, registry);
+}
@@ -555,6 +555,10 @@ impl VMBuiltinFunctionsArray {
             wasmtime_imported_memory_fill as usize;
         ptrs[BuiltinFunctionIndex::memory_init().index() as usize] = wasmtime_memory_init as usize;
         ptrs[BuiltinFunctionIndex::data_drop().index() as usize] = wasmtime_data_drop as usize;
+        ptrs[BuiltinFunctionIndex::drop_externref().index() as usize] =
+            wasmtime_drop_externref as usize;
+        ptrs[BuiltinFunctionIndex::activations_table_insert_with_gc().index() as usize] =
+            wasmtime_activations_table_insert_with_gc as usize;

         if cfg!(debug_assertions) {
             for i in 0..ptrs.len() {
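For context on how the generated code reaches these pointers: the JIT loads the
entry at the builtin's index out of this array and `call_indirect`s it through
the signature declared earlier. A self-contained miniature of that dispatch
(the index constant and function body are illustrative, not wasmtime's; real
indices come from `BuiltinFunctionIndex`):

```rust
const DROP_EXTERNREF: usize = 0; // illustrative index

unsafe extern "C" fn drop_externref(_externref: *mut u8) {
    // would run the destructor and free the allocation
}

fn main() {
    // The array of builtin function pointers, as in `VMBuiltinFunctionsArray`.
    let mut ptrs = [0usize; 1];
    ptrs[DROP_EXTERNREF] = drop_externref as usize;

    // The generated code performs the moral equivalent of this indirect call:
    let f: unsafe extern "C" fn(*mut u8) =
        unsafe { std::mem::transmute(ptrs[DROP_EXTERNREF]) };
    unsafe { f(std::ptr::null_mut()) };
}
```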