Merge pull request #1832 from fitzgen/externref-stack-maps

externref: implement stack map-based garbage collection
This commit is contained in:
Nick Fitzgerald
2020-06-15 18:26:24 -07:00
committed by GitHub
31 changed files with 1355 additions and 165 deletions

Cargo.lock (generated): 118 lines changed
View File

@@ -1,5 +1,20 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "addr2line"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a49806b9dadc843c61e7c97e72490ad7f7220ae249012fbda9ad0609457c0543"
dependencies = [
"gimli",
]
[[package]]
name = "adler32"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "567b077b825e468cc974f0020d4082ee6e03132512f207ef1a02fd5d00d1f32d"
[[package]]
name = "ahash"
version = "0.2.18"
@@ -85,26 +100,18 @@ checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
[[package]]
name = "backtrace"
version = "0.3.46"
version = "0.3.49"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1e692897359247cc6bb902933361652380af0f1b7651ae5c5013407f30e109e"
checksum = "05100821de9e028f12ae3d189176b41ee198341eb8f369956407fea2f5cc666c"
dependencies = [
"backtrace-sys",
"addr2line",
"cfg-if",
"libc",
"miniz_oxide",
"object 0.20.0",
"rustc-demangle",
]
[[package]]
name = "backtrace-sys"
version = "0.1.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18fbebbe1c9d1f383a9cc7e8ccdb471b91c8d024ee9c2ca5b5346121fe8b4399"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "base64"
version = "0.11.0"
@@ -493,7 +500,7 @@ dependencies = [
"anyhow",
"cranelift-codegen",
"cranelift-module",
"object",
"object 0.19.0",
"target-lexicon",
]
@@ -633,12 +640,13 @@ dependencies = [
[[package]]
name = "crossbeam-queue"
version = "0.2.2"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab6bffe714b6bb07e42f201352c34f51fefd355ace793f9e638ebd52d23f98d2"
checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570"
dependencies = [
"cfg-if",
"crossbeam-utils",
"maybe-uninit",
]
[[package]]
@@ -981,9 +989,9 @@ dependencies = [
[[package]]
name = "hermit-abi"
version = "0.1.13"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91780f809e750b0a89f5544be56617ff6b1227ee485bcb06ebe10cdf89bd3b71"
checksum = "b9586eedd4ce6b3c498bc3b4dd92fc9f11166aa908a914071953768066c67909"
dependencies = [
"libc",
]
@@ -1181,6 +1189,15 @@ dependencies = [
"autocfg 1.0.0",
]
[[package]]
name = "miniz_oxide"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435"
dependencies = [
"adler32",
]
[[package]]
name = "more-asserts"
version = "0.2.1"
@@ -1189,9 +1206,9 @@ checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238"
[[package]]
name = "num-integer"
version = "0.1.42"
version = "0.1.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba"
checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b"
dependencies = [
"autocfg 1.0.0",
"num-traits",
@@ -1199,9 +1216,9 @@ dependencies = [
[[package]]
name = "num-traits"
version = "0.2.11"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611"
dependencies = [
"autocfg 1.0.0",
]
@@ -1232,6 +1249,12 @@ dependencies = [
"indexmap",
]
[[package]]
name = "object"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5"
[[package]]
name = "once_cell"
version = "1.4.0"
@@ -1613,10 +1636,11 @@ dependencies = [
[[package]]
name = "rayon"
version = "1.3.0"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098"
checksum = "62f02856753d04e03e26929f820d0a0a337ebe71f849801eea335d464b349080"
dependencies = [
"autocfg 1.0.0",
"crossbeam-deque",
"either",
"rayon-core",
@@ -1624,9 +1648,9 @@ dependencies = [
[[package]]
name = "rayon-core"
version = "1.7.0"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9"
checksum = "e92e15d89083484e11353891f1af602cc661426deb9564c298b270c726973280"
dependencies = [
"crossbeam-deque",
"crossbeam-queue",
@@ -1715,9 +1739,9 @@ dependencies = [
[[package]]
name = "remove_dir_all"
version = "0.5.2"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
dependencies = [
"winapi",
]
@@ -1832,18 +1856,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "serde"
version = "1.0.111"
version = "1.0.112"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c9124df5b40cbd380080b2cc6ab894c040a3070d995f5c9dc77e18c34a8ae37d"
checksum = "736aac72d1eafe8e5962d1d1c3d99b0df526015ba40915cb3c49d042e92ec243"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.111"
version = "1.0.112"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f2c3ac8e6ca1e9c80b8be1023940162bf81ae3cffbb1809474152f2ce1eb250"
checksum = "bf0343ce212ac0d3d6afd9391ac8e9c9efe06b533c8d33f660f6390cc4093f57"
dependencies = [
"proc-macro2",
"quote",
@@ -1852,9 +1876,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.53"
version = "1.0.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "993948e75b189211a9b31a7528f950c6adc21f9720b6438ff80a7fa2f864cea2"
checksum = "ec2c5d7e739bc07a3e73381a39d61fdb5f671c60c1df26a130690665803d8226"
dependencies = [
"itoa",
"ryu",
@@ -1941,9 +1965,9 @@ dependencies = [
[[package]]
name = "syn"
version = "1.0.30"
version = "1.0.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93a56fabc59dce20fe48b6c832cc249c713e7ed88fa28b0ee0a3bfcaae5fe4e2"
checksum = "b5304cfdf27365b7585c25d4af91b35016ed21ef88f17ced89c7093b43dba8b6"
dependencies = [
"proc-macro2",
"quote",
@@ -2348,8 +2372,9 @@ dependencies = [
"filecheck",
"humantime",
"libc",
"log",
"more-asserts",
"object",
"object 0.19.0",
"pretty_env_logger",
"rayon",
"structopt",
@@ -2376,7 +2401,7 @@ dependencies = [
"anyhow",
"gimli",
"more-asserts",
"object",
"object 0.19.0",
"target-lexicon",
"thiserror",
"wasmparser 0.57.0",
@@ -2477,7 +2502,7 @@ version = "0.18.0"
dependencies = [
"anyhow",
"more-asserts",
"object",
"object 0.19.0",
"wasmtime-environ",
]
@@ -2491,7 +2516,7 @@ dependencies = [
"ittapi-rs",
"lazy_static",
"libc",
"object",
"object 0.19.0",
"scroll",
"serde",
"target-lexicon",
@@ -2509,6 +2534,7 @@ dependencies = [
"indexmap",
"lazy_static",
"libc",
"log",
"memoffset",
"more-asserts",
"region",
@@ -2746,18 +2772,18 @@ dependencies = [
[[package]]
name = "zstd"
version = "0.5.2+zstd.1.4.5"
version = "0.5.3+zstd.1.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "644352b10ce7f333d6e0af85bd4f5322dc449416dc1211c6308e95bca8923db4"
checksum = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8"
dependencies = [
"zstd-safe",
]
[[package]]
name = "zstd-safe"
version = "2.0.4+zstd.1.4.5"
version = "2.0.5+zstd.1.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7113c0c9aed2c55181f2d9f5b0a36e7d2c0183b11c058ab40b35987479efe4d7"
checksum = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055"
dependencies = [
"libc",
"zstd-sys",
@@ -2765,9 +2791,9 @@ dependencies = [
[[package]]
name = "zstd-sys"
version = "1.4.16+zstd.1.4.5"
version = "1.4.17+zstd.1.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c442965efc45353be5a9b9969c9b0872fff6828c7e06d118dda2cb2d0bb11d5a"
checksum = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b"
dependencies = [
"cc",
"glob",

View File

@@ -38,6 +38,7 @@ pretty_env_logger = "0.4.0"
file-per-thread-logger = "0.1.1"
wat = "1.0.18"
libc = "0.2.60"
log = "0.4.8"
rayon = "1.2.1"
humantime = "1.3.0"
@@ -86,3 +87,6 @@ maintenance = { status = "actively-developed" }
[[test]]
name = "host_segfault"
harness = false
[profile.dev.package.backtrace]
debug = false # FIXME(#1813)

View File

@@ -209,9 +209,14 @@ fn ignore(testsuite: &str, testname: &str, strategy: &str) -> bool {
// testsuite repo.
("simd", "simd_const") => return true,
("reference_types", "table_copy_on_imported_tables")
| ("reference_types", "externref_id_function") => {
// Ignore if this isn't x64, because Cranelift only supports
// reference types on x64.
return env::var("CARGO_CFG_TARGET_ARCH").unwrap() != "x86_64";
}
// Still working on implementing these. See #929.
("reference_types", "table_copy_on_imported_tables") => return false,
("reference_types", "externref_id_function") => return false,
("reference_types", _) => return true,
_ => {}

View File

@@ -103,7 +103,7 @@ impl Stackmap {
// Refer to the doc comment for `Stackmap` above to understand the
// bitmap representation used here.
let map_size = (dbg!(info.frame_size) + dbg!(info.inbound_args_size)) as usize;
let map_size = (info.frame_size + info.inbound_args_size) as usize;
let word_size = isa.pointer_bytes() as usize;
let num_words = map_size / word_size;
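As a sanity check on the sizing arithmetic above, here is a worked example with hypothetical numbers (a 32-byte frame, 16 bytes of inbound arguments, and 8-byte pointers), mirroring the fixed line:

```rust
fn bitmap_sizing_example() {
    // Hypothetical frame layout; these numbers are illustrative only.
    let frame_size: u32 = 32; // info.frame_size
    let inbound_args_size: u32 = 16; // info.inbound_args_size
    let map_size = (frame_size + inbound_args_size) as usize; // 48 bytes
    let word_size = 8; // isa.pointer_bytes() on a 64-bit target
    let num_words = map_size / word_size; // one stack map bit per word
    assert_eq!(num_words, 6);
}
```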

View File

@@ -1,5 +1,5 @@
use crate::address_map::{ModuleAddressMap, ValueLabelsRanges};
use crate::compilation::{Compilation, Relocations, Traps};
use crate::compilation::{Compilation, Relocations, StackMaps, Traps};
use cranelift_codegen::ir;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::DefinedFuncIndex;
@@ -35,6 +35,7 @@ pub struct ModuleCacheData {
value_ranges: ValueLabelsRanges,
stack_slots: PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
traps: Traps,
stack_maps: StackMaps,
}
/// A type alias over the module cache data as a tuple.
@@ -45,6 +46,7 @@ pub type ModuleCacheDataTupleType = (
ValueLabelsRanges,
PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
Traps,
StackMaps,
);
struct Sha256Hasher(Sha256);
@@ -204,6 +206,7 @@ impl ModuleCacheData {
value_ranges: data.3,
stack_slots: data.4,
traps: data.5,
stack_maps: data.6,
}
}
@@ -215,6 +218,7 @@ impl ModuleCacheData {
self.value_ranges,
self.stack_slots,
self.traps,
self.stack_maps,
)
}
}

View File

@@ -100,5 +100,6 @@ fn new_module_cache_data() -> Result<ModuleCacheDataTupleType, ()> {
PrimaryMap::new(),
PrimaryMap::new(),
PrimaryMap::new(),
PrimaryMap::new(),
))
}

View File

@@ -144,6 +144,22 @@ pub struct TrapInformation {
/// Information about traps associated with the functions where the traps are placed.
pub type Traps = PrimaryMap<DefinedFuncIndex, Vec<TrapInformation>>;
/// The offset within a function of a GC safepoint, and its associated stack
/// map.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct StackMapInformation {
/// The offset of the GC safepoint within the function's native code. It is
/// relative to the beginning of the function.
pub code_offset: binemit::CodeOffset,
/// The stack map for identifying live GC refs at the GC safepoint.
pub stack_map: binemit::Stackmap,
}
/// Information about GC safepoints and their associated stack maps within each
/// function.
pub type StackMaps = PrimaryMap<DefinedFuncIndex, Vec<StackMapInformation>>;
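Because `code_offset` is function-relative, consumers (such as the runtime-side stack map registry later in this PR) rebase each safepoint onto the function's actual address in executable memory. A minimal sketch of that rebasing, assuming `StackMapInformation` is in scope; the helper name is illustrative, not part of this PR:

```rust
/// Illustrative helper: compute the absolute PC of each GC safepoint from
/// a function's start address in executable memory.
fn safepoint_pcs(func_start: usize, infos: &[StackMapInformation]) -> Vec<usize> {
    infos
        .iter()
        .map(|info| func_start + info.code_offset as usize)
        .collect()
}
```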
/// An error while compiling WebAssembly to machine code.
#[derive(Error, Debug)]
pub enum CompileError {

View File

@@ -88,7 +88,8 @@
use crate::address_map::{FunctionAddressMap, InstructionAddressMap};
use crate::cache::{ModuleCacheDataTupleType, ModuleCacheEntry};
use crate::compilation::{
Compilation, CompileError, CompiledFunction, Relocation, RelocationTarget, TrapInformation,
Compilation, CompileError, CompiledFunction, Relocation, RelocationTarget, StackMapInformation,
TrapInformation,
};
use crate::func_environ::{get_func_name, FuncEnvironment};
use crate::{CacheConfig, FunctionBodyData, ModuleLocal, ModuleTranslation, Tunables};
@@ -204,6 +205,27 @@ impl binemit::TrapSink for TrapSink {
}
}
#[derive(Default)]
struct StackMapSink {
infos: Vec<StackMapInformation>,
}
impl binemit::StackmapSink for StackMapSink {
fn add_stackmap(&mut self, code_offset: binemit::CodeOffset, stack_map: binemit::Stackmap) {
self.infos.push(StackMapInformation {
code_offset,
stack_map,
});
}
}
impl StackMapSink {
fn finish(mut self) -> Vec<StackMapInformation> {
self.infos.sort_unstable_by_key(|info| info.code_offset);
self.infos
}
}
fn get_function_address_map<'data>(
context: &Context,
data: &FunctionBodyData<'data>,
@@ -294,6 +316,7 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
let mut value_ranges = PrimaryMap::with_capacity(env.function_body_inputs.len());
let mut stack_slots = PrimaryMap::with_capacity(env.function_body_inputs.len());
let mut traps = PrimaryMap::with_capacity(env.function_body_inputs.len());
let mut stack_maps = PrimaryMap::with_capacity(env.function_body_inputs.len());
env.function_body_inputs
.into_iter()
@@ -354,14 +377,14 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
let mut code_buf: Vec<u8> = Vec::new();
let mut reloc_sink = RelocSink::new(func_index);
let mut trap_sink = TrapSink::new();
let mut stackmap_sink = binemit::NullStackmapSink {};
let mut stack_map_sink = StackMapSink::default();
context
.compile_and_emit(
isa,
&mut code_buf,
&mut reloc_sink,
&mut trap_sink,
&mut stackmap_sink,
&mut stack_map_sink,
)
.map_err(|error| {
CompileError::Codegen(pretty_error(&context.func, Some(isa), error))
@@ -391,6 +414,7 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
context.func.stack_slots,
trap_sink.traps,
unwind_info,
stack_map_sink.finish(),
))
})
.collect::<Result<Vec<_>, CompileError>>()?
@@ -405,6 +429,7 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
sss,
function_traps,
unwind_info,
stack_map,
)| {
functions.push(CompiledFunction {
body: function,
@@ -416,6 +441,7 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
value_ranges.push(ranges.unwrap_or_default());
stack_slots.push(sss);
traps.push(function_traps);
stack_maps.push(stack_map);
},
);
@@ -428,6 +454,7 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
value_ranges,
stack_slots,
traps,
stack_maps,
))
}

View File

@@ -1,6 +1,7 @@
#![doc(hidden)]
pub mod ir {
pub use cranelift_codegen::binemit::Stackmap;
pub use cranelift_codegen::ir::{
types, AbiParam, ArgumentPurpose, Signature, SourceLoc, StackSlots, TrapCode, Type,
ValueLabel, ValueLoc,

View File

@@ -658,13 +658,6 @@ impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environm
fn target_config(&self) -> TargetFrontendConfig {
self.target_config
}
fn reference_type(&self) -> ir::Type {
// For now, the only reference types we support are `externref`, which
// don't require tracing GC and stack maps. So we just use the target's
// pointer type. This will have to change once we move to tracing GC.
self.pointer_type()
}
}
impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'module_environment> {

View File

@@ -47,7 +47,7 @@ pub use crate::cache::create_new_config as cache_create_new_config;
pub use crate::cache::CacheConfig;
pub use crate::compilation::{
Compilation, CompileError, CompiledFunction, Compiler, Relocation, RelocationTarget,
Relocations, TrapInformation, Traps,
Relocations, StackMapInformation, StackMaps, TrapInformation, Traps,
};
pub use crate::cranelift::Cranelift;
pub use crate::data_structures::*;

View File

@@ -34,6 +34,7 @@ impl crate::compilation::Compiler for Lightbeam {
);
let mut relocations = PrimaryMap::with_capacity(translation.function_body_inputs.len());
let mut traps = PrimaryMap::with_capacity(translation.function_body_inputs.len());
let stack_maps = PrimaryMap::with_capacity(translation.function_body_inputs.len());
let mut codegen_session: CodeGenSession<_> = CodeGenSession::new(
translation.function_body_inputs.len() as u32,
@@ -81,6 +82,7 @@ impl crate::compilation::Compiler for Lightbeam {
ValueLabelsRanges::new(),
PrimaryMap::new(),
traps,
stack_maps,
))
}
}

View File

@@ -92,13 +92,6 @@ impl<'data> TargetEnvironment for ModuleEnvironment<'data> {
fn target_config(&self) -> TargetFrontendConfig {
self.result.target_config
}
fn reference_type(&self) -> ir::Type {
// For now, the only reference types we support are `externref`, which
// don't require tracing GC and stack maps. So we just use the target's
// pointer type. This will have to change once we move to tracing GC.
self.pointer_type()
}
}
/// This trait is useful for `translate_module` because it tells how to translate

View File

@@ -5,6 +5,8 @@
//
// struct VMContext {
// interrupts: *const VMInterrupts,
// externref_activations_table: *mut VMExternRefActivationsTable,
// stack_map_registry: *mut StackMapRegistry,
// signature_ids: [VMSharedSignatureIndex; module.num_signature_ids],
// imported_functions: [VMFunctionImport; module.num_imported_functions],
// imported_tables: [VMTableImport; module.num_imported_tables],
@@ -286,9 +288,23 @@ impl VMOffsets {
0
}
/// The offset of the `VMExternRefActivationsTable` member.
pub fn vmctx_externref_activations_table(&self) -> u32 {
self.vmctx_interrupts()
.checked_add(u32::from(self.pointer_size))
.unwrap()
}
/// The offset of the `*mut StackMapRegistry` member.
pub fn vmctx_stack_map_registry(&self) -> u32 {
self.vmctx_externref_activations_table()
.checked_add(u32::from(self.pointer_size))
.unwrap()
}
/// The offset of the `signature_ids` array.
pub fn vmctx_signature_ids_begin(&self) -> u32 {
self.vmctx_interrupts()
self.vmctx_stack_map_registry()
.checked_add(u32::from(self.pointer_size))
.unwrap()
}
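The vmctx fields are laid out back to back, so each accessor adds one pointer size to the previous offset. A worked example, assuming 8-byte pointers and that `vmctx_interrupts()` returns 0 as the hunk above suggests:

```rust
fn vmctx_front_offsets() {
    let pointer_size: u32 = 8;
    let interrupts = 0u32; // vmctx_interrupts()
    let externref_table = interrupts + pointer_size; // vmctx_externref_activations_table()
    let stack_map_registry = externref_table + pointer_size; // vmctx_stack_map_registry()
    let signature_ids = stack_map_registry + pointer_size; // vmctx_signature_ids_begin()
    assert_eq!(
        (externref_table, stack_map_registry, signature_ids),
        (8, 16, 24)
    );
}
```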
@@ -591,6 +607,19 @@ impl VMOffsets {
}
}
/// Offsets for `VMExternRefActivationsTable`.
impl VMOffsets {
/// Return the offset for `VMExternRefActivationsTable::next`.
pub fn vm_extern_ref_activation_table_next(&self) -> u32 {
0
}
/// Return the offset for `VMExternRefActivationsTable::end`.
pub fn vm_extern_ref_activation_table_end(&self) -> u32 {
self.pointer_size.into()
}
}
/// Target specific type for shared signature index.
#[derive(Debug, Copy, Clone)]
pub struct TargetSharedSignatureIndex(u32);

View File

@@ -15,7 +15,7 @@ use wasmtime_environ::wasm::{DefinedFuncIndex, DefinedMemoryIndex, MemoryIndex,
use wasmtime_environ::{
CacheConfig, CompileError, CompiledFunction, Compiler as _C, Module, ModuleAddressMap,
ModuleMemoryOffset, ModuleTranslation, ModuleVmctxInfo, Relocation, RelocationTarget,
Relocations, Traps, Tunables, VMOffsets, ValueLabelsRanges,
Relocations, StackMaps, Traps, Tunables, VMOffsets, ValueLabelsRanges,
};
use wasmtime_runtime::{InstantiationError, VMFunctionBody, VMTrampoline};
@@ -138,6 +138,7 @@ pub struct Compilation {
pub jt_offsets: PrimaryMap<DefinedFuncIndex, ir::JumpTableOffsets>,
pub dwarf_sections: Vec<DwarfSection>,
pub traps: Traps,
pub stack_maps: StackMaps,
pub address_transform: ModuleAddressMap,
}
@@ -165,8 +166,15 @@ impl Compiler {
) -> Result<Compilation, SetupError> {
let mut code_memory = CodeMemory::new();
let (compilation, relocations, address_transform, value_ranges, stack_slots, traps) =
match self.strategy {
let (
compilation,
relocations,
address_transform,
value_ranges,
stack_slots,
traps,
stack_maps,
) = match self.strategy {
// For now, interpret `Auto` as `Cranelift` since that's the most stable
// implementation.
CompilationStrategy::Auto | CompilationStrategy::Cranelift => {
@@ -239,6 +247,7 @@ impl Compiler {
jt_offsets,
dwarf_sections,
traps,
stack_maps,
address_transform,
})
}

View File

@@ -18,13 +18,13 @@ use wasmtime_environ::isa::TargetIsa;
use wasmtime_environ::wasm::{DefinedFuncIndex, SignatureIndex};
use wasmtime_environ::{
CompileError, DataInitializer, DataInitializerLocation, Module, ModuleAddressMap,
ModuleEnvironment, ModuleTranslation, Traps,
ModuleEnvironment, ModuleTranslation, StackMaps, Traps,
};
use wasmtime_profiling::ProfilingAgent;
use wasmtime_runtime::VMInterrupts;
use wasmtime_runtime::{
GdbJitImageRegistration, InstanceHandle, InstantiationError, RuntimeMemoryCreator,
SignatureRegistry, VMFunctionBody, VMTrampoline,
SignatureRegistry, StackMapRegistry, VMExternRefActivationsTable, VMFunctionBody, VMTrampoline,
};
/// An error condition while setting up a wasm instance, be it validation,
@@ -69,6 +69,7 @@ pub struct CompiledModule {
trampolines: PrimaryMap<SignatureIndex, VMTrampoline>,
data_initializers: Box<[OwnedDataInitializer]>,
traps: Traps,
stack_maps: StackMaps,
address_transform: ModuleAddressMap,
}
@@ -99,6 +100,7 @@ impl CompiledModule {
jt_offsets,
dwarf_sections,
traps,
stack_maps,
address_transform,
} = compiler.compile(&translation, debug_data)?;
@@ -149,6 +151,7 @@ impl CompiledModule {
trampolines,
data_initializers,
traps,
stack_maps,
address_transform,
})
}
@@ -169,6 +172,8 @@ impl CompiledModule {
mem_creator: Option<&dyn RuntimeMemoryCreator>,
interrupts: Arc<VMInterrupts>,
host_state: Box<dyn Any>,
externref_activations_table: *mut VMExternRefActivationsTable,
stack_map_registry: *mut StackMapRegistry,
) -> Result<InstanceHandle, InstantiationError> {
// Compute indices into the shared signature table.
let signatures = {
@@ -200,6 +205,8 @@ impl CompiledModule {
signatures.into_boxed_slice(),
host_state,
interrupts,
externref_activations_table,
stack_map_registry,
)
}
@@ -229,11 +236,16 @@ impl CompiledModule {
&self.finished_functions.0
}
/// Returns the a map for all traps in this module.
/// Returns the map for all traps in this module.
pub fn traps(&self) -> &Traps {
&self.traps
}
/// Returns the map for each of this module's stack maps.
pub fn stack_maps(&self) -> &StackMaps {
&self.stack_maps
}
/// Returns a map of compiled addresses back to original bytecode offsets.
pub fn address_transform(&self) -> &ModuleAddressMap {
&self.address_transform

View File

@@ -15,12 +15,13 @@ edition = "2018"
wasmtime-environ = { path = "../environ", version = "0.18.0" }
region = "2.0.0"
libc = { version = "0.2.70", default-features = false }
log = "0.4.8"
memoffset = "0.5.3"
indexmap = "1.0.2"
thiserror = "1.0.4"
more-asserts = "0.2.1"
cfg-if = "0.1.9"
backtrace = "0.3.42"
backtrace = "0.3.49"
lazy_static = "1.3.0"
[target.'cfg(target_os = "windows")'.dependencies]

View File

@@ -58,29 +58,59 @@
//! need a ton of excess padding between the `VMExternData` and the value for
//! values with large alignment.
//!
//! ## Reference Counting Protocol and Wasm Functions
//! ## Reference Counting, Wasm Functions, and Garbage Collection
//!
//! Currently, `VMExternRef`s passed into compiled Wasm functions have move
//! semantics: the host code gives up ownership and does not decrement the
//! reference count. Similarly, `VMExternRef`s returned from compiled Wasm
//! functions also have move semantics: host code takes ownership and the
//! reference count is not incremented.
//! For host VM code, we use plain reference counting, where cloning increments
//! the reference count, and dropping decrements it. We can avoid many of the
//! on-stack increment/decrement operations that typically plague the
//! performance of reference counting via Rust's ownership and borrowing system.
//! Moving a `VMExternRef` avoids mutating its reference count, and borrowing it
//! either avoids the reference count increment or delays it until if/when the
//! `VMExternRef` is cloned.
//!
//! This works well when a reference is passed into Wasm and then passed back
//! out again. However, if a reference is passed into Wasm, but not passed back
//! out, then the reference is leaked. This is only a temporary state, and
//! follow up work will leverage stack maps to fix this issue. Follow
//! https://github.com/bytecodealliance/wasmtime/issues/929 to keep an eye on
//! this.
//! When passing a `VMExternRef` into compiled Wasm code, we don't want to do
//! reference count mutations for every compiled `local.{get,set}`, nor for
//! every function call. Therefore, we use a variation of **deferred reference
//! counting**, where we only mutate reference counts when storing
//! `VMExternRef`s somewhere that outlives the activation: into a global or
//! table. Simultaneously, we over-approximate the set of `VMExternRef`s that
//! are inside Wasm function activations. Periodically, we walk the stack at GC
//! safe points, and use stack map information to precisely identify the set of
//! `VMExternRef`s inside Wasm activations. Then we take the difference between
//! this precise set and our over-approximation, and decrement the reference
//! count for each of the `VMExternRef`s that are in our over-approximation but
//! not in the precise set. Finally, the over-approximation is replaced with the
//! precise set.
//!
//! The `VMExternRefActivationsTable` implements the over-approximated set of
//! `VMExternRef`s referenced by Wasm activations. Calling a Wasm function and
//! passing it a `VMExternRef` moves the `VMExternRef` into the table, and the
//! compiled Wasm function logically "borrows" the `VMExternRef` from the
//! table. Similarly, `global.get` and `table.get` operations clone the gotten
//! `VMExternRef` into the `VMExternRefActivationsTable` and then "borrow" the
//! reference out of the table.
//!
//! When a `VMExternRef` is returned to host code from a Wasm function, the host
//! increments the reference count (because the reference is logically
//! "borrowed" from the `VMExternRefActivationsTable` and the reference count
//! from the table will be dropped at the next GC).
//!
//! For more general information on deferred reference counting, see *An
//! Examination of Deferred Reference Counting and Cycle Detection* by Quinane:
//! https://openresearch-repository.anu.edu.au/bitstream/1885/42030/2/hon-thesis.pdf
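To make the deferred scheme above concrete, here is a toy model of the over-approximation table and the GC step that reconciles it against a precise root set. This is illustration only; it uses `Rc` instead of Wasmtime's actual types:

```rust
use std::collections::HashSet;
use std::rc::Rc;

/// Toy stand-in for `VMExternRefActivationsTable`.
struct ToyActivationsTable {
    /// Over-approximation of the refs held by live Wasm activations.
    over_approx: Vec<Rc<String>>,
}

impl ToyActivationsTable {
    /// Passing a ref into Wasm: move it into the table. The activation
    /// then borrows it, so no per-`local.{get,set}` refcount traffic.
    fn insert(&mut self, r: Rc<String>) {
        self.over_approx.push(r);
    }

    /// GC: keep only the refs in the precise root set (found via stack
    /// maps in the real system); dropping the rest decrements their
    /// reference counts exactly once each.
    fn gc(&mut self, precise: &HashSet<*const String>) {
        self.over_approx.retain(|r| precise.contains(&Rc::as_ptr(r)));
    }
}
```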
use std::alloc::Layout;
use std::any::Any;
use std::cell::UnsafeCell;
use std::cell::{Cell, RefCell, UnsafeCell};
use std::cmp::Ordering;
use std::hash::Hasher;
use std::collections::BTreeMap;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::mem;
use std::ops::Deref;
use std::ptr::{self, NonNull};
use std::rc::Rc;
use wasmtime_environ::{ir::Stackmap, StackMapInformation};
/// An external reference to some opaque data.
///
@@ -309,33 +339,39 @@ impl VMExternRef {
/// Turn this `VMExternRef` into a raw, untyped pointer.
///
/// This forgets `self` and does *not* decrement the reference count on the
/// pointed-to data.
/// Unlike `into_raw`, this does not consume and forget `self`. It is *not*
/// safe to use `from_raw` on pointers returned from this method; only use
/// `clone_from_raw`!
///
/// This `VMExternRef` may be recovered with `VMExternRef::from_raw`.
pub fn into_raw(self) -> *mut u8 {
/// Nor does this method increment the reference count. You must ensure
/// that `self` (or some other clone of `self`) stays alive until
/// `clone_from_raw` is called.
pub fn as_raw(&self) -> *mut u8 {
let ptr = self.0.cast::<u8>().as_ptr();
mem::forget(self);
ptr
}
/// Create a `VMExternRef` from a pointer returned from a previous call to
/// `VMExternRef::into_raw`.
/// Recreate a `VMExternRef` from a pointer returned from a previous call to
/// `VMExternRef::as_raw`.
///
/// # Safety
///
/// Wildly unsafe to use with anything other than the result of a previous
/// `into_raw` call!
/// `as_raw` call!
///
/// This method does *not* increment the reference count on the pointed-to
/// data, so `from_raw` must be called at most *once* on the result of a
/// previous `into_raw` call. (Ideally, every `into_raw` is later followed
/// by a `from_raw`, but it is technically memory safe to never call
/// `from_raw` after `into_raw`: it will leak the pointed-to value, which is
/// memory safe).
pub unsafe fn from_raw(ptr: *mut u8) -> Self {
/// Additionally, it is your responsibility to ensure that this raw
/// `VMExternRef`'s reference count has not dropped to zero. Failure to do
/// so will result in use after free!
pub unsafe fn clone_from_raw(ptr: *mut u8) -> Self {
debug_assert!(!ptr.is_null());
VMExternRef(NonNull::new_unchecked(ptr).cast())
let x = VMExternRef(NonNull::new_unchecked(ptr).cast());
x.extern_data().increment_ref_count();
x
}
/// Get the strong reference count for this `VMExternRef`.
pub fn strong_count(&self) -> usize {
self.extern_data().get_ref_count()
}
#[inline]
@@ -393,6 +429,640 @@ impl Deref for VMExternRef {
}
}
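A hedged sketch of the raw-pointer protocol the renamed methods establish: `as_raw` borrows without touching the count, and `clone_from_raw` mints a new owning handle by incrementing it:

```rust
/// Sketch only; `externref` must stay alive for the duration of the call.
fn raw_round_trip(externref: &VMExternRef) {
    let raw = externref.as_raw(); // no refcount change; still borrowed
    let copy = unsafe {
        // Increments the refcount: `copy` is an independent owning handle.
        VMExternRef::clone_from_raw(raw)
    };
    assert!(copy.strong_count() >= 2);
    // Dropping `copy` decrements the count again.
}
```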
/// A wrapper around a `VMExternRef` that implements `Eq` and `Hash` with
/// pointer semantics.
///
/// We use this so that we can put `VMExternRef`s inside of `HashSet`s, even
/// though `VMExternRef` itself intentionally does not implement `Eq` and
/// `Hash`, to avoid foot guns.
#[derive(Clone)]
struct VMExternRefWithTraits(VMExternRef);
impl Hash for VMExternRefWithTraits {
fn hash<H>(&self, hasher: &mut H)
where
H: Hasher,
{
VMExternRef::hash(&self.0, hasher)
}
}
impl PartialEq for VMExternRefWithTraits {
fn eq(&self, other: &Self) -> bool {
VMExternRef::eq(&self.0, &other.0)
}
}
impl Eq for VMExternRefWithTraits {}
type TableElem = UnsafeCell<Option<VMExternRef>>;
/// A table that over-approximates the set of `VMExternRef`s that any Wasm
/// activation on this thread is currently using.
///
/// Under the covers, this is a simple bump allocator that allows duplicate
/// entries. Deduplication happens at GC time.
#[repr(C)]
pub struct VMExternRefActivationsTable {
/// Bump-allocation finger within the `chunk`.
///
/// NB: this is an `UnsafeCell` because it is written to by compiled Wasm
/// code.
next: UnsafeCell<NonNull<TableElem>>,
/// Pointer to just after the `chunk`.
///
/// This is *not* within the current chunk and therefore is not a valid
/// place to insert a reference!
end: NonNull<TableElem>,
/// Bump allocation chunk that stores fast-path insertions.
chunk: Box<[TableElem]>,
/// When unioned with `chunk`, this is an over-approximation of the GC roots
/// on the stack, inside Wasm frames.
///
/// This is used by slow-path insertion, and when a GC cycle finishes, is
/// re-initialized to the just-discovered precise set of stack roots (which
/// immediately becomes an over-approximation again as soon as Wasm runs and
/// potentially drops references).
over_approximated_stack_roots: RefCell<HashSet<VMExternRefWithTraits>>,
/// The precise set of on-stack, inside-Wasm GC roots that we discover via
/// walking the stack and interpreting stack maps.
///
/// This is *only* used inside the `gc` function, and is empty otherwise. It
/// is just part of this struct so that we can reuse the allocation, rather
/// than create a new hash set every GC.
precise_stack_roots: RefCell<HashSet<VMExternRefWithTraits>>,
/// A pointer to a `u8` on the youngest host stack frame before we called
/// into Wasm for the first time. When walking the stack in garbage
/// collection, if we don't find this frame, then we failed to walk every
/// Wasm stack frame, which means we failed to find all on-stack,
/// inside-a-Wasm-frame roots, and doing a GC could free one of those
/// missed roots while it is still in use, resulting in a use after free.
stack_canary: Cell<Option<NonNull<u8>>>,
}
impl VMExternRefActivationsTable {
const CHUNK_SIZE: usize = 4096 / mem::size_of::<usize>();
/// Create a new `VMExternRefActivationsTable`.
pub fn new() -> Self {
let chunk = Self::new_chunk(Self::CHUNK_SIZE);
let next = chunk.as_ptr() as *mut TableElem;
let end = unsafe { next.add(chunk.len()) };
VMExternRefActivationsTable {
next: UnsafeCell::new(NonNull::new(next).unwrap()),
end: NonNull::new(end).unwrap(),
chunk,
over_approximated_stack_roots: RefCell::new(HashSet::with_capacity(Self::CHUNK_SIZE)),
precise_stack_roots: RefCell::new(HashSet::with_capacity(Self::CHUNK_SIZE)),
stack_canary: Cell::new(None),
}
}
fn new_chunk(size: usize) -> Box<[UnsafeCell<Option<VMExternRef>>]> {
assert!(size >= Self::CHUNK_SIZE);
(0..size).map(|_| UnsafeCell::new(None)).collect()
}
/// Try and insert a `VMExternRef` into this table.
///
/// This is a fast path that only succeeds when the bump chunk has the
/// capacity for the requested insertion.
///
/// If the insertion fails, then the `VMExternRef` is given back. Callers
/// may attempt a GC to free up space and try again, or may call
/// `insert_slow_path` to infallibly insert the reference (potentially
/// allocating additional space in the table to hold it).
#[inline]
pub fn try_insert(&self, externref: VMExternRef) -> Result<(), VMExternRef> {
unsafe {
let next = *self.next.get();
if next == self.end {
return Err(externref);
}
debug_assert!((*next.as_ref().get()).is_none());
ptr::write(next.as_ptr(), UnsafeCell::new(Some(externref)));
let next = NonNull::new_unchecked(next.as_ptr().add(1));
debug_assert!(next <= self.end);
*self.next.get() = next;
Ok(())
}
}
/// Insert a reference into the table, falling back on a GC to clear up
/// space if the table is already full.
///
/// # Unsafety
///
/// The same as `gc`.
#[inline]
pub unsafe fn insert_with_gc(
&self,
externref: VMExternRef,
stack_maps_registry: &StackMapRegistry,
) {
if let Err(externref) = self.try_insert(externref) {
self.gc_and_insert_slow(externref, stack_maps_registry);
}
}
#[inline(never)]
unsafe fn gc_and_insert_slow(
&self,
externref: VMExternRef,
stack_maps_registry: &StackMapRegistry,
) {
gc(stack_maps_registry, self);
// Might as well insert right into the hash set, rather than the bump
// chunk, since we are already on a slow path and we get de-duplication
// this way.
let mut roots = self.over_approximated_stack_roots.borrow_mut();
roots.insert(VMExternRefWithTraits(externref));
}
fn num_filled_in_bump_chunk(&self) -> usize {
let next = unsafe { *self.next.get() };
let bytes_unused = (self.end.as_ptr() as usize) - (next.as_ptr() as usize);
let slots_unused = bytes_unused / mem::size_of::<TableElem>();
self.chunk.len().saturating_sub(slots_unused)
}
fn elements(&self, mut f: impl FnMut(&VMExternRef)) {
let roots = self.over_approximated_stack_roots.borrow();
for elem in roots.iter() {
f(&elem.0);
}
// The bump chunk may not be all the way full, so we only iterate over its
// filled-in slots.
let num_filled = self.num_filled_in_bump_chunk();
for slot in self.chunk.iter().take(num_filled) {
if let Some(elem) = unsafe { &*slot.get() } {
f(elem);
}
}
}
fn insert_precise_stack_root(&self, root: NonNull<VMExternData>) {
let mut precise_stack_roots = self.precise_stack_roots.borrow_mut();
let root = unsafe { VMExternRef::clone_from_raw(root.as_ptr() as *mut _) };
precise_stack_roots.insert(VMExternRefWithTraits(root));
}
/// Sweep the bump allocation table after we've discovered our precise stack
/// roots.
fn sweep(&self) {
// Sweep our bump chunk.
let num_filled = self.num_filled_in_bump_chunk();
for slot in self.chunk.iter().take(num_filled) {
unsafe {
*slot.get() = None;
}
}
debug_assert!(
self.chunk
.iter()
.all(|slot| unsafe { (*slot.get()).as_ref().is_none() }),
"after sweeping the bump chunk, all slots should be `None`"
);
// Reset our `next` bump allocation finger.
unsafe {
let next = self.chunk.as_ptr() as *mut TableElem;
debug_assert!(!next.is_null());
*self.next.get() = NonNull::new_unchecked(next);
}
// The current `precise_roots` becomes our new over-approximated set for
// the next GC cycle.
let mut precise_roots = self.precise_stack_roots.borrow_mut();
let mut over_approximated = self.over_approximated_stack_roots.borrow_mut();
mem::swap(&mut *precise_roots, &mut *over_approximated);
// And finally, the new `precise_roots` should be cleared and remain
// empty until the next GC cycle.
precise_roots.clear();
}
/// Set the stack canary around a call into Wasm.
///
/// The return value should not be dropped until after the Wasm call has
/// returned.
///
/// While this method is always safe to call (or not call), it is unsafe to
/// call the `wasmtime_runtime::gc` function unless this method is called at
/// the proper times and its return value properly outlives its Wasm call.
///
/// For `gc` to be safe, this is only *strictly required* to surround the
/// oldest host-->Wasm stack frame transition on this thread, but repeatedly
/// calling it is idempotent and cheap, so it is recommended to call this
/// for every host-->Wasm call.
///
/// # Example
///
/// ```no_run
/// use wasmtime_runtime::*;
///
/// # let get_table_from_somewhere = || unimplemented!();
/// let table: &VMExternRefActivationsTable = get_table_from_somewhere();
///
/// // Set the canary before a Wasm call. The canary should always be a
/// // local on the stack.
/// let canary = 0;
/// let auto_reset_canary = table.set_stack_canary(&canary);
///
/// // Do the call into Wasm.
/// # let call_into_wasm = || unimplemented!();
/// call_into_wasm();
///
/// // Only drop the value returned by `set_stack_canary` after the Wasm
/// // call has returned.
/// drop(auto_reset_canary);
/// ```
pub fn set_stack_canary<'a>(&'a self, canary: &u8) -> impl Drop + 'a {
let should_reset = if self.stack_canary.get().is_none() {
let canary = canary as *const u8 as *mut u8;
self.stack_canary.set(Some(unsafe {
debug_assert!(!canary.is_null());
NonNull::new_unchecked(canary)
}));
true
} else {
false
};
return AutoResetCanary {
table: self,
should_reset,
};
struct AutoResetCanary<'a> {
table: &'a VMExternRefActivationsTable,
should_reset: bool,
}
impl Drop for AutoResetCanary<'_> {
fn drop(&mut self) {
if self.should_reset {
debug_assert!(self.table.stack_canary.get().is_some());
self.table.stack_canary.set(None);
}
}
}
}
}
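A usage sketch for the two insertion paths above; the externref argument is left abstract, and the caller is assumed to uphold `gc`'s preconditions:

```rust
/// Hypothetical caller, keeping `externref` alive across a Wasm activation.
fn keep_alive_for_wasm(
    table: &VMExternRefActivationsTable,
    registry: &StackMapRegistry,
    externref: VMExternRef,
) {
    match table.try_insert(externref) {
        // Fast path: a free slot in the bump chunk.
        Ok(()) => {}
        // Slow path: GC to reclaim space, then insert into the hash set.
        // Only safe when the stack canary is set and all relevant stack
        // maps are registered.
        Err(externref) => unsafe { table.insert_with_gc(externref, registry) },
    }
}
```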
/// A registry of stack maps for currently active Wasm modules.
#[derive(Default)]
pub struct StackMapRegistry {
inner: RefCell<StackMapRegistryInner>,
}
#[derive(Default)]
struct StackMapRegistryInner {
/// A map from the highest PC in a module to its stack maps.
///
/// For details, see the comment above `GlobalFrameInfo::ranges`.
ranges: BTreeMap<usize, ModuleStackMaps>,
}
#[derive(Debug)]
struct ModuleStackMaps {
/// The range of PCs that this module covers. Different modules must always
/// have distinct ranges.
range: std::ops::Range<usize>,
/// A map from a PC in this module (that is a GC safepoint) to its
/// associated stack map.
pc_to_stack_map: Vec<(usize, Rc<Stackmap>)>,
}
impl StackMapRegistry {
/// Register the stack maps for a given module.
///
/// The stack maps should be given as an iterator of pairs: each function's
/// PC range in memory (that is, where the JIT actually allocated and
/// emitted the function's code), and the stack maps and code offsets
/// within that range for each of that function's GC safepoints.
pub fn register_stack_maps<'a>(
&self,
stack_maps: impl IntoIterator<Item = (std::ops::Range<usize>, &'a [StackMapInformation])>,
) {
let mut min = usize::max_value();
let mut max = 0;
let mut pc_to_stack_map = vec![];
for (range, infos) in stack_maps {
let len = range.end - range.start;
min = std::cmp::min(min, range.start);
max = std::cmp::max(max, range.end);
for info in infos {
assert!((info.code_offset as usize) < len);
pc_to_stack_map.push((
range.start + (info.code_offset as usize),
Rc::new(info.stack_map.clone()),
));
}
}
if pc_to_stack_map.is_empty() {
// Nothing to register.
return;
}
let module_stack_maps = ModuleStackMaps {
range: min..max,
pc_to_stack_map,
};
let mut inner = self.inner.borrow_mut();
// Check if we've already registered this module.
if let Some(existing_module) = inner.ranges.get(&max) {
assert_eq!(existing_module.range, module_stack_maps.range);
debug_assert_eq!(
existing_module.pc_to_stack_map,
module_stack_maps.pc_to_stack_map,
);
return;
}
// Assert that this chunk of ranges doesn't collide with any other known
// chunks.
if let Some((_, prev)) = inner.ranges.range(max..).next() {
assert!(prev.range.start > max);
}
if let Some((prev_end, _)) = inner.ranges.range(..=min).next_back() {
assert!(*prev_end < min);
}
let old = inner.ranges.insert(max, module_stack_maps);
assert!(old.is_none());
}
/// Lookup the stack map for the given PC, if any.
pub fn lookup_stack_map(&self, pc: usize) -> Option<Rc<Stackmap>> {
let inner = self.inner.borrow();
let stack_maps = inner.module_stack_maps(pc)?;
// Do a binary search to find the stack map for the given PC.
//
// Because GC safepoints are technically only associated with a single
// PC, we should ideally only care about `Ok(index)` values returned
// from the binary search. However, safepoints are inserted right before
// calls, and there are two things that can disturb the PC/offset
// associated with the safepoint versus the PC we actually use to query
// for the stack map:
//
// 1. The `backtrace` crate gives us the PC in a frame that will be
// *returned to*, and where execution will continue from, rather than
// the PC of the call we are currently at. So we would need to
// disassemble one instruction backwards to query the actual PC for
// the stack map.
//
// TODO: One thing we *could* do to make this a little less error
// prone would be to assert/check that the nearest GC safepoint
// found is within `max_encoded_size(any kind of call instruction)`
// of our queried PC for the target architecture.
//
// 2. Cranelift's stack maps only handle the stack, not
// registers. However, some references that are arguments to a call
// may need to be in registers. In these cases, what Cranelift will
// do is:
//
// a. spill all the live references,
// b. insert a GC safepoint for those references,
// c. reload the references into registers, and finally
// d. make the call.
//
// Step (c) adds drift between the GC safepoint and the location of
// the call, which is where we actually walk the stack frame and
// collect its live references.
//
// Luckily, the spill stack slots for the live references are still
// up to date, so we can still find all the on-stack roots.
// Furthermore, we do not have a moving GC, so we don't need to worry
// whether the following code will reuse the references in registers
// (which would not have been updated to point to the moved objects)
// or reload from the stack slots (which would have been updated to
// point to the moved objects).
let index = match stack_maps
.pc_to_stack_map
.binary_search_by_key(&pc, |(pc, _stack_map)| *pc)
{
// Exact hit.
Ok(i) => i,
Err(n) => {
// `Err(0)` means that the associated stack map would have been
// the first element in the array if this pc had an associated
// stack map, but this pc does not have an associated stack
// map. That doesn't make sense since every call and trap inside
// Wasm is a GC safepoint and should have a stack map, and the
// only way to have Wasm frames under this native frame is if we
// are at a call or a trap.
debug_assert!(n != 0);
n - 1
}
};
let stack_map = stack_maps.pc_to_stack_map[index].1.clone();
Some(stack_map)
}
}
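And a sketch of how registration and lookup fit together; `code_range` and `infos` are placeholders for a compiled module's JIT-allocated function range and its safepoint data:

```rust
fn register_and_query(
    registry: &StackMapRegistry,
    code_range: std::ops::Range<usize>,
    infos: &[StackMapInformation],
    pc: usize,
) {
    registry.register_stack_maps(std::iter::once((code_range, infos)));
    if let Some(map) = registry.lookup_stack_map(pc) {
        for i in 0..map.mapped_words() as usize {
            if map.get_bit(i) {
                // Word `i` above SP holds a live GC ref at this safepoint.
            }
        }
    }
}
```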
impl StackMapRegistryInner {
fn module_stack_maps(&self, pc: usize) -> Option<&ModuleStackMaps> {
let (end, stack_maps) = self.ranges.range(pc..).next()?;
if pc < stack_maps.range.start || *end < pc {
None
} else {
Some(stack_maps)
}
}
}
#[derive(Debug, Default)]
struct DebugOnly<T> {
inner: T,
}
impl<T> std::ops::Deref for DebugOnly<T> {
type Target = T;
fn deref(&self) -> &T {
if cfg!(debug_assertions) {
&self.inner
} else {
panic!(
"only deref `DebugOnly` when `cfg(debug_assertions)` or \
inside a `debug_assert!(..)`"
)
}
}
}
impl<T> std::ops::DerefMut for DebugOnly<T> {
fn deref_mut(&mut self) -> &mut T {
if cfg!(debug_assertions) {
&mut self.inner
} else {
panic!(
"only deref `DebugOnly` when `cfg(debug_assertions)` or \
inside a `debug_assert!(..)`"
)
}
}
}
/// Perform garbage collection of `VMExternRef`s.
///
/// # Unsafety
///
/// You must have called `VMExternRefActivationsTable::set_stack_canary` for at
/// least the oldest host-->Wasm stack frame transition on this thread's stack
/// (it is idempotent to call it more than once) and keep its return value alive
/// across the duration of that host-->Wasm call.
///
/// Additionally, you must have registered the stack maps for every Wasm module
/// that has frames on the stack with the given `stack_maps_registry`.
pub unsafe fn gc(
stack_maps_registry: &StackMapRegistry,
externref_activations_table: &VMExternRefActivationsTable,
) {
log::debug!("start GC");
debug_assert!({
// This set is only non-empty within this function. It is built up when
// walking the stack and interpreting stack maps, and then drained back
// into the activations table's bump-allocated space at the
// end. Therefore, it should always be empty upon entering this
// function.
let precise_stack_roots = externref_activations_table.precise_stack_roots.borrow();
precise_stack_roots.is_empty()
});
// Whenever we call into Wasm from host code for the first time, we set a
// stack canary. When we return to that host code, we unset the stack
// canary. If there is *not* a stack canary, then there must be zero Wasm
// frames on the stack. Therefore, we can simply reset the table without
// walking the stack.
let stack_canary = match externref_activations_table.stack_canary.get() {
None => {
if cfg!(debug_assertions) {
// Assert that there aren't any Wasm frames on the stack.
backtrace::trace(|frame| {
let stack_map = stack_maps_registry.lookup_stack_map(frame.ip() as usize);
assert!(stack_map.is_none());
true
});
}
externref_activations_table.sweep();
log::debug!("end GC");
return;
}
Some(canary) => canary.as_ptr() as usize,
};
// There is a stack canary, so there must be Wasm frames on the stack. The
// rest of this function consists of:
//
// * walking the stack,
//
// * finding the precise set of roots inside Wasm frames via our stack maps,
// and
//
// * resetting our bump-allocated table's over-approximation to the
// newly-discovered precise set.
// The SP of the previous (younger) frame we processed.
let mut last_sp = None;
// Whether we have found our stack canary or not yet.
let mut found_canary = false;
// The `activations_table_set` is used for `debug_assert!`s checking that
// every reference we read out from the stack via stack maps is actually in
// the table. If that weren't true, then either we forgot to insert a
// reference in the table when passing it into Wasm (a bug) or we are
// reading invalid references from the stack (another bug).
let mut activations_table_set: DebugOnly<HashSet<_>> = Default::default();
if cfg!(debug_assertions) {
externref_activations_table.elements(|elem| {
activations_table_set.insert(elem.as_raw() as *mut VMExternData);
});
}
backtrace::trace(|frame| {
let pc = frame.ip() as usize;
let sp = frame.sp() as usize;
if let Some(stack_map) = stack_maps_registry.lookup_stack_map(pc) {
debug_assert!(sp != 0, "we should always get a valid SP for Wasm frames");
for i in 0..(stack_map.mapped_words() as usize) {
if stack_map.get_bit(i) {
// Stack maps have one bit per word in the frame, and the
// zero^th bit is the *lowest* addressed word in the frame,
// i.e. the closest to the SP. So to get the `i`^th word in
// this frame, we add `i * sizeof(word)` to the SP.
let ptr_to_ref = sp + i * mem::size_of::<usize>();
let r = std::ptr::read(ptr_to_ref as *const *mut VMExternData);
debug_assert!(
r.is_null() || activations_table_set.contains(&r),
"every on-stack externref inside a Wasm frame should \
have an entry in the VMExternRefActivationsTable"
);
if let Some(r) = NonNull::new(r) {
externref_activations_table.insert_precise_stack_root(r);
}
}
}
}
if let Some(last_sp) = last_sp {
// We've found the stack canary when we walk over the frame that it
// is contained within.
found_canary |= last_sp <= stack_canary && stack_canary <= sp;
}
last_sp = Some(sp);
// Keep walking the stack until we've found the canary, which is the
// oldest frame before we ever called into Wasm. We can stop once we've
// found it because there won't be any more Wasm frames, and therefore
// there won't be any more on-stack, inside-a-Wasm-frame roots.
!found_canary
});
// Only sweep and reset the table if we found the stack canary, and
// therefore know that we discovered all the on-stack, inside-a-Wasm-frame
// roots. If we did *not* find the stack canary, then `libunwind` failed to
// walk the whole stack, and we might be missing roots. Resetting the table
// would free those missing roots while they are still in use, leading to
// use-after-free.
if found_canary {
externref_activations_table.sweep();
} else {
log::warn!("did not find stack canary; skipping GC sweep");
let mut roots = externref_activations_table.precise_stack_roots.borrow_mut();
roots.clear();
}
log::debug!("end GC");
}
#[cfg(test)]
mod tests {
use super::*;
@@ -434,4 +1104,56 @@ mod tests {
actual_offset.try_into().unwrap(),
);
}
#[test]
fn table_next_is_at_correct_offset() {
let table = VMExternRefActivationsTable::new();
let table_ptr = &table as *const _;
let next_ptr = &table.next as *const _;
let actual_offset = (next_ptr as usize) - (table_ptr as usize);
let offsets = wasmtime_environ::VMOffsets {
pointer_size: 8,
num_signature_ids: 0,
num_imported_functions: 0,
num_imported_tables: 0,
num_imported_memories: 0,
num_imported_globals: 0,
num_defined_tables: 0,
num_defined_memories: 0,
num_defined_globals: 0,
};
assert_eq!(
offsets.vm_extern_ref_activation_table_next() as usize,
actual_offset
);
}
#[test]
fn table_end_is_at_correct_offset() {
let table = VMExternRefActivationsTable::new();
let table_ptr = &table as *const _;
let end_ptr = &table.end as *const _;
let actual_offset = (end_ptr as usize) - (table_ptr as usize);
let offsets = wasmtime_environ::VMOffsets {
pointer_size: 8,
num_signature_ids: 0,
num_imported_functions: 0,
num_imported_tables: 0,
num_imported_memories: 0,
num_imported_globals: 0,
num_defined_tables: 0,
num_defined_memories: 0,
num_defined_globals: 0,
};
assert_eq!(
offsets.vm_extern_ref_activation_table_end() as usize,
actual_offset
);
}
}

View File

@@ -3,6 +3,7 @@
//! `InstanceHandle` is a reference-counting handle for an `Instance`.
use crate::export::Export;
use crate::externref::{StackMapRegistry, VMExternRefActivationsTable};
use crate::imports::Imports;
use crate::memory::{DefaultMemoryCreator, RuntimeLinearMemory, RuntimeMemoryCreator};
use crate::table::{Table, TableElement};
@@ -238,6 +239,16 @@ impl Instance {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_interrupts()) }
}
/// Return a pointer to the `VMExternRefActivationsTable`.
pub fn externref_activations_table(&self) -> *mut *mut VMExternRefActivationsTable {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_externref_activations_table()) }
}
/// Return a pointer to the `StackMapRegistry`.
pub fn stack_map_registry(&self) -> *mut *mut StackMapRegistry {
unsafe { self.vmctx_plus_offset(self.offsets.vmctx_stack_map_registry()) }
}
/// Return a reference to the vmctx used by compiled wasm code.
pub fn vmctx(&self) -> &VMContext {
&self.vmctx
@@ -770,6 +781,10 @@ impl InstanceHandle {
/// internally if you'd like to do so. If possible it's recommended to use
/// the `wasmtime` crate API rather than this type since that is vetted for
/// safety.
///
/// It is your responsibility to ensure that the given raw
/// `externref_activations_table` and `stack_map_registry` outlive this
/// instance.
pub unsafe fn new(
module: Arc<Module>,
code: Arc<dyn Any>,
@@ -780,7 +795,12 @@ impl InstanceHandle {
vmshared_signatures: BoxedSlice<SignatureIndex, VMSharedSignatureIndex>,
host_state: Box<dyn Any>,
interrupts: Arc<VMInterrupts>,
externref_activations_table: *mut VMExternRefActivationsTable,
stack_map_registry: *mut StackMapRegistry,
) -> Result<Self, InstantiationError> {
debug_assert!(!externref_activations_table.is_null());
debug_assert!(!stack_map_registry.is_null());
let tables = create_tables(&module);
let memories = create_memories(&module, mem_creator.unwrap_or(&DefaultMemoryCreator {}))?;
@@ -874,6 +894,8 @@ impl InstanceHandle {
VMBuiltinFunctionsArray::initialized(),
);
*instance.interrupts() = &*instance.interrupts;
*instance.externref_activations_table() = externref_activations_table;
*instance.stack_map_registry() = stack_map_registry;
// Perform infallible initialization in this constructor, while fallible
// initialization is deferred to the `initialize` method.

View File

@@ -37,7 +37,7 @@ pub mod debug_builtins;
pub mod libcalls;
pub use crate::export::*;
pub use crate::externref::VMExternRef;
pub use crate::externref::*;
pub use crate::imports::Imports;
pub use crate::instance::{InstanceHandle, InstantiationError, LinkError};
pub use crate::jit_int::GdbJitImageRegistration;

View File

@@ -194,7 +194,8 @@ macro_rules! getters {
>(export.address);
let mut ret = None;
$(let $args = $args.into_abi();)*
catch_traps(export.vmctx, &instance.store, || {
invoke_wasm_and_catch_traps(export.vmctx, &instance.store, || {
ret = Some(fnptr(export.vmctx, ptr::null_mut(), $($args,)*));
})?;
@@ -265,14 +266,14 @@ impl Func {
// values produced are correct. There could be a bug in `func` that
// produces the wrong number or wrong types of values, and we need
// to catch that here.
for (i, (ret, ty)) in returns.iter_mut().zip(ty_clone.results()).enumerate() {
for (i, (ret, ty)) in returns.into_iter().zip(ty_clone.results()).enumerate() {
if ret.ty() != *ty {
return Err(Trap::new(
"function attempted to return an incompatible value",
));
}
unsafe {
ret.write_value_to(values_vec.add(i));
ret.write_value_to(&store, values_vec.add(i));
}
}
Ok(())
@@ -535,7 +536,7 @@ impl Func {
// Store the argument values into `values_vec`.
let param_tys = my_ty.params().iter();
for ((arg, slot), ty) in params.iter().zip(&mut values_vec).zip(param_tys) {
for ((arg, slot), ty) in params.iter().cloned().zip(&mut values_vec).zip(param_tys) {
if arg.ty() != *ty {
bail!(
"argument type mismatch: found {} but expected {}",
@@ -547,12 +548,12 @@ impl Func {
bail!("cross-`Store` values are not currently supported");
}
unsafe {
arg.write_value_to(slot);
arg.write_value_to(&self.instance.store, slot);
}
}
// Call the trampoline.
catch_traps(self.export.vmctx, &self.instance.store, || unsafe {
invoke_wasm_and_catch_traps(self.export.vmctx, &self.instance.store, || unsafe {
(self.trampoline)(
self.export.vmctx,
ptr::null_mut(),
@@ -729,13 +730,18 @@ impl fmt::Debug for Func {
}
}
pub(crate) fn catch_traps(
pub(crate) fn invoke_wasm_and_catch_traps(
vmctx: *mut VMContext,
store: &Store,
closure: impl FnMut(),
) -> Result<(), Trap> {
let signalhandler = store.signal_handler();
unsafe {
let canary = 0;
let _auto_reset_canary = store
.externref_activations_table()
.set_stack_canary(&canary);
wasmtime_runtime::catch_traps(
vmctx,
store.engine().config().max_wasm_stack,

View File

@@ -50,6 +50,8 @@ fn instantiate(
config.memory_creator.as_ref().map(|a| a as _),
store.interrupts().clone(),
host,
&*store.externref_activations_table() as *const _ as *mut _,
&*store.stack_map_registry() as *const _ as *mut _,
)?;
// After we've created the `InstanceHandle` we still need to run
@@ -89,7 +91,7 @@ fn instantiate(
};
let vmctx_ptr = instance.handle.vmctx_ptr();
unsafe {
super::func::catch_traps(vmctx_ptr, store, || {
super::func::invoke_wasm_and_catch_traps(vmctx_ptr, store, || {
mem::transmute::<
*const VMFunctionBody,
unsafe extern "C" fn(*mut VMContext, *mut VMContext),
@@ -183,10 +185,14 @@ impl Instance {
bail!("cross-`Engine` instantiation is not currently supported");
}
let info = module.register_frame_info();
let host_info = Box::new({
let frame_info_registration = module.register_frame_info();
store.register_jit_code(module.compiled_module().jit_code_ranges());
store.register_stack_maps(&module);
frame_info_registration
});
let handle = instantiate(store, module.compiled_module(), imports, Box::new(info))?;
let handle = instantiate(store, module.compiled_module(), imports, host_info)?;
Ok(Instance {
handle,

View File

@@ -36,6 +36,11 @@ impl ExternRef {
&*self.inner
}
/// Get the strong reference count for this `ExternRef`.
pub fn strong_count(&self) -> usize {
self.inner.strong_count()
}
/// Does this `ExternRef` point to the same inner value as `other`?
///
/// This is *only* pointer equality, and does *not* run any inner value's

View File

@@ -1,6 +1,7 @@
use crate::externals::MemoryCreator;
use crate::r#ref::ExternRef;
use crate::trampoline::{MemoryCreatorProxy, StoreInstanceHandle};
use crate::Module;
use anyhow::{bail, Result};
use std::any::Any;
use std::cell::RefCell;
@@ -14,12 +15,13 @@ use std::rc::{Rc, Weak};
use std::sync::Arc;
use wasmparser::{OperatorValidatorConfig, ValidatingParserConfig};
use wasmtime_environ::settings::{self, Configurable};
use wasmtime_environ::{ir, wasm, CacheConfig, Tunables};
use wasmtime_environ::{ir, isa::TargetIsa, wasm, CacheConfig, Tunables};
use wasmtime_jit::{native, CompilationStrategy, Compiler};
use wasmtime_profiling::{JitDumpAgent, NullProfilerAgent, ProfilingAgent, VTuneAgent};
use wasmtime_runtime::{
debug_builtins, InstanceHandle, RuntimeMemoryCreator, SignalHandler, SignatureRegistry,
VMExternRef, VMInterrupts, VMSharedSignatureIndex,
StackMapRegistry, VMExternRef, VMExternRefActivationsTable, VMInterrupts,
VMSharedSignatureIndex,
};
// Runtime Environment
@@ -194,10 +196,16 @@ impl Config {
self.validating_config
.operator_config
.enable_reference_types = enable;
// The reference types proposal depends on the bulk memory proposal
self.flags
.set("enable_safepoints", if enable { "true" } else { "false" })
.unwrap();
// The reference types proposal depends on the bulk memory proposal.
if enable {
self.wasm_bulk_memory(true);
}
self
}
@@ -591,8 +599,12 @@ impl Config {
self
}
pub(crate) fn target_isa(&self) -> Box<dyn TargetIsa> {
native::builder().finish(settings::Flags::new(self.flags.clone()))
}
fn build_compiler(&self) -> Compiler {
let isa = native::builder().finish(settings::Flags::new(self.flags.clone()));
let isa = self.target_isa();
Compiler::new(
isa,
self.strategy,
@@ -792,6 +804,8 @@ pub(crate) struct StoreInner {
signal_handler: RefCell<Option<Box<SignalHandler<'static>>>>,
jit_code_ranges: RefCell<Vec<(usize, usize)>>,
host_info: RefCell<HashMap<HostInfoKey, Rc<RefCell<dyn Any>>>>,
externref_activations_table: Rc<VMExternRefActivationsTable>,
stack_map_registry: Rc<StackMapRegistry>,
}
struct HostInfoKey(VMExternRef);
@@ -832,6 +846,8 @@ impl Store {
signal_handler: RefCell::new(None),
jit_code_ranges: RefCell::new(Vec::new()),
host_info: RefCell::new(HashMap::new()),
externref_activations_table: Rc::new(VMExternRefActivationsTable::new()),
stack_map_registry: Rc::new(StackMapRegistry::default()),
}),
}
}
@@ -904,6 +920,24 @@ impl Store {
}
}
pub(crate) fn register_stack_maps(&self, module: &Module) {
let module = &module.compiled_module();
self.stack_map_registry().register_stack_maps(
module
.finished_functions()
.values()
.zip(module.stack_maps().values())
.map(|(func, stack_maps)| unsafe {
let ptr = (**func).as_ptr();
let len = (**func).len();
let start = ptr as usize;
let end = ptr as usize + len;
let range = start..end;
(range, &stack_maps[..])
}),
);
}
pub(crate) unsafe fn add_instance(&self, handle: InstanceHandle) -> StoreInstanceHandle {
self.inner.instances.borrow_mut().push(handle.clone());
StoreInstanceHandle {
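
`register_stack_maps` feeds the registry one `(code_range, stack_maps)` pair per finished function. The consumer is the GC's stack walker, which needs to go from a return address to the stack maps of the enclosing function. One plausible layout for that query, sketched under the assumption of an ordered map keyed by range end; `StackMap` is a stand-in type, and this may not match `StackMapRegistry`'s real structure:

use std::collections::BTreeMap;
use std::ops::{Bound, Range};

// Stand-in for the per-safepoint GC metadata Cranelift emits.
struct StackMap;

// Hypothetical registry: function code ranges keyed by exclusive end, so a
// program counter resolves with a single `BTreeMap` range query.
#[derive(Default)]
struct Registry {
    by_end: BTreeMap<usize, (Range<usize>, Vec<StackMap>)>,
}

impl Registry {
    fn register(&mut self, range: Range<usize>, maps: Vec<StackMap>) {
        self.by_end.insert(range.end, (range, maps));
    }

    // Stack maps for the function whose code contains `pc`, if any.
    fn lookup(&self, pc: usize) -> Option<&[StackMap]> {
        let (range, maps) = self
            .by_end
            .range((Bound::Excluded(pc), Bound::Unbounded))
            .map(|(_, entry)| entry)
            .next()?;
        if range.contains(&pc) {
            Some(maps)
        } else {
            None
        }
    }
}

Usage would be `registry.lookup(return_address)` at each frame during the stack walk; keying by the exclusive end makes one ordered-map query sufficient per frame.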
@@ -1074,6 +1108,27 @@ impl Store {
bail!("interrupts aren't enabled for this `Store`")
}
}
pub(crate) fn externref_activations_table(&self) -> &Rc<VMExternRefActivationsTable> {
&self.inner.externref_activations_table
}
pub(crate) fn stack_map_registry(&self) -> &Rc<StackMapRegistry> {
&self.inner.stack_map_registry
}
/// Perform garbage collection of `ExternRef`s.
pub fn gc(&self) {
// For this crate's API, we ensure that `set_stack_canary` invariants
// are upheld for all host-->Wasm calls, and we register every module
// used with this store in `self.inner.stack_map_registry`.
unsafe {
wasmtime_runtime::gc(
&*self.inner.stack_map_registry,
&*self.inner.externref_activations_table,
);
}
}
}
impl Default for Store {
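
From the embedder's point of view, all of the machinery above collapses into one safe method call. A small usage sketch against the API in this diff (constructor shapes assumed from the tests below; the payload type is arbitrary):

use wasmtime::*;

fn main() -> anyhow::Result<()> {
    let mut config = Config::new();
    config.wasm_reference_types(true);
    let engine = Engine::new(&config);
    let store = Store::new(&engine);

    // Create a host reference and drop our handle to it. If it was passed
    // to Wasm, it may still be rooted in the activations table.
    let r = ExternRef::new(&store, String::from("hello"));
    drop(r);

    // Explicit collection: unreachable `externref`s are dropped now rather
    // than at the next table-overflow-triggered GC.
    store.gc();
    Ok(())
}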

@@ -46,6 +46,8 @@ pub(crate) fn create_handle(
signatures.into_boxed_slice(),
state,
store.interrupts().clone(),
&*store.externref_activations_table() as *const _ as *mut _,
&*store.stack_map_registry() as *const _ as *mut _,
)?;
Ok(store.add_instance(handle))
}

@@ -208,11 +208,7 @@ pub fn create_handle_with_function(
func: Box<dyn Fn(*mut VMContext, *mut u128) -> Result<(), Trap>>,
store: &Store,
) -> Result<(StoreInstanceHandle, VMTrampoline)> {
let isa = {
let isa_builder = native::builder();
let flag_builder = settings::builder();
isa_builder.finish(settings::Flags::new(flag_builder))
};
let isa = store.engine().config().target_isa();
let pointer_type = isa.pointer_type();
let sig = match ft.get_wasmtime_signature(pointer_type) {

@@ -106,6 +106,10 @@ impl ValType {
ValType::F32 => Some(ir::types::F32),
ValType::F64 => Some(ir::types::F64),
ValType::V128 => Some(ir::types::I8X16),
#[cfg(target_pointer_width = "64")]
ValType::ExternRef => Some(ir::types::R64),
#[cfg(target_pointer_width = "32")]
ValType::ExternRef => Some(ir::types::R32),
_ => None,
}
}
@@ -117,6 +121,10 @@ impl ValType {
ir::types::F32 => Some(ValType::F32),
ir::types::F64 => Some(ValType::F64),
ir::types::I8X16 => Some(ValType::V128),
#[cfg(target_pointer_width = "64")]
ir::types::R64 => Some(ValType::ExternRef),
#[cfg(target_pointer_width = "32")]
ir::types::R32 => Some(ValType::ExternRef),
_ => None,
}
}

@@ -79,15 +79,21 @@ impl Val {
}
}
pub(crate) unsafe fn write_value_to(&self, p: *mut u128) {
pub(crate) unsafe fn write_value_to(self, store: &Store, p: *mut u128) {
match self {
Val::I32(i) => ptr::write(p as *mut i32, *i),
Val::I64(i) => ptr::write(p as *mut i64, *i),
Val::F32(u) => ptr::write(p as *mut u32, *u),
Val::F64(u) => ptr::write(p as *mut u64, *u),
Val::V128(b) => ptr::write(p as *mut u128, *b),
Val::I32(i) => ptr::write(p as *mut i32, i),
Val::I64(i) => ptr::write(p as *mut i64, i),
Val::F32(u) => ptr::write(p as *mut u32, u),
Val::F64(u) => ptr::write(p as *mut u64, u),
Val::V128(b) => ptr::write(p as *mut u128, b),
Val::ExternRef(None) => ptr::write(p, 0),
Val::ExternRef(Some(x)) => ptr::write(p as *mut *mut u8, x.inner.clone().into_raw()),
Val::ExternRef(Some(x)) => {
let externref_ptr = x.inner.as_raw();
store
.externref_activations_table()
.insert_with_gc(x.inner, store.stack_map_registry());
ptr::write(p as *mut *mut u8, externref_ptr)
}
_ => unimplemented!("Val::write_value_to"),
}
}
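
Note the ordering in the `ExternRef` arm above: the raw pointer is captured before `x.inner` is moved into the activations table via `insert_with_gc`, and the value is rooted before the pointer is written into the argument buffer, so Wasm never sees an unrooted `externref`. Judging by the tests below, insertion also runs a collection when the table's bump region fills. A hedged sketch of that shape; the field names and the overflow policy are assumptions:

// Stand-in for a reference-counted `externref` value.
type ExternRefLike = std::rc::Rc<dyn std::any::Any>;

// Placeholder so the sketch is self-contained.
struct StackMapRegistryLike;

struct ActivationsTable {
    // Fixed-capacity bump region of GC roots.
    slots: Vec<ExternRefLike>,
    capacity: usize,
}

impl ActivationsTable {
    // Root `r` in the table; if the bump region is full, run a GC first to
    // free slots whose referents are no longer live on the Wasm stack.
    fn insert_with_gc(&mut self, r: ExternRefLike, registry: &StackMapRegistryLike) {
        if self.slots.len() == self.capacity {
            gc(registry, self); // may drop unreachable entries from `slots`
        }
        self.slots.push(r);
    }
}

fn gc(_registry: &StackMapRegistryLike, table: &mut ActivationsTable) {
    // A real GC walks the stack using the registry's stack maps and keeps
    // only the roots it finds; this placeholder only illustrates the shape.
    table.slots.retain(|r| std::rc::Rc::strong_count(r) > 1);
}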
@@ -105,7 +111,7 @@ impl Val {
Val::ExternRef(None)
} else {
Val::ExternRef(Some(ExternRef {
inner: VMExternRef::from_raw(raw),
inner: VMExternRef::clone_from_raw(raw),
store: store.weak(),
}))
}
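
The switch from `from_raw` to `clone_from_raw` is an ownership fix: the raw pointer read back out of the argument buffer is still owned by the activations table (or by live Wasm frames), so building a host-side `ExternRef` must add a reference rather than steal one. The same distinction expressed with `Arc`, as an analogy only (`VMExternRef` has its own reference-count layout):

use std::sync::Arc;

// Take over the +1 reference the raw pointer represents (the `from_raw`
// style). After this, whoever produced `raw` no longer owns a reference.
unsafe fn take_from_raw(raw: *const String) -> Arc<String> {
    Arc::from_raw(raw)
}

// Mint a new reference while leaving the original in place (the
// `clone_from_raw` style): briefly rebuild the `Arc`, clone it, then
// `forget` the temporary so the original count is preserved.
unsafe fn clone_from_raw(raw: *const String) -> Arc<String> {
    let tmp = Arc::from_raw(raw);
    let cloned = Arc::clone(&tmp);
    std::mem::forget(tmp);
    cloned
}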

@@ -99,9 +99,16 @@ pub fn compile_to_obj(
.translate(wasm)
.context("failed to translate module")?;
// TODO: use the traps information
let (compilation, relocations, address_transform, value_ranges, stack_slots, _traps) =
match strategy {
// TODO: use the traps and stack maps information.
let (
compilation,
relocations,
address_transform,
value_ranges,
stack_slots,
_traps,
_stack_maps,
) = match strategy {
Strategy::Auto | Strategy::Cranelift => {
Cranelift::compile_module(&translation, &*isa, cache_config)
}

tests/all/gc.rs Normal file

@@ -0,0 +1,228 @@
use std::cell::Cell;
use std::rc::Rc;
use wasmtime::*;
fn ref_types_module(source: &str) -> anyhow::Result<(Store, Module)> {
let _ = env_logger::try_init();
let mut config = Config::new();
config.wasm_reference_types(true);
let engine = Engine::new(&config);
let store = Store::new(&engine);
let module = Module::new(&engine, source)?;
Ok((store, module))
}
#[test]
fn smoke_test_gc() -> anyhow::Result<()> {
let (store, module) = ref_types_module(
r#"
(module
(import "" "" (func $do_gc))
(func $recursive (export "func") (param i32 externref) (result externref)
local.get 0
i32.eqz
if (result externref)
call $do_gc
local.get 1
else
local.get 0
i32.const 1
i32.sub
local.get 1
call $recursive
end
)
)
"#,
)?;
let do_gc = Func::wrap(&store, {
let store = store.clone();
move || {
// Do a GC with `externref`s on the stack in Wasm frames.
store.gc();
}
});
let instance = Instance::new(&store, &module, &[do_gc.into()])?;
let func = instance.get_func("func").unwrap();
let inner_dropped = Rc::new(Cell::new(false));
let r = ExternRef::new(&store, SetFlagOnDrop(inner_dropped.clone()));
{
let args = [Val::I32(5), Val::ExternRef(Some(r.clone()))];
func.call(&args)?;
}
// Still held alive by the `VMExternRefActivationsTable` (potentially in
// multiple slots within the table) and by this `r` local.
assert!(r.strong_count() >= 2);
// Doing a GC should see that there aren't any `externref`s on the stack in
// Wasm frames anymore.
store.gc();
assert_eq!(r.strong_count(), 1);
// Dropping `r` should drop the inner `SetFlagOnDrop` value.
drop(r);
assert!(inner_dropped.get());
return Ok(());
struct SetFlagOnDrop(Rc<Cell<bool>>);
impl Drop for SetFlagOnDrop {
fn drop(&mut self) {
self.0.set(true);
}
}
}
#[test]
fn wasm_dropping_refs() -> anyhow::Result<()> {
let (store, module) = ref_types_module(
r#"
(module
(func (export "drop_ref") (param externref)
nop
)
)
"#,
)?;
let instance = Instance::new(&store, &module, &[])?;
let drop_ref = instance.get_func("drop_ref").unwrap();
let num_refs_dropped = Rc::new(Cell::new(0));
// NB: 4096 is greater than the initial `VMExternRefActivationsTable`
// capacity, so this will trigger at least one GC.
for _ in 0..4096 {
let r = ExternRef::new(&store, CountDrops(num_refs_dropped.clone()));
let args = [Val::ExternRef(Some(r))];
drop_ref.call(&args)?;
}
assert!(num_refs_dropped.get() > 0);
// And after doing a final GC, all the refs should have been dropped.
store.gc();
assert_eq!(num_refs_dropped.get(), 4096);
return Ok(());
struct CountDrops(Rc<Cell<usize>>);
impl Drop for CountDrops {
fn drop(&mut self) {
self.0.set(self.0.get() + 1);
}
}
}
#[test]
fn many_live_refs() -> anyhow::Result<()> {
let mut wat = r#"
(module
;; Make new `externref`s.
(import "" "make_ref" (func $make_ref (result externref)))
;; Observe an `externref` so it is kept live.
(import "" "observe_ref" (func $observe_ref (param externref)))
(func (export "many_live_refs")
"#
.to_string();
// This is more than the initial `VMExternRefActivationsTable` capacity, so
// it will need to allocate additional bump chunks.
const NUM_LIVE_REFS: usize = 1024;
// Push `externref`s onto the stack.
for _ in 0..NUM_LIVE_REFS {
wat.push_str("(call $make_ref)\n");
}
// Pop `externref`s from the stack. Because each one is passed to a
// function call, they all stay live references until their respective
// calls execute.
for _ in 0..NUM_LIVE_REFS {
wat.push_str("(call $observe_ref)\n");
}
wat.push_str(
"
) ;; func
) ;; module
",
);
let (store, module) = ref_types_module(&wat)?;
let live_refs = Rc::new(Cell::new(0));
let make_ref = Func::new(
&store,
FuncType::new(
vec![].into_boxed_slice(),
vec![ValType::ExternRef].into_boxed_slice(),
),
{
let store = store.clone();
let live_refs = live_refs.clone();
move |_caller, _params, results| {
results[0] = Val::ExternRef(Some(ExternRef::new(
&store,
CountLiveRefs::new(live_refs.clone()),
)));
Ok(())
}
},
);
let observe_ref = Func::new(
&store,
FuncType::new(
vec![ValType::ExternRef].into_boxed_slice(),
vec![].into_boxed_slice(),
),
|_caller, params, _results| {
let r = params[0].externref().unwrap().unwrap();
let r = r.data().downcast_ref::<CountLiveRefs>().unwrap();
assert!(r.live_refs.get() > 0);
Ok(())
},
);
let instance = Instance::new(&store, &module, &[make_ref.into(), observe_ref.into()])?;
let many_live_refs = instance.get_func("many_live_refs").unwrap();
many_live_refs.call(&[])?;
store.gc();
assert_eq!(live_refs.get(), 0);
return Ok(());
struct CountLiveRefs {
live_refs: Rc<Cell<usize>>,
}
impl CountLiveRefs {
fn new(live_refs: Rc<Cell<usize>>) -> Self {
let live = live_refs.get();
live_refs.set(live + 1);
Self { live_refs }
}
}
impl Drop for CountLiveRefs {
fn drop(&mut self) {
let live = self.live_refs.get();
self.live_refs.set(live - 1);
}
}
}

@@ -18,3 +18,7 @@ mod table;
mod traps;
mod use_after_drop;
mod wast;
// Cranelift only supports reference types on x64.
#[cfg(target_arch = "x86_64")]
mod gc;