Merge pull request #2518 from peterhuene/add-allocator
Implement the pooling instance allocator.
35  .github/workflows/main.yml  vendored
@@ -73,14 +73,11 @@ jobs:
 - uses: actions/checkout@v2
 with:
 submodules: true
-# Note that we use nightly Rust here to get intra-doc links which are a
-# nightly-only feature right now.
+# Note that we use nightly Rust for the doc_cfg feature (enabled via `nightlydoc` above)
+# This version is an older nightly for the new x64 backend (see below)
 - uses: ./.github/actions/install-rust
 with:
-# TODO (rust-lang/rust#79661): We are seeing an internal compiler error when
-# building with the latest (2020-12-06) nightly; pin on a slightly older
-# version for now.
-toolchain: nightly-2020-11-29
+toolchain: nightly-2020-12-26
 - run: cargo doc --no-deps --all --exclude wasmtime-cli --exclude test-programs --exclude cranelift-codegen-meta
 - run: cargo doc --package cranelift-codegen-meta --document-private-items
 - uses: actions/upload-artifact@v1

@@ -122,6 +119,7 @@ jobs:
 - run: cargo check --manifest-path crates/wasmtime/Cargo.toml --features jitdump
 - run: cargo check --manifest-path crates/wasmtime/Cargo.toml --features cache
 - run: cargo check --manifest-path crates/wasmtime/Cargo.toml --features async
+- run: cargo check --manifest-path crates/wasmtime/Cargo.toml --features uffd

 # Check some feature combinations of the `wasmtime-c-api` crate
 - run: cargo check --manifest-path crates/c-api/Cargo.toml --no-default-features

@@ -167,7 +165,7 @@ jobs:
 # flags to rustc.
 - uses: ./.github/actions/install-rust
 with:
-toolchain: nightly-2020-11-29
+toolchain: nightly
 - run: cargo install cargo-fuzz --vers "^0.8"
 - run: cargo fetch
 working-directory: ./fuzz

@@ -224,7 +222,7 @@ jobs:
 rust: beta
 - build: nightly
 os: ubuntu-latest
-rust: nightly-2020-11-29
+rust: nightly
 - build: macos
 os: macos-latest
 rust: stable

@@ -303,6 +301,15 @@ jobs:
 env:
 RUST_BACKTRACE: 1

+# Test uffd functionality on Linux
+- run: |
+cargo test --features uffd -p wasmtime-runtime instance::allocator::pooling
+cargo test --features uffd -p wasmtime-cli pooling_allocator
+cargo test --features uffd -p wasmtime-cli wast::Cranelift
+if: matrix.os == 'ubuntu-latest'
+env:
+RUST_BACKTRACE: 1
+
 # Build and test lightbeam. Note that
 # Lightbeam tests fail right now, but we don't want to block on that.
 - run: cargo build --package lightbeam

@@ -312,8 +319,10 @@ jobs:
 RUST_BACKTRACE: 1

 # Perform all tests (debug mode) for `wasmtime` with the experimental x64
-# backend. This runs on the nightly channel of Rust (because of issues with
-# unifying Cargo features on stable) on Ubuntu.
+# backend. This runs on an older nightly of Rust (because of issues with
+# unifying Cargo features on stable) on Ubuntu such that it's new enough
+# to build Wasmtime, but old enough where the -Z options being used
+# haven't been stabilized yet.
 test_x64:
 name: Test x64 new backend
 runs-on: ubuntu-latest

@@ -323,7 +332,7 @@ jobs:
 submodules: true
 - uses: ./.github/actions/install-rust
 with:
-toolchain: nightly-2020-11-29
+toolchain: nightly-2020-12-26
 - uses: ./.github/actions/define-llvm-env

 # Install wasm32 targets in order to build various tests throughout the

@@ -334,7 +343,7 @@ jobs:
 # Run the x64 CI script.
 - run: ./ci/run-experimental-x64-ci.sh
 env:
-CARGO_VERSION: "+nightly-2020-11-29"
+CARGO_VERSION: "+nightly-2020-12-26"
 RUST_BACKTRACE: 1

 # Build and test the wasi-nn module.

@@ -347,7 +356,7 @@ jobs:
 submodules: true
 - uses: ./.github/actions/install-rust
 with:
-toolchain: nightly-2020-11-29
+toolchain: nightly
 - run: rustup target add wasm32-wasi
 - uses: ./.github/actions/install-openvino
 - run: ./ci/run-wasi-nn-example.sh
68  Cargo.lock  generated
@@ -213,6 +213,25 @@ dependencies = [
 "which",
 ]

+[[package]]
+name = "bindgen"
+version = "0.57.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd4865004a46a0aafb2a0a5eb19d3c9fc46ee5f063a6cfc605c69ac9ecf5263d"
+dependencies = [
+"bitflags",
+"cexpr",
+"clang-sys",
+"lazy_static",
+"lazycell",
+"peeking_take_while",
+"proc-macro2",
+"quote",
+"regex",
+"rustc-hash",
+"shlex",
+]
+
 [[package]]
 name = "bit-set"
 version = "0.5.2"

@@ -1567,6 +1586,19 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238"

+[[package]]
+name = "nix"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50e4785f2c3b7589a0d0c1dd60285e1188adac4006e8abd6dd578e1567027363"
+dependencies = [
+"bitflags",
+"cc",
+"cfg-if 0.1.10",
+"libc",
+"void",
+]
+
 [[package]]
 name = "nom"
 version = "5.1.2"

@@ -1714,7 +1746,7 @@ version = "0.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8fb64bef270a1ff665b0b2e28ebfa213e6205a007ce88223d020730225d6008f"
 dependencies = [
-"bindgen",
+"bindgen 0.55.1",
 "cmake",
 ]

@@ -2929,6 +2961,30 @@ dependencies = [
 "winapi",
 ]

+[[package]]
+name = "userfaultfd"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18d8164d4a8198fa546e7553b529f53e82907214a25fafda4a6f90d978b30a5c"
+dependencies = [
+"bitflags",
+"libc",
+"nix",
+"thiserror",
+"userfaultfd-sys",
+]
+
+[[package]]
+name = "userfaultfd-sys"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ada4f4ae167325015f52cc65f9fb6c251b868d8fb3b6dd0ce2d60e497c4870a"
+dependencies = [
+"bindgen 0.57.0",
+"cc",
+"cfg-if 0.1.10",
+]
+
 [[package]]
 name = "vec_map"
 version = "0.8.2"

@@ -2941,6 +2997,12 @@ version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"

+[[package]]
+name = "void"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
+
 [[package]]
 name = "wait-timeout"
 version = "0.2.0"

@@ -3273,6 +3335,7 @@ dependencies = [
 "indexmap",
 "log",
 "more-asserts",
+"region",
 "serde",
 "thiserror",
 "wasmparser",

@@ -3398,6 +3461,7 @@ dependencies = [
 name = "wasmtime-runtime"
 version = "0.24.0"
 dependencies = [
+"anyhow",
 "backtrace",
 "cc",
 "cfg-if 1.0.0",

@@ -3408,8 +3472,10 @@ dependencies = [
 "memoffset",
 "more-asserts",
 "psm",
+"rand 0.7.3",
 "region",
 "thiserror",
+"userfaultfd",
 "wasmtime-environ",
 "winapi",
 ]
@@ -91,6 +91,7 @@ jitdump = ["wasmtime/jitdump"]
|
||||
vtune = ["wasmtime/vtune"]
|
||||
wasi-crypto = ["wasmtime-wasi-crypto"]
|
||||
wasi-nn = ["wasmtime-wasi-nn"]
|
||||
uffd = ["wasmtime/uffd"]
|
||||
|
||||
# Try the experimental, work-in-progress new x86_64 backend. This is not stable
|
||||
# as of June 2020.
|
||||
|
||||
@@ -11,7 +11,7 @@
 <p>
 <a href="https://github.com/bytecodealliance/wasmtime/actions?query=workflow%3ACI"><img src="https://github.com/bytecodealliance/wasmtime/workflows/CI/badge.svg" alt="build status" /></a>
 <a href="https://bytecodealliance.zulipchat.com/#narrow/stream/217126-wasmtime"><img src="https://img.shields.io/badge/zulip-join_chat-brightgreen.svg" alt="zulip chat" /></a>
-<img src="https://img.shields.io/badge/rustc-1.37+-green.svg" alt="min rustc" />
+<img src="https://img.shields.io/badge/rustc-stable+-green.svg" alt="supported rustc stable" />
 <a href="https://docs.rs/wasmtime"><img src="https://docs.rs/wasmtime/badge.svg" alt="Documentation Status" /></a>
 </p>
20  build.rs
@@ -111,7 +111,8 @@ fn test_directory(

 let testsuite = &extract_name(path);
 for entry in dir_entries.iter() {
-write_testsuite_tests(out, entry, testsuite, strategy)?;
+write_testsuite_tests(out, entry, testsuite, strategy, false)?;
+write_testsuite_tests(out, entry, testsuite, strategy, true)?;
 }

 Ok(dir_entries.len())

@@ -148,6 +149,7 @@ fn write_testsuite_tests(
 path: impl AsRef<Path>,
 testsuite: &str,
 strategy: &str,
+pooling: bool,
 ) -> anyhow::Result<()> {
 let path = path.as_ref();
 let testname = extract_name(path);

@@ -160,14 +162,24 @@ fn write_testsuite_tests(
 )?;
 } else if ignore(testsuite, &testname, strategy) {
 writeln!(out, "#[ignore]")?;
+} else if pooling {
+// Ignore on aarch64 due to using QEMU for running tests (limited memory)
+writeln!(out, r#"#[cfg_attr(target_arch = "aarch64", ignore)]"#)?;
 }
-writeln!(out, "fn r#{}() {{", &testname)?;

+writeln!(
+out,
+"fn r#{}{}() {{",
+&testname,
+if pooling { "_pooling" } else { "" }
+)?;
 writeln!(out, " let _ = env_logger::try_init();")?;
 writeln!(
 out,
-" crate::wast::run_wast(r#\"{}\"#, crate::wast::Strategy::{}).unwrap();",
+" crate::wast::run_wast(r#\"{}\"#, crate::wast::Strategy::{}, {}).unwrap();",
 path.display(),
-strategy
+strategy,
+pooling
 )?;
 writeln!(out, "}}")?;
 writeln!(out)?;
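For orientation, here is the shape of the code this generator now emits for a single `.wast` file: one plain test plus one `_pooling` twin. This is an illustrative sketch, not generator output copied from the repo; the file name is hypothetical and the `#[test]` attribute is assumed to be written by parts of build.rs outside this hunk.

```rust
// Hypothetical generated tests for "tests/spec/align.wast" with the
// Cranelift strategy; the trailing bool selects the pooling allocator.
#[test]
fn r#align() {
    let _ = env_logger::try_init();
    crate::wast::run_wast(r#"tests/spec/align.wast"#, crate::wast::Strategy::Cranelift, false).unwrap();
}

#[test]
#[cfg_attr(target_arch = "aarch64", ignore)] // QEMU-based aarch64 CI has limited memory
fn r#align_pooling() {
    let _ = env_logger::try_init();
    crate::wast::run_wast(r#"tests/spec/align.wast"#, crate::wast::Strategy::Cranelift, true).unwrap();
}
```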
@@ -101,6 +101,8 @@ pub fn translate_module<'data>(
|
||||
|
||||
Payload::DataCountSection { count, range } => {
|
||||
validator.data_count_section(count, &range)?;
|
||||
|
||||
// NOTE: the count here is the total segment count, not the passive segment count
|
||||
environ.reserve_passive_data(count)?;
|
||||
}
|
||||
|
||||
|
||||
@@ -401,6 +401,12 @@ pub fn parse_element_section<'data>(
|
||||
));
|
||||
}
|
||||
};
|
||||
// Check for offset + len overflow
|
||||
if offset.checked_add(segments.len()).is_none() {
|
||||
return Err(wasm_unsupported!(
|
||||
"element segment offset and length overflows"
|
||||
));
|
||||
}
|
||||
environ.declare_table_elements(
|
||||
TableIndex::from_u32(table_index),
|
||||
base,
|
||||
@@ -447,6 +453,12 @@ pub fn parse_data_section<'data>(
|
||||
))
|
||||
}
|
||||
};
|
||||
// Check for offset + len overflow
|
||||
if offset.checked_add(data.len()).is_none() {
|
||||
return Err(wasm_unsupported!(
|
||||
"data segment offset and length overflows"
|
||||
));
|
||||
}
|
||||
environ.declare_data_initialization(
|
||||
MemoryIndex::from_u32(memory_index),
|
||||
base,
|
||||
|
||||
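Both guards use the same idea: validate `offset + len` with `checked_add` before any bounds comparison, so a wrapping sum can never masquerade as "in bounds". A minimal, runnable restatement of the pattern:

```rust
/// Returns true when `offset + len` neither overflows usize nor exceeds `size`.
fn segment_in_bounds(offset: usize, len: usize, size: usize) -> bool {
    match offset.checked_add(len) {
        Some(end) => end <= size,
        None => false, // offset + len wrapped around; always reject
    }
}

fn main() {
    assert!(segment_in_bounds(16, 8, 64));
    assert!(!segment_in_bounds(usize::MAX, 1, 64)); // overflow is caught
}
```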
@@ -61,8 +61,8 @@ pub extern "C" fn wasmtime_config_consume_fuel_set(c: &mut wasm_config_t, enable
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wasmtime_config_max_wasm_stack_set(c: &mut wasm_config_t, size: usize) {
|
||||
c.config.max_wasm_stack(size);
|
||||
pub extern "C" fn wasmtime_config_max_wasm_stack_set(c: &mut wasm_config_t, size: usize) -> bool {
|
||||
c.config.max_wasm_stack(size).is_ok()
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
|
||||
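The C API change follows the usual convention for surfacing a newly fallible Rust setter across FFI: collapse the `Result` to a success flag, since C callers cannot consume a Rust error value. A self-contained sketch of the pattern with hypothetical names (not the real wasmtime C API types):

```rust
// `Config` and its validation rule here are stand-ins for illustration only.
pub struct Config;

impl Config {
    fn max_wasm_stack(&mut self, size: usize) -> Result<&mut Self, String> {
        if size == 0 {
            return Err("stack size must be non-zero".to_string());
        }
        Ok(self)
    }
}

#[no_mangle]
pub extern "C" fn demo_config_max_wasm_stack_set(c: &mut Config, size: usize) -> bool {
    // C callers receive `true` on success, `false` on rejection.
    c.max_wasm_stack(size).is_ok()
}
```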
@@ -13,6 +13,7 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0"
|
||||
region = "2.2.0"
|
||||
cranelift-codegen = { path = "../../cranelift/codegen", version = "0.71.0", features = ["enable-serde"] }
|
||||
cranelift-entity = { path = "../../cranelift/entity", version = "0.71.0", features = ["enable-serde"] }
|
||||
cranelift-wasm = { path = "../../cranelift/wasm", version = "0.71.0", features = ["enable-serde"] }
|
||||
|
||||
@@ -20,7 +20,7 @@ pub mod isa {
|
||||
}
|
||||
|
||||
pub mod entity {
|
||||
pub use cranelift_entity::{packed_option, BoxedSlice, EntityRef, PrimaryMap};
|
||||
pub use cranelift_entity::{packed_option, BoxedSlice, EntityRef, EntitySet, PrimaryMap};
|
||||
}
|
||||
|
||||
pub mod wasm {
|
||||
|
||||
@@ -6,24 +6,10 @@ use cranelift_codegen::ir;
 use cranelift_entity::{EntityRef, PrimaryMap};
 use cranelift_wasm::*;
 use indexmap::IndexMap;
-use more_asserts::assert_ge;
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
 use std::sync::Arc;

-/// A WebAssembly table initializer.
-#[derive(Clone, Debug, Hash, Serialize, Deserialize)]
-pub struct TableElements {
-/// The index of a table to initialize.
-pub table_index: TableIndex,
-/// Optionally, a global variable giving a base index.
-pub base: Option<GlobalIndex>,
-/// The offset to add to the base.
-pub offset: usize,
-/// The values to write into the table elements.
-pub elements: Box<[FuncIndex]>,
-}
-
 /// Implemenation styles for WebAssembly linear memory.
 #[derive(Debug, Clone, Hash, Serialize, Deserialize)]
 pub enum MemoryStyle {

@@ -42,10 +28,20 @@ impl MemoryStyle {
 // A heap with a maximum that doesn't exceed the static memory bound specified by the
 // tunables make it static.
 //
-// If the module doesn't declare an explicit maximum treat it as 4GiB.
-let maximum = memory.maximum.unwrap_or(WASM_MAX_PAGES);
-if maximum <= tunables.static_memory_bound {
-assert_ge!(tunables.static_memory_bound, memory.minimum);
+// If the module doesn't declare an explicit maximum treat it as 4GiB when not
+// requested to use the static memory bound itself as the maximum.
+let maximum = std::cmp::min(
+memory.maximum.unwrap_or(WASM_MAX_PAGES),
+if tunables.static_memory_bound_is_maximum {
+std::cmp::min(tunables.static_memory_bound, WASM_MAX_PAGES)
+} else {
+WASM_MAX_PAGES
+},
+);
+
+// Ensure the minimum is less than the maximum; the minimum might exceed the maximum
+// when the memory is artificially bounded via `static_memory_bound_is_maximum` above
+if memory.minimum <= maximum && maximum <= tunables.static_memory_bound {
 return (
 Self::Static {
 bound: tunables.static_memory_bound,
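A worked example of the new clamping: with `static_memory_bound_is_maximum` set, a memory with no declared maximum is bounded by the static reservation instead of the 4 GiB default. The standalone helper below restates the `std::cmp::min` logic from this hunk (values are in Wasm pages of 64 KiB; 65536 pages = 4 GiB):

```rust
const WASM_MAX_PAGES: u32 = 0x1_0000; // 65536 pages = 4 GiB

fn effective_maximum(
    declared_max: Option<u32>,
    static_memory_bound: u32,
    static_memory_bound_is_maximum: bool,
) -> u32 {
    std::cmp::min(
        declared_max.unwrap_or(WASM_MAX_PAGES),
        if static_memory_bound_is_maximum {
            std::cmp::min(static_memory_bound, WASM_MAX_PAGES)
        } else {
            WASM_MAX_PAGES
        },
    )
}

fn main() {
    // Unbounded memory under pooling-style tunables: clamped to the static bound.
    assert_eq!(effective_maximum(None, 160, true), 160);
    // Unbounded memory under default tunables: treated as 4 GiB.
    assert_eq!(effective_maximum(None, 160, false), WASM_MAX_PAGES);
}
```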
@@ -83,7 +79,157 @@ impl MemoryPlan {
 }
 }

-/// Implemenation styles for WebAssembly tables.
+/// A WebAssembly linear memory initializer.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct MemoryInitializer {
+/// The index of a linear memory to initialize.
+pub memory_index: MemoryIndex,
+/// Optionally, a global variable giving a base index.
+pub base: Option<GlobalIndex>,
+/// The offset to add to the base.
+pub offset: usize,
+/// The data to write into the linear memory.
+pub data: Box<[u8]>,
+}
+
+/// The type of WebAssembly linear memory initialization to use for a module.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum MemoryInitialization {
+/// Memory initialization is segmented.
+///
+/// Segmented initialization can be used for any module, but it is required if:
+///
+/// * A data segment referenced an imported memory.
+/// * A data segment uses a global base.
+///
+/// Segmented initialization is performed by processing the complete set of data segments
+/// when the module is instantiated.
+///
+/// This is the default memory initialization type.
+Segmented(Vec<MemoryInitializer>),
+/// Memory initialization is paged.
+///
+/// To be paged, the following requirements must be met:
+///
+/// * All data segments must reference defined memories.
+/// * All data segments must not use a global base.
+///
+/// Paged initialization is performed by copying (or mapping) entire WebAssembly pages to each linear memory.
+///
+/// The `uffd` feature makes use of this type of memory initialization because it can instruct the kernel
+/// to back an entire WebAssembly page from an existing set of in-memory pages.
+///
+/// By processing the data segments at module compilation time, the uffd fault handler doesn't have to do
+/// any work to point the kernel at the right linear memory page to use.
+Paged {
+/// The map of defined memory index to a list of initialization pages.
+/// The list of page data is sparse, with None representing a zero page.
+/// Each page of initialization data is WebAssembly page-sized (64 KiB).
+/// The size of the list will be the maximum page written to by a data segment.
+map: PrimaryMap<DefinedMemoryIndex, Vec<Option<Box<[u8]>>>>,
+/// Whether or not an out-of-bounds data segment was observed.
+/// This is used to fail module instantiation after the pages are initialized.
+out_of_bounds: bool,
+},
+}
+
+impl MemoryInitialization {
+/// Attempts to convert segmented memory initialization into paged initialization for the given module.
+///
+/// Returns `None` if the initialization cannot be paged or if it is already paged.
+pub fn to_paged(&self, module: &Module) -> Option<Self> {
+const WASM_PAGE_SIZE: usize = crate::WASM_PAGE_SIZE as usize;
+
+match self {
+Self::Paged { .. } => None,
+Self::Segmented(initializers) => {
+let num_defined_memories = module.memory_plans.len() - module.num_imported_memories;
+let mut out_of_bounds = false;
+let mut map = PrimaryMap::with_capacity(num_defined_memories);
+
+for _ in 0..num_defined_memories {
+map.push(Vec::new());
+}
+
+for initializer in initializers {
+match (
+module.defined_memory_index(initializer.memory_index),
+initializer.base.is_some(),
+) {
+(None, _) | (_, true) => {
+// If the initializer references an imported memory or uses a global base,
+// the complete set of segments will need to be processed at module instantiation
+return None;
+}
+(Some(index), false) => {
+if out_of_bounds {
+continue;
+}
+
+// Perform a bounds check on the segment
+// As this segment is referencing a defined memory without a global base, the last byte
+// written to by the segment cannot exceed the memory's initial minimum size
+if (initializer.offset + initializer.data.len())
+> ((module.memory_plans[initializer.memory_index].memory.minimum
+as usize)
+* WASM_PAGE_SIZE)
+{
+out_of_bounds = true;
+continue;
+}
+
+let pages = &mut map[index];
+let mut page_index = initializer.offset / WASM_PAGE_SIZE;
+let mut page_offset = initializer.offset % WASM_PAGE_SIZE;
+let mut data_offset = 0;
+let mut data_remaining = initializer.data.len();
+
+if data_remaining == 0 {
+continue;
+}
+
+// Copy the initialization data by each WebAssembly-sized page (64 KiB)
+loop {
+if page_index >= pages.len() {
+pages.resize(page_index + 1, None);
+}
+
+let page = pages[page_index].get_or_insert_with(|| {
+vec![0; WASM_PAGE_SIZE].into_boxed_slice()
+});
+let len =
+std::cmp::min(data_remaining, WASM_PAGE_SIZE - page_offset);
+
+page[page_offset..page_offset + len].copy_from_slice(
+&initializer.data[data_offset..(data_offset + len)],
+);
+
+if len == data_remaining {
+break;
+}
+
+page_index += 1;
+page_offset = 0;
+data_offset += len;
+data_remaining -= len;
+}
+}
+};
+}
+
+Some(Self::Paged { map, out_of_bounds })
+}
+}
+}
+}
+
+impl Default for MemoryInitialization {
+fn default() -> Self {
+Self::Segmented(Vec::new())
+}
+}
+
+/// Implementation styles for WebAssembly tables.
 #[derive(Debug, Clone, Hash, Serialize, Deserialize)]
 pub enum TableStyle {
 /// Signatures are stored in the table and checked in the caller.
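In use, the conversion is attempted once at compile time and silently falls back to segmented initialization when any segment targets an imported memory or uses a global base. A minimal sketch of a caller, assuming `Module` and its `memory_initialization` field are visible as in this diff:

```rust
use wasmtime_environ::Module;

/// Try to upgrade a translated module to paged initialization; if `to_paged`
/// returns `None`, the module keeps its segmented initializers.
fn upgrade_to_paged(module: &mut Module) {
    if let Some(paged) = module.memory_initialization.to_paged(module) {
        module.memory_initialization = paged;
    }
}
```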
@@ -115,6 +261,19 @@ impl TablePlan {
 }
 }

+/// A WebAssembly table initializer.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct TableInitializer {
+/// The index of a table to initialize.
+pub table_index: TableIndex,
+/// Optionally, a global variable giving a base index.
+pub base: Option<GlobalIndex>,
+/// The offset to add to the base.
+pub offset: usize,
+/// The values to write into the table elements.
+pub elements: Box<[FuncIndex]>,
+}
+
 /// Different types that can appear in a module.
 ///
 /// Note that each of these variants are intended to index further into a
@@ -155,16 +314,25 @@ pub struct Module {
 pub start_func: Option<FuncIndex>,

 /// WebAssembly table initializers.
-pub table_elements: Vec<TableElements>,
+pub table_initializers: Vec<TableInitializer>,
+
+/// WebAssembly linear memory initializer.
+pub memory_initialization: MemoryInitialization,

 /// WebAssembly passive elements.
-pub passive_elements: HashMap<ElemIndex, Box<[FuncIndex]>>,
+pub passive_elements: Vec<Box<[FuncIndex]>>,
+
+/// The map from passive element index (element segment index space) to index in `passive_elements`.
+pub passive_elements_map: HashMap<ElemIndex, usize>,

 /// WebAssembly passive data segments.
 #[serde(with = "passive_data_serde")]
-pub passive_data: HashMap<DataIndex, Arc<[u8]>>,
+pub passive_data: Vec<Arc<[u8]>>,

-/// WebAssembly table initializers.
+/// The map from passive data index (data segment index space) to index in `passive_data`.
+pub passive_data_map: HashMap<DataIndex, usize>,
+
+/// WebAssembly function names.
 pub func_names: HashMap<FuncIndex, String>,

 /// Types declared in the wasm module.
@@ -229,7 +397,7 @@ pub enum Initializer {
 export: String,
 },

-/// A module is being instantiated with previously configured intializers
+/// A module is being instantiated with previously configured initializers
 /// as arguments.
 Instantiate {
 /// The module that this instance is instantiating.
@@ -241,7 +409,7 @@ pub enum Initializer {

 /// A module is being created from a set of compiled artifacts.
 CreateModule {
-/// The index of the artifact that's being convereted into a module.
+/// The index of the artifact that's being converted into a module.
 artifact_index: usize,
 /// The list of artifacts that this module value will be inheriting.
 artifacts: Vec<usize>,
@@ -272,7 +440,8 @@ impl Module {

 /// Get the given passive element, if it exists.
 pub fn get_passive_element(&self, index: ElemIndex) -> Option<&[FuncIndex]> {
-self.passive_elements.get(&index).map(|es| &**es)
+let index = *self.passive_elements_map.get(&index)?;
+Some(self.passive_elements[index].as_ref())
 }

 /// Convert a `DefinedFuncIndex` into a `FuncIndex`.
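The new representation splits one `HashMap<ElemIndex, Box<[FuncIndex]>>` into dense storage plus a sparse index map, so a lookup becomes two steps: resolve the segment index to a dense slot, then index the `Vec`. A toy model of the same pattern, with `u32` standing in for the index types:

```rust
use std::collections::HashMap;

struct PassiveElements {
    elements: Vec<Box<[u32]>>, // dense storage (stand-in for Vec<Box<[FuncIndex]>>)
    map: HashMap<u32, usize>,  // element-segment index -> dense index
}

impl PassiveElements {
    fn get(&self, elem_index: u32) -> Option<&[u32]> {
        // Step 1: sparse index space -> dense slot; step 2: dense lookup.
        let index = *self.map.get(&elem_index)?;
        Some(self.elements[index].as_ref())
    }
}
```

One payoff of the dense `Vec` is that segment payloads can be serialized as a plain sequence (see the `passive_data_serde` rewrite below) instead of a map keyed by index type.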
@@ -419,47 +588,45 @@ pub struct InstanceSignature {
 }

 mod passive_data_serde {
-use super::{Arc, DataIndex, HashMap};
-use serde::{de::MapAccess, de::Visitor, ser::SerializeMap, Deserializer, Serializer};
+use super::Arc;
+use serde::{de::SeqAccess, de::Visitor, ser::SerializeSeq, Deserializer, Serializer};
 use std::fmt;

-pub(super) fn serialize<S>(
-data: &HashMap<DataIndex, Arc<[u8]>>,
-ser: S,
-) -> Result<S::Ok, S::Error>
+pub(super) fn serialize<S>(data: &Vec<Arc<[u8]>>, ser: S) -> Result<S::Ok, S::Error>
 where
 S: Serializer,
 {
-let mut map = ser.serialize_map(Some(data.len()))?;
-for (k, v) in data {
-map.serialize_entry(k, v.as_ref())?;
+let mut seq = ser.serialize_seq(Some(data.len()))?;
+for v in data {
+seq.serialize_element(v.as_ref())?;
 }
-map.end()
+seq.end()
 }

 struct PassiveDataVisitor;
 impl<'de> Visitor<'de> for PassiveDataVisitor {
-type Value = HashMap<DataIndex, Arc<[u8]>>;
+type Value = Vec<Arc<[u8]>>;

 fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-formatter.write_str("a passive_data map")
+formatter.write_str("a passive data sequence")
 }
-fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
+
+fn visit_seq<M>(self, mut access: M) -> Result<Self::Value, M::Error>
 where
-M: MapAccess<'de>,
+M: SeqAccess<'de>,
 {
-let mut map = HashMap::with_capacity(access.size_hint().unwrap_or(0));
-while let Some((key, value)) = access.next_entry::<_, Vec<u8>>()? {
-map.insert(key, value.into());
+let mut data = Vec::with_capacity(access.size_hint().unwrap_or(0));
+while let Some(value) = access.next_element::<Vec<u8>>()? {
+data.push(value.into());
 }
-Ok(map)
+Ok(data)
 }
 }

-pub(super) fn deserialize<'de, D>(de: D) -> Result<HashMap<DataIndex, Arc<[u8]>>, D::Error>
+pub(super) fn deserialize<'de, D>(de: D) -> Result<Vec<Arc<[u8]>>, D::Error>
 where
 D: Deserializer<'de>,
 {
-de.deserialize_map(PassiveDataVisitor)
+de.deserialize_seq(PassiveDataVisitor)
 }
 }
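For reference, this is how such a field module gets consumed: serde routes exactly one field through the module's `serialize`/`deserialize` functions. A custom module is needed here because the derived impls cannot handle `Arc<[u8]>` directly. A sketch assuming the `passive_data_serde` module above is in scope:

```rust
use serde::{Deserialize, Serialize};
use std::sync::Arc;

#[derive(Serialize, Deserialize)]
struct Example {
    // serde calls passive_data_serde::{serialize, deserialize} for this
    // field only; every other field uses the derived code path.
    #[serde(with = "passive_data_serde")]
    passive_data: Vec<Arc<[u8]>>,
}
```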
@@ -1,6 +1,6 @@
 use crate::module::{
-Initializer, InstanceSignature, MemoryPlan, Module, ModuleSignature, ModuleType, ModuleUpvar,
-TableElements, TablePlan, TypeTables,
+Initializer, InstanceSignature, MemoryInitialization, MemoryInitializer, MemoryPlan, Module,
+ModuleSignature, ModuleType, ModuleUpvar, TableInitializer, TablePlan, TypeTables,
 };
 use crate::tunables::Tunables;
 use cranelift_codegen::ir;

@@ -13,7 +13,6 @@ use cranelift_wasm::{
 ModuleIndex, ModuleTypeIndex, SignatureIndex, Table, TableIndex, TargetEnvironment, TypeIndex,
 WasmError, WasmFuncType, WasmResult,
 };
-use serde::{Deserialize, Serialize};
 use std::collections::{hash_map::Entry, HashMap};
 use std::convert::TryFrom;
 use std::mem;

@@ -60,9 +59,6 @@ pub struct ModuleTranslation<'data> {
 /// References to the function bodies.
 pub function_body_inputs: PrimaryMap<DefinedFuncIndex, FunctionBodyData<'data>>,

-/// References to the data initializers.
-pub data_initializers: Vec<DataInitializer<'data>>,
-
 /// DWARF debug information, if enabled, parsed from the module.
 pub debuginfo: DebugInfoData<'data>,

@@ -684,7 +680,7 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
 fn reserve_table_elements(&mut self, num: u32) -> WasmResult<()> {
 self.result
 .module
-.table_elements
+.table_initializers
 .reserve_exact(usize::try_from(num).unwrap());
 Ok(())
 }

@@ -696,12 +692,15 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
 offset: usize,
 elements: Box<[FuncIndex]>,
 ) -> WasmResult<()> {
-self.result.module.table_elements.push(TableElements {
-table_index,
-base,
-offset,
-elements,
-});
+self.result
+.module
+.table_initializers
+.push(TableInitializer {
+table_index,
+base,
+offset,
+elements,
+});
 Ok(())
 }

@@ -710,11 +709,13 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
 elem_index: ElemIndex,
 segments: Box<[FuncIndex]>,
 ) -> WasmResult<()> {
+let index = self.result.module.passive_elements.len();
+self.result.module.passive_elements.push(segments);
 let old = self
 .result
 .module
-.passive_elements
-.insert(elem_index, segments);
+.passive_elements_map
+.insert(elem_index, index);
 debug_assert!(
 old.is_none(),
 "should never get duplicate element indices, that would be a bug in `cranelift_wasm`'s \

@@ -758,9 +759,12 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
 }

 fn reserve_data_initializers(&mut self, num: u32) -> WasmResult<()> {
-self.result
-.data_initializers
-.reserve_exact(usize::try_from(num).unwrap());
+match &mut self.result.module.memory_initialization {
+MemoryInitialization::Segmented(initializers) => {
+initializers.reserve_exact(usize::try_from(num).unwrap())
+}
+_ => unreachable!(),
+}
 Ok(())
 }

@@ -771,28 +775,35 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
 offset: usize,
 data: &'data [u8],
 ) -> WasmResult<()> {
-self.result.data_initializers.push(DataInitializer {
-location: DataInitializerLocation {
-memory_index,
-base,
-offset,
-},
-data,
-});
+match &mut self.result.module.memory_initialization {
+MemoryInitialization::Segmented(initializers) => {
+initializers.push(MemoryInitializer {
+memory_index,
+base,
+offset,
+data: data.into(),
+});
+}
+_ => unreachable!(),
+}
 Ok(())
 }

-fn reserve_passive_data(&mut self, count: u32) -> WasmResult<()> {
-self.result.module.passive_data.reserve(count as usize);
+fn reserve_passive_data(&mut self, _count: u32) -> WasmResult<()> {
+// Note: the count passed in here is the *total* segment count
+// There is no way to reserve for just the passive segments as they are discovered when iterating the data section entries
+// Given that the total segment count might be much larger than the passive count, do not reserve
 Ok(())
 }

 fn declare_passive_data(&mut self, data_index: DataIndex, data: &'data [u8]) -> WasmResult<()> {
+let index = self.result.module.passive_data.len();
+self.result.module.passive_data.push(Arc::from(data));
 let old = self
 .result
 .module
-.passive_data
-.insert(data_index, Arc::from(data));
+.passive_data_map
+.insert(data_index, index);
 debug_assert!(
 old.is_none(),
 "a module can't have duplicate indices, this would be a cranelift-wasm bug"

@@ -1065,26 +1076,3 @@ pub fn translate_signature(mut sig: ir::Signature, pointer_type: ir::Type) -> ir
 sig.params.insert(1, AbiParam::new(pointer_type));
 sig
 }
-
-/// A memory index and offset within that memory where a data initialization
-/// should is to be performed.
-#[derive(Clone, Serialize, Deserialize)]
-pub struct DataInitializerLocation {
-/// The index of the memory to initialize.
-pub memory_index: MemoryIndex,
-
-/// Optionally a globalvar base to initialize at.
-pub base: Option<GlobalIndex>,
-
-/// A constant offset to initialize at.
-pub offset: usize,
-}
-
-/// A data initializer for linear memory.
-pub struct DataInitializer<'data> {
-/// The location where the initialization is to be performed.
-pub location: DataInitializerLocation,
-
-/// The initialization data.
-pub data: &'data [u8],
-}
@@ -27,6 +27,9 @@ pub struct Tunables {
 /// Whether or not fuel is enabled for generated code, meaning that fuel
 /// will be consumed every time a wasm instruction is executed.
 pub consume_fuel: bool,
+
+/// Whether or not to treat the static memory bound as the maximum for unbounded heaps.
+pub static_memory_bound_is_maximum: bool,
 }

 impl Default for Tunables {

@@ -62,6 +65,7 @@ impl Default for Tunables {
 parse_wasm_debuginfo: true,
 interruptable: false,
 consume_fuel: false,
+static_memory_bound_is_maximum: false,
 }
 }
 }
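A sketch of how a pooling-style embedder might set the new knob, assuming `Tunables` is used as in this diff (all other fields keep their defaults). Clamping every unbounded heap to the static bound is what gives each pooled instance slot a fixed worst-case memory footprint:

```rust
use wasmtime_environ::Tunables;

fn pooling_tunables() -> Tunables {
    Tunables {
        // Unbounded heaps are treated as if their maximum were the static
        // memory reservation, so MemoryStyle::compute picks a static style.
        static_memory_bound_is_maximum: true,
        ..Tunables::default()
    }
}
```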
@@ -50,6 +50,7 @@ fn align(offset: u32, width: u32) -> u32 {
|
||||
|
||||
/// This class computes offsets to fields within `VMContext` and other
|
||||
/// related structs that JIT code accesses directly.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct VMOffsets {
|
||||
/// The size in bytes of a pointer on the target.
|
||||
pub pointer_size: u8,
|
||||
|
||||
@@ -51,6 +51,27 @@ impl<'a, Resume, Yield, Return> Fiber<'a, Resume, Yield, Return> {
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates a new fiber with existing stack space that will execute `func`.
|
||||
///
|
||||
/// This function returns a `Fiber` which, when resumed, will execute `func`
|
||||
/// to completion. When desired the `func` can suspend itself via
|
||||
/// `Fiber::suspend`.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The caller must properly allocate the stack space with a guard page and
|
||||
/// make the pages accessible for correct behavior.
|
||||
pub unsafe fn new_with_stack(
|
||||
top_of_stack: *mut u8,
|
||||
func: impl FnOnce(Resume, &Suspend<Resume, Yield, Return>) -> Return + 'a,
|
||||
) -> io::Result<Fiber<'a, Resume, Yield, Return>> {
|
||||
Ok(Fiber {
|
||||
inner: imp::Fiber::new_with_stack(top_of_stack, func)?,
|
||||
done: Cell::new(false),
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
/// Resumes execution of this fiber.
|
||||
///
|
||||
/// This function will transfer execution to the fiber and resume from where
|
||||
|
||||
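A hypothetical caller of `new_with_stack`: the embedder (here, the pooling allocator) owns the stack allocation and passes a pointer to its top; per the safety contract above, the guard page below the stack is the caller's job. A minimal sketch, assuming the signatures declared in this hunk:

```rust
use wasmtime_fiber::Fiber;

fn run_on_stack(top_of_stack: *mut u8) -> std::io::Result<()> {
    // Safety: the caller guarantees `top_of_stack` points at the top of a
    // mapped stack with an inaccessible guard page beneath it.
    let fiber: Fiber<'_, (), (), ()> =
        unsafe { Fiber::new_with_stack(top_of_stack, |(), _suspend| {}) }?;
    let _ = fiber.resume(()); // runs the closure on the caller-provided stack
    Ok(())
}
```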
@@ -35,10 +35,10 @@ use std::io;
 use std::ptr;

 pub struct Fiber {
-// Description of the mmap region we own. This should be abstracted
-// eventually so we aren't personally mmap-ing this region.
-mmap: *mut libc::c_void,
-mmap_len: usize,
+// The top of the stack; for stacks allocated by the fiber implementation itself,
+// the base address of the allocation will be `top_of_stack.sub(alloc_len.unwrap())`
+top_of_stack: *mut u8,
+alloc_len: Option<usize>,
 }

 pub struct Suspend {

@@ -66,21 +66,40 @@ where
 }

 impl Fiber {
-pub fn new<F, A, B, C>(stack_size: usize, func: F) -> io::Result<Fiber>
+pub fn new<F, A, B, C>(stack_size: usize, func: F) -> io::Result<Self>
 where
 F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
 {
+let fiber = Self::alloc_with_stack(stack_size)?;
+fiber.init(func);
+Ok(fiber)
+}
+
+pub fn new_with_stack<F, A, B, C>(top_of_stack: *mut u8, func: F) -> io::Result<Self>
+where
+F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
+{
+let fiber = Self {
+top_of_stack,
+alloc_len: None,
+};
+
+fiber.init(func);
+
+Ok(fiber)
+}
+
+fn init<F, A, B, C>(&self, func: F)
+where
+F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
+{
-let fiber = Fiber::alloc_with_stack(stack_size)?;
 unsafe {
+// Initialize the top of the stack to be resumed from
-let top_of_stack = fiber.top_of_stack();
 let data = Box::into_raw(Box::new(func)).cast();
-wasmtime_fiber_init(top_of_stack, fiber_start::<F, A, B, C>, data);
-Ok(fiber)
+wasmtime_fiber_init(self.top_of_stack, fiber_start::<F, A, B, C>, data);
 }
 }

-fn alloc_with_stack(stack_size: usize) -> io::Result<Fiber> {
+fn alloc_with_stack(stack_size: usize) -> io::Result<Self> {
 unsafe {
 // Round up our stack size request to the nearest multiple of the
 // page size.

@@ -104,7 +123,10 @@ impl Fiber {
 if mmap == libc::MAP_FAILED {
 return Err(io::Error::last_os_error());
 }
-let ret = Fiber { mmap, mmap_len };
+let ret = Self {
+top_of_stack: mmap.cast::<u8>().add(mmap_len),
+alloc_len: Some(mmap_len),
+};
 let res = libc::mprotect(
 mmap.cast::<u8>().add(page_size).cast(),
 stack_size,

@@ -124,27 +146,24 @@ impl Fiber {
 // stack, otherwise known as our reserved slot for this information.
 //
 // In the diagram above this is updating address 0xAff8
-let top_of_stack = self.top_of_stack();
-let addr = top_of_stack.cast::<usize>().offset(-1);
+let addr = self.top_of_stack.cast::<usize>().offset(-1);
 addr.write(result as *const _ as usize);

-wasmtime_fiber_switch(top_of_stack);
+wasmtime_fiber_switch(self.top_of_stack);

 // null this out to help catch use-after-free
 addr.write(0);
 }
 }
-
-unsafe fn top_of_stack(&self) -> *mut u8 {
-self.mmap.cast::<u8>().add(self.mmap_len)
-}
 }

 impl Drop for Fiber {
 fn drop(&mut self) {
 unsafe {
-let ret = libc::munmap(self.mmap, self.mmap_len);
-debug_assert!(ret == 0);
+if let Some(alloc_len) = self.alloc_len {
+let ret = libc::munmap(self.top_of_stack.sub(alloc_len) as _, alloc_len);
+debug_assert!(ret == 0);
+}
 }
 }
 }
@@ -3,6 +3,7 @@ use std::cell::Cell;
 use std::io;
 use std::ptr;
 use winapi::shared::minwindef::*;
+use winapi::shared::winerror::ERROR_NOT_SUPPORTED;
 use winapi::um::fibersapi::*;
 use winapi::um::winbase::*;

@@ -40,7 +41,7 @@ where
 }

 impl Fiber {
-pub fn new<F, A, B, C>(stack_size: usize, func: F) -> io::Result<Fiber>
+pub fn new<F, A, B, C>(stack_size: usize, func: F) -> io::Result<Self>
 where
 F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
 {

@@ -61,11 +62,18 @@ impl Fiber {
 drop(Box::from_raw(state.initial_closure.get().cast::<F>()));
 Err(io::Error::last_os_error())
 } else {
-Ok(Fiber { fiber, state })
+Ok(Self { fiber, state })
 }
 }
 }

+pub fn new_with_stack<F, A, B, C>(_top_of_stack: *mut u8, _func: F) -> io::Result<Self>
+where
+F: FnOnce(A, &super::Suspend<A, B, C>) -> C,
+{
+Err(io::Error::from_raw_os_error(ERROR_NOT_SUPPORTED as i32))
+}
+
 pub(crate) fn resume<A, B, C>(&self, result: &Cell<RunResult<A, B, C>>) {
 unsafe {
 let is_fiber = IsThreadAFiber() != 0;
@@ -25,7 +25,7 @@ wasmtime-debug = { path = "../debug", version = "0.24.0" }
|
||||
wasmtime-profiling = { path = "../profiling", version = "0.24.0" }
|
||||
wasmtime-obj = { path = "../obj", version = "0.24.0" }
|
||||
rayon = { version = "1.0", optional = true }
|
||||
region = "2.1.0"
|
||||
region = "2.2.0"
|
||||
thiserror = "1.0.4"
|
||||
target-lexicon = { version = "0.11.0", default-features = false }
|
||||
wasmparser = "0.76"
|
||||
|
||||
@@ -25,7 +25,7 @@ struct CodeMemoryEntry {
|
||||
|
||||
impl CodeMemoryEntry {
|
||||
fn with_capacity(cap: usize) -> Result<Self, String> {
|
||||
let mmap = ManuallyDrop::new(Mmap::with_at_least(cap)?);
|
||||
let mmap = ManuallyDrop::new(Mmap::with_at_least(cap).map_err(|e| e.to_string())?);
|
||||
let registry = ManuallyDrop::new(UnwindRegistry::new(mmap.as_ptr() as usize));
|
||||
Ok(Self {
|
||||
mmap,
|
||||
|
||||
@@ -11,7 +11,6 @@ use object::File as ObjectFile;
|
||||
#[cfg(feature = "parallel-compilation")]
|
||||
use rayon::prelude::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::any::Any;
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
use thiserror::Error;
|
||||
@@ -22,16 +21,11 @@ use wasmtime_environ::wasm::{
|
||||
DefinedFuncIndex, InstanceTypeIndex, ModuleTypeIndex, SignatureIndex, WasmFuncType,
|
||||
};
|
||||
use wasmtime_environ::{
|
||||
CompileError, DataInitializer, DataInitializerLocation, DebugInfoData, FunctionAddressMap,
|
||||
InstanceSignature, Module, ModuleEnvironment, ModuleSignature, ModuleTranslation,
|
||||
StackMapInformation, TrapInformation,
|
||||
CompileError, DebugInfoData, FunctionAddressMap, InstanceSignature, Module, ModuleEnvironment,
|
||||
ModuleSignature, ModuleTranslation, StackMapInformation, TrapInformation,
|
||||
};
|
||||
use wasmtime_profiling::ProfilingAgent;
|
||||
use wasmtime_runtime::{
|
||||
GdbJitImageRegistration, Imports, InstanceHandle, InstantiationError, RuntimeMemoryCreator,
|
||||
StackMapRegistry, VMExternRefActivationsTable, VMFunctionBody, VMInterrupts,
|
||||
VMSharedSignatureIndex, VMTrampoline,
|
||||
};
|
||||
use wasmtime_runtime::{GdbJitImageRegistration, InstantiationError, VMFunctionBody, VMTrampoline};
|
||||
|
||||
/// An error condition while setting up a wasm instance, be it validation,
|
||||
/// compilation, or instantiation.
|
||||
@@ -59,7 +53,8 @@ pub enum SetupError {
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct CompilationArtifacts {
|
||||
/// Module metadata.
|
||||
module: Module,
|
||||
#[serde(with = "arc_serde")]
|
||||
module: Arc<Module>,
|
||||
|
||||
/// ELF image with functions code.
|
||||
obj: Box<[u8]>,
|
||||
@@ -67,9 +62,6 @@ pub struct CompilationArtifacts {
|
||||
/// Unwind information for function code.
|
||||
unwind_info: Box<[ObjectUnwindInfo]>,
|
||||
|
||||
/// Data initiailizers.
|
||||
data_initializers: Box<[OwnedDataInitializer]>,
|
||||
|
||||
/// Descriptions of compiled functions
|
||||
funcs: PrimaryMap<DefinedFuncIndex, FunctionInfo>,
|
||||
|
||||
@@ -102,9 +94,14 @@ struct DebugInfo {
|
||||
|
||||
impl CompilationArtifacts {
|
||||
/// Creates a `CompilationArtifacts` for a singular translated wasm module.
|
||||
///
|
||||
/// The `use_paged_init` argument controls whether or not an attempt is made to
|
||||
/// organize linear memory initialization data as entire pages or to leave
|
||||
/// the memory initialization data as individual segments.
|
||||
pub fn build(
|
||||
compiler: &Compiler,
|
||||
data: &[u8],
|
||||
use_paged_mem_init: bool,
|
||||
) -> Result<(usize, Vec<CompilationArtifacts>, TypeTables), SetupError> {
|
||||
let (main_module, translations, types) = ModuleEnvironment::new(
|
||||
compiler.frontend_config(),
|
||||
@@ -123,30 +120,28 @@ impl CompilationArtifacts {
|
||||
} = compiler.compile(&mut translation, &types)?;
|
||||
|
||||
let ModuleTranslation {
|
||||
module,
|
||||
data_initializers,
|
||||
mut module,
|
||||
debuginfo,
|
||||
has_unparsed_debuginfo,
|
||||
..
|
||||
} = translation;
|
||||
|
||||
let data_initializers = data_initializers
|
||||
.into_iter()
|
||||
.map(OwnedDataInitializer::new)
|
||||
.collect::<Vec<_>>()
|
||||
.into_boxed_slice();
|
||||
if use_paged_mem_init {
|
||||
if let Some(init) = module.memory_initialization.to_paged(&module) {
|
||||
module.memory_initialization = init;
|
||||
}
|
||||
}
|
||||
|
||||
let obj = obj.write().map_err(|_| {
|
||||
SetupError::Instantiate(InstantiationError::Resource(
|
||||
"failed to create image memory".to_string(),
|
||||
))
|
||||
SetupError::Instantiate(InstantiationError::Resource(anyhow::anyhow!(
|
||||
"failed to create image memory"
|
||||
)))
|
||||
})?;
|
||||
|
||||
Ok(CompilationArtifacts {
|
||||
module,
|
||||
module: Arc::new(module),
|
||||
obj: obj.into_boxed_slice(),
|
||||
unwind_info: unwind_info.into_boxed_slice(),
|
||||
data_initializers,
|
||||
funcs: funcs
|
||||
.into_iter()
|
||||
.map(|(_, func)| FunctionInfo {
|
||||
@@ -208,7 +203,6 @@ pub struct ModuleCode {
|
||||
/// A compiled wasm module, ready to be instantiated.
|
||||
pub struct CompiledModule {
|
||||
artifacts: CompilationArtifacts,
|
||||
module: Arc<Module>,
|
||||
code: Arc<ModuleCode>,
|
||||
finished_functions: FinishedFunctions,
|
||||
trampolines: PrimaryMap<SignatureIndex, VMTrampoline>,
|
||||
@@ -242,7 +236,7 @@ impl CompiledModule {
|
||||
&artifacts.unwind_info,
|
||||
)
|
||||
.map_err(|message| {
|
||||
SetupError::Instantiate(InstantiationError::Resource(format!(
|
||||
SetupError::Instantiate(InstantiationError::Resource(anyhow::anyhow!(
|
||||
"failed to build code memory for functions: {}",
|
||||
message
|
||||
)))
|
||||
@@ -267,7 +261,6 @@ impl CompiledModule {
|
||||
let finished_functions = FinishedFunctions(finished_functions);
|
||||
|
||||
Ok(Arc::new(Self {
|
||||
module: Arc::new(artifacts.module.clone()),
|
||||
artifacts,
|
||||
code: Arc::new(ModuleCode {
|
||||
code_memory,
|
||||
@@ -278,62 +271,19 @@ impl CompiledModule {
|
||||
}))
|
||||
}
|
||||
|
||||
/// Crate an `Instance` from this `CompiledModule`.
|
||||
///
|
||||
/// Note that if only one instance of this module is needed, it may be more
|
||||
/// efficient to call the top-level `instantiate`, since that avoids copying
|
||||
/// the data initializers.
|
||||
///
|
||||
/// # Unsafety
|
||||
///
|
||||
/// See `InstanceHandle::new`
|
||||
pub unsafe fn instantiate(
|
||||
&self,
|
||||
imports: Imports<'_>,
|
||||
lookup_shared_signature: &dyn Fn(SignatureIndex) -> VMSharedSignatureIndex,
|
||||
mem_creator: Option<&dyn RuntimeMemoryCreator>,
|
||||
interrupts: *const VMInterrupts,
|
||||
host_state: Box<dyn Any>,
|
||||
externref_activations_table: *mut VMExternRefActivationsTable,
|
||||
stack_map_registry: *mut StackMapRegistry,
|
||||
) -> Result<InstanceHandle, InstantiationError> {
|
||||
InstanceHandle::new(
|
||||
self.module.clone(),
|
||||
&self.finished_functions.0,
|
||||
imports,
|
||||
mem_creator,
|
||||
lookup_shared_signature,
|
||||
host_state,
|
||||
interrupts,
|
||||
externref_activations_table,
|
||||
stack_map_registry,
|
||||
)
|
||||
}
|
||||
/// Extracts `CompilationArtifacts` from the compiled module.
|
||||
pub fn compilation_artifacts(&self) -> &CompilationArtifacts {
|
||||
&self.artifacts
|
||||
}
|
||||
|
||||
/// Returns data initializers to pass to `InstanceHandle::initialize`
|
||||
pub fn data_initializers(&self) -> Vec<DataInitializer<'_>> {
|
||||
self.artifacts
|
||||
.data_initializers
|
||||
.iter()
|
||||
.map(|init| DataInitializer {
|
||||
location: init.location.clone(),
|
||||
data: &*init.data,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Return a reference-counting pointer to a module.
|
||||
pub fn module(&self) -> &Arc<Module> {
|
||||
&self.module
|
||||
&self.artifacts.module
|
||||
}
|
||||
|
||||
/// Return a reference to a mutable module (if possible).
|
||||
pub fn module_mut(&mut self) -> Option<&mut Module> {
|
||||
Arc::get_mut(&mut self.module)
|
||||
Arc::get_mut(&mut self.artifacts.module)
|
||||
}
|
||||
|
||||
/// Returns the map of all finished JIT functions compiled for this module
|
||||
@@ -470,26 +420,6 @@ impl SymbolizeContext {
|
||||
}
|
||||
}
|
||||
|
||||
/// Similar to `DataInitializer`, but owns its own copy of the data rather
|
||||
/// than holding a slice of the original module.
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct OwnedDataInitializer {
|
||||
/// The location where the initialization is to be performed.
|
||||
location: DataInitializerLocation,
|
||||
|
||||
/// The initialization data.
|
||||
data: Box<[u8]>,
|
||||
}
|
||||
|
||||
impl OwnedDataInitializer {
|
||||
fn new(borrowed: DataInitializer<'_>) -> Self {
|
||||
Self {
|
||||
location: borrowed.location.clone(),
|
||||
data: borrowed.data.to_vec().into_boxed_slice(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn create_dbg_image(
|
||||
obj: Vec<u8>,
|
||||
code_range: (*const u8, usize),
|
||||
@@ -586,3 +516,24 @@ impl From<DebugInfoData<'_>> for DebugInfo {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mod arc_serde {
|
||||
use super::Arc;
|
||||
use serde::{de::Deserialize, ser::Serialize, Deserializer, Serializer};
|
||||
|
||||
pub(super) fn serialize<S, T>(arc: &Arc<T>, ser: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
T: Serialize,
|
||||
{
|
||||
(**arc).serialize(ser)
|
||||
}
|
||||
|
||||
pub(super) fn deserialize<'de, D, T>(de: D) -> Result<Arc<T>, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
T: Deserialize<'de>,
|
||||
{
|
||||
Ok(Arc::new(T::deserialize(de)?))
|
||||
}
|
||||
}
|
||||
|
||||
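The `arc_serde` trick in isolation: serialize through the `Arc`, then rebuild a fresh `Arc` on deserialize. Note that sharing is not preserved across a round trip (each deserialized field gets its own allocation), which is fine here because the `Arc` only exists so `CompiledModule` can hand out cheap clones. A sketch assuming the module above is in scope:

```rust
#[derive(serde::Serialize, serde::Deserialize)]
struct Wrapper {
    // Serialized as a plain String; deserialization allocates a new Arc.
    #[serde(with = "arc_serde")]
    inner: std::sync::Arc<String>,
}
```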
@@ -38,7 +38,9 @@ pub fn make_trampoline(
 assert!(compiled_function.relocations.is_empty());
 let ptr = code_memory
 .allocate_for_function(&compiled_function)
-.map_err(|message| SetupError::Instantiate(InstantiationError::Resource(message)))?
+.map_err(|message| {
+SetupError::Instantiate(InstantiationError::Resource(anyhow::anyhow!(message)))
+})?
 .as_ptr();
 Ok(unsafe { std::mem::transmute::<*const VMFunctionBody, VMTrampoline>(ptr) })
 }

@@ -1,12 +1,12 @@
 use anyhow::Result;
 use object::write::{Object, StandardSection, Symbol, SymbolSection};
 use object::{SymbolFlags, SymbolKind, SymbolScope};
-use wasmtime_environ::DataInitializer;
+use wasmtime_environ::MemoryInitializer;

 /// Declares data segment symbol
 pub fn declare_data_segment(
 obj: &mut Object,
-_data_initaliazer: &DataInitializer,
+_memory_initializer: &MemoryInitializer,
 index: usize,
 ) -> Result<()> {
 let name = format!("_memory_{}", index);

@@ -26,12 +26,12 @@ pub fn declare_data_segment(
 /// Emit segment data and initialization location
 pub fn emit_data_segment(
 obj: &mut Object,
-data_initaliazer: &DataInitializer,
+memory_initializer: &MemoryInitializer,
 index: usize,
 ) -> Result<()> {
 let name = format!("_memory_{}", index);
 let symbol_id = obj.symbol_id(name.as_bytes()).unwrap();
 let section_id = obj.section_id(StandardSection::Data);
-obj.add_symbol_data(symbol_id, section_id, data_initaliazer.data, 1);
+obj.add_symbol_data(symbol_id, section_id, &memory_initializer.data, 1);
 Ok(())
 }

@@ -7,7 +7,7 @@ use object::write::{Object, Relocation, StandardSection, Symbol, SymbolSection};
 use object::{RelocationEncoding, RelocationKind, SymbolFlags, SymbolKind, SymbolScope};
 use wasmtime_debug::DwarfSection;
 use wasmtime_environ::isa::TargetFrontendConfig;
-use wasmtime_environ::{CompiledFunctions, DataInitializer, Module};
+use wasmtime_environ::{CompiledFunctions, MemoryInitialization, Module};

 fn emit_vmcontext_init(
 obj: &mut Object,

@@ -54,24 +54,32 @@ pub fn emit_module(
 target_config: &TargetFrontendConfig,
 compilation: CompiledFunctions,
 dwarf_sections: Vec<DwarfSection>,
-data_initializers: &[DataInitializer],
 ) -> Result<Object> {
 let mut builder = ObjectBuilder::new(target, module, &compilation);
 builder.set_dwarf_sections(dwarf_sections);
 let mut obj = builder.build()?;

 // Append data, table and vmcontext_init code to the object file.

-for (i, initializer) in data_initializers.iter().enumerate() {
-declare_data_segment(&mut obj, initializer, i)?;
+match &module.memory_initialization {
+MemoryInitialization::Segmented(initializers) => {
+for (i, initializer) in initializers.iter().enumerate() {
+declare_data_segment(&mut obj, initializer, i)?;
+}
+}
+_ => unimplemented!(),
 }

 for i in 0..module.table_plans.len() {
 declare_table(&mut obj, i)?;
 }

-for (i, initializer) in data_initializers.iter().enumerate() {
-emit_data_segment(&mut obj, initializer, i)?;
+match &module.memory_initialization {
+MemoryInitialization::Segmented(initializers) => {
+for (i, initializer) in initializers.iter().enumerate() {
+emit_data_segment(&mut obj, initializer, i)?;
+}
+}
+_ => unimplemented!(),
 }

 for i in 0..module.table_plans.len() {

@@ -24,12 +24,23 @@ cfg-if = "1.0"
 backtrace = "0.3.55"
 lazy_static = "1.3.0"
 psm = "0.1.11"
+rand = "0.7.3"
+anyhow = "1.0.38"

 [target.'cfg(target_os = "windows")'.dependencies]
 winapi = { version = "0.3.7", features = ["winbase", "memoryapi", "errhandlingapi"] }

+[target.'cfg(target_os = "linux")'.dependencies]
+userfaultfd = { version = "0.3.0", optional = true }
+
 [build-dependencies]
 cc = "1.0"

 [badges]
 maintenance = { status = "actively-developed" }
+
+[features]
+default = []
+
+# Enables support for userfaultfd in the pooling allocator when building on Linux
+uffd = ["userfaultfd"]
@@ -351,8 +351,35 @@ impl VMExternRef {
 ptr
 }

+/// Consume this `VMExternRef` into a raw, untyped pointer.
+///
+/// # Safety
+///
+/// This method forgets self, so it is possible to create a leak of the
+/// underlying reference counted data if not used carefully.
+///
+/// Use `from_raw` to recreate the `VMExternRef`.
+pub unsafe fn into_raw(self) -> *mut u8 {
+let ptr = self.0.cast::<u8>().as_ptr();
+std::mem::forget(self);
+ptr
+}
+
 /// Recreate a `VMExternRef` from a pointer returned from a previous call to
-/// `VMExternRef::as_raw`.
+/// `as_raw`.
 ///
 /// # Safety
 ///
+/// Unlike `clone_from_raw`, this does not increment the reference count of the
+/// underlying data. It is not safe to continue to use the pointer passed to this
+/// function.
+pub unsafe fn from_raw(ptr: *mut u8) -> Self {
+debug_assert!(!ptr.is_null());
+VMExternRef(NonNull::new_unchecked(ptr).cast())
+}
+
+/// Recreate a `VMExternRef` from a pointer returned from a previous call to
+/// `as_raw`.
+///
+/// # Safety
+///
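The intended ownership dance, per the new docs: `into_raw` forgets the reference without touching the count, and `from_raw` reclaims it without incrementing, so exactly one side of the boundary owns the reference at any time. A sketch (the `VMExternRef::new` constructor is assumed; the exact constructor may differ):

```rust
let x = VMExternRef::new("hello".to_string());
let raw = unsafe { x.into_raw() }; // x is forgotten; count unchanged
// ... hand `raw` through JIT code or a table slot ...
let x = unsafe { VMExternRef::from_raw(raw) }; // ownership reclaimed, no increment
drop(x); // the count is finally decremented here
```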
@@ -4,12 +4,11 @@

use crate::export::Export;
use crate::externref::{StackMapRegistry, VMExternRefActivationsTable};
use crate::imports::Imports;
use crate::memory::{DefaultMemoryCreator, RuntimeLinearMemory, RuntimeMemoryCreator};
use crate::memory::{Memory, RuntimeMemoryCreator};
use crate::table::{Table, TableElement};
use crate::traphandlers::Trap;
use crate::vmcontext::{
    VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport,
    VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionImport,
    VMGlobalDefinition, VMGlobalImport, VMInterrupts, VMMemoryDefinition, VMMemoryImport,
    VMSharedSignatureIndex, VMTableDefinition, VMTableImport,
};
@@ -17,23 +16,26 @@ use crate::{ExportFunction, ExportGlobal, ExportMemory, ExportTable};
use indexmap::IndexMap;
use memoffset::offset_of;
use more_asserts::assert_lt;
use std::alloc::{self, Layout};
use std::alloc::Layout;
use std::any::Any;
use std::cell::RefCell;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::hash::Hash;
use std::ptr::NonNull;
use std::rc::Rc;
use std::sync::Arc;
use std::{mem, ptr, slice};
use thiserror::Error;
use wasmtime_environ::entity::{packed_option::ReservedValue, BoxedSlice, EntityRef, PrimaryMap};
use wasmtime_environ::entity::{packed_option::ReservedValue, EntityRef, EntitySet, PrimaryMap};
use wasmtime_environ::wasm::{
    DataIndex, DefinedFuncIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex,
    ElemIndex, EntityIndex, FuncIndex, GlobalIndex, GlobalInit, MemoryIndex, SignatureIndex,
    TableElementType, TableIndex, WasmType,
    DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, ElemIndex, EntityIndex,
    FuncIndex, GlobalIndex, MemoryIndex, TableElementType, TableIndex,
};
use wasmtime_environ::{ir, DataInitializer, Module, ModuleType, TableElements, VMOffsets};
use wasmtime_environ::{ir, Module, VMOffsets};

mod allocator;

pub use allocator::*;

/// Runtime representation of an instance value, which erases all `Instance`
/// information since instances are just a collection of values.
@@ -51,19 +53,18 @@ pub(crate) struct Instance {
    offsets: VMOffsets,

    /// WebAssembly linear memory data.
    memories: BoxedSlice<DefinedMemoryIndex, Box<dyn RuntimeLinearMemory>>,
    memories: PrimaryMap<DefinedMemoryIndex, Memory>,

    /// WebAssembly table data.
    tables: BoxedSlice<DefinedTableIndex, Table>,
    tables: PrimaryMap<DefinedTableIndex, Table>,

    /// Passive elements in this instantiation. As `elem.drop`s happen, these
    /// entries get removed. A missing entry is considered equivalent to an
    /// empty slice.
    passive_elements: RefCell<HashMap<ElemIndex, Box<[*mut VMCallerCheckedAnyfunc]>>>,
    /// Stores the dropped passive element segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_elements: RefCell<EntitySet<ElemIndex>>,

    /// Passive data segments from our module. As `data.drop`s happen, entries
    /// get removed. A missing entry is considered equivalent to an empty slice.
    passive_data: RefCell<HashMap<DataIndex, Arc<[u8]>>>,
    /// Stores the dropped passive data segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_data: RefCell<EntitySet<DataIndex>>,

    /// Hosts can store arbitrary per-instance information here.
    host_state: Box<dyn Any>,
@@ -533,6 +534,22 @@ impl Instance {
        self.vmctx_plus_offset(self.offsets.vmctx_anyfunc(index))
    }

    fn find_passive_segment<'a, I, D, T>(
        index: I,
        index_map: &HashMap<I, usize>,
        data: &'a Vec<D>,
        dropped: &RefCell<EntitySet<I>>,
    ) -> &'a [T]
    where
        D: AsRef<[T]>,
        I: EntityRef + Hash,
    {
        match index_map.get(&index) {
            Some(index) if !dropped.borrow().contains(I::new(*index)) => data[*index].as_ref(),
            _ => &[],
        }
    }

    /// The `table.init` operation: initializes a portion of a table with a
    /// passive element.
    ///
@@ -551,15 +568,17 @@ impl Instance {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init

        let table = self.get_table(table_index);
        let passive_elements = self.passive_elements.borrow();
        let elem = passive_elements
            .get(&elem_index)
            .map(|e| &**e)
            .unwrap_or_else(|| &[]);

        let elements = Self::find_passive_segment(
            elem_index,
            &self.module.passive_elements_map,
            &self.module.passive_elements,
            &self.dropped_elements,
        );

        if src
            .checked_add(len)
            .map_or(true, |n| n as usize > elem.len())
            .map_or(true, |n| n as usize > elements.len())
            || dst.checked_add(len).map_or(true, |m| m > table.size())
        {
            return Err(Trap::wasm(ir::TrapCode::TableOutOfBounds));
@@ -567,8 +586,14 @@ impl Instance {

        // TODO(#983): investigate replacing this get/set loop with a `memcpy`.
        for (dst, src) in (dst..dst + len).zip(src..src + len) {
            let elem = self
                .get_caller_checked_anyfunc(elements[src as usize])
                .map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| {
                    f as *const VMCallerCheckedAnyfunc as *mut _
                });

            table
                .set(dst, TableElement::FuncRef(elem[src as usize]))
                .set(dst, TableElement::FuncRef(elem))
                .expect("should never panic because we already did the bounds check above");
        }

@@ -579,10 +604,14 @@ impl Instance {
    pub(crate) fn elem_drop(&self, elem_index: ElemIndex) {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop

        let mut passive_elements = self.passive_elements.borrow_mut();
        passive_elements.remove(&elem_index);
        // Note that we don't check that we actually removed an element because
        // dropping a non-passive element is a no-op (not a trap).
        if let Some(index) = self.module.passive_elements_map.get(&elem_index) {
            self.dropped_elements
                .borrow_mut()
                .insert(ElemIndex::new(*index));
        }

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).
    }

    /// Do a `memory.copy`
@@ -701,10 +730,13 @@ impl Instance {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init

        let memory = self.get_memory(memory_index);
        let passive_data = self.passive_data.borrow();
        let data = passive_data
            .get(&data_index)
            .map_or(&[][..], |data| &**data);

        let data = Self::find_passive_segment(
            data_index,
            &self.module.passive_data_map,
            &self.module.passive_data,
            &self.dropped_data,
        );

        if src
            .checked_add(len)
@@ -729,8 +761,14 @@ impl Instance {

    /// Drop the given data segment, truncating its length to zero.
    pub(crate) fn data_drop(&self, data_index: DataIndex) {
        let mut passive_data = self.passive_data.borrow_mut();
        passive_data.remove(&data_index);
        if let Some(index) = self.module.passive_data_map.get(&data_index) {
            self.dropped_data
                .borrow_mut()
                .insert(DataIndex::new(*index));
        }

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).
    }

    /// Get a table by index regardless of whether it is locally-defined or an
@@ -780,197 +818,8 @@ pub struct InstanceHandle {
}

impl InstanceHandle {
    /// Create a new `InstanceHandle` pointing at a new `Instance`.
    ///
    /// # Unsafety
    ///
    /// This method is not necessarily inherently unsafe to call, but in general
    /// the APIs of an `Instance` are quite unsafe and have not been really
    /// audited for safety that much. As a result the unsafety here on this
    /// method is a low-overhead way of saying "this is an extremely unsafe type
    /// to work with".
    ///
    /// Extreme care must be taken when working with `InstanceHandle` and it's
    /// recommended to have relatively intimate knowledge of how it works
    /// internally if you'd like to do so. If possible it's recommended to use
    /// the `wasmtime` crate API rather than this type since that is vetted for
    /// safety.
    ///
    /// It is your responsibility to ensure that the given raw
    /// `externref_activations_table` and `stack_map_registry` outlive this
    /// instance.
    pub unsafe fn new(
        module: Arc<Module>,
        finished_functions: &PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,
        imports: Imports,
        mem_creator: Option<&dyn RuntimeMemoryCreator>,
        lookup_shared_signature: &dyn Fn(SignatureIndex) -> VMSharedSignatureIndex,
        host_state: Box<dyn Any>,
        interrupts: *const VMInterrupts,
        externref_activations_table: *mut VMExternRefActivationsTable,
        stack_map_registry: *mut StackMapRegistry,
    ) -> Result<Self, InstantiationError> {
        debug_assert!(!externref_activations_table.is_null());
        debug_assert!(!stack_map_registry.is_null());

        let tables = create_tables(&module);
        let memories = create_memories(&module, mem_creator.unwrap_or(&DefaultMemoryCreator {}))?;

        let vmctx_tables = tables
            .values()
            .map(Table::vmtable)
            .collect::<PrimaryMap<DefinedTableIndex, _>>()
            .into_boxed_slice();

        let vmctx_memories = memories
            .values()
            .map(|a| a.vmmemory())
            .collect::<PrimaryMap<DefinedMemoryIndex, _>>()
            .into_boxed_slice();

        let vmctx_globals = create_globals(&module);

        let offsets = VMOffsets::new(mem::size_of::<*const u8>() as u8, &module);

        let passive_data = RefCell::new(module.passive_data.clone());

        let handle = {
            let instance = Instance {
                module,
                offsets,
                memories,
                tables,
                passive_elements: Default::default(),
                passive_data,
                host_state,
                vmctx: VMContext {},
            };
            let layout = instance.alloc_layout();
            let instance_ptr = alloc::alloc(layout) as *mut Instance;
            if instance_ptr.is_null() {
                alloc::handle_alloc_error(layout);
            }
            ptr::write(instance_ptr, instance);
            InstanceHandle {
                instance: instance_ptr,
            }
        };
        let instance = handle.instance();

        let mut ptr = instance.signature_ids_ptr();
        for sig in handle.module().types.values() {
            *ptr = match sig {
                ModuleType::Function(sig) => lookup_shared_signature(*sig),
                _ => VMSharedSignatureIndex::new(u32::max_value()),
            };
            ptr = ptr.add(1);
        }

        debug_assert_eq!(imports.functions.len(), handle.module().num_imported_funcs);
        ptr::copy(
            imports.functions.as_ptr(),
            instance.imported_functions_ptr() as *mut VMFunctionImport,
            imports.functions.len(),
        );
        debug_assert_eq!(imports.tables.len(), handle.module().num_imported_tables);
        ptr::copy(
            imports.tables.as_ptr(),
            instance.imported_tables_ptr() as *mut VMTableImport,
            imports.tables.len(),
        );
        debug_assert_eq!(
            imports.memories.len(),
            handle.module().num_imported_memories
        );
        ptr::copy(
            imports.memories.as_ptr(),
            instance.imported_memories_ptr() as *mut VMMemoryImport,
            imports.memories.len(),
        );
        debug_assert_eq!(imports.globals.len(), handle.module().num_imported_globals);
        ptr::copy(
            imports.globals.as_ptr(),
            instance.imported_globals_ptr() as *mut VMGlobalImport,
            imports.globals.len(),
        );
        ptr::copy(
            vmctx_tables.values().as_slice().as_ptr(),
            instance.tables_ptr() as *mut VMTableDefinition,
            vmctx_tables.len(),
        );
        ptr::copy(
            vmctx_memories.values().as_slice().as_ptr(),
            instance.memories_ptr() as *mut VMMemoryDefinition,
            vmctx_memories.len(),
        );
        ptr::copy(
            vmctx_globals.values().as_slice().as_ptr(),
            instance.globals_ptr() as *mut VMGlobalDefinition,
            vmctx_globals.len(),
        );
        ptr::write(
            instance.builtin_functions_ptr() as *mut VMBuiltinFunctionsArray,
            VMBuiltinFunctionsArray::initialized(),
        );
        *instance.interrupts() = interrupts;
        *instance.externref_activations_table() = externref_activations_table;
        *instance.stack_map_registry() = stack_map_registry;

        for (index, sig) in instance.module.functions.iter() {
            let type_index = lookup_shared_signature(*sig);

            let (func_ptr, vmctx) =
                if let Some(def_index) = instance.module.defined_func_index(index) {
                    (
                        NonNull::new(finished_functions[def_index] as *mut _).unwrap(),
                        instance.vmctx_ptr(),
                    )
                } else {
                    let import = instance.imported_function(index);
                    (import.body, import.vmctx)
                };

            ptr::write(
                instance.anyfunc_ptr(index),
                VMCallerCheckedAnyfunc {
                    func_ptr,
                    type_index,
                    vmctx,
                },
            );
        }

        // Perform infallible initialization in this constructor, while fallible
        // initialization is deferred to the `initialize` method.
        initialize_passive_elements(instance);
        initialize_globals(instance);

        Ok(handle)
    }

    /// Finishes the instantiation process started by `Instance::new`.
    ///
    /// Only safe to call immediately after instantiation.
    pub unsafe fn initialize(
        &self,
        is_bulk_memory: bool,
        data_initializers: &[DataInitializer<'_>],
    ) -> Result<(), InstantiationError> {
        // Check initializer bounds before initializing anything. Only do this
        // when bulk memory is disabled, since the bulk memory proposal changes
        // instantiation such that the intermediate results of failed
        // initializations are visible.
        if !is_bulk_memory {
            check_table_init_bounds(self.instance())?;
            check_memory_init_bounds(self.instance(), data_initializers)?;
        }

        // Apply fallible initializers. Note that this can "leak" state even if
        // it fails.
        initialize_tables(self.instance())?;
        initialize_memories(self.instance(), data_initializers)?;

        Ok(())
    pub(crate) unsafe fn new(instance: *mut Instance) -> Self {
        Self { instance }
    }

    /// Create a new `InstanceHandle` pointing at the instance
@@ -1126,305 +975,4 @@ impl InstanceHandle {
            instance: self.instance,
        }
    }

    /// Deallocates memory associated with this instance.
    ///
    /// Note that this is unsafe because there might be other handles to this
    /// `InstanceHandle` elsewhere, and there's nothing preventing usage of
    /// this handle after this function is called.
    pub unsafe fn dealloc(&self) {
        let instance = self.instance();
        let layout = instance.alloc_layout();
        ptr::drop_in_place(self.instance);
        alloc::dealloc(self.instance.cast(), layout);
    }
}

fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
    for init in &instance.module().table_elements {
        let start = get_table_init_start(init, instance);
        let table = instance.get_table(init.table_index);

        let size = usize::try_from(table.size()).unwrap();
        if size < start + init.elements.len() {
            return Err(InstantiationError::Link(LinkError(
                "table out of bounds: elements segment does not fit".to_owned(),
            )));
        }
    }

    Ok(())
}

/// Compute the offset for a memory data initializer.
fn get_memory_init_start(init: &DataInitializer<'_>, instance: &Instance) -> usize {
    let mut start = init.location.offset;

    if let Some(base) = init.location.base {
        let val = unsafe {
            if let Some(def_index) = instance.module.defined_global_index(base) {
                *instance.global(def_index).as_u32()
            } else {
                *(*instance.imported_global(base).from).as_u32()
            }
        };
        start += usize::try_from(val).unwrap();
    }

    start
}

/// Return a byte-slice view of a memory's data.
unsafe fn get_memory_slice<'instance>(
    init: &DataInitializer<'_>,
    instance: &'instance Instance,
) -> &'instance mut [u8] {
    let memory = if let Some(defined_memory_index) = instance
        .module
        .defined_memory_index(init.location.memory_index)
    {
        instance.memory(defined_memory_index)
    } else {
        let import = instance.imported_memory(init.location.memory_index);
        let foreign_instance = (&mut *(import).vmctx).instance();
        let foreign_memory = &mut *(import).from;
        let foreign_index = foreign_instance.memory_index(foreign_memory);
        foreign_instance.memory(foreign_index)
    };
    slice::from_raw_parts_mut(memory.base, memory.current_length)
}

fn check_memory_init_bounds(
    instance: &Instance,
    data_initializers: &[DataInitializer<'_>],
) -> Result<(), InstantiationError> {
    for init in data_initializers {
        let start = get_memory_init_start(init, instance);
        unsafe {
            let mem_slice = get_memory_slice(init, instance);
            if mem_slice.get_mut(start..start + init.data.len()).is_none() {
                return Err(InstantiationError::Link(LinkError(
                    "memory out of bounds: data segment does not fit".into(),
                )));
            }
        }
    }

    Ok(())
}

/// Allocate memory for just the tables of the current module.
fn create_tables(module: &Module) -> BoxedSlice<DefinedTableIndex, Table> {
    let num_imports = module.num_imported_tables;
    let mut tables: PrimaryMap<DefinedTableIndex, _> =
        PrimaryMap::with_capacity(module.table_plans.len() - num_imports);
    for table in &module.table_plans.values().as_slice()[num_imports..] {
        tables.push(Table::new(table));
    }
    tables.into_boxed_slice()
}

/// Compute the offset for a table element initializer.
fn get_table_init_start(init: &TableElements, instance: &Instance) -> usize {
    let mut start = init.offset;

    if let Some(base) = init.base {
        let val = unsafe {
            if let Some(def_index) = instance.module.defined_global_index(base) {
                *instance.global(def_index).as_u32()
            } else {
                *(*instance.imported_global(base).from).as_u32()
            }
        };
        start += usize::try_from(val).unwrap();
    }

    start
}

/// Initialize the table memory from the provided initializers.
fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
    for init in &instance.module().table_elements {
        let start = get_table_init_start(init, instance);
        let table = instance.get_table(init.table_index);

        if start
            .checked_add(init.elements.len())
            .map_or(true, |end| end > table.size() as usize)
        {
            return Err(InstantiationError::Trap(Trap::wasm(
                ir::TrapCode::TableOutOfBounds,
            )));
        }

        for (i, func_idx) in init.elements.iter().enumerate() {
            let item = match table.element_type() {
                TableElementType::Func => instance
                    .get_caller_checked_anyfunc(*func_idx)
                    .map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| {
                        f as *const VMCallerCheckedAnyfunc as *mut VMCallerCheckedAnyfunc
                    })
                    .into(),
                TableElementType::Val(_) => {
                    assert!(*func_idx == FuncIndex::reserved_value());
                    TableElement::ExternRef(None)
                }
            };
            table.set(u32::try_from(start + i).unwrap(), item).unwrap();
        }
    }

    Ok(())
}

/// Initialize the `Instance::passive_elements` map by resolving the
/// `Module::passive_elements`'s `FuncIndex`s into `VMCallerCheckedAnyfunc`s for
/// this instance.
fn initialize_passive_elements(instance: &Instance) {
    let mut passive_elements = instance.passive_elements.borrow_mut();
    debug_assert!(
        passive_elements.is_empty(),
        "should only be called once, at initialization time"
    );

    passive_elements.extend(
        instance
            .module
            .passive_elements
            .iter()
            .filter(|(_, segments)| !segments.is_empty())
            .map(|(idx, segments)| {
                (
                    *idx,
                    segments
                        .iter()
                        .map(|s| {
                            instance.get_caller_checked_anyfunc(*s).map_or(
                                ptr::null_mut(),
                                |f: &VMCallerCheckedAnyfunc| {
                                    f as *const VMCallerCheckedAnyfunc as *mut _
                                },
                            )
                        })
                        .collect(),
                )
            }),
    );
}

/// Allocate memory for just the memories of the current module.
fn create_memories(
    module: &Module,
    mem_creator: &dyn RuntimeMemoryCreator,
) -> Result<BoxedSlice<DefinedMemoryIndex, Box<dyn RuntimeLinearMemory>>, InstantiationError> {
    let num_imports = module.num_imported_memories;
    let mut memories: PrimaryMap<DefinedMemoryIndex, _> =
        PrimaryMap::with_capacity(module.memory_plans.len() - num_imports);
    for plan in &module.memory_plans.values().as_slice()[num_imports..] {
        memories.push(
            mem_creator
                .new_memory(plan)
                .map_err(InstantiationError::Resource)?,
        );
    }
    Ok(memories.into_boxed_slice())
}

/// Initialize the memory from the provided initializers.
fn initialize_memories(
    instance: &Instance,
    data_initializers: &[DataInitializer<'_>],
) -> Result<(), InstantiationError> {
    for init in data_initializers {
        let memory = instance.get_memory(init.location.memory_index);

        let start = get_memory_init_start(init, instance);
        if start
            .checked_add(init.data.len())
            .map_or(true, |end| end > memory.current_length)
        {
            return Err(InstantiationError::Trap(Trap::wasm(
                ir::TrapCode::HeapOutOfBounds,
            )));
        }

        unsafe {
            let mem_slice = get_memory_slice(init, instance);
            let end = start + init.data.len();
            let to_init = &mut mem_slice[start..end];
            to_init.copy_from_slice(init.data);
        }
    }

    Ok(())
}

/// Allocate memory for just the globals of the current module,
/// with initializers applied.
fn create_globals(module: &Module) -> BoxedSlice<DefinedGlobalIndex, VMGlobalDefinition> {
    let num_imports = module.num_imported_globals;
    let mut vmctx_globals = PrimaryMap::with_capacity(module.globals.len() - num_imports);

    for _ in &module.globals.values().as_slice()[num_imports..] {
        vmctx_globals.push(VMGlobalDefinition::new());
    }

    vmctx_globals.into_boxed_slice()
}

fn initialize_globals(instance: &Instance) {
    let module = instance.module();
    let num_imports = module.num_imported_globals;
    for (index, global) in module.globals.iter().skip(num_imports) {
        let def_index = module.defined_global_index(index).unwrap();
        unsafe {
            let to = instance.global_ptr(def_index);
            match global.initializer {
                GlobalInit::I32Const(x) => *(*to).as_i32_mut() = x,
                GlobalInit::I64Const(x) => *(*to).as_i64_mut() = x,
                GlobalInit::F32Const(x) => *(*to).as_f32_bits_mut() = x,
                GlobalInit::F64Const(x) => *(*to).as_f64_bits_mut() = x,
                GlobalInit::V128Const(x) => *(*to).as_u128_bits_mut() = x.0,
                GlobalInit::GetGlobal(x) => {
                    let from = if let Some(def_x) = module.defined_global_index(x) {
                        instance.global(def_x)
                    } else {
                        *instance.imported_global(x).from
                    };
                    *to = from;
                }
                GlobalInit::RefFunc(f) => {
                    *(*to).as_anyfunc_mut() = instance.get_caller_checked_anyfunc(f).unwrap()
                        as *const VMCallerCheckedAnyfunc;
                }
                GlobalInit::RefNullConst => match global.wasm_ty {
                    WasmType::FuncRef => *(*to).as_anyfunc_mut() = ptr::null(),
                    WasmType::ExternRef => *(*to).as_externref_mut() = None,
                    ty => panic!("unsupported reference type for global: {:?}", ty),
                },
                GlobalInit::Import => panic!("locally-defined global initialized as import"),
            }
        }
    }
}

/// A link error while instantiating a module.
#[derive(Error, Debug)]
#[error("Link error: {0}")]
pub struct LinkError(pub String);

/// An error while instantiating a module.
#[derive(Error, Debug)]
pub enum InstantiationError {
    /// Insufficient resources available for execution.
    #[error("Insufficient resources: {0}")]
    Resource(String),

    /// A wasm link error occurred.
    #[error("Failed to link module")]
    Link(#[from] LinkError),

    /// A trap occurred during instantiation, after linking.
    #[error("Trap occurred during instantiation")]
    Trap(Trap),
}

642
crates/runtime/src/instance/allocator.rs
Normal file
@@ -0,0 +1,642 @@
use crate::externref::{StackMapRegistry, VMExternRefActivationsTable};
use crate::imports::Imports;
use crate::instance::{Instance, InstanceHandle, RuntimeMemoryCreator};
use crate::memory::{DefaultMemoryCreator, Memory};
use crate::table::{Table, TableElement};
use crate::traphandlers::Trap;
use crate::vmcontext::{
    VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport,
    VMGlobalDefinition, VMGlobalImport, VMInterrupts, VMMemoryImport, VMSharedSignatureIndex,
    VMTableImport,
};
use anyhow::Result;
use std::alloc;
use std::any::Any;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::ptr::{self, NonNull};
use std::slice;
use std::sync::Arc;
use thiserror::Error;
use wasmtime_environ::entity::{packed_option::ReservedValue, EntityRef, EntitySet, PrimaryMap};
use wasmtime_environ::wasm::{
    DefinedFuncIndex, DefinedMemoryIndex, DefinedTableIndex, FuncIndex, GlobalInit, SignatureIndex,
    TableElementType, WasmType,
};
use wasmtime_environ::{
    ir, MemoryInitialization, MemoryInitializer, Module, ModuleType, TableInitializer, VMOffsets,
    WASM_PAGE_SIZE,
};

mod pooling;

pub use self::pooling::{
    InstanceLimits, ModuleLimits, PoolingAllocationStrategy, PoolingInstanceAllocator,
};

/// Represents a request for a new runtime instance.
pub struct InstanceAllocationRequest<'a> {
    /// The module being instantiated.
    pub module: Arc<Module>,

    /// The finished (JIT) functions for the module.
    pub finished_functions: &'a PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,

    /// The imports to use for the instantiation.
    pub imports: Imports<'a>,

    /// A callback for looking up shared signature indexes.
    pub lookup_shared_signature: &'a dyn Fn(SignatureIndex) -> VMSharedSignatureIndex,

    /// The host state to associate with the instance.
    pub host_state: Box<dyn Any>,

    /// The pointer to the VM interrupts structure to use for the instance.
    pub interrupts: *const VMInterrupts,

    /// The pointer to the reference activations table to use for the instance.
    pub externref_activations_table: *mut VMExternRefActivationsTable,

    /// The pointer to the stack map registry to use for the instance.
    pub stack_map_registry: *mut StackMapRegistry,
}

/// A link error while instantiating a module.
#[derive(Error, Debug)]
#[error("Link error: {0}")]
pub struct LinkError(pub String);

/// An error while instantiating a module.
#[derive(Error, Debug)]
pub enum InstantiationError {
    /// Insufficient resources available for execution.
    #[error("Insufficient resources: {0}")]
    Resource(anyhow::Error),

    /// A wasm link error occurred.
    #[error("Failed to link module")]
    Link(#[from] LinkError),

    /// A trap occurred during instantiation, after linking.
    #[error("Trap occurred during instantiation")]
    Trap(Trap),

    /// A limit on how many instances are supported has been reached.
    #[error("Limit of {0} concurrent instances has been reached")]
    Limit(u32),
}

/// An error while creating a fiber stack.
#[derive(Error, Debug)]
pub enum FiberStackError {
    /// Insufficient resources available for the request.
    #[error("Insufficient resources: {0}")]
    Resource(anyhow::Error),
    /// Returned when the allocator does not support custom fiber stacks.
    #[error("Custom fiber stacks are not supported by the allocator")]
    NotSupported,
    /// A limit on how many fibers are supported has been reached.
    #[error("Limit of {0} concurrent fibers has been reached")]
    Limit(u32),
}

/// Represents a runtime instance allocator.
///
/// # Safety
///
/// This trait is unsafe as it requires knowledge of Wasmtime's runtime internals to implement correctly.
pub unsafe trait InstanceAllocator: Send + Sync {
    /// Validates that a module is supported by the allocator.
    fn validate(&self, module: &Module) -> Result<()> {
        drop(module);
        Ok(())
    }

    /// Adjusts the tunables prior to creation of any JIT compiler.
    ///
    /// This method allows the instance allocator control over tunables passed to a `wasmtime_jit::Compiler`.
    fn adjust_tunables(&self, tunables: &mut wasmtime_environ::Tunables) {
        drop(tunables);
    }

    /// Allocates an instance for the given allocation request.
    ///
    /// # Safety
    ///
    /// This method is not inherently unsafe, but care must be taken to ensure
    /// pointers passed in the allocation request outlive the returned instance.
    unsafe fn allocate(
        &self,
        req: InstanceAllocationRequest,
    ) -> Result<InstanceHandle, InstantiationError>;

    /// Finishes the instantiation process started by an instance allocator.
    ///
    /// # Safety
    ///
    /// This method is only safe to call immediately after an instance has been allocated.
    unsafe fn initialize(
        &self,
        handle: &InstanceHandle,
        is_bulk_memory: bool,
    ) -> Result<(), InstantiationError>;

    /// Deallocates a previously allocated instance.
    ///
    /// # Safety
    ///
    /// This function is unsafe because there are no guarantees that the given handle
    /// is the only owner of the underlying instance to deallocate.
    ///
    /// Use extreme care when deallocating an instance so that there are no dangling instance pointers.
    unsafe fn deallocate(&self, handle: &InstanceHandle);

    /// Allocates a fiber stack for calling async functions on.
    ///
    /// Returns the top of the fiber stack if successfully allocated.
    fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError>;

    /// Deallocates a fiber stack that was previously allocated.
    ///
    /// # Safety
    ///
    /// This function is unsafe because there are no guarantees that the given stack
    /// is no longer in use.
    ///
    /// Additionally, passing a stack pointer that was not returned from `allocate_fiber_stack`
    /// will lead to undefined behavior.
    unsafe fn deallocate_fiber_stack(&self, stack: *mut u8);
}
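
A hedged sketch of the lifecycle a caller drives through this trait; the helper below is hypothetical and elides actual instance use:

```rust
// Hypothetical driver, not part of the PR: allocate, then initialize
// immediately (the only point where `initialize` is safe to call), then
// deallocate once the handle is known to be the sole owner.
unsafe fn instantiate_once(
    allocator: &dyn InstanceAllocator,
    req: InstanceAllocationRequest,
) -> Result<(), InstantiationError> {
    let handle = allocator.allocate(req)?;
    allocator.initialize(&handle, /* is_bulk_memory */ true)?;
    // ... the instance would be used here ...
    allocator.deallocate(&handle);
    Ok(())
}
```
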
fn get_table_init_start(
    init: &TableInitializer,
    instance: &Instance,
) -> Result<usize, InstantiationError> {
    match init.base {
        Some(base) => {
            let val = unsafe {
                if let Some(def_index) = instance.module.defined_global_index(base) {
                    *instance.global(def_index).as_u32()
                } else {
                    *(*instance.imported_global(base).from).as_u32()
                }
            };

            init.offset.checked_add(val as usize).ok_or_else(|| {
                InstantiationError::Link(LinkError(
                    "element segment global base overflows".to_owned(),
                ))
            })
        }
        None => Ok(init.offset),
    }
}
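
The `checked_add` above turns a wrapping `offset + base` into a link error instead of a bogus segment start; a quick standalone illustration:

```rust
fn main() {
    // A base large enough to wrap the offset yields None -> reported as a link error.
    assert_eq!(usize::MAX.checked_add(1), None);
    // An in-range base produces the expected segment start.
    assert_eq!(16_usize.checked_add(32), Some(48));
}
```
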
fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
    for init in &instance.module.table_initializers {
        let table = instance.get_table(init.table_index);
        let start = get_table_init_start(init, instance)?;
        let end = start.checked_add(init.elements.len());

        match end {
            Some(end) if end <= table.size() as usize => {
                // Initializer is in bounds
            }
            _ => {
                return Err(InstantiationError::Link(LinkError(
                    "table out of bounds: elements segment does not fit".to_owned(),
                )))
            }
        }
    }

    Ok(())
}

fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
    for init in &instance.module.table_initializers {
        let table = instance.get_table(init.table_index);
        let start = get_table_init_start(init, instance)?;
        let end = start.checked_add(init.elements.len());

        match end {
            Some(end) if end <= table.size() as usize => {
                for (i, func_idx) in init.elements.iter().enumerate() {
                    let item = match table.element_type() {
                        TableElementType::Func => instance
                            .get_caller_checked_anyfunc(*func_idx)
                            .map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| {
                                f as *const VMCallerCheckedAnyfunc as *mut VMCallerCheckedAnyfunc
                            })
                            .into(),
                        TableElementType::Val(_) => {
                            assert!(*func_idx == FuncIndex::reserved_value());
                            TableElement::ExternRef(None)
                        }
                    };
                    table.set(u32::try_from(start + i).unwrap(), item).unwrap();
                }
            }
            _ => {
                return Err(InstantiationError::Trap(Trap::wasm(
                    ir::TrapCode::TableOutOfBounds,
                )))
            }
        }
    }

    Ok(())
}

fn get_memory_init_start(
    init: &MemoryInitializer,
    instance: &Instance,
) -> Result<usize, InstantiationError> {
    match init.base {
        Some(base) => {
            let val = unsafe {
                if let Some(def_index) = instance.module.defined_global_index(base) {
                    *instance.global(def_index).as_u32()
                } else {
                    *(*instance.imported_global(base).from).as_u32()
                }
            };

            init.offset.checked_add(val as usize).ok_or_else(|| {
                InstantiationError::Link(LinkError("data segment global base overflows".to_owned()))
            })
        }
        None => Ok(init.offset),
    }
}

unsafe fn get_memory_slice<'instance>(
    init: &MemoryInitializer,
    instance: &'instance Instance,
) -> &'instance mut [u8] {
    let memory = if let Some(defined_memory_index) =
        instance.module.defined_memory_index(init.memory_index)
    {
        instance.memory(defined_memory_index)
    } else {
        let import = instance.imported_memory(init.memory_index);
        let foreign_instance = (&mut *(import).vmctx).instance();
        let foreign_memory = &mut *(import).from;
        let foreign_index = foreign_instance.memory_index(foreign_memory);
        foreign_instance.memory(foreign_index)
    };
    &mut *ptr::slice_from_raw_parts_mut(memory.base, memory.current_length)
}

fn check_memory_init_bounds(
    instance: &Instance,
    initializers: &[MemoryInitializer],
) -> Result<(), InstantiationError> {
    for init in initializers {
        let memory = instance.get_memory(init.memory_index);
        let start = get_memory_init_start(init, instance)?;
        let end = start.checked_add(init.data.len());

        match end {
            Some(end) if end <= memory.current_length => {
                // Initializer is in bounds
            }
            _ => {
                return Err(InstantiationError::Link(LinkError(
                    "memory out of bounds: data segment does not fit".into(),
                )))
            }
        }
    }

    Ok(())
}

fn initialize_memories(
    instance: &Instance,
    initializers: &[MemoryInitializer],
) -> Result<(), InstantiationError> {
    for init in initializers {
        let memory = instance.get_memory(init.memory_index);
        let start = get_memory_init_start(init, instance)?;
        let end = start.checked_add(init.data.len());

        match end {
            Some(end) if end <= memory.current_length => {
                let mem_slice = unsafe { get_memory_slice(init, instance) };
                mem_slice[start..end].copy_from_slice(&init.data);
            }
            _ => {
                return Err(InstantiationError::Trap(Trap::wasm(
                    ir::TrapCode::HeapOutOfBounds,
                )))
            }
        }
    }

    Ok(())
}

fn check_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
    check_table_init_bounds(instance)?;

    match &instance.module.memory_initialization {
        MemoryInitialization::Paged { out_of_bounds, .. } => {
            if *out_of_bounds {
                return Err(InstantiationError::Link(LinkError(
                    "memory out of bounds: data segment does not fit".into(),
                )));
            }
        }
        MemoryInitialization::Segmented(initializers) => {
            check_memory_init_bounds(instance, initializers)?;
        }
    }

    Ok(())
}

fn initialize_instance(
    instance: &Instance,
    is_bulk_memory: bool,
) -> Result<(), InstantiationError> {
    // If bulk memory is not enabled, bounds check the data and element segments before
    // making any changes. With bulk memory enabled, initializers are processed
    // in-order and side effects are observed up to the point of an out-of-bounds
    // initializer, so the early checking is not desired.
    if !is_bulk_memory {
        check_init_bounds(instance)?;
    }

    // Initialize the tables
    initialize_tables(instance)?;

    // Initialize the memories
    match &instance.module.memory_initialization {
        MemoryInitialization::Paged { map, out_of_bounds } => {
            for (index, pages) in map {
                let memory = instance.memory(index);
                let slice =
                    unsafe { slice::from_raw_parts_mut(memory.base, memory.current_length) };

                for (page_index, page) in pages.iter().enumerate() {
                    if let Some(data) = page {
                        debug_assert_eq!(data.len(), WASM_PAGE_SIZE as usize);
                        // Bound the destination to exactly one page so the
                        // lengths match for `copy_from_slice`.
                        slice[page_index * WASM_PAGE_SIZE as usize..][..data.len()]
                            .copy_from_slice(data);
                    }
                }
            }

            // Check for out of bound access after initializing the pages to maintain
            // the expected behavior of the bulk memory spec.
            if *out_of_bounds {
                return Err(InstantiationError::Trap(Trap::wasm(
                    ir::TrapCode::HeapOutOfBounds,
                )));
            }
        }
        MemoryInitialization::Segmented(initializers) => {
            initialize_memories(instance, initializers)?;
        }
    }

    Ok(())
}
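
A standalone sketch of the page-granular copy in the `Paged` arm above, using plain vectors and a tiny illustrative page size (not Wasmtime's 64 KiB `WASM_PAGE_SIZE`):

```rust
fn main() {
    const PAGE: usize = 4; // illustrative page size only
    let mut memory = vec![0u8; 3 * PAGE];
    // One optional initialization image per page, mirroring the paged map.
    let pages: Vec<Option<Vec<u8>>> = vec![None, Some(vec![7u8; PAGE]), None];
    for (page_index, page) in pages.iter().enumerate() {
        if let Some(data) = page {
            // Copy exactly one page's worth of data into place.
            memory[page_index * PAGE..][..data.len()].copy_from_slice(data);
        }
    }
    assert_eq!(&memory[PAGE..2 * PAGE], &[7, 7, 7, 7]);
}
```
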
unsafe fn initialize_vmcontext(instance: &Instance, req: InstanceAllocationRequest) {
    let module = &instance.module;

    *instance.interrupts() = req.interrupts;
    *instance.externref_activations_table() = req.externref_activations_table;
    *instance.stack_map_registry() = req.stack_map_registry;

    // Initialize shared signatures
    let mut ptr = instance.signature_ids_ptr();
    for sig in module.types.values() {
        *ptr = match sig {
            ModuleType::Function(sig) => (req.lookup_shared_signature)(*sig),
            _ => VMSharedSignatureIndex::new(u32::max_value()),
        };
        ptr = ptr.add(1);
    }

    // Initialize the built-in functions
    ptr::write(
        instance.builtin_functions_ptr() as *mut VMBuiltinFunctionsArray,
        VMBuiltinFunctionsArray::initialized(),
    );

    // Initialize the imports
    debug_assert_eq!(req.imports.functions.len(), module.num_imported_funcs);
    ptr::copy(
        req.imports.functions.as_ptr(),
        instance.imported_functions_ptr() as *mut VMFunctionImport,
        req.imports.functions.len(),
    );
    debug_assert_eq!(req.imports.tables.len(), module.num_imported_tables);
    ptr::copy(
        req.imports.tables.as_ptr(),
        instance.imported_tables_ptr() as *mut VMTableImport,
        req.imports.tables.len(),
    );
    debug_assert_eq!(req.imports.memories.len(), module.num_imported_memories);
    ptr::copy(
        req.imports.memories.as_ptr(),
        instance.imported_memories_ptr() as *mut VMMemoryImport,
        req.imports.memories.len(),
    );
    debug_assert_eq!(req.imports.globals.len(), module.num_imported_globals);
    ptr::copy(
        req.imports.globals.as_ptr(),
        instance.imported_globals_ptr() as *mut VMGlobalImport,
        req.imports.globals.len(),
    );

    // Initialize the functions
    for (index, sig) in instance.module.functions.iter() {
        let type_index = (req.lookup_shared_signature)(*sig);

        let (func_ptr, vmctx) = if let Some(def_index) = instance.module.defined_func_index(index) {
            (
                NonNull::new(req.finished_functions[def_index] as *mut _).unwrap(),
                instance.vmctx_ptr(),
            )
        } else {
            let import = instance.imported_function(index);
            (import.body, import.vmctx)
        };

        ptr::write(
            instance.anyfunc_ptr(index),
            VMCallerCheckedAnyfunc {
                func_ptr,
                type_index,
                vmctx,
            },
        );
    }

    // Initialize the defined tables
    let mut ptr = instance.tables_ptr();
    for i in 0..module.table_plans.len() - module.num_imported_tables {
        ptr::write(ptr, instance.tables[DefinedTableIndex::new(i)].vmtable());
        ptr = ptr.add(1);
    }

    // Initialize the defined memories
    let mut ptr = instance.memories_ptr();
    for i in 0..module.memory_plans.len() - module.num_imported_memories {
        ptr::write(
            ptr,
            instance.memories[DefinedMemoryIndex::new(i)].vmmemory(),
        );
        ptr = ptr.add(1);
    }

    // Initialize the defined globals
    initialize_vmcontext_globals(instance);
}

unsafe fn initialize_vmcontext_globals(instance: &Instance) {
    let module = &instance.module;
    let num_imports = module.num_imported_globals;
    for (index, global) in module.globals.iter().skip(num_imports) {
        let def_index = module.defined_global_index(index).unwrap();
        let to = instance.global_ptr(def_index);

        // Initialize the global before writing to it
        ptr::write(to, VMGlobalDefinition::new());

        match global.initializer {
            GlobalInit::I32Const(x) => *(*to).as_i32_mut() = x,
            GlobalInit::I64Const(x) => *(*to).as_i64_mut() = x,
            GlobalInit::F32Const(x) => *(*to).as_f32_bits_mut() = x,
            GlobalInit::F64Const(x) => *(*to).as_f64_bits_mut() = x,
            GlobalInit::V128Const(x) => *(*to).as_u128_bits_mut() = x.0,
            GlobalInit::GetGlobal(x) => {
                let from = if let Some(def_x) = module.defined_global_index(x) {
                    instance.global(def_x)
                } else {
                    *instance.imported_global(x).from
                };
                *to = from;
            }
            GlobalInit::RefFunc(f) => {
                *(*to).as_anyfunc_mut() = instance.get_caller_checked_anyfunc(f).unwrap()
                    as *const VMCallerCheckedAnyfunc;
            }
            GlobalInit::RefNullConst => match global.wasm_ty {
                WasmType::FuncRef => *(*to).as_anyfunc_mut() = ptr::null(),
                WasmType::ExternRef => *(*to).as_externref_mut() = None,
                ty => panic!("unsupported reference type for global: {:?}", ty),
            },
            GlobalInit::Import => panic!("locally-defined global initialized as import"),
        }
    }
}

/// Represents the on-demand instance allocator.
#[derive(Clone)]
pub struct OnDemandInstanceAllocator {
    mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
}

impl OnDemandInstanceAllocator {
    /// Creates a new on-demand instance allocator.
    pub fn new(mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>) -> Self {
        Self { mem_creator }
    }

    fn create_tables(module: &Module) -> PrimaryMap<DefinedTableIndex, Table> {
        let num_imports = module.num_imported_tables;
        let mut tables: PrimaryMap<DefinedTableIndex, _> =
            PrimaryMap::with_capacity(module.table_plans.len() - num_imports);
        for table in &module.table_plans.values().as_slice()[num_imports..] {
            tables.push(Table::new_dynamic(table));
        }
        tables
    }

    fn create_memories(
        &self,
        module: &Module,
    ) -> Result<PrimaryMap<DefinedMemoryIndex, Memory>, InstantiationError> {
        let creator = self
            .mem_creator
            .as_deref()
            .unwrap_or_else(|| &DefaultMemoryCreator);
        let num_imports = module.num_imported_memories;
        let mut memories: PrimaryMap<DefinedMemoryIndex, _> =
            PrimaryMap::with_capacity(module.memory_plans.len() - num_imports);
        for plan in &module.memory_plans.values().as_slice()[num_imports..] {
            memories
                .push(Memory::new_dynamic(plan, creator).map_err(InstantiationError::Resource)?);
        }
        Ok(memories)
    }
}

unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
    unsafe fn allocate(
        &self,
        mut req: InstanceAllocationRequest,
    ) -> Result<InstanceHandle, InstantiationError> {
        debug_assert!(!req.externref_activations_table.is_null());
        debug_assert!(!req.stack_map_registry.is_null());

        let memories = self.create_memories(&req.module)?;
        let tables = Self::create_tables(&req.module);

        let host_state = std::mem::replace(&mut req.host_state, Box::new(()));

        let handle = {
            let instance = Instance {
                module: req.module.clone(),
                offsets: VMOffsets::new(std::mem::size_of::<*const u8>() as u8, &req.module),
                memories,
                tables,
                dropped_elements: RefCell::new(EntitySet::with_capacity(
                    req.module.passive_elements.len(),
                )),
                dropped_data: RefCell::new(EntitySet::with_capacity(req.module.passive_data.len())),
                host_state,
                vmctx: VMContext {},
            };
            let layout = instance.alloc_layout();
            let instance_ptr = alloc::alloc(layout) as *mut Instance;
            if instance_ptr.is_null() {
                alloc::handle_alloc_error(layout);
            }
            ptr::write(instance_ptr, instance);
            InstanceHandle::new(instance_ptr)
        };

        initialize_vmcontext(handle.instance(), req);

        Ok(handle)
    }

    unsafe fn initialize(
        &self,
        handle: &InstanceHandle,
        is_bulk_memory: bool,
    ) -> Result<(), InstantiationError> {
        initialize_instance(handle.instance(), is_bulk_memory)
    }

    unsafe fn deallocate(&self, handle: &InstanceHandle) {
        let layout = handle.instance().alloc_layout();
        ptr::drop_in_place(handle.instance);
        alloc::dealloc(handle.instance.cast(), layout);
    }

    fn allocate_fiber_stack(&self) -> Result<*mut u8, FiberStackError> {
        // The on-demand allocator does not support allocating fiber stacks
        Err(FiberStackError::NotSupported)
    }

    unsafe fn deallocate_fiber_stack(&self, _stack: *mut u8) {
        // This should never be called as `allocate_fiber_stack` never returns success
        unreachable!()
    }
}
1626
crates/runtime/src/instance/allocator/pooling.rs
Normal file
File diff suppressed because it is too large
58
crates/runtime/src/instance/allocator/pooling/linux.rs
Normal file
@@ -0,0 +1,58 @@
use anyhow::{bail, Context, Result};

fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
    if len == 0 {
        return Ok(());
    }

    unsafe {
        if protect {
            region::protect(addr, len, region::Protection::NONE)
                .context("failed to protect memory pages")?;
        }

        // On Linux, this is enough to cause the kernel to initialize the pages to 0 on next access
        if libc::madvise(addr as _, len, libc::MADV_DONTNEED) != 0 {
            bail!(
                "madvise failed to decommit: {}",
                std::io::Error::last_os_error()
            );
        }
    }

    Ok(())
}
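
A standalone, Linux-only sketch of the `MADV_DONTNEED` behavior `decommit` relies on (uses the `libc` crate; error handling is reduced to asserts):

```rust
fn main() {
    unsafe {
        let len = 4096;
        // Map one anonymous read/write page.
        let page = libc::mmap(
            std::ptr::null_mut(),
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        );
        assert_ne!(page, libc::MAP_FAILED);
        let p = page as *mut u8;
        *p = 42; // dirty the page
        // Discard the backing; the mapping itself stays valid.
        assert_eq!(libc::madvise(page, len, libc::MADV_DONTNEED), 0);
        assert_eq!(*p, 0); // next access sees a fresh zero page
        libc::munmap(page, len);
    }
}
```
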
pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
    if len == 0 {
        return Ok(());
    }

    // Just change the protection level to READ|WRITE
    unsafe {
        region::protect(addr, len, region::Protection::READ_WRITE)
            .context("failed to make linear memory pages read/write")
    }
}

pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len, true)
}

pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
    // A no-op as table pages remain READ|WRITE
    Ok(())
}

pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len, false)
}

pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
    // A no-op as stack pages remain READ|WRITE
    Ok(())
}

pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len, false)
}
596
crates/runtime/src/instance/allocator/pooling/uffd.rs
Normal file
@@ -0,0 +1,596 @@
//! This module implements user space page fault handling with the `userfaultfd` ("uffd") system call on Linux.
//!
//! Handling page faults for memory accesses in regions relating to WebAssembly instances
//! enables the runtime to protect guard pages in user space rather than kernel space (i.e. without `mprotect`).
//!
//! Additionally, linear memories can be lazy-initialized upon first access.
//!
//! Handling faults in user space is slower than handling faults in the kernel. However,
//! in use cases where there is a high number of concurrently executing instances, handling
//! faults in user space rarely requires changing memory protection levels. This can improve
//! concurrency by not taking kernel memory manager locks and may decrease TLB shootdowns as
//! fewer page table entries need to continually change.
//!
//! Here's how the `uffd` feature works:
//!
//! 1. A user fault file descriptor is created to monitor specific areas of the address space.
//! 2. A thread is spawned to continually read events from the user fault file descriptor.
//! 3. When a page fault event is received, the handler thread calculates where the fault occurred:
//!    a) If the fault occurs on a linear memory page, it is handled by either copying the page from
//!       initialization data or zeroing it.
//!    b) If the fault occurs on a guard page, the protection level of the guard page is changed to
//!       force the kernel to signal SIGBUS on the next retry. The faulting page is recorded so the
//!       protection level can be reset in the future.
//! 4. Faults on address space relating to an instance may occur both from Wasmtime (e.g. instance
//!    initialization) and from WebAssembly code (e.g. reading from or writing to linear memory);
//!    therefore the user fault handler must do as little work as possible to handle the fault.
//! 5. When the pooling allocator is dropped, it will drop the memory mappings relating to the pool; this
//!    generates unmap events for the fault handling thread, which responds by decrementing the mapping
//!    count. When the count reaches zero, the user fault handling thread will gracefully terminate.
//!
//! This feature requires Linux kernel 4.11 or newer.

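A heavily hedged sketch of the handler loop that steps 2–5 describe, using the `userfaultfd` crate imported below; the fault-resolution and wake logic is elided, and the exact event set handled by the real implementation may differ:

```rust
// Sketch only, not the PR's implementation: the real handler resolves the
// faulting address via FaultLocator and either initializes the page, zeroes
// it, or arms a SIGBUS for guard-page accesses.
fn handler_loop(uffd: Uffd, mut mappings: usize) -> Result<()> {
    while mappings > 0 {
        match uffd.read_event()? {
            Some(Event::Pagefault { addr, .. }) => {
                let _ = addr; // classify: linear memory page vs. guard page
            }
            Some(Event::Unmap { .. }) => mappings -= 1, // pool being dropped
            _ => {}
        }
    }
    Ok(()) // all monitored mappings gone: the thread exits gracefully
}
```
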
use super::{InstancePool, MemoryPool};
use crate::instance::Instance;
use anyhow::{bail, Context, Result};
use std::thread;
use userfaultfd::{Event, FeatureFlags, IoctlFlags, Uffd, UffdBuilder};
use wasmtime_environ::{entity::EntityRef, wasm::DefinedMemoryIndex, MemoryInitialization};

const WASM_PAGE_SIZE: usize = wasmtime_environ::WASM_PAGE_SIZE as usize;

fn decommit(addr: *mut u8, len: usize) -> Result<()> {
    if len == 0 {
        return Ok(());
    }

    unsafe {
        // On Linux, this tells the kernel to discard the backing of the pages in the range.
        // If the discarded pages are part of a uffd region, then the next access will fault
        // and the user fault handler will receive the event.
        // If the pages are not monitored by uffd, the kernel will zero the page on next access,
        // as if it were mmap'd for the first time.
        if libc::madvise(addr as _, len, libc::MADV_DONTNEED) != 0 {
            bail!(
                "madvise failed to decommit: {}",
                std::io::Error::last_os_error()
            );
        }
    }

    Ok(())
}

pub fn commit_memory_pages(_addr: *mut u8, _len: usize) -> Result<()> {
    // A no-op as memory pages remain READ|WRITE with uffd
    Ok(())
}

pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len)
}

pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
    // A no-op as table pages remain READ|WRITE
    Ok(())
}

pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len)
}

pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
    // A no-op as stack pages remain READ|WRITE
    Ok(())
}

pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len)
}

/// This is used to initialize the memory pool when uffd is enabled.
///
/// Without uffd, all of the memory pool's pages are initially protected with `NONE` to treat the entire
/// range as guard pages. When an instance is created, the initial pages of the memory are
/// changed to `READ_WRITE`.
///
/// With uffd, however, the potentially accessible pages of each linear memory are made `READ_WRITE` and
/// the page fault handler will detect an out of bounds access and treat the page, temporarily,
/// as a guard page.
pub(super) fn initialize_memory_pool(pool: &MemoryPool) -> Result<()> {
    if pool.memory_size == 0 || pool.max_wasm_pages == 0 {
        return Ok(());
    }

    for i in 0..pool.max_instances {
        for base in pool.get(i) {
            unsafe {
                region::protect(
                    base as _,
                    pool.max_wasm_pages as usize * WASM_PAGE_SIZE,
                    region::Protection::READ_WRITE,
                )
                .context("failed to initialize memory pool for uffd")?;
            }
        }
    }

    Ok(())
}

/// This is used to reset a linear memory's guard page back to read-write as the page might be accessible
|
||||
/// again in the future depending on how the linear memory grows.
|
||||
fn reset_guard_page(addr: *mut u8, len: usize) -> Result<()> {
|
||||
unsafe {
|
||||
region::protect(addr, len, region::Protection::READ_WRITE)
|
||||
.context("failed to reset guard page")
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents a location of a page fault within monitored regions of memory.
|
||||
enum FaultLocation<'a> {
|
||||
/// The address location is in a WebAssembly linear memory page.
|
||||
/// The fault handler will copy the pages from initialization data if necessary.
|
||||
MemoryPage {
|
||||
/// The address of the page being accessed.
|
||||
page_addr: *mut u8,
|
||||
/// The length of the page being accessed.
|
||||
len: usize,
|
||||
/// The instance related to the memory page that was accessed.
|
||||
instance: &'a Instance,
|
||||
/// The index of the memory that was accessed.
|
||||
memory_index: DefinedMemoryIndex,
|
||||
/// The Wasm page index to initialize if the access was not a guard page.
|
||||
page_index: Option<usize>,
|
||||
},
|
||||
}
|
||||
|
||||
/// Used to resolve fault addresses to a location.
|
||||
///
|
||||
/// This implementation relies heavily on how the linear memory pool organizes its memory.
|
||||
///
|
||||
/// `usize` is used here instead of pointers to keep this `Send` as it gets sent to the handler thread.
|
||||
struct FaultLocator {
|
||||
instances_start: usize,
|
||||
instance_size: usize,
|
||||
max_instances: usize,
|
||||
memories_start: usize,
|
||||
memories_end: usize,
|
||||
memory_size: usize,
|
||||
max_memories: usize,
|
||||
}
|
||||
|
||||
impl FaultLocator {
|
||||
fn new(instances: &InstancePool) -> Self {
|
||||
let instances_start = instances.mapping.as_ptr() as usize;
|
||||
let memories_start = instances.memories.mapping.as_ptr() as usize;
|
||||
let memories_end = memories_start + instances.memories.mapping.len();
|
||||
|
||||
// Should always have instances
|
||||
debug_assert!(instances_start != 0);
|
||||
|
||||
Self {
|
||||
instances_start,
|
||||
instance_size: instances.instance_size,
|
||||
max_instances: instances.max_instances,
|
||||
memories_start,
|
||||
memories_end,
|
||||
memory_size: instances.memories.memory_size,
|
||||
max_memories: instances.memories.max_memories,
|
||||
}
|
||||
}
|
||||
|
||||
/// This is super-duper unsafe as it is used from the handler thread
|
||||
/// to access instance data without any locking primitives.
|
||||
///
|
||||
/// It is assumed that the thread that owns the instance being accessed is
|
||||
/// currently suspended waiting on a fault to be handled.
|
||||
///
|
||||
/// Of course a stray faulting memory access from a thread that does not own
|
||||
/// the instance might introduce a race, but this implementation considers
|
||||
/// such to be a serious soundness bug not originating in this code.
|
||||
///
|
||||
/// If the assumption holds true, accessing the instance data from the handler thread
|
||||
/// should, in theory, be safe.
|
||||
unsafe fn get_instance(&self, index: usize) -> &Instance {
|
||||
debug_assert!(index < self.max_instances);
|
||||
&*((self.instances_start + (index * self.instance_size)) as *const Instance)
|
||||
}
|
||||
|
||||
unsafe fn locate(&self, addr: usize) -> Option<FaultLocation> {
|
||||
// Check for a linear memory location
|
||||
if addr >= self.memories_start && addr < self.memories_end {
|
||||
let index = (addr - self.memories_start) / self.memory_size;
|
||||
let memory_index = DefinedMemoryIndex::new(index % self.max_memories);
|
||||
let memory_start = self.memories_start + (index * self.memory_size);
|
||||
let page_index = (addr - memory_start) / WASM_PAGE_SIZE;
|
||||
let instance = self.get_instance(index / self.max_memories);
|
||||
|
||||
let init_page_index = instance.memories.get(memory_index).and_then(|m| {
|
||||
if page_index < m.size() as usize {
|
||||
Some(page_index)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
return Some(FaultLocation::MemoryPage {
|
||||
page_addr: (memory_start + page_index * WASM_PAGE_SIZE) as _,
|
||||
len: WASM_PAGE_SIZE,
|
||||
instance,
|
||||
memory_index,
|
||||
page_index: init_page_index,
|
||||
});
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
}
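// A worked example of the index arithmetic in `locate`, under the layout used
// by the test below (2 memories per instance and a 10-page reservation per
// memory; the concrete numbers are illustrative only): for a fault at
// `memories_start + 23 * WASM_PAGE_SIZE`,
//
//   index        = (23 * 64 KiB) / (10 * 64 KiB) = 2  (third memory slot in the pool)
//   memory_index = 2 % 2                         = 0  (first memory of its instance)
//   instance     = 2 / 2                         = 1  (second instance)
//   page_index   = 23 - 2 * 10                   = 3  (fourth Wasm page of that memory)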

/// This is called following a fault on a guard page.
///
/// Because the region being monitored is protected read-write, this needs to set the
/// protection level to `NONE` before waking the page.
///
/// This will cause the kernel to raise a SIGBUS when retrying the fault.
unsafe fn wake_guard_page_access(uffd: &Uffd, page_addr: *const u8, len: usize) -> Result<()> {
    // Set the page to NONE to induce a SIGBUS for the access on the next retry
    region::protect(page_addr, len, region::Protection::NONE)
        .context("failed to change guard page protection")?;

    uffd.wake(page_addr as _, len)
        .context("failed to wake guard page access")?;

    Ok(())
}

/// This is called to initialize a linear memory page (64 KiB).
///
/// If paged initialization is used for the module, then we can instruct the kernel to back the page with
/// what is already stored in the initialization data; if the page isn't in the initialization data,
/// it will be zeroed instead.
///
/// If paged initialization isn't being used, we zero the page. Initialization happens
/// at module instantiation in this case and the segment data will then be copied to the zeroed page.
unsafe fn initialize_wasm_page(
    uffd: &Uffd,
    instance: &Instance,
    page_addr: *const u8,
    memory_index: DefinedMemoryIndex,
    page_index: usize,
) -> Result<()> {
    // Check for paged initialization and copy the page if present in the initialization data
    if let MemoryInitialization::Paged { map, .. } = &instance.module.memory_initialization {
        let pages = &map[memory_index];

        if let Some(Some(data)) = pages.get(page_index) {
            debug_assert_eq!(data.len(), WASM_PAGE_SIZE);

            log::trace!(
                "copying linear memory page from {:p} to {:p}",
                data.as_ptr(),
                page_addr
            );

            uffd.copy(data.as_ptr() as _, page_addr as _, WASM_PAGE_SIZE, true)
                .context("failed to copy linear memory page")?;

            return Ok(());
        }
    }

    log::trace!("zeroing linear memory page at {:p}", page_addr);

    uffd.zeropage(page_addr as _, WASM_PAGE_SIZE, true)
        .context("failed to zero linear memory page")?;

    Ok(())
}

unsafe fn handle_page_fault(
    uffd: &Uffd,
    locator: &FaultLocator,
    addr: *mut std::ffi::c_void,
) -> Result<()> {
    match locator.locate(addr as usize) {
        Some(FaultLocation::MemoryPage {
            page_addr,
            len,
            instance,
            memory_index,
            page_index,
        }) => {
            log::trace!(
                "handling fault in linear memory at address {:p} on page {:p}",
                addr,
                page_addr
            );

            match page_index {
                Some(page_index) => {
                    initialize_wasm_page(&uffd, instance, page_addr, memory_index, page_index)?;
                }
                None => {
                    log::trace!("out of bounds memory access at {:p}", addr);

                    // Record the guard page fault so the page protection level can be reset later
                    instance.memories[memory_index].record_guard_page_fault(
                        page_addr,
                        len,
                        reset_guard_page,
                    );
                    wake_guard_page_access(&uffd, page_addr, len)?;
                }
            }
        }
        None => {
            bail!(
                "failed to locate fault address {:p} in registered memory regions",
                addr
            );
        }
    }

    Ok(())
}

fn fault_handler_thread(uffd: Uffd, locator: FaultLocator) -> Result<()> {
    loop {
        match uffd.read_event().expect("failed to read event") {
            Some(Event::Unmap { start, end }) => {
                log::trace!("memory region unmapped: {:p}-{:p}", start, end);

                let (start, end) = (start as usize, end as usize);

                if start == locator.memories_start && end == locator.memories_end {
                    break;
                } else {
                    panic!("unexpected memory region unmapped");
                }
            }
            Some(Event::Pagefault { addr, .. }) => unsafe {
                handle_page_fault(&uffd, &locator, addr as _)?
            },
            Some(_) => continue,
            None => bail!("no event was read from the user fault descriptor"),
        }
    }

    log::trace!("fault handler thread has successfully terminated");

    Ok(())
}

#[derive(Debug)]
pub struct PageFaultHandler {
    thread: Option<thread::JoinHandle<Result<()>>>,
}

impl PageFaultHandler {
    pub(super) fn new(instances: &InstancePool) -> Result<Self> {
        let uffd = UffdBuilder::new()
            .close_on_exec(true)
            .require_features(FeatureFlags::EVENT_UNMAP)
            .create()
            .context("failed to create user fault descriptor")?;

        // Register the linear memory pool with the userfault fd
        let start = instances.memories.mapping.as_ptr();
        let len = instances.memories.mapping.len();

        let thread = if !start.is_null() && len > 0 {
            let ioctls = uffd
                .register(start as _, len)
                .context("failed to register user fault range")?;

            if !ioctls.contains(IoctlFlags::WAKE | IoctlFlags::COPY | IoctlFlags::ZEROPAGE) {
                bail!(
                    "required user fault ioctls not supported by the kernel; found: {:?}",
                    ioctls,
                );
            }

            log::trace!(
                "user fault handling enabled on linear memory pool at {:p} with size {}",
                start,
                len
            );

            let locator = FaultLocator::new(&instances);

            Some(
                thread::Builder::new()
                    .name("page fault handler".into())
                    .spawn(move || fault_handler_thread(uffd, locator))
                    .context("failed to spawn page fault handler thread")?,
            )
        } else {
            log::trace!("user fault handling disabled as there is no linear memory pool");
            None
        };

        Ok(Self { thread })
    }
}

impl Drop for PageFaultHandler {
    fn drop(&mut self) {
        // The handler thread should terminate once all monitored regions of memory are unmapped.
        // The pooling instance allocator ensures that the regions are unmapped prior to dropping
        // the page fault handler.
        if let Some(thread) = self.thread.take() {
            thread
                .join()
                .expect("failed to join page fault handler thread")
                .expect("fault handler thread failed");
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::{
        Imports, InstanceAllocationRequest, InstanceLimits, ModuleLimits,
        PoolingAllocationStrategy, VMSharedSignatureIndex,
    };
    use std::ptr;
    use std::sync::Arc;
    use wasmtime_environ::{entity::PrimaryMap, wasm::Memory, MemoryPlan, MemoryStyle, Module};

    #[cfg(target_pointer_width = "64")]
    #[test]
    fn test_address_locator() {
        let module_limits = ModuleLimits {
            imported_functions: 0,
            imported_tables: 0,
            imported_memories: 0,
            imported_globals: 0,
            types: 0,
            functions: 0,
            tables: 0,
            memories: 2,
            globals: 0,
            table_elements: 0,
            memory_pages: 2,
        };
        let instance_limits = InstanceLimits {
            count: 3,
            memory_reservation_size: (WASM_PAGE_SIZE * 10) as u64,
        };

        let instances =
            InstancePool::new(&module_limits, &instance_limits).expect("should allocate");

        let locator = FaultLocator::new(&instances);

        assert_eq!(locator.instances_start, instances.mapping.as_ptr() as usize);
        assert_eq!(locator.instance_size, 4096);
        assert_eq!(locator.max_instances, 3);
        assert_eq!(
            locator.memories_start,
            instances.memories.mapping.as_ptr() as usize
        );
        assert_eq!(
            locator.memories_end,
            locator.memories_start + instances.memories.mapping.len()
        );
        assert_eq!(locator.memory_size, WASM_PAGE_SIZE * 10);
        assert_eq!(locator.max_memories, 2);

        unsafe {
            assert!(locator.locate(0).is_none());
            assert!(locator.locate(locator.memories_end).is_none());

            let mut module = Module::new();

            for _ in 0..module_limits.memories {
                module.memory_plans.push(MemoryPlan {
                    memory: Memory {
                        minimum: 2,
                        maximum: Some(2),
                        shared: false,
                    },
                    style: MemoryStyle::Static { bound: 1 },
                    offset_guard_size: 0,
                });
            }

            module_limits.validate(&module).expect("should validate");

            let mut handles = Vec::new();
            let module = Arc::new(module);
            let finished_functions = &PrimaryMap::new();

            // Allocate the maximum number of instances with the maximum number of memories
            for _ in 0..instances.max_instances {
                handles.push(
                    instances
                        .allocate(
                            PoolingAllocationStrategy::Random,
                            InstanceAllocationRequest {
                                module: module.clone(),
                                finished_functions,
                                imports: Imports {
                                    functions: &[],
                                    tables: &[],
                                    memories: &[],
                                    globals: &[],
                                },
                                lookup_shared_signature: &|_| VMSharedSignatureIndex::default(),
                                host_state: Box::new(()),
                                interrupts: ptr::null(),
                                externref_activations_table: ptr::null_mut(),
                                stack_map_registry: ptr::null_mut(),
                            },
                        )
                        .expect("instance should allocate"),
                );
            }

            // Validate memory locations
            for instance_index in 0..instances.max_instances {
                for memory_index in 0..instances.memories.max_memories {
                    let memory_start = locator.memories_start
                        + (instance_index * locator.memory_size * locator.max_memories)
                        + (memory_index * locator.memory_size);

                    // Test for access to first page
                    match locator.locate(memory_start + 10000) {
                        Some(FaultLocation::MemoryPage {
                            page_addr,
                            len,
                            instance: _,
                            memory_index: mem_index,
                            page_index,
                        }) => {
                            assert_eq!(page_addr, memory_start as _);
                            assert_eq!(len, WASM_PAGE_SIZE);
                            assert_eq!(mem_index, DefinedMemoryIndex::new(memory_index));
                            assert_eq!(page_index, Some(0));
                        }
                        _ => panic!("expected a memory page location"),
                    }

                    // Test for access to second page
                    match locator.locate(memory_start + 1024 + WASM_PAGE_SIZE) {
                        Some(FaultLocation::MemoryPage {
                            page_addr,
                            len,
                            instance: _,
                            memory_index: mem_index,
                            page_index,
                        }) => {
                            assert_eq!(page_addr, (memory_start + WASM_PAGE_SIZE) as _);
                            assert_eq!(len, WASM_PAGE_SIZE);
                            assert_eq!(mem_index, DefinedMemoryIndex::new(memory_index));
                            assert_eq!(page_index, Some(1));
                        }
                        _ => panic!("expected a memory page location"),
                    }

                    // Test for guard page
                    match locator.locate(memory_start + 10 + 9 * WASM_PAGE_SIZE) {
                        Some(FaultLocation::MemoryPage {
                            page_addr,
                            len,
                            instance: _,
                            memory_index: mem_index,
                            page_index,
                        }) => {
                            assert_eq!(page_addr, (memory_start + (9 * WASM_PAGE_SIZE)) as _);
                            assert_eq!(len, WASM_PAGE_SIZE);
                            assert_eq!(mem_index, DefinedMemoryIndex::new(memory_index));
                            assert_eq!(page_index, None);
                        }
                        _ => panic!("expected a memory page location"),
                    }
                }
            }

            for handle in handles.drain(..) {
                instances.deallocate(&handle);
            }
        }
    }
}
68
crates/runtime/src/instance/allocator/pooling/unix.rs
Normal file
@@ -0,0 +1,68 @@
use anyhow::{bail, Context, Result};

fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
    if len == 0 {
        return Ok(());
    }

    // By creating a new mapping at the same location, this will discard the
    // mapping for the pages in the given range.
    // The new mapping will be to the CoW zero page, so this effectively
    // zeroes the pages.
    if unsafe {
        libc::mmap(
            addr as _,
            len,
            if protect {
                libc::PROT_NONE
            } else {
                libc::PROT_READ | libc::PROT_WRITE
            },
            libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_FIXED,
            -1,
            0,
        ) as *mut u8
    } != addr
    {
        bail!(
            "mmap failed to remap pages: {}",
            std::io::Error::last_os_error()
        );
    }

    Ok(())
}

pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
    if len == 0 {
        return Ok(());
    }

    // Just change the protection level to READ|WRITE
    unsafe {
        region::protect(addr, len, region::Protection::READ_WRITE)
            .context("failed to make linear memory pages read/write")
    }
}

pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len, true)
}

pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
    // A no-op as table pages remain READ|WRITE
    Ok(())
}

pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len, false)
}

pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
    // A no-op as stack pages remain READ|WRITE
    Ok(())
}

pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len, false)
}
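// A standalone sketch of the `MAP_FIXED` trick `decommit` uses above,
// assuming a Linux or similar POSIX host (illustrative only, not part of the
// change): mapping fresh anonymous memory over an existing range keeps the
// address stable while replacing the contents with the copy-on-write zero page.
#[cfg(all(test, unix))]
#[test]
fn remapping_fixed_zeroes_pages() {
    unsafe {
        let len = region::page::size();
        let addr = libc::mmap(
            std::ptr::null_mut(),
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANON,
            -1,
            0,
        ) as *mut u8;
        assert_ne!(addr as isize, -1);
        addr.write(1);
        // Replace the page in place; the returned address must equal `addr`.
        let remapped = libc::mmap(
            addr as _,
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_FIXED,
            -1,
            0,
        ) as *mut u8;
        assert_eq!(remapped, addr);
        assert_eq!(addr.read(), 0);
        libc::munmap(addr as _, len);
    }
}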
55
crates/runtime/src/instance/allocator/pooling/windows.rs
Normal file
@@ -0,0 +1,55 @@
use anyhow::{bail, Result};
use winapi::um::memoryapi::{VirtualAlloc, VirtualFree};
use winapi::um::winnt::{MEM_COMMIT, MEM_DECOMMIT, PAGE_READWRITE};

pub fn commit(addr: *mut u8, len: usize) -> Result<()> {
    if len == 0 {
        return Ok(());
    }

    // Memory needs to be committed, so don't use the `region` crate
    if unsafe { VirtualAlloc(addr as _, len, MEM_COMMIT, PAGE_READWRITE).is_null() } {
        bail!("failed to commit memory as read/write");
    }

    Ok(())
}

pub fn decommit(addr: *mut u8, len: usize) -> Result<()> {
    if len == 0 {
        return Ok(());
    }

    if unsafe { VirtualFree(addr as _, len, MEM_DECOMMIT) } == 0 {
        bail!(
            "failed to decommit memory pages: {}",
            std::io::Error::last_os_error()
        );
    }

    Ok(())
}

pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
    commit(addr, len)
}

pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len)
}

pub fn commit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
    commit(addr, len)
}

pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len)
}

pub fn commit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
    commit(addr, len)
}

pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
    decommit(addr, len)
}
@@ -37,9 +37,13 @@ pub mod libcalls;
pub use crate::export::*;
pub use crate::externref::*;
pub use crate::imports::Imports;
pub use crate::instance::{InstanceHandle, InstantiationError, LinkError, RuntimeInstance};
pub use crate::instance::{
    FiberStackError, InstanceAllocationRequest, InstanceAllocator, InstanceHandle, InstanceLimits,
    InstantiationError, LinkError, ModuleLimits, OnDemandInstanceAllocator,
    PoolingAllocationStrategy, PoolingInstanceAllocator, RuntimeInstance,
};
pub use crate::jit_int::GdbJitImageRegistration;
pub use crate::memory::{RuntimeLinearMemory, RuntimeMemoryCreator};
pub use crate::memory::{Memory, RuntimeLinearMemory, RuntimeMemoryCreator};
pub use crate::mmap::Mmap;
pub use crate::table::{Table, TableElement};
pub use crate::traphandlers::{
@@ -4,15 +4,18 @@

use crate::mmap::Mmap;
use crate::vmcontext::VMMemoryDefinition;
use anyhow::Result;
use more_asserts::{assert_ge, assert_le};
use std::cell::RefCell;
use std::cell::{Cell, RefCell};
use std::cmp::min;
use std::convert::TryFrom;
use std::ptr;
use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};

/// A memory allocator
pub trait RuntimeMemoryCreator: Send + Sync {
    /// Create new RuntimeLinearMemory
    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>, String>;
    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>>;
}

/// A default memory allocator used by Wasmtime
@@ -20,8 +23,8 @@ pub struct DefaultMemoryCreator;

impl RuntimeMemoryCreator for DefaultMemoryCreator {
    /// Create new MmapMemory
    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>, String> {
        Ok(Box::new(MmapMemory::new(plan)?) as Box<dyn RuntimeLinearMemory>)
    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>> {
        Ok(Box::new(MmapMemory::new(plan)?) as _)
    }
}

@@ -52,10 +55,6 @@ pub struct MmapMemory {
    // Size in bytes of extra guard pages after the end to optimize loads and stores with
    // constant offsets.
    offset_guard_size: usize,

    // Records whether we're using a bounds-checking strategy which requires
    // handlers to catch trapping accesses.
    pub(crate) needs_signal_handlers: bool,
}

#[derive(Debug)]
@@ -68,22 +67,13 @@ struct WasmMmap {

impl MmapMemory {
    /// Create a new linear memory instance with specified minimum and maximum number of wasm pages.
    pub fn new(plan: &MemoryPlan) -> Result<Self, String> {
    pub fn new(plan: &MemoryPlan) -> Result<Self> {
        // `maximum` cannot be set to more than `65536` pages.
        assert_le!(plan.memory.minimum, WASM_MAX_PAGES);
        assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= WASM_MAX_PAGES);

        let offset_guard_bytes = plan.offset_guard_size as usize;

        // If we have an offset guard, or if we're doing the static memory
        // allocation strategy, we need signal handlers to catch out of bounds
        // acceses.
        let needs_signal_handlers = offset_guard_bytes > 0
            || match plan.style {
                MemoryStyle::Dynamic => false,
                MemoryStyle::Static { .. } => true,
            };

        let minimum_pages = match plan.style {
            MemoryStyle::Dynamic => plan.memory.minimum,
            MemoryStyle::Static { bound } => {
@@ -105,7 +95,6 @@ impl MmapMemory {
            mmap: mmap.into(),
            maximum: plan.memory.maximum,
            offset_guard_size: offset_guard_bytes,
            needs_signal_handlers,
        })
    }
}
@@ -177,10 +166,192 @@ impl RuntimeLinearMemory for MmapMemory {

    /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
    fn vmmemory(&self) -> VMMemoryDefinition {
        let mut mmap = self.mmap.borrow_mut();
        let mmap = self.mmap.borrow();
        VMMemoryDefinition {
            base: mmap.alloc.as_mut_ptr(),
            current_length: mmap.size as usize * WASM_PAGE_SIZE as usize,
        }
    }
}

enum MemoryStorage {
    Static {
        base: *mut u8,
        size: Cell<u32>,
        maximum: u32,
        make_accessible: fn(*mut u8, usize) -> Result<()>,
        /// Stores the pages in the linear memory that have faulted as guard pages when using the `uffd` feature.
        /// These pages need their protection level reset before the memory can grow.
        #[cfg(all(feature = "uffd", target_os = "linux"))]
        guard_page_faults: RefCell<Vec<(*mut u8, usize, fn(*mut u8, usize) -> Result<()>)>>,
    },
    Dynamic(Box<dyn RuntimeLinearMemory>),
}

/// Represents an instantiation of a WebAssembly memory.
pub struct Memory(MemoryStorage);

impl Memory {
    /// Create a new dynamic (movable) memory instance for the specified plan.
    pub fn new_dynamic(plan: &MemoryPlan, creator: &dyn RuntimeMemoryCreator) -> Result<Self> {
        Ok(Self(MemoryStorage::Dynamic(creator.new_memory(plan)?)))
    }

    /// Create a new static (immovable) memory instance for the specified plan.
    pub fn new_static(
        plan: &MemoryPlan,
        base: *mut u8,
        maximum: u32,
        make_accessible: fn(*mut u8, usize) -> Result<()>,
    ) -> Result<Self> {
        if plan.memory.minimum > 0 {
            make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize)?;
        }

        Ok(Self(MemoryStorage::Static {
            base,
            size: Cell::new(plan.memory.minimum),
            maximum: min(plan.memory.maximum.unwrap_or(maximum), maximum),
            make_accessible,
            #[cfg(all(feature = "uffd", target_os = "linux"))]
            guard_page_faults: RefCell::new(Vec::new()),
        }))
    }

    /// Returns the number of allocated wasm pages.
    pub fn size(&self) -> u32 {
        match &self.0 {
            MemoryStorage::Static { size, .. } => size.get(),
            MemoryStorage::Dynamic(mem) => mem.size(),
        }
    }

    /// Returns whether or not the underlying storage of the memory is "static".
    pub(crate) fn is_static(&self) -> bool {
        if let MemoryStorage::Static { .. } = &self.0 {
            true
        } else {
            false
        }
    }

    /// Grow memory by the specified amount of wasm pages.
    ///
    /// Returns `None` if memory can't be grown by the specified amount
    /// of wasm pages.
    pub fn grow(&self, delta: u32) -> Option<u32> {
        match &self.0 {
            MemoryStorage::Static {
                base,
                size,
                maximum,
                make_accessible,
                ..
            } => {
                // Reset any faulted guard pages before growing the memory.
                #[cfg(all(feature = "uffd", target_os = "linux"))]
                self.reset_guard_pages().ok()?;

                let old_size = size.get();
                if delta == 0 {
                    return Some(old_size);
                }

                let new_size = old_size.checked_add(delta)?;

                if new_size > *maximum || new_size >= WASM_MAX_PAGES {
                    return None;
                }

                let start = usize::try_from(old_size).unwrap() * WASM_PAGE_SIZE as usize;
                let len = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;

                make_accessible(unsafe { base.add(start) }, len).ok()?;

                size.set(new_size);

                Some(old_size)
            }
            MemoryStorage::Dynamic(mem) => mem.grow(delta),
        }
    }

    /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
    pub fn vmmemory(&self) -> VMMemoryDefinition {
        match &self.0 {
            MemoryStorage::Static { base, size, .. } => VMMemoryDefinition {
                base: *base,
                current_length: size.get() as usize * WASM_PAGE_SIZE as usize,
            },
            MemoryStorage::Dynamic(mem) => mem.vmmemory(),
        }
    }

    /// Records a faulted guard page in a static memory.
    ///
    /// This is used to track faulted guard pages that need to be reset for the uffd feature.
    ///
    /// This function will panic if called on a dynamic memory.
    #[cfg(all(feature = "uffd", target_os = "linux"))]
    pub(crate) fn record_guard_page_fault(
        &self,
        page_addr: *mut u8,
        size: usize,
        reset: fn(*mut u8, usize) -> Result<()>,
    ) {
        match &self.0 {
            MemoryStorage::Static {
                guard_page_faults, ..
            } => {
                guard_page_faults
                    .borrow_mut()
                    .push((page_addr, size, reset));
            }
            MemoryStorage::Dynamic(_) => {
                unreachable!("dynamic memories should not have guard page faults")
            }
        }
    }

    /// Resets the previously faulted guard pages of a static memory.
    ///
    /// This is used to reset the protection of any guard pages that were previously faulted.
    ///
    /// This function will panic if called on a dynamic memory.
    #[cfg(all(feature = "uffd", target_os = "linux"))]
    pub(crate) fn reset_guard_pages(&self) -> Result<()> {
        match &self.0 {
            MemoryStorage::Static {
                guard_page_faults, ..
            } => {
                let mut faults = guard_page_faults.borrow_mut();
                for (addr, len, reset) in faults.drain(..) {
                    reset(addr, len)?;
                }
            }
            MemoryStorage::Dynamic(_) => {
                unreachable!("dynamic memories should not have guard page faults")
            }
        }

        Ok(())
    }
}

// The default memory representation is an empty memory that cannot grow.
impl Default for Memory {
    fn default() -> Self {
        fn make_accessible(_ptr: *mut u8, _len: usize) -> Result<()> {
            unreachable!()
        }

        Self(MemoryStorage::Static {
            base: ptr::null_mut(),
            size: Cell::new(0),
            maximum: 0,
            make_accessible,
            #[cfg(all(feature = "uffd", target_os = "linux"))]
            guard_page_faults: RefCell::new(Vec::new()),
        })
    }
}
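// A minimal sketch of how a static memory behaves, assuming a pre-reserved
// base pointer `base` of at least 10 Wasm pages (the names here are
// hypothetical, for illustration only):
//
//     let memory = Memory::new_static(&plan, base, 10, commit_memory_pages)?;
//     assert_eq!(memory.size(), plan.memory.minimum);
//     // Growing commits `delta` more pages via `make_accessible` and returns
//     // the old size, mirroring `memory.grow` semantics.
//     let old = memory.grow(1).expect("within the static maximum");
//     assert_eq!(memory.size(), old + 1);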

@@ -1,6 +1,7 @@
//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.

use anyhow::{bail, Result};
use more_asserts::assert_le;
use more_asserts::assert_lt;
use std::io;
@@ -38,7 +39,7 @@ impl Mmap {
    }

    /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
    pub fn with_at_least(size: usize) -> Result<Self, String> {
    pub fn with_at_least(size: usize) -> Result<Self> {
        let page_size = region::page::size();
        let rounded_size = round_up_to_page_size(size, page_size);
        Self::accessible_reserved(rounded_size, rounded_size)
@@ -48,10 +49,7 @@ impl Mmap {
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
    #[cfg(not(target_os = "windows"))]
    pub fn accessible_reserved(
        accessible_size: usize,
        mapping_size: usize,
    ) -> Result<Self, String> {
    pub fn accessible_reserved(accessible_size: usize, mapping_size: usize) -> Result<Self> {
        let page_size = region::page::size();
        assert_le!(accessible_size, mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
@@ -76,7 +74,7 @@ impl Mmap {
            )
        };
        if ptr as isize == -1_isize {
            return Err(io::Error::last_os_error().to_string());
            bail!("mmap failed: {}", io::Error::last_os_error());
        }

        Self {
@@ -96,7 +94,7 @@ impl Mmap {
            )
        };
        if ptr as isize == -1_isize {
            return Err(io::Error::last_os_error().to_string());
            bail!("mmap failed: {}", io::Error::last_os_error());
        }

        let mut result = Self {
@@ -117,13 +115,14 @@ impl Mmap {
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
    #[cfg(target_os = "windows")]
    pub fn accessible_reserved(
        accessible_size: usize,
        mapping_size: usize,
    ) -> Result<Self, String> {
    pub fn accessible_reserved(accessible_size: usize, mapping_size: usize) -> Result<Self> {
        use winapi::um::memoryapi::VirtualAlloc;
        use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE};

        if mapping_size == 0 {
            return Ok(Self::new());
        }

        let page_size = region::page::size();
        assert_le!(accessible_size, mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
@@ -140,7 +139,7 @@ impl Mmap {
            )
        };
        if ptr.is_null() {
            return Err(io::Error::last_os_error().to_string());
            bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
        }

        Self {
@@ -152,7 +151,7 @@ impl Mmap {
        let ptr =
            unsafe { VirtualAlloc(ptr::null_mut(), mapping_size, MEM_RESERVE, PAGE_NOACCESS) };
        if ptr.is_null() {
            return Err(io::Error::last_os_error().to_string());
            bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
        }

        let mut result = Self {
@@ -173,7 +172,7 @@ impl Mmap {
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
    #[cfg(not(target_os = "windows"))]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<()> {
        let page_size = region::page::size();
        assert_eq!(start & (page_size - 1), 0);
        assert_eq!(len & (page_size - 1), 0);
@@ -182,15 +181,18 @@ impl Mmap {

        // Commit the accessible size.
        let ptr = self.ptr as *const u8;
        unsafe { region::protect(ptr.add(start), len, region::Protection::READ_WRITE) }
            .map_err(|e| e.to_string())
        unsafe {
            region::protect(ptr.add(start), len, region::Protection::READ_WRITE)?;
        }

        Ok(())
    }

    /// Make the memory starting at `start` and extending for `len` bytes accessible.
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
    #[cfg(target_os = "windows")]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<()> {
        use winapi::ctypes::c_void;
        use winapi::um::memoryapi::VirtualAlloc;
        use winapi::um::winnt::{MEM_COMMIT, PAGE_READWRITE};
@@ -212,7 +214,7 @@ impl Mmap {
        }
        .is_null()
        {
            return Err(io::Error::last_os_error().to_string());
            bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
        }

        Ok(())
@@ -234,7 +236,7 @@ impl Mmap {
    }

    /// Return the allocated memory as a mutable pointer to u8.
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.ptr as *mut u8
    }

@@ -247,6 +249,11 @@ impl Mmap {
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    #[allow(dead_code)]
    pub(crate) unsafe fn from_raw(ptr: usize, len: usize) -> Self {
        Self { ptr, len }
    }
}

impl Drop for Mmap {

@@ -4,20 +4,17 @@

use crate::vmcontext::{VMCallerCheckedAnyfunc, VMTableDefinition};
use crate::{Trap, VMExternRef};
use std::cell::RefCell;
use std::convert::{TryFrom, TryInto};
use std::cell::{Cell, RefCell};
use std::cmp::min;
use std::convert::TryInto;
use std::ops::Range;
use std::ptr;
use wasmtime_environ::wasm::TableElementType;
use wasmtime_environ::{ir, TablePlan, TableStyle};

/// A table instance.
#[derive(Debug)]
pub struct Table {
    elements: RefCell<TableElements>,
    maximum: Option<u32>,
}
use wasmtime_environ::{ir, TablePlan};

/// An element going into or coming out of a table.
///
/// Table elements are stored as pointers and are default-initialized with `ptr::null_mut`.
#[derive(Clone, Debug)]
pub enum TableElement {
    /// A `funcref`.
@@ -26,44 +23,150 @@ pub enum TableElement {
    ExternRef(Option<VMExternRef>),
}

#[derive(Debug)]
enum TableElements {
    FuncRefs(Vec<*mut VMCallerCheckedAnyfunc>),
    ExternRefs(Vec<Option<VMExternRef>>),
impl TableElement {
    /// Consumes the given raw pointer into a table element.
    ///
    /// # Safety
    ///
    /// This is unsafe as it will *not* clone any externref, leaving the reference count unchanged.
    ///
    /// This should only be used if the raw pointer is no longer in use.
    unsafe fn from_raw(ty: TableElementType, ptr: *mut u8) -> Self {
        match ty {
            TableElementType::Func => Self::FuncRef(ptr as _),
            TableElementType::Val(_) => Self::ExternRef(if ptr.is_null() {
                None
            } else {
                Some(VMExternRef::from_raw(ptr))
            }),
        }
    }

    /// Clones a table element from the underlying raw pointer.
    ///
    /// # Safety
    ///
    /// This is unsafe as it will clone any externref, incrementing the reference count.
    unsafe fn clone_from_raw(ty: TableElementType, ptr: *mut u8) -> Self {
        match ty {
            TableElementType::Func => Self::FuncRef(ptr as _),
            TableElementType::Val(_) => Self::ExternRef(if ptr.is_null() {
                None
            } else {
                Some(VMExternRef::clone_from_raw(ptr))
            }),
        }
    }

    /// Consumes a table element into a raw pointer.
    ///
    /// # Safety
    ///
    /// This is unsafe as it will consume any underlying externref into a raw pointer without modifying
    /// the reference count.
    ///
    /// Use `from_raw` to properly drop any table elements stored as raw pointers.
    unsafe fn into_raw(self) -> *mut u8 {
        match self {
            Self::FuncRef(e) => e as _,
            Self::ExternRef(e) => e.map(|e| e.into_raw()).unwrap_or(ptr::null_mut()),
        }
    }
}
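// The raw-pointer round trip these methods establish, in short (a reading of
// the safety comments above, not additional API): `into_raw` transfers
// ownership of an externref's count into the table slot, `clone_from_raw`
// adds a count for a borrowed read, and `from_raw` transfers the slot's count
// back out so dropping the returned element releases it. Every `into_raw`
// must therefore be balanced by exactly one `from_raw` on the same pointer.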

impl From<*mut VMCallerCheckedAnyfunc> for TableElement {
    fn from(f: *mut VMCallerCheckedAnyfunc) -> TableElement {
        TableElement::FuncRef(f)
    }
}

impl From<Option<VMExternRef>> for TableElement {
    fn from(x: Option<VMExternRef>) -> TableElement {
        TableElement::ExternRef(x)
    }
}

impl From<VMExternRef> for TableElement {
    fn from(x: VMExternRef) -> TableElement {
        TableElement::ExternRef(Some(x))
    }
}

#[derive(Debug)]
enum TableStorage {
    Static {
        data: *mut *mut u8,
        size: Cell<u32>,
        ty: TableElementType,
        maximum: u32,
    },
    Dynamic {
        elements: RefCell<Vec<*mut u8>>,
        ty: TableElementType,
        maximum: Option<u32>,
    },
}

/// Represents an instance's table.
#[derive(Debug)]
pub struct Table(TableStorage);

impl Table {
    /// Create a new table instance with specified minimum and maximum number of elements.
    pub fn new(plan: &TablePlan) -> Self {
        let min = usize::try_from(plan.table.minimum).unwrap();
        let elements = RefCell::new(match plan.table.ty {
            TableElementType::Func => TableElements::FuncRefs(vec![ptr::null_mut(); min]),
            TableElementType::Val(ty) => {
                debug_assert_eq!(ty, crate::ref_type());
                TableElements::ExternRefs(vec![None; min])
            }
        });
        match plan.style {
            TableStyle::CallerChecksSignature => Self {
                elements,
                maximum: plan.table.maximum,
            },
        }
    /// Create a new dynamic (movable) table instance for the specified table plan.
    pub fn new_dynamic(plan: &TablePlan) -> Self {
        let elements = RefCell::new(vec![ptr::null_mut(); plan.table.minimum as usize]);
        let ty = plan.table.ty.clone();
        let maximum = plan.table.maximum;
        Self(TableStorage::Dynamic {
            elements,
            ty,
            maximum,
        })
    }

    /// Create a new static (immovable) table instance for the specified table plan.
    pub fn new_static(plan: &TablePlan, data: *mut *mut u8, maximum: u32) -> Self {
        let size = Cell::new(plan.table.minimum);
        let ty = plan.table.ty.clone();
        let maximum = min(plan.table.maximum.unwrap_or(maximum), maximum);
        Self(TableStorage::Static {
            data,
            size,
            ty,
            maximum,
        })
    }

    /// Returns the type of the elements in this table.
    pub fn element_type(&self) -> TableElementType {
        match &*self.elements.borrow() {
            TableElements::FuncRefs(_) => TableElementType::Func,
            TableElements::ExternRefs(_) => TableElementType::Val(crate::ref_type()),
        match &self.0 {
            TableStorage::Static { ty, .. } => *ty,
            TableStorage::Dynamic { ty, .. } => *ty,
        }
    }

    /// Returns whether or not the underlying storage of the table is "static".
    pub(crate) fn is_static(&self) -> bool {
        if let TableStorage::Static { .. } = &self.0 {
            true
        } else {
            false
        }
    }

    /// Returns the number of allocated elements.
    pub fn size(&self) -> u32 {
        match &*self.elements.borrow() {
            TableElements::FuncRefs(x) => x.len().try_into().unwrap(),
            TableElements::ExternRefs(x) => x.len().try_into().unwrap(),
        match &self.0 {
            TableStorage::Static { size, .. } => size.get(),
            TableStorage::Dynamic { elements, .. } => elements.borrow().len().try_into().unwrap(),
        }
    }

    /// Returns the maximum number of elements.
    pub fn maximum(&self) -> Option<u32> {
        match &self.0 {
            TableStorage::Static { maximum, .. } => Some(*maximum),
            TableStorage::Dynamic { maximum, .. } => maximum.clone(),
        }
    }

@@ -71,20 +174,30 @@ impl Table {
    ///
    /// Returns a trap error on out-of-bounds accesses.
    pub fn fill(&self, dst: u32, val: TableElement, len: u32) -> Result<(), Trap> {
        let start = dst;
        let start = dst as usize;
        let end = start
            .checked_add(len)
            .checked_add(len as usize)
            .ok_or_else(|| Trap::wasm(ir::TrapCode::TableOutOfBounds))?;

        if end > self.size() {
        if end > self.size() as usize {
            return Err(Trap::wasm(ir::TrapCode::TableOutOfBounds));
        }

        for i in start..end {
            self.set(i, val.clone()).unwrap();
        }
        debug_assert!(self.type_matches(&val));

        Ok(())
        self.with_elements_mut(|elements| {
            if let Some((last, elements)) = elements[start..end].split_last_mut() {
                let ty = self.element_type();

                for e in elements {
                    Self::set_raw(ty, e, val.clone());
                }

                Self::set_raw(self.element_type(), last, val);
            }

            Ok(())
        })
    }

    /// Grow table by the specified amount of elements.
@@ -104,40 +217,43 @@ impl Table {
    /// Generally, prefer using `InstanceHandle::table_grow`, which encapsulates
    /// this unsafety.
    pub unsafe fn grow(&self, delta: u32, init_value: TableElement) -> Option<u32> {
        let size = self.size();
        let old_size = self.size();

        let new_len = size.checked_add(delta)?;
        if let Some(max) = self.maximum {
            if new_len > max {
        let new_size = old_size.checked_add(delta)?;
        if let Some(max) = self.maximum() {
            if new_size > max {
                return None;
            }
        }
        let new_len = usize::try_from(new_len).unwrap();

        match &mut *self.elements.borrow_mut() {
            TableElements::FuncRefs(x) => {
                let init_value = init_value.try_into().ok()?;
                x.resize(new_len, init_value)
        debug_assert!(self.type_matches(&init_value));

        // First resize the storage and then fill with the init value
        match &self.0 {
            TableStorage::Static { size, .. } => {
                size.set(new_size);
            }
            TableElements::ExternRefs(x) => {
                let init_value = init_value.try_into().ok()?;
                x.resize(new_len, init_value)
            TableStorage::Dynamic { elements, .. } => {
                let mut elements = elements.borrow_mut();
                elements.resize(new_size as usize, ptr::null_mut());
            }
        }

        Some(size)
        self.fill(old_size, init_value, delta)
            .expect("table should not be out of bounds");

        Some(old_size)
    }

    /// Get reference to the specified element.
    ///
    /// Returns `None` if the index is out of bounds.
    pub fn get(&self, index: u32) -> Option<TableElement> {
        match &*self.elements.borrow() {
            TableElements::FuncRefs(x) => x.get(index as usize).cloned().map(TableElement::FuncRef),
            TableElements::ExternRefs(x) => {
                x.get(index as usize).cloned().map(TableElement::ExternRef)
            }
        }
        self.with_elements(|elements| {
            elements
                .get(index as usize)
                .map(|p| unsafe { TableElement::clone_from_raw(self.element_type(), *p) })
        })
    }

    /// Set reference to the specified element.
@@ -147,18 +263,15 @@ impl Table {
    /// Returns an error if `index` is out of bounds or if this table type does
    /// not match the element type.
    pub fn set(&self, index: u32, elem: TableElement) -> Result<(), ()> {
        let mut elems = self.elements.borrow_mut();
        match &mut *elems {
            TableElements::FuncRefs(x) => {
                let slot = x.get_mut(index as usize).ok_or(())?;
                *slot = elem.try_into().or(Err(()))?;
            }
            TableElements::ExternRefs(x) => {
                let slot = x.get_mut(index as usize).ok_or(())?;
                *slot = elem.try_into().or(Err(()))?;
            }
        if !self.type_matches(&elem) {
            return Err(());
        }
        Ok(())

        self.with_elements_mut(|elements| {
            let e = elements.get_mut(index as usize).ok_or(())?;
            Self::set_raw(self.element_type(), e, elem);
            Ok(())
        })
    }

    /// Copy `len` elements from `src_table[src_index..]` into `dst_table[dst_index..]`.
@@ -186,21 +299,19 @@ impl Table {
            return Err(Trap::wasm(ir::TrapCode::TableOutOfBounds));
        }

        let srcs = src_index..src_index + len;
        let dsts = dst_index..dst_index + len;
        debug_assert!(
            dst_table.element_type() == src_table.element_type(),
            "table element type mismatch"
        );

        // Note on the unwraps: the bounds check above means that these will
        // never panic.
        //
        // TODO(#983): investigate replacing this get/set loop with a `memcpy`.
        if dst_index <= src_index {
            for (s, d) in (srcs).zip(dsts) {
                dst_table.set(d, src_table.get(s).unwrap()).unwrap();
            }
        let src_range = src_index as usize..src_index as usize + len as usize;
        let dst_range = dst_index as usize..dst_index as usize + len as usize;

        // Check if the tables are the same as we cannot mutably borrow and also borrow the same `RefCell`
        if ptr::eq(dst_table, src_table) {
            Self::copy_elements_within(dst_table, dst_range, src_range);
        } else {
            for (s, d) in srcs.rev().zip(dsts.rev()) {
                dst_table.set(d, src_table.get(s).unwrap()).unwrap();
            }
            Self::copy_elements(dst_table, src_table, dst_range, src_range);
        }

        Ok(())
@@ -208,55 +319,155 @@ impl Table {

    /// Return a `VMTableDefinition` for exposing the table to compiled wasm code.
    pub fn vmtable(&self) -> VMTableDefinition {
        match &*self.elements.borrow() {
            TableElements::FuncRefs(x) => VMTableDefinition {
                base: x.as_ptr() as *const u8 as *mut u8,
                current_elements: x.len().try_into().unwrap(),
        match &self.0 {
            TableStorage::Static { data, size, .. } => VMTableDefinition {
                base: *data as _,
                current_elements: size.get(),
            },
            TableElements::ExternRefs(x) => VMTableDefinition {
                base: x.as_ptr() as *const u8 as *mut u8,
                current_elements: x.len().try_into().unwrap(),
            TableStorage::Dynamic { elements, .. } => {
                let elements = elements.borrow();
                VMTableDefinition {
                    base: elements.as_ptr() as _,
                    current_elements: elements.len().try_into().unwrap(),
                }
            }
        }
    }

    fn type_matches(&self, val: &TableElement) -> bool {
        match (&val, self.element_type()) {
            (TableElement::FuncRef(_), TableElementType::Func) => true,
            (TableElement::ExternRef(_), TableElementType::Val(_)) => true,
            _ => false,
        }
    }

    fn with_elements<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&[*mut u8]) -> R,
    {
        match &self.0 {
            TableStorage::Static { data, size, .. } => unsafe {
                f(std::slice::from_raw_parts(*data, size.get() as usize))
            },
            TableStorage::Dynamic { elements, .. } => {
                let elements = elements.borrow();
                f(elements.as_slice())
            }
        }
    }

    fn with_elements_mut<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&mut [*mut u8]) -> R,
    {
        match &self.0 {
            TableStorage::Static { data, size, .. } => unsafe {
                f(std::slice::from_raw_parts_mut(*data, size.get() as usize))
            },
            TableStorage::Dynamic { elements, .. } => {
                let mut elements = elements.borrow_mut();
                f(elements.as_mut_slice())
            }
        }
    }

    fn set_raw(ty: TableElementType, elem: &mut *mut u8, val: TableElement) {
        unsafe {
            let old = *elem;
            *elem = val.into_raw();

            // Drop the old element
            let _ = TableElement::from_raw(ty, old);
        }
    }

    fn copy_elements(
        dst_table: &Self,
        src_table: &Self,
        dst_range: Range<usize>,
        src_range: Range<usize>,
    ) {
        // This can only be used when copying between different tables
        debug_assert!(!ptr::eq(dst_table, src_table));

        let ty = dst_table.element_type();

        match ty {
            TableElementType::Func => {
                // `funcref` are `Copy`, so just do a memcpy
                dst_table.with_elements_mut(|dst| {
                    src_table.with_elements(|src| dst[dst_range].copy_from_slice(&src[src_range]))
                });
            }
            TableElementType::Val(_) => {
                // We need to clone each `externref`
                dst_table.with_elements_mut(|dst| {
                    src_table.with_elements(|src| {
                        for (s, d) in src_range.zip(dst_range) {
                            let elem = unsafe { TableElement::clone_from_raw(ty, src[s]) };
                            Self::set_raw(ty, &mut dst[d], elem);
                        }
                    })
                });
            }
        }
    }

    fn copy_elements_within(table: &Self, dst_range: Range<usize>, src_range: Range<usize>) {
        let ty = table.element_type();

        match ty {
            TableElementType::Func => {
                // `funcref` are `Copy`, so just do a memmove
                table.with_elements_mut(|dst| dst.copy_within(src_range, dst_range.start));
            }
            TableElementType::Val(_) => {
                // We need to clone each `externref` while handling overlapping ranges
                table.with_elements_mut(|dst| {
                    if dst_range.start <= src_range.start {
                        for (s, d) in src_range.zip(dst_range) {
                            let elem = unsafe { TableElement::clone_from_raw(ty, dst[s]) };
                            Self::set_raw(ty, &mut dst[d], elem);
                        }
                    } else {
                        for (s, d) in src_range.rev().zip(dst_range.rev()) {
                            let elem = unsafe { TableElement::clone_from_raw(ty, dst[s]) };
                            Self::set_raw(ty, &mut dst[d], elem);
                        }
                    }
                });
            }
        }
    }
}
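// A short sketch of how the two storage flavors are driven (the names are
// illustrative; `plan` is a funcref TablePlan and `slots` a pre-reserved
// array owned by the pooling allocator):
//
//     let dynamic = Table::new_dynamic(&plan);
//     let static_ = Table::new_static(&plan, slots.as_mut_ptr(), 100);
//     // Both share the same element model: raw pointers in a slice. `grow`
//     // only bumps `size` (static) or resizes the Vec (dynamic), then reuses
//     // `fill` to write the init value into the newly exposed slots.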

impl TryFrom<TableElement> for *mut VMCallerCheckedAnyfunc {
    type Error = TableElement;
impl Drop for Table {
    fn drop(&mut self) {
        let ty = self.element_type();

    fn try_from(e: TableElement) -> Result<Self, Self::Error> {
        match e {
            TableElement::FuncRef(f) => Ok(f),
            _ => Err(e),
        // funcref tables can skip this
        if let TableElementType::Func = ty {
            return;
        }

        // Properly drop any table elements stored in the table
        self.with_elements(|elements| {
            for element in elements.iter() {
                let _ = unsafe { TableElement::from_raw(ty, *element) };
            }
        });
    }
}

impl TryFrom<TableElement> for Option<VMExternRef> {
    type Error = TableElement;

    fn try_from(e: TableElement) -> Result<Self, Self::Error> {
        match e {
            TableElement::ExternRef(x) => Ok(x),
            _ => Err(e),
        }
    }
}

impl From<*mut VMCallerCheckedAnyfunc> for TableElement {
    fn from(f: *mut VMCallerCheckedAnyfunc) -> TableElement {
        TableElement::FuncRef(f)
    }
}

impl From<Option<VMExternRef>> for TableElement {
    fn from(x: Option<VMExternRef>) -> TableElement {
        TableElement::ExternRef(x)
    }
}

impl From<VMExternRef> for TableElement {
    fn from(x: VMExternRef) -> TableElement {
        TableElement::ExternRef(Some(x))
// The default table representation is an empty funcref table that cannot grow.
impl Default for Table {
    fn default() -> Self {
        Self(TableStorage::Static {
            data: std::ptr::null_mut(),
            size: Cell::new(0),
            ty: TableElementType::Func,
            maximum: 0,
        })
    }
}

@@ -750,7 +750,7 @@ impl VMContext {
    }
}

///
/// Trampoline function pointer type.
pub type VMTrampoline = unsafe extern "C" fn(
    *mut VMContext, // callee vmctx
    *mut VMContext, // caller vmctx

@@ -73,3 +73,6 @@ experimental_x64 = ["wasmtime-jit/experimental_x64"]
# Enables support for "async stores" as well as defining host functions as
# `async fn` and calling functions asynchronously.
async = ["wasmtime-fiber"]

# Enables userfaultfd support in the runtime's pooling allocator when building on Linux
uffd = ["wasmtime-runtime/uffd"]

@@ -14,6 +14,257 @@ use wasmtime_environ::settings::{self, Configurable, SetError};
use wasmtime_environ::{isa, isa::TargetIsa, Tunables};
use wasmtime_jit::{native, CompilationStrategy, Compiler};
use wasmtime_profiling::{JitDumpAgent, NullProfilerAgent, ProfilingAgent, VTuneAgent};
use wasmtime_runtime::{InstanceAllocator, OnDemandInstanceAllocator, PoolingInstanceAllocator};

/// Represents the limits placed on a module for compiling with the pooling instance allocation strategy.
#[derive(Debug, Copy, Clone)]
pub struct ModuleLimits {
    /// The maximum number of imported functions for a module (default is 1000).
    pub imported_functions: u32,

    /// The maximum number of imported tables for a module (default is 0).
    pub imported_tables: u32,

    /// The maximum number of imported linear memories for a module (default is 0).
    pub imported_memories: u32,

    /// The maximum number of imported globals for a module (default is 0).
    pub imported_globals: u32,

    /// The maximum number of defined types for a module (default is 100).
    pub types: u32,

    /// The maximum number of defined functions for a module (default is 10000).
    pub functions: u32,

    /// The maximum number of defined tables for a module (default is 1).
    pub tables: u32,

    /// The maximum number of defined linear memories for a module (default is 1).
    pub memories: u32,

    /// The maximum number of defined globals for a module (default is 10).
    pub globals: u32,

    /// The maximum table elements for any table defined in a module (default is 10000).
    ///
    /// If a table's minimum element limit is greater than this value, the module will
    /// fail to compile.
    ///
    /// If a table's maximum element limit is unbounded or greater than this value,
    /// the maximum will be `table_elements` for the purpose of any `table.grow` instruction.
    pub table_elements: u32,

    /// The maximum number of pages for any linear memory defined in a module (default is 160).
    ///
    /// The default of 160 means at most 10 MiB of host memory may be committed for each instance.
    ///
    /// If a memory's minimum page limit is greater than this value, the module will
    /// fail to compile.
    ///
    /// If a memory's maximum page limit is unbounded or greater than this value,
    /// the maximum will be `memory_pages` for the purpose of any `memory.grow` instruction.
    ///
    /// This value cannot exceed any memory reservation size limits placed on instances.
    pub memory_pages: u32,
}

impl Default for ModuleLimits {
    fn default() -> Self {
        // Use the defaults from the runtime
        let wasmtime_runtime::ModuleLimits {
            imported_functions,
            imported_tables,
            imported_memories,
            imported_globals,
            types,
            functions,
            tables,
            memories,
            globals,
            table_elements,
            memory_pages,
        } = wasmtime_runtime::ModuleLimits::default();

        Self {
            imported_functions,
            imported_tables,
            imported_memories,
            imported_globals,
            types,
            functions,
            tables,
            memories,
            globals,
            table_elements,
            memory_pages,
        }
    }
}

// This exists so we can convert between the public Wasmtime API and the runtime representation
// without having to export runtime types from the Wasmtime API.
#[doc(hidden)]
impl Into<wasmtime_runtime::ModuleLimits> for ModuleLimits {
    fn into(self) -> wasmtime_runtime::ModuleLimits {
        let Self {
            imported_functions,
            imported_tables,
            imported_memories,
            imported_globals,
            types,
            functions,
            tables,
            memories,
            globals,
            table_elements,
            memory_pages,
        } = self;

        wasmtime_runtime::ModuleLimits {
            imported_functions,
            imported_tables,
            imported_memories,
            imported_globals,
            types,
            functions,
            tables,
            memories,
            globals,
            table_elements,
            memory_pages,
        }
    }
}

/// Represents the limits placed on instances by the pooling instance allocation strategy.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct InstanceLimits {
|
||||
/// The maximum number of concurrent instances supported (default is 1000).
|
||||
pub count: u32,
|
||||
|
||||
/// The maximum size, in bytes, of host address space to reserve for each linear memory of an instance.
|
||||
///
|
||||
/// Note: this value has important performance ramifications.
|
||||
///
|
||||
/// On 64-bit platforms, the default for this value will be 6 GiB. A value of less than 4 GiB will
|
||||
/// force runtime bounds checking for memory accesses and thus will negatively impact performance.
|
||||
/// Any value above 4 GiB will start eliding bounds checks provided the `offset` of the memory access is
|
||||
/// less than (`memory_reservation_size` - 4 GiB). A value of 8 GiB will completely elide *all* bounds
|
||||
/// checks; consequently, 8 GiB will be the maximum supported value. The default of 6 GiB reserves
|
||||
/// less host address space for each instance, but a memory access with an offset above 2 GiB will incur
|
||||
/// runtime bounds checks.
|
||||
///
|
||||
/// On 32-bit platforms, the default for this value will be 10 MiB. A 32-bit host has very limited address
|
||||
/// space to reserve for a lot of concurrent instances. As a result, runtime bounds checking will be used
|
||||
/// for all memory accesses. For better runtime performance, a 64-bit host is recommended.
|
||||
///
|
||||
/// This value will be rounded up by the WebAssembly page size (64 KiB).
|
||||
pub memory_reservation_size: u64,
|
||||
}
|
||||
|
||||
impl Default for InstanceLimits {
|
||||
fn default() -> Self {
|
||||
let wasmtime_runtime::InstanceLimits {
|
||||
count,
|
||||
memory_reservation_size,
|
||||
} = wasmtime_runtime::InstanceLimits::default();
|
||||
|
||||
Self {
|
||||
count,
|
||||
memory_reservation_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This exists so we can convert between the public Wasmtime API and the runtime representation
|
||||
// without having to export runtime types from the Wasmtime API.
|
||||
#[doc(hidden)]
|
||||
impl Into<wasmtime_runtime::InstanceLimits> for InstanceLimits {
|
||||
fn into(self) -> wasmtime_runtime::InstanceLimits {
|
||||
let Self {
|
||||
count,
|
||||
memory_reservation_size,
|
||||
} = self;
|
||||
|
||||
wasmtime_runtime::InstanceLimits {
|
||||
count,
|
||||
memory_reservation_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The allocation strategy to use for the pooling instance allocation strategy.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum PoolingAllocationStrategy {
|
||||
/// Allocate from the next available instance.
|
||||
NextAvailable,
|
||||
/// Allocate from a random available instance.
|
||||
Random,
|
||||
}
|
||||
|
||||
impl Default for PoolingAllocationStrategy {
|
||||
fn default() -> Self {
|
||||
match wasmtime_runtime::PoolingAllocationStrategy::default() {
|
||||
wasmtime_runtime::PoolingAllocationStrategy::NextAvailable => Self::NextAvailable,
|
||||
wasmtime_runtime::PoolingAllocationStrategy::Random => Self::Random,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This exists so we can convert between the public Wasmtime API and the runtime representation
|
||||
// without having to export runtime types from the Wasmtime API.
|
||||
#[doc(hidden)]
|
||||
impl Into<wasmtime_runtime::PoolingAllocationStrategy> for PoolingAllocationStrategy {
|
||||
fn into(self) -> wasmtime_runtime::PoolingAllocationStrategy {
|
||||
match self {
|
||||
Self::NextAvailable => wasmtime_runtime::PoolingAllocationStrategy::NextAvailable,
|
||||
Self::Random => wasmtime_runtime::PoolingAllocationStrategy::Random,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents the module instance allocation strategy to use.
|
||||
#[derive(Clone)]
|
||||
pub enum InstanceAllocationStrategy {
|
||||
/// The on-demand instance allocation strategy.
|
||||
///
|
||||
/// Resources related to a module instance are allocated at instantiation time and
|
||||
/// immediately deallocated when the `Store` referencing the instance is dropped.
|
||||
///
|
||||
/// This is the default allocation strategy for Wasmtime.
|
||||
OnDemand,
|
||||
/// The pooling instance allocation strategy.
|
||||
///
|
||||
/// A pool of resources is created in advance and module instantiation reuses resources
|
||||
/// from the pool. Resources are returned to the pool when the `Store` referencing the instance
|
||||
/// is dropped.
|
||||
Pooling {
|
||||
/// The allocation strategy to use.
|
||||
strategy: PoolingAllocationStrategy,
|
||||
/// The module limits to use.
|
||||
module_limits: ModuleLimits,
|
||||
/// The instance limits to use.
|
||||
instance_limits: InstanceLimits,
|
||||
},
|
||||
}
|
||||
|
||||
impl InstanceAllocationStrategy {
|
||||
/// The default pooling instance allocation strategy.
|
||||
pub fn pooling() -> Self {
|
||||
Self::Pooling {
|
||||
strategy: PoolingAllocationStrategy::default(),
|
||||
module_limits: ModuleLimits::default(),
|
||||
instance_limits: InstanceLimits::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for InstanceAllocationStrategy {
|
||||
fn default() -> Self {
|
||||
Self::OnDemand
|
||||
}
|
||||
}
|
||||
|
||||
/// Global configuration options used to create an [`Engine`](crate::Engine)
|
||||
/// and customize its behavior.
|
||||
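To make the shape of these new types concrete, here is a minimal sketch (not part of the diff) of how an embedder might assemble a pooling strategy value, overriding a couple of limits and leaning on `Default` for the rest:

```rust
use wasmtime::{
    InstanceAllocationStrategy, InstanceLimits, ModuleLimits, PoolingAllocationStrategy,
};

fn pooling_strategy() -> InstanceAllocationStrategy {
    // Allow up to 100 concurrent instances, each limited to two 64 KiB pages
    // of linear memory; all other limits keep their documented defaults.
    InstanceAllocationStrategy::Pooling {
        strategy: PoolingAllocationStrategy::NextAvailable,
        module_limits: ModuleLimits {
            memory_pages: 2,
            ..Default::default()
        },
        instance_limits: InstanceLimits {
            count: 100,
            ..Default::default()
        },
    }
}
```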
@@ -29,13 +280,18 @@ pub struct Config {
    #[cfg(feature = "cache")]
    pub(crate) cache_config: CacheConfig,
    pub(crate) profiler: Arc<dyn ProfilingAgent>,
    pub(crate) memory_creator: Option<MemoryCreatorProxy>,
    pub(crate) instance_allocator: Option<Arc<dyn InstanceAllocator>>,
    // The default instance allocator is used for instantiating host objects
    // and for module instantiation when `instance_allocator` is None
    pub(crate) default_instance_allocator: OnDemandInstanceAllocator,
    pub(crate) max_wasm_stack: usize,
    pub(crate) features: WasmFeatures,
    pub(crate) wasm_backtrace_details_env_used: bool,
    pub(crate) max_instances: usize,
    pub(crate) max_tables: usize,
    pub(crate) max_memories: usize,
    #[cfg(feature = "async")]
    pub(crate) async_stack_size: usize,
}

impl Config {

@@ -73,7 +329,8 @@ impl Config {
            #[cfg(feature = "cache")]
            cache_config: CacheConfig::new_cache_disabled(),
            profiler: Arc::new(NullProfilerAgent),
            memory_creator: None,
            instance_allocator: None,
            default_instance_allocator: OnDemandInstanceAllocator::new(None),
            max_wasm_stack: 1 << 20,
            wasm_backtrace_details_env_used: false,
            features: WasmFeatures {

@@ -85,6 +342,8 @@ impl Config {
            max_instances: 10_000,
            max_tables: 10_000,
            max_memories: 10_000,
            #[cfg(feature = "async")]
            async_stack_size: 2 << 20,
        };
        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);
        return ret;
@@ -159,23 +418,75 @@ impl Config {
        self
    }

    /// Configures the maximum amount of native stack space available to
    /// Configures the maximum amount of stack space available for
    /// executing WebAssembly code.
    ///
    /// WebAssembly code currently executes on the native call stack for its own
    /// call frames. WebAssembly, however, also has well-defined semantics on
    /// stack overflow. This is intended to be a knob which can help configure
    /// how much native stack space a wasm module is allowed to consume. Note
    /// that the number here is not super-precise, but rather wasm will take at
    /// most "pretty close to this much" stack space.
    /// WebAssembly has well-defined semantics on stack overflow. This is
    /// intended to be a knob which can help configure how much stack space
    /// wasm execution is allowed to consume. Note that the number here is not
    /// super-precise, but rather wasm will take at most "pretty close to this
    /// much" stack space.
    ///
    /// If a wasm call (or series of nested wasm calls) take more stack space
    /// than the `size` specified then a stack overflow trap will be raised.
    ///
    /// By default this option is 1 MB.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
    /// When the `async` feature is enabled, this value cannot exceed the
    /// `async_stack_size` option. Be careful not to set this value too close
    /// to `async_stack_size` as doing so may limit how much stack space
    /// is available for host functions. Unlike wasm functions that trap
    /// on stack overflow, a host function that overflows the stack will
    /// abort the process.
    ///
    /// `max_wasm_stack` must be set prior to setting an instance allocation
    /// strategy.
    ///
    /// By default this option is 1 MiB.
    pub fn max_wasm_stack(&mut self, size: usize) -> Result<&mut Self> {
        #[cfg(feature = "async")]
        if size > self.async_stack_size {
            bail!("wasm stack size cannot exceed the async stack size");
        }

        if size == 0 {
            bail!("wasm stack size cannot be zero");
        }

        if self.instance_allocator.is_some() {
            bail!(
                "wasm stack size cannot be modified after setting an instance allocation strategy"
            );
        }

        self.max_wasm_stack = size;
        self
        Ok(self)
    }

    /// Configures the size of the stacks used for asynchronous execution.
    ///
    /// This setting configures the size of the stacks that are allocated for
    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
    ///
    /// The amount of stack space guaranteed for host functions is
    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
    /// close to one another; doing so may cause host functions to overflow the
    /// stack and abort the process.
    ///
    /// `async_stack_size` must be set prior to setting an instance allocation
    /// strategy.
    ///
    /// By default this option is 2 MiB.
    #[cfg(feature = "async")]
    pub fn async_stack_size(&mut self, size: usize) -> Result<&mut Self> {
        if size < self.max_wasm_stack {
            bail!("async stack size cannot be less than the maximum wasm stack size");
        }
        if self.instance_allocator.is_some() {
            bail!(
                "async stack size cannot be modified after setting an instance allocation strategy"
            );
        }
        self.async_stack_size = size;
        Ok(self)
    }

    /// Configures whether the WebAssembly threads proposal will be enabled for
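A quick sketch of the ordering contract these now-fallible setters introduce: both stack sizes must be chosen before an instance allocation strategy is set, and (with the `async` feature enabled) `max_wasm_stack` may not exceed `async_stack_size`. Not part of the diff, just an illustration:

```rust
use anyhow::Result;
use wasmtime::Config;

fn configure_stacks() -> Result<Config> {
    let mut config = Config::new();
    // Grow the fiber stacks first (requires the `async` feature), then the
    // wasm stack; the reverse order would trip the `size > async_stack_size`
    // check above. Both calls must precede `with_allocation_strategy`.
    config.async_stack_size(4 << 20)?; // 4 MiB per fiber
    config.max_wasm_stack(2 << 20)?; // 2 MiB for wasm, leaving ~2 MiB for host frames
    Ok(config)
}
```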
@@ -504,12 +815,51 @@ impl Config {
        Ok(self)
    }

    /// Sets a custom memory creator
    /// Sets a custom memory creator.
    ///
    /// Custom memory creators are used when creating host `Memory` objects or when
    /// creating instance linear memories for the on-demand instance allocation strategy.
    pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
        self.memory_creator = Some(MemoryCreatorProxy { mem_creator });
        self.default_instance_allocator =
            OnDemandInstanceAllocator::new(Some(Arc::new(MemoryCreatorProxy(mem_creator))));
        self
    }

    /// Sets the instance allocation strategy to use.
    ///
    /// When using the pooling instance allocation strategy, all linear memories will be created as "static".
    ///
    /// This means the [`Config::static_memory_maximum_size`] and [`Config::static_memory_guard_size`] options
    /// will be ignored in favor of [`InstanceLimits::memory_reservation_size`] when the pooling instance
    /// allocation strategy is used.
    pub fn with_allocation_strategy(
        &mut self,
        strategy: InstanceAllocationStrategy,
    ) -> Result<&mut Self> {
        self.instance_allocator = match strategy {
            InstanceAllocationStrategy::OnDemand => None,
            InstanceAllocationStrategy::Pooling {
                strategy,
                module_limits,
                instance_limits,
            } => {
                #[cfg(feature = "async")]
                let stack_size = self.async_stack_size;

                #[cfg(not(feature = "async"))]
                let stack_size = 0;

                Some(Arc::new(PoolingInstanceAllocator::new(
                    strategy.into(),
                    module_limits.into(),
                    instance_limits.into(),
                    stack_size,
                )?))
            }
        };
        Ok(self)
    }

    /// Configures the maximum size, in bytes, where a linear memory is
    /// considered static, above which it'll be considered dynamic.
    ///
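End to end, opting into pooling is then a one-liner on `Config`. A hedged sketch using the `pooling()` convenience constructor from earlier in this diff (the default limits are assumed generous enough for this one-page module):

```rust
use anyhow::Result;
use wasmtime::{Config, Engine, Instance, InstanceAllocationStrategy, Module, Store};

fn main() -> Result<()> {
    let mut config = Config::new();
    // Default pooling limits: NextAvailable strategy, 1000 concurrent
    // instances, 160 pages per memory, per the doc comments above.
    config.with_allocation_strategy(InstanceAllocationStrategy::pooling())?;

    let engine = Engine::new(&config);
    let module = Module::new(&engine, r#"(module (memory 1))"#)?;
    let store = Store::new(&engine);
    let _instance = Instance::new(&store, &module, &[])?;
    Ok(())
}
```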
@@ -726,7 +1076,15 @@ impl Config {

    pub(crate) fn build_compiler(&self) -> Compiler {
        let isa = self.target_isa();
        Compiler::new(isa, self.strategy, self.tunables.clone(), self.features)
        let mut tunables = self.tunables.clone();
        self.instance_allocator().adjust_tunables(&mut tunables);
        Compiler::new(isa, self.strategy, tunables, self.features)
    }

    pub(crate) fn instance_allocator(&self) -> &dyn InstanceAllocator {
        self.instance_allocator
            .as_deref()
            .unwrap_or(&self.default_instance_allocator)
    }
}

@@ -547,10 +547,13 @@ impl Table {
            bail!("cross-`Store` table copies are not supported");
        }

        if dst_table.ty() != src_table.ty() {
            bail!("tables do not have the same element type");
        }

        // NB: We must use the `dst_table`'s `wasmtime_handle` for the
        // `dst_table_index` and vice versa for `src_table` since each table can
        // come from different modules.

        let dst_table_index = dst_table.wasmtime_table_index();
        let dst_table_index = dst_table.instance.get_defined_table(dst_table_index);

@@ -579,6 +582,11 @@ impl Table {
            bail!("cross-`Store` table fills are not supported");
        }

        // Ensure the fill value is the correct type
        if self.ty().element() != &val.ty() {
            bail!("mismatched element fill type");
        }

        let table_index = self.wasmtime_table_index();
        self.instance
            .handle
@@ -12,9 +12,9 @@ use wasmtime_environ::wasm::{
};
use wasmtime_environ::Initializer;
use wasmtime_runtime::{
    Imports, InstantiationError, RuntimeInstance, StackMapRegistry, VMContext,
    VMExternRefActivationsTable, VMFunctionBody, VMFunctionImport, VMGlobalImport, VMMemoryImport,
    VMTableImport,
    Imports, InstanceAllocationRequest, InstantiationError, RuntimeInstance, StackMapRegistry,
    VMContext, VMExternRefActivationsTable, VMFunctionBody, VMFunctionImport, VMGlobalImport,
    VMMemoryImport, VMTableImport,
};

/// An instantiated WebAssembly module.

@@ -492,18 +492,26 @@ impl<'a> Instantiator<'a> {
        // compiled JIT code within the `Store`.
        self.store.register_module(&self.cur.module);

        let config = self.store.engine().config();
        unsafe {
            let instance = compiled_module.instantiate(
                self.cur.build(),
                &self.store.lookup_shared_signature(self.cur.module.types()),
                config.memory_creator.as_ref().map(|a| a as _),
                self.store.interrupts(),
                Box::new(()),
                self.store.externref_activations_table() as *const VMExternRefActivationsTable
            let config = self.store.engine().config();

            let allocator = config.instance_allocator();

            let instance = allocator.allocate(InstanceAllocationRequest {
                module: compiled_module.module().clone(),
                finished_functions: compiled_module.finished_functions(),
                imports: self.cur.build(),
                lookup_shared_signature: &self
                    .store
                    .lookup_shared_signature(self.cur.module.types()),
                host_state: Box::new(()),
                interrupts: self.store.interrupts(),
                externref_activations_table: self.store.externref_activations_table()
                    as *const VMExternRefActivationsTable
                    as *mut _,
                self.store.stack_map_registry() as *const StackMapRegistry as *mut _,
            )?;
                stack_map_registry: self.store.stack_map_registry() as *const StackMapRegistry
                    as *mut _,
            })?;

            // After we've created the `InstanceHandle` we still need to run
            // initialization to set up data/elements/etc. We do this after adding

@@ -512,12 +520,9 @@ impl<'a> Instantiator<'a> {
            // initializers may have run which placed elements into other instance's
            // tables. This means that from this point on, regardless of whether
            // initialization is successful, we need to keep the instance alive.
            let instance = self.store.add_instance(instance);
            instance
                .initialize(
                    config.features.bulk_memory,
                    &compiled_module.data_initializers(),
                )
            let instance = self.store.add_instance(instance, false);
            allocator
                .initialize(&instance.handle, config.features.bulk_memory)
                .map_err(|e| -> Error {
                    match e {
                        InstantiationError::Trap(trap) => {
@@ -172,6 +172,12 @@
//! * `vtune` - Not enabled by default, this feature compiles in support for
//!   supporting VTune profiling of JIT code.
//!
//! * `uffd` - Not enabled by default. This feature enables `userfaultfd` support
//!   when using the pooling instance allocator. As handling page faults in user space
//!   comes with a performance penalty, this feature should only be enabled when kernel
//!   lock contention is hampering multithreading throughput. This feature is only
//!   supported on Linux and requires a Linux kernel version 4.11 or higher.
//!
//! ## Examples
//!
//! In addition to the examples below be sure to check out the [online embedding
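There is no runtime switch for this: whether paged memory initialization is used is decided entirely at compile time. A one-line sketch of the gate (the identical `cfg!` expression appears in `Module::from_binary` in the next hunk):

```rust
// True only when the crate is built with `--features uffd` on Linux;
// on every other platform or feature combination this folds to `false`.
const USE_PAGED_MEM_INIT: bool = cfg!(all(feature = "uffd", target_os = "linux"));
```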
@@ -307,15 +307,22 @@ impl Module {
    /// # }
    /// ```
    pub fn from_binary(engine: &Engine, binary: &[u8]) -> Result<Module> {
        #[cfg(feature = "cache")]
        let (main_module, artifacts, types) =
            ModuleCacheEntry::new("wasmtime", engine.cache_config())
        const USE_PAGED_MEM_INIT: bool = cfg!(all(feature = "uffd", target_os = "linux"));

        cfg_if::cfg_if! {
            if #[cfg(feature = "cache")] {
                let (main_module, artifacts, types) = ModuleCacheEntry::new(
                    "wasmtime",
                    engine.cache_config(),
                )
                .get_data((engine.compiler(), binary), |(compiler, binary)| {
                    CompilationArtifacts::build(compiler, binary)
                    CompilationArtifacts::build(compiler, binary, USE_PAGED_MEM_INIT)
                })?;
        #[cfg(not(feature = "cache"))]
        let (main_module, artifacts, types) =
            CompilationArtifacts::build(engine.compiler(), binary)?;
            } else {
                let (main_module, artifacts, types) =
                    CompilationArtifacts::build(engine.compiler(), binary, USE_PAGED_MEM_INIT)?;
            }
        };

        let mut modules = CompiledModule::from_artifacts_list(
            artifacts,

@@ -324,6 +331,12 @@ impl Module {
        )?;
        let module = modules.remove(main_module);

        // Validate the module can be used with the current allocator
        engine
            .config()
            .instance_allocator()
            .validate(module.module())?;

        Ok(Module {
            inner: Arc::new(ModuleInner {
                engine: engine.clone(),
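One consequence of validating at module creation time: a module whose minimums exceed the pooling limits never reaches instantiation. A sketch mirroring the `memory_limit` test added later in this diff (the error text is taken from that test):

```rust
use wasmtime::{Engine, Module};

// Assumes `engine` was configured with module_limits.memory_pages == 3.
fn rejects_oversized_module(engine: &Engine) {
    // A 4-page minimum exceeds the 3-page limit, so Module::new fails.
    let err = Module::new(engine, r#"(module (memory 4))"#).unwrap_err();
    assert_eq!(
        err.to_string(),
        "memory index 0 has a minimum page size of 4 which exceeds the limit of 3"
    );
}
```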
@@ -18,10 +18,19 @@ use std::task::{Context, Poll};
use wasmtime_environ::wasm;
use wasmtime_jit::{CompiledModule, ModuleCode, TypeTables};
use wasmtime_runtime::{
    InstanceHandle, RuntimeMemoryCreator, SignalHandler, StackMapRegistry, TrapInfo, VMContext,
    InstanceAllocator, InstanceHandle, SignalHandler, StackMapRegistry, TrapInfo, VMContext,
    VMExternRef, VMExternRefActivationsTable, VMInterrupts, VMSharedSignatureIndex,
};

/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the default
/// instance allocator.
struct StoreInstance {
    handle: InstanceHandle,
    use_default_allocator: bool,
}

/// A `Store` is a collection of WebAssembly instances and host-defined items.
///
/// All WebAssembly instances and items will be attached to and refer to a

@@ -63,7 +72,7 @@ pub(crate) struct StoreInner {
    engine: Engine,
    interrupts: Arc<VMInterrupts>,
    signatures: RefCell<SignatureRegistry>,
    instances: RefCell<Vec<InstanceHandle>>,
    instances: RefCell<Vec<StoreInstance>>,
    signal_handler: RefCell<Option<Box<SignalHandler<'static>>>>,
    externref_activations_table: VMExternRefActivationsTable,
    stack_map_registry: StackMapRegistry,

@@ -254,15 +263,6 @@ impl Store {
        &self.inner.engine
    }

    /// Returns an optional reference to a ['RuntimeMemoryCreator']
    pub(crate) fn memory_creator(&self) -> Option<&dyn RuntimeMemoryCreator> {
        self.engine()
            .config()
            .memory_creator
            .as_ref()
            .map(|x| x as _)
    }

    pub(crate) fn signatures(&self) -> &RefCell<SignatureRegistry> {
        &self.inner.signatures
    }

@@ -383,8 +383,15 @@ impl Store {
        Ok(())
    }

    pub(crate) unsafe fn add_instance(&self, handle: InstanceHandle) -> StoreInstanceHandle {
        self.inner.instances.borrow_mut().push(handle.clone());
    pub(crate) unsafe fn add_instance(
        &self,
        handle: InstanceHandle,
        use_default_allocator: bool,
    ) -> StoreInstanceHandle {
        self.inner.instances.borrow_mut().push(StoreInstance {
            handle: handle.clone(),
            use_default_allocator,
        });
        StoreInstanceHandle {
            store: self.clone(),
            handle,

@@ -397,7 +404,7 @@ impl Store {
            .instances
            .borrow()
            .iter()
            .any(|i| i.vmctx_ptr() == handle.vmctx_ptr()));
            .any(|i| i.handle.vmctx_ptr() == handle.vmctx_ptr()));
        StoreInstanceHandle {
            store: self.clone(),
            handle,
@@ -752,12 +759,14 @@ impl Store {
    /// that the various comments are illuminating as to what's going on here.
    #[cfg(feature = "async")]
    pub(crate) async fn on_fiber<R>(&self, func: impl FnOnce() -> R) -> Result<R, Trap> {
        debug_assert!(self.is_async());
        let config = self.inner.engine.config();

        // TODO: allocation of a fiber should be much more abstract where we
        // shouldn't be allocating huge stacks on every async wasm function call.
        debug_assert!(self.is_async());
        debug_assert!(config.async_stack_size > 0);

        type SuspendType = wasmtime_fiber::Suspend<Result<(), Trap>, (), Result<(), Trap>>;
        let mut slot = None;
        let fiber = wasmtime_fiber::Fiber::new(10 * 1024 * 1024, |keep_going, suspend| {
        let func = |keep_going, suspend: &SuspendType| {
            // First check and see if we were interrupted/dropped, and only
            // continue if we haven't been.
            keep_going?;

@@ -775,18 +784,46 @@ impl Store {

            slot = Some(func());
            Ok(())
        })
        .map_err(|e| Trap::from(anyhow::Error::from(e)))?;
        };

        let (fiber, stack) = match config.instance_allocator().allocate_fiber_stack() {
            Ok(stack) => {
                // Use the returned stack and deallocate it when finished.
                (
                    unsafe {
                        wasmtime_fiber::Fiber::new_with_stack(stack, func)
                            .map_err(|e| Trap::from(anyhow::Error::from(e)))?
                    },
                    stack,
                )
            }
            Err(wasmtime_runtime::FiberStackError::NotSupported) => {
                // The allocator doesn't support custom fiber stacks for the current
                // platform, so request that the fiber itself allocate the stack.
                (
                    wasmtime_fiber::Fiber::new(config.async_stack_size, func)
                        .map_err(|e| Trap::from(anyhow::Error::from(e)))?,
                    std::ptr::null_mut(),
                )
            }
            Err(e) => return Err(Trap::from(anyhow::Error::from(e))),
        };

        // Once we have the fiber representing our synchronous computation, we
        // wrap that in a custom future implementation which does the
        // translation from the future protocol to our fiber API.
        FiberFuture { fiber, store: self }.await?;
        FiberFuture {
            fiber,
            store: self,
            stack,
        }
        .await?;
        return Ok(slot.unwrap());

        struct FiberFuture<'a> {
            fiber: wasmtime_fiber::Fiber<'a, Result<(), Trap>, (), Result<(), Trap>>,
            store: &'a Store,
            stack: *mut u8,
        }

        impl Future for FiberFuture<'_> {

@@ -843,15 +880,23 @@ impl Store {
        // completion.
        impl Drop for FiberFuture<'_> {
            fn drop(&mut self) {
                if self.fiber.done() {
                    return;
                if !self.fiber.done() {
                    let result = self.fiber.resume(Err(Trap::new("future dropped")));
                    // This resumption with an error should always complete the
                    // fiber. While it's technically possible for host code to catch
                    // the trap and re-resume, we'd ideally like to signal to
                    // callers that they shouldn't be doing that.
                    debug_assert!(result.is_ok());
                }
                if !self.stack.is_null() {
                    unsafe {
                        self.store
                            .engine()
                            .config()
                            .instance_allocator()
                            .deallocate_fiber_stack(self.stack)
                    };
                }
                let result = self.fiber.resume(Err(Trap::new("future dropped")));
                // This resumption with an error should always complete the
                // fiber. While it's technically possible for host code to catch
                // the trap and re-resume, we'd ideally like to signal to
                // callers that they shouldn't be doing that.
                debug_assert!(result.is_ok());
            }
        }
    }
@@ -974,9 +1019,17 @@ impl fmt::Debug for Store {

impl Drop for StoreInner {
    fn drop(&mut self) {
        for instance in self.instances.get_mut().iter() {
        let allocator = self.engine.config().instance_allocator();
        for instance in self.instances.borrow().iter() {
            unsafe {
                instance.dealloc();
                if instance.use_default_allocator {
                    self.engine
                        .config()
                        .default_instance_allocator
                        .deallocate(&instance.handle);
                } else {
                    allocator.deallocate(&instance.handle);
                }
            }
        }
    }
@@ -9,15 +9,15 @@ use wasmtime_environ::entity::PrimaryMap;
use wasmtime_environ::wasm::DefinedFuncIndex;
use wasmtime_environ::Module;
use wasmtime_runtime::{
    Imports, InstanceHandle, StackMapRegistry, VMExternRefActivationsTable, VMFunctionBody,
    VMFunctionImport, VMSharedSignatureIndex,
    Imports, InstanceAllocationRequest, InstanceAllocator, StackMapRegistry,
    VMExternRefActivationsTable, VMFunctionBody, VMFunctionImport, VMSharedSignatureIndex,
};

pub(crate) fn create_handle(
    module: Module,
    store: &Store,
    finished_functions: PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,
    state: Box<dyn Any>,
    host_state: Box<dyn Any>,
    func_imports: &[VMFunctionImport],
    shared_signature_id: Option<VMSharedSignatureIndex>,
) -> Result<StoreInstanceHandle> {

@@ -26,17 +26,26 @@ pub(crate) fn create_handle(
    let module = Arc::new(module);

    unsafe {
        let handle = InstanceHandle::new(
            module,
            &finished_functions,
            imports,
            store.memory_creator(),
            &|_| shared_signature_id.unwrap(),
            state,
            store.interrupts(),
            store.externref_activations_table() as *const VMExternRefActivationsTable as *mut _,
            store.stack_map_registry() as *const StackMapRegistry as *mut _,
        )?;
        Ok(store.add_instance(handle))
        // Use the default allocator when creating handles associated with host objects
        // The configured instance allocator should only be used when creating module instances
        // as we don't want host objects to count towards instance limits.
        let handle = store
            .engine()
            .config()
            .default_instance_allocator
            .allocate(InstanceAllocationRequest {
                module: module.clone(),
                finished_functions: &finished_functions,
                imports,
                lookup_shared_signature: &|_| shared_signature_id.unwrap(),
                host_state,
                interrupts: store.interrupts(),
                externref_activations_table: store.externref_activations_table()
                    as *const VMExternRefActivationsTable
                    as *mut _,
                stack_map_registry: store.stack_map_registry() as *const StackMapRegistry as *mut _,
            })?;

        Ok(store.add_instance(handle, true))
    }
}
@@ -3,7 +3,7 @@ use crate::memory::{LinearMemory, MemoryCreator};
use crate::trampoline::StoreInstanceHandle;
use crate::Store;
use crate::{Limits, MemoryType};
use anyhow::Result;
use anyhow::{anyhow, Result};
use wasmtime_environ::entity::PrimaryMap;
use wasmtime_environ::{wasm, MemoryPlan, MemoryStyle, Module, WASM_PAGE_SIZE};
use wasmtime_runtime::{RuntimeLinearMemory, RuntimeMemoryCreator, VMMemoryDefinition};

@@ -54,19 +54,18 @@ impl RuntimeLinearMemory for LinearMemoryProxy {
}

#[derive(Clone)]
pub(crate) struct MemoryCreatorProxy {
    pub(crate) mem_creator: Arc<dyn MemoryCreator>,
}
pub(crate) struct MemoryCreatorProxy(pub Arc<dyn MemoryCreator>);

impl RuntimeMemoryCreator for MemoryCreatorProxy {
    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>, String> {
    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>> {
        let ty = MemoryType::new(Limits::new(plan.memory.minimum, plan.memory.maximum));
        let reserved_size_in_bytes = match plan.style {
            MemoryStyle::Static { bound } => Some(bound as u64 * WASM_PAGE_SIZE as u64),
            MemoryStyle::Dynamic => None,
        };
        self.mem_creator
        self.0
            .new_memory(ty, reserved_size_in_bytes, plan.offset_guard_size)
            .map(|mem| Box::new(LinearMemoryProxy { mem }) as Box<dyn RuntimeLinearMemory>)
            .map_err(|e| anyhow!(e))
    }
}
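Two small idioms land in this hunk: the proxy becomes a one-field tuple struct, and the creator's `Result<_, String>` is adapted into `anyhow::Result` via `anyhow!`. A self-contained sketch of the pattern with a stand-in trait (the real `MemoryCreator` has a different signature; this only illustrates the shape):

```rust
use anyhow::{anyhow, Result};
use std::sync::Arc;

// Stand-in for an embedder-facing creator trait that reports errors as strings.
trait Creator {
    fn create(&self) -> std::result::Result<usize, String>;
}

// Newtype proxy over the shared trait object, mirroring MemoryCreatorProxy above.
struct Proxy(pub Arc<dyn Creator>);

impl Proxy {
    fn create(&self) -> Result<usize> {
        // `anyhow!` converts the String error into an anyhow::Error.
        self.0.create().map_err(|e| anyhow!(e))
    }
}
```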
@@ -364,3 +364,37 @@ fn fuel_eventually_finishes() {
    let instance = Instance::new_async(&store, &module, &[]);
    run(instance).unwrap();
}

#[test]
fn async_with_pooling_stacks() {
    let mut config = Config::new();
    config
        .with_allocation_strategy(InstanceAllocationStrategy::Pooling {
            strategy: PoolingAllocationStrategy::NextAvailable,
            module_limits: ModuleLimits {
                memory_pages: 1,
                table_elements: 0,
                ..Default::default()
            },
            instance_limits: InstanceLimits {
                count: 1,
                memory_reservation_size: 1,
            },
        })
        .expect("pooling allocator created");

    let engine = Engine::new(&config);
    let store = Store::new_async(&engine);
    let func = Func::new_async(
        &store,
        FuncType::new(None, None),
        (),
        move |_caller, _state, _params, _results| Box::new(async { Ok(()) }),
    );
    run(func.call_async(&[])).unwrap();
    run(func.call_async(&[])).unwrap();
    let future1 = func.call_async(&[]);
    let future2 = func.call_async(&[]);
    run(future2).unwrap();
    run(future1).unwrap();
}

@@ -18,6 +18,7 @@ mod module;
mod module_linking;
mod module_serialize;
mod name;
mod pooling_allocator;
mod stack_overflow;
mod table;
mod traps;
436 tests/all/pooling_allocator.rs Normal file
@@ -0,0 +1,436 @@
use anyhow::Result;
use wasmtime::*;

#[test]
fn successful_instantiation() -> Result<()> {
    let mut config = Config::new();
    config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
        strategy: PoolingAllocationStrategy::NextAvailable,
        module_limits: ModuleLimits {
            memory_pages: 1,
            table_elements: 10,
            ..Default::default()
        },
        instance_limits: InstanceLimits {
            count: 1,
            memory_reservation_size: 1,
        },
    })?;

    let engine = Engine::new(&config);
    let module = Module::new(&engine, r#"(module (memory 1) (table 10 funcref))"#)?;

    // Module should instantiate
    let store = Store::new(&engine);
    Instance::new(&store, &module, &[])?;

    Ok(())
}

#[test]
fn memory_limit() -> Result<()> {
    let mut config = Config::new();
    config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
        strategy: PoolingAllocationStrategy::NextAvailable,
        module_limits: ModuleLimits {
            memory_pages: 3,
            table_elements: 10,
            ..Default::default()
        },
        instance_limits: InstanceLimits {
            count: 1,
            memory_reservation_size: 196608,
        },
    })?;

    let engine = Engine::new(&config);

    // Module should fail to validate because the minimum is greater than the configured limit
    match Module::new(&engine, r#"(module (memory 4))"#) {
        Ok(_) => panic!("module compilation should fail"),
        Err(e) => assert_eq!(
            e.to_string(),
            "memory index 0 has a minimum page size of 4 which exceeds the limit of 3"
        ),
    }

    let module = Module::new(
        &engine,
        r#"(module (memory (export "m") 0) (func (export "f") (result i32) (memory.grow (i32.const 1))))"#,
    )?;

    // Instantiate the module and grow the memory via the `f` function
    {
        let store = Store::new(&engine);
        let instance = Instance::new(&store, &module, &[])?;
        let f = instance.get_func("f").unwrap().get0::<i32>().unwrap();

        assert_eq!(f().expect("function should not trap"), 0);
        assert_eq!(f().expect("function should not trap"), 1);
        assert_eq!(f().expect("function should not trap"), 2);
        assert_eq!(f().expect("function should not trap"), -1);
        assert_eq!(f().expect("function should not trap"), -1);
    }

    // Instantiate the module and grow the memory via the Wasmtime API
    let store = Store::new(&engine);
    let instance = Instance::new(&store, &module, &[])?;

    let memory = instance.get_memory("m").unwrap();
    assert_eq!(memory.size(), 0);
    assert_eq!(memory.grow(1).expect("memory should grow"), 0);
    assert_eq!(memory.size(), 1);
    assert_eq!(memory.grow(1).expect("memory should grow"), 1);
    assert_eq!(memory.size(), 2);
    assert_eq!(memory.grow(1).expect("memory should grow"), 2);
    assert_eq!(memory.size(), 3);
    assert!(memory.grow(1).is_err());

    Ok(())
}

#[test]
fn memory_init() -> Result<()> {
    let mut config = Config::new();
    config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
        strategy: PoolingAllocationStrategy::NextAvailable,
        module_limits: ModuleLimits {
            memory_pages: 2,
            table_elements: 0,
            ..Default::default()
        },
        instance_limits: InstanceLimits {
            count: 1,
            ..Default::default()
        },
    })?;

    let engine = Engine::new(&config);

    let module = Module::new(
        &engine,
        r#"(module (memory (export "m") 2) (data (i32.const 65530) "this data spans multiple pages") (data (i32.const 10) "hello world"))"#,
    )?;

    let store = Store::new(&engine);
    let instance = Instance::new(&store, &module, &[])?;
    let memory = instance.get_memory("m").unwrap();

    unsafe {
        assert_eq!(
            &memory.data_unchecked()[65530..65560],
            b"this data spans multiple pages"
        );
        assert_eq!(&memory.data_unchecked()[10..21], b"hello world");
    }

    Ok(())
}

#[test]
fn memory_guard_page_trap() -> Result<()> {
    let mut config = Config::new();
    config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
        strategy: PoolingAllocationStrategy::NextAvailable,
        module_limits: ModuleLimits {
            memory_pages: 2,
            table_elements: 0,
            ..Default::default()
        },
        instance_limits: InstanceLimits {
            count: 1,
            ..Default::default()
        },
    })?;

    let engine = Engine::new(&config);

    let module = Module::new(
        &engine,
        r#"(module (memory (export "m") 0) (func (export "f") (param i32) local.get 0 i32.load drop))"#,
    )?;

    // Instantiate the module and check for out of bounds trap
    for _ in 0..10 {
        let store = Store::new(&engine);
        let instance = Instance::new(&store, &module, &[])?;
        let m = instance.get_memory("m").unwrap();
        let f = instance.get_func("f").unwrap().get1::<i32, ()>().unwrap();

        let trap = f(0).expect_err("function should trap");
        assert!(trap.to_string().contains("out of bounds"));

        let trap = f(1).expect_err("function should trap");
        assert!(trap.to_string().contains("out of bounds"));

        m.grow(1).expect("memory should grow");
        f(0).expect("function should not trap");

        let trap = f(65536).expect_err("function should trap");
        assert!(trap.to_string().contains("out of bounds"));

        let trap = f(65537).expect_err("function should trap");
        assert!(trap.to_string().contains("out of bounds"));

        m.grow(1).expect("memory should grow");
        f(65536).expect("function should not trap");

        m.grow(1).expect_err("memory should be at the limit");
    }

    Ok(())
}

#[test]
#[cfg_attr(target_arch = "aarch64", ignore)] // https://github.com/bytecodealliance/wasmtime/pull/2518#issuecomment-747280133
fn memory_zeroed() -> Result<()> {
    let mut config = Config::new();
    config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
        strategy: PoolingAllocationStrategy::NextAvailable,
        module_limits: ModuleLimits {
            memory_pages: 1,
            table_elements: 0,
            ..Default::default()
        },
        instance_limits: InstanceLimits {
            count: 1,
            memory_reservation_size: 1,
        },
    })?;

    let engine = Engine::new(&config);

    let module = Module::new(&engine, r#"(module (memory (export "m") 1))"#)?;

    // Instantiate the module repeatedly after writing data to the entire memory
    for _ in 0..10 {
        let store = Store::new(&engine);
        let instance = Instance::new(&store, &module, &[])?;
        let memory = instance.get_memory("m").unwrap();

        assert_eq!(memory.size(), 1);
        assert_eq!(memory.data_size(), 65536);

        let ptr = memory.data_ptr();

        unsafe {
            for i in 0..8192 {
                assert_eq!(*ptr.cast::<u64>().offset(i), 0);
            }
            std::ptr::write_bytes(ptr, 0xFE, memory.data_size());
        }
    }

    Ok(())
}

#[test]
fn table_limit() -> Result<()> {
    const TABLE_ELEMENTS: u32 = 10;
    let mut config = Config::new();
    config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
        strategy: PoolingAllocationStrategy::NextAvailable,
        module_limits: ModuleLimits {
            memory_pages: 1,
            table_elements: TABLE_ELEMENTS,
            ..Default::default()
        },
        instance_limits: InstanceLimits {
            count: 1,
            memory_reservation_size: 1,
        },
    })?;

    let engine = Engine::new(&config);

    // Module should fail to validate because the minimum is greater than the configured limit
    match Module::new(&engine, r#"(module (table 31 funcref))"#) {
        Ok(_) => panic!("module compilation should fail"),
        Err(e) => assert_eq!(
            e.to_string(),
            "table index 0 has a minimum element size of 31 which exceeds the limit of 10"
        ),
    }

    let module = Module::new(
        &engine,
        r#"(module (table (export "t") 0 funcref) (func (export "f") (result i32) (table.grow (ref.null func) (i32.const 1))))"#,
    )?;

    // Instantiate the module and grow the table via the `f` function
    {
        let store = Store::new(&engine);
        let instance = Instance::new(&store, &module, &[])?;
        let f = instance.get_func("f").unwrap().get0::<i32>().unwrap();

        for i in 0..TABLE_ELEMENTS {
            assert_eq!(f().expect("function should not trap"), i as i32);
        }

        assert_eq!(f().expect("function should not trap"), -1);
        assert_eq!(f().expect("function should not trap"), -1);
    }

    // Instantiate the module and grow the table via the Wasmtime API
    let store = Store::new(&engine);
    let instance = Instance::new(&store, &module, &[])?;

    let table = instance.get_table("t").unwrap();

    for i in 0..TABLE_ELEMENTS {
        assert_eq!(table.size(), i);
        assert_eq!(
            table
                .grow(1, Val::FuncRef(None))
                .expect("table should grow"),
            i
        );
    }

    assert_eq!(table.size(), TABLE_ELEMENTS);
    assert!(table.grow(1, Val::FuncRef(None)).is_err());

    Ok(())
}

#[test]
fn table_init() -> Result<()> {
    let mut config = Config::new();
    config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
        strategy: PoolingAllocationStrategy::NextAvailable,
        module_limits: ModuleLimits {
            memory_pages: 0,
            table_elements: 6,
            ..Default::default()
        },
        instance_limits: InstanceLimits {
            count: 1,
            ..Default::default()
        },
    })?;

    let engine = Engine::new(&config);

    let module = Module::new(
        &engine,
        r#"(module (table (export "t") 6 funcref) (elem (i32.const 1) 1 2 3 4) (elem (i32.const 0) 0) (func) (func (param i32)) (func (param i32 i32)) (func (param i32 i32 i32)) (func (param i32 i32 i32 i32)))"#,
    )?;

    let store = Store::new(&engine);
    let instance = Instance::new(&store, &module, &[])?;
    let table = instance.get_table("t").unwrap();

    for i in 0..5 {
        let v = table.get(i).expect("table should have entry");
        let f = v
            .funcref()
            .expect("expected funcref")
            .expect("expected non-null value");
        assert_eq!(f.ty().params().len(), i as usize);
    }

    assert!(
        table
            .get(5)
            .expect("table should have entry")
            .funcref()
            .expect("expected funcref")
            .is_none(),
        "funcref should be null"
    );

    Ok(())
}

#[test]
#[cfg_attr(target_arch = "aarch64", ignore)] // https://github.com/bytecodealliance/wasmtime/pull/2518#issuecomment-747280133
fn table_zeroed() -> Result<()> {
    let mut config = Config::new();
    config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
        strategy: PoolingAllocationStrategy::NextAvailable,
        module_limits: ModuleLimits {
            memory_pages: 1,
            table_elements: 10,
            ..Default::default()
        },
        instance_limits: InstanceLimits {
            count: 1,
            memory_reservation_size: 1,
        },
    })?;

    let engine = Engine::new(&config);

    let module = Module::new(&engine, r#"(module (table (export "t") 10 funcref))"#)?;

    // Instantiate the module repeatedly after filling table elements
    for _ in 0..10 {
        let store = Store::new(&engine);
        let instance = Instance::new(&store, &module, &[])?;
        let table = instance.get_table("t").unwrap();
        let f = Func::wrap(&store, || {});

        assert_eq!(table.size(), 10);

        for i in 0..10 {
            match table.get(i).unwrap() {
                Val::FuncRef(r) => assert!(r.is_none()),
                _ => panic!("expected a funcref"),
            }
            table.set(i, Val::FuncRef(Some(f.clone()))).unwrap();
        }
    }

    Ok(())
}

#[test]
fn instantiation_limit() -> Result<()> {
    const INSTANCE_LIMIT: u32 = 10;
    let mut config = Config::new();
    config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
        strategy: PoolingAllocationStrategy::NextAvailable,
        module_limits: ModuleLimits {
            memory_pages: 1,
            table_elements: 10,
            ..Default::default()
        },
        instance_limits: InstanceLimits {
            count: INSTANCE_LIMIT,
            memory_reservation_size: 1,
        },
    })?;

    let engine = Engine::new(&config);
    let module = Module::new(&engine, r#"(module)"#)?;

    // Instantiate to the limit
    {
        let store = Store::new(&engine);

        for _ in 0..INSTANCE_LIMIT {
            Instance::new(&store, &module, &[])?;
        }

        match Instance::new(&store, &module, &[]) {
            Ok(_) => panic!("instantiation should fail"),
            Err(e) => assert_eq!(
                e.to_string(),
                format!(
                    "Limit of {} concurrent instances has been reached",
                    INSTANCE_LIMIT
                )
            ),
        }
    }

    // With the above store dropped, ensure instantiations can be made

    let store = Store::new(&engine);

    for _ in 0..INSTANCE_LIMIT {
        Instance::new(&store, &module, &[])?;
    }

    Ok(())
}
@@ -11,3 +11,42 @@ fn get_none() {
    }
    assert!(table.get(1).is_none());
}

#[test]
fn fill_wrong() {
    let store = Store::default();
    let ty = TableType::new(ValType::FuncRef, Limits::new(1, None));
    let table = Table::new(&store, ty, Val::FuncRef(None)).unwrap();
    assert_eq!(
        table
            .fill(0, Val::ExternRef(None), 1)
            .map_err(|e| e.to_string())
            .unwrap_err(),
        "mismatched element fill type"
    );

    let ty = TableType::new(ValType::ExternRef, Limits::new(1, None));
    let table = Table::new(&store, ty, Val::ExternRef(None)).unwrap();
    assert_eq!(
        table
            .fill(0, Val::FuncRef(None), 1)
            .map_err(|e| e.to_string())
            .unwrap_err(),
        "mismatched element fill type"
    );
}

#[test]
fn copy_wrong() {
    let store = Store::default();
    let ty = TableType::new(ValType::FuncRef, Limits::new(1, None));
    let table1 = Table::new(&store, ty, Val::FuncRef(None)).unwrap();
    let ty = TableType::new(ValType::ExternRef, Limits::new(1, None));
    let table2 = Table::new(&store, ty, Val::ExternRef(None)).unwrap();
    assert_eq!(
        Table::copy(&table1, 0, &table2, 0, 1)
            .map_err(|e| e.to_string())
            .unwrap_err(),
        "tables do not have the same element type"
    );
}
@@ -1,5 +1,8 @@
use std::path::Path;
use wasmtime::{Config, Engine, Store, Strategy};
use wasmtime::{
    Config, Engine, InstanceAllocationStrategy, InstanceLimits, ModuleLimits,
    PoolingAllocationStrategy, Store, Strategy,
};
use wasmtime_wast::WastContext;

include!(concat!(env!("OUT_DIR"), "/wast_testsuite_tests.rs"));

@@ -7,7 +10,7 @@ include!(concat!(env!("OUT_DIR"), "/wast_testsuite_tests.rs"));
// Each of the tests included from `wast_testsuite_tests` will call this
// function which actually executes the `wast` test suite given the `strategy`
// to compile it.
fn run_wast(wast: &str, strategy: Strategy) -> anyhow::Result<()> {
fn run_wast(wast: &str, strategy: Strategy, pooling: bool) -> anyhow::Result<()> {
    let wast = Path::new(wast);

    let simd = wast.iter().any(|s| s == "simd");

@@ -44,6 +47,30 @@ fn run_wast(wast: &str, strategy: Strategy) -> anyhow::Result<()> {
        cfg.static_memory_maximum_size(0);
    }

    if pooling {
        // The limits here are crafted such that the wast tests should pass.
        // However, these limits may become insufficient in the future as the wast tests change.
        // If a wast test fails because of a limit being "exceeded" or if memory/table
        // fails to grow, the values here will need to be adjusted.
        cfg.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
            strategy: PoolingAllocationStrategy::NextAvailable,
            module_limits: ModuleLimits {
                imported_memories: 2,
                imported_tables: 2,
                imported_globals: 11,
                memories: 2,
                tables: 4,
                globals: 11,
                memory_pages: 805,
                ..Default::default()
            },
            instance_limits: InstanceLimits {
                count: 450,
                ..Default::default()
            },
        })?;
    }

    let store = Store::new(&Engine::new(&cfg));
    let mut wast_context = WastContext::new(store);
    wast_context.register_spectest()?;