Files
wasmtime/benches/instantiation.rs
Alex Crichton 63d80fc509 Remove the need to have a Store for an InstancePre (#5683)
* Remove the need to have a `Store` for an `InstancePre`

This commit relaxes a requirement of the `InstancePre` API, notably its
construction via `Linker::instantiate_pre`. Previously this function
required a `Store<T>` to be present to be able to perform type-checking
on the contents of the linker, and now this requirement has been
removed.

Items stored within a linker are either a `HostFunc`, which has type
information inside of it, or an `Extern`, which doesn't have type
information inside of it. This usage of `Extern` is why a `Store` was
required during the `InstancePre` construction process: the store is
used to extract the type of an `Extern`. This commit implements a
solution where the type information of an `Extern` is stored alongside
the `Extern` itself, meaning that the `InstancePre` construction process
no longer requires a `Store<T>`.

One caveat of this implementation is that some items, such as tables and
memories, technically have a "dynamic type" where during type checking
their current size is consulted to match against the minimum size
required of an import. This no longer works when using
`Linker::instantiate_pre` as the current size used is the one when it
was inserted into the linker rather than the one available at
instantiation time. It's hoped, however, that this is a relatively
esoteric use case that doesn't impact many real-world users.

Additionally note that this is an API-breaking change. Not only is the
`Store` argument removed from `Linker::instantiate_pre`, but some other
methods such as `Linker::define` grew a `Store` argument as the type
needs to be extracted when an item is inserted into a linker.

Closes #5675

* Fix the C API

* Fix benchmark compilation

* Add C API docs

* Update crates/wasmtime/src/linker.rs

Co-authored-by: Andrew Brown <andrew.brown@intel.com>

---------

Co-authored-by: Andrew Brown <andrew.brown@intel.com>
2023-02-02 11:54:20 -06:00

216 lines
7.2 KiB
Rust

use anyhow::Result;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use once_cell::unsync::Lazy;
use std::path::Path;
use std::process::Command;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst};
use std::sync::Arc;
use std::thread;
use wasmtime::*;
use wasmtime_wasi::{sync::WasiCtxBuilder, WasiCtx};
/// Creates a fresh `Store` for `engine`, backed by a default WASI context.
fn store(engine: &Engine) -> Store<WasiCtx> {
    Store::new(engine, WasiCtxBuilder::new().build())
}
/// Instantiates `pre` into a brand-new store and discards the resulting
/// instance, returning only success or failure.
fn instantiate(pre: &InstancePre<WasiCtx>, engine: &Engine) -> Result<()> {
    let mut store = store(engine);
    pre.instantiate(&mut store).map(|_instance| ())
}
/// Maps an instance allocation strategy to the short name used in
/// benchmark group identifiers.
///
/// Note: the original signature declared an unused lifetime parameter
/// (`<'a>`); it has been removed (clippy `extra_unused_lifetimes`). The
/// returned `&'static str` is unaffected.
fn benchmark_name(strategy: &InstanceAllocationStrategy) -> &'static str {
    match strategy {
        InstanceAllocationStrategy::OnDemand => "default",
        InstanceAllocationStrategy::Pooling { .. } => "pooling",
    }
}
/// Benchmarks back-to-back, single-threaded instantiation of the module at
/// `path`, once per allocation strategy.
fn bench_sequential(c: &mut Criterion, path: &Path) {
    let mut group = c.benchmark_group("sequential");

    for strategy in strategies() {
        let bench_id = BenchmarkId::new(
            benchmark_name(&strategy),
            path.file_name().unwrap().to_str().unwrap(),
        );

        // Compiling the module is expensive, so defer engine/module/linker
        // setup until the benchmark actually runs.
        let state = Lazy::new(|| {
            let mut config = Config::default();
            config.allocation_strategy(strategy.clone());
            let engine = Engine::new(&config).expect("failed to create engine");
            let module = Module::from_file(&engine, path).unwrap_or_else(|e| {
                panic!("failed to load benchmark `{}`: {:?}", path.display(), e)
            });
            let mut linker = Linker::new(&engine);
            wasmtime_wasi::add_to_linker(&mut linker, |cx| cx).unwrap();
            let pre = linker
                .instantiate_pre(&module)
                .expect("failed to pre-instantiate");
            (engine, pre)
        });

        group.bench_function(bench_id, |b| {
            let (engine, pre) = &*state;
            b.iter(|| instantiate(pre, engine).expect("failed to instantiate module"));
        });
    }

    group.finish();
}
/// Benchmarks instantiation of the module at `path` while background threads
/// continuously instantiate the same module, for each allocation strategy and
/// for thread counts from 1 up to the physical CPU count (capped at 16).
fn bench_parallel(c: &mut Criterion, path: &Path) {
let mut group = c.benchmark_group("parallel");
for strategy in strategies() {
// Engine/module/linker setup is deferred until first use since module
// compilation is expensive; the `InstancePre` is wrapped in an `Arc` so
// it can be shared with worker threads.
let state = Lazy::new(|| {
let mut config = Config::default();
config.allocation_strategy(strategy.clone());
let engine = Engine::new(&config).expect("failed to create engine");
let module =
Module::from_file(&engine, path).expect("failed to load WASI example module");
let mut linker = Linker::new(&engine);
wasmtime_wasi::add_to_linker(&mut linker, |cx| cx).unwrap();
let pre = Arc::new(
linker
.instantiate_pre(&module)
.expect("failed to pre-instantiate"),
);
(engine, pre)
});
// One benchmark per level of background contention.
for threads in 1..=num_cpus::get_physical().min(16) {
let name = format!(
"{}: with {} thread{}",
path.file_name().unwrap().to_str().unwrap(),
threads,
if threads == 1 { "" } else { "s" }
);
let id = BenchmarkId::new(benchmark_name(&strategy), name);
group.bench_function(id, |b| {
let (engine, pre) = &*state;
// Spin up N-1 threads doing background instantiations to
// simulate concurrent instantiations.
// `done` signals workers to stop; `count` tracks how many
// workers have started their instantiation loop.
let done = Arc::new(AtomicBool::new(false));
let count = Arc::new(AtomicUsize::new(0));
let workers = (0..threads - 1)
.map(|_| {
let pre = pre.clone();
let done = done.clone();
let engine = engine.clone();
let count = count.clone();
thread::spawn(move || {
count.fetch_add(1, SeqCst);
while !done.load(SeqCst) {
instantiate(&pre, &engine).unwrap();
}
})
})
.collect::<Vec<_>>();
// Wait for our workers to all get started and have
// instantiated their first module, at which point they'll
// all be spinning.
while count.load(SeqCst) != threads - 1 {
thread::yield_now();
}
// Now that our background work is configured we can
// benchmark the amount of time it takes to instantiate this
// module.
b.iter(|| {
instantiate(&pre, &engine).expect("failed to instantiate module");
});
// Shut down this benchmark iteration by signalling to
// worker threads they should exit and then wait for them to
// have reached the exit point.
done.store(true, SeqCst);
for t in workers {
t.join().unwrap();
}
});
}
}
group.finish();
}
/// Benchmarks `Module::deserialize_file` against a pre-serialized copy of the
/// module at `path`, written to a temporary file on first use.
fn bench_deserialize_module(c: &mut Criterion, path: &Path) {
    let mut group = c.benchmark_group("deserialize");
    let name = path.file_name().unwrap().to_str().unwrap();
    let tmpfile = tempfile::NamedTempFile::new().unwrap();

    // Compile and serialize the module exactly once, lazily.
    let state = Lazy::new(|| {
        let engine = Engine::default();
        let module = Module::from_file(&engine, path).expect("failed to load WASI example module");
        std::fs::write(tmpfile.path(), module.serialize().unwrap()).unwrap();
        (engine, tmpfile.path())
    });

    group.bench_function(BenchmarkId::new("deserialize", name), |b| {
        let (engine, serialized) = &*state;
        b.iter(|| unsafe {
            // SAFETY: the file being deserialized was produced by
            // `Module::serialize` with this same engine above.
            Module::deserialize_file(engine, serialized).unwrap();
        });
    });

    group.finish();
}
/// Compiles the `example-wasi-wasm` crate for `wasm32-wasi` in release mode
/// and copies the produced module into `benches/instantiation/`.
///
/// Panics if cargo cannot be spawned, exits unsuccessfully, or the copy
/// fails.
fn build_wasi_example() {
    println!("Building WASI example module...");

    let status = Command::new("cargo")
        .args(&[
            "build",
            "--release",
            "-p",
            "example-wasi-wasm",
            "--target",
            "wasm32-wasi",
        ])
        .spawn()
        .expect("failed to run cargo to build WASI example")
        .wait()
        .expect("failed to wait for cargo to build");
    if !status.success() {
        panic!("failed to build WASI example for target `wasm32-wasi`");
    }

    std::fs::copy(
        "target/wasm32-wasi/release/wasi.wasm",
        "benches/instantiation/wasi.wasm",
    )
    .expect("failed to copy WASI example module");
}
/// Criterion entry point: builds the WASI example module, then runs every
/// benchmark family over each file found in `benches/instantiation`.
fn bench_instantiation(c: &mut Criterion) {
    build_wasi_example();
    for entry in std::fs::read_dir("benches/instantiation").unwrap() {
        let path = entry.unwrap().path();
        bench_sequential(c, &path);
        bench_parallel(c, &path);
        bench_deserialize_module(c, &path);
    }
}
/// Yields each allocation strategy under test: the default on-demand
/// allocator and a pooling allocator configured with large memory limits.
fn strategies() -> impl Iterator<Item = InstanceAllocationStrategy> {
    let mut pooling = PoolingAllocationConfig::default();
    pooling.instance_memory_pages(10_000);
    [
        InstanceAllocationStrategy::OnDemand,
        InstanceAllocationStrategy::Pooling(pooling),
    ]
    .into_iter()
}
// Register the benchmark group and generate criterion's `main` entry point.
criterion_group!(benches, bench_instantiation);
criterion_main!(benches);