diff --git a/Cargo.lock b/Cargo.lock
index 82cd188973..4505929943 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3443,6 +3443,7 @@ dependencies = [
  "humantime 2.1.0",
  "libc",
  "listenfd",
+ "log",
  "memchr",
  "num_cpus",
  "once_cell",
diff --git a/Cargo.toml b/Cargo.toml
index 70410df76f..8f9a3df1ee 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -45,6 +45,7 @@ rustix = { version = "0.35.6", features = ["mm", "param"] }
 # depend again on wasmtime to activate its default features for tests
 wasmtime = { path = "crates/wasmtime", version = "0.41.0", features = ['component-model'] }
 env_logger = "0.9.0"
+log = "0.4.8"
 filecheck = "0.5.0"
 tempfile = "3.1.0"
 test-programs = { path = "crates/test-programs" }
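The new `log` dependency backs the `log::trace!` calls in the runtime's backtrace code, and the tests pair it with the existing `env_logger` dev-dependency to surface those records. A minimal, hedged sketch of that pairing (standalone illustration, not part of the patch; the record text here is made up):

```rust
// Toy illustration of the `log`/`env_logger` pattern the new tests rely on;
// run with `RUST_LOG=trace` to see the output.
fn main() {
    // `try_init` is safe to call from every test: only the first call
    // installs the logger, later calls return an `Err` that is discarded.
    let _ = env_logger::try_init();

    log::trace!("====== Capturing Backtrace ======");
    log::info!("Calling `a`...");
}
```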
diff --git a/crates/runtime/src/traphandlers.rs b/crates/runtime/src/traphandlers.rs
index 84775b15e4..a08a0507f4 100644
--- a/crates/runtime/src/traphandlers.rs
+++ b/crates/runtime/src/traphandlers.rs
@@ -7,7 +7,7 @@ use crate::{VMContext, VMRuntimeLimits};
 use anyhow::Error;
 use std::any::Any;
 use std::cell::{Cell, UnsafeCell};
-use std::mem::{self, MaybeUninit};
+use std::mem::MaybeUninit;
 use std::ptr;
 use std::sync::Once;
 use wasmtime_environ::TrapCode;
@@ -182,19 +182,7 @@ where
 {
     let limits = (*caller).instance().runtime_limits();
 
-    let old_last_wasm_exit_fp = mem::replace(&mut *(**limits).last_wasm_exit_fp.get(), 0);
-    let old_last_wasm_exit_pc = mem::replace(&mut *(**limits).last_wasm_exit_pc.get(), 0);
-    let old_last_wasm_entry_sp = mem::replace(&mut *(**limits).last_wasm_entry_sp.get(), 0);
-
-    let result = CallThreadState::new(
-        signal_handler,
-        capture_backtrace,
-        old_last_wasm_exit_fp,
-        old_last_wasm_exit_pc,
-        old_last_wasm_entry_sp,
-        *limits,
-    )
-    .with(|cx| {
+    let result = CallThreadState::new(signal_handler, capture_backtrace, *limits).with(|cx| {
         wasmtime_setjmp(
             cx.jmp_buf.as_ptr(),
             call_closure::<F>,
@@ -203,10 +191,6 @@ where
         )
     });
 
-    *(**limits).last_wasm_exit_fp.get() = old_last_wasm_exit_fp;
-    *(**limits).last_wasm_exit_pc.get() = old_last_wasm_exit_pc;
-    *(**limits).last_wasm_entry_sp.get() = old_last_wasm_entry_sp;
-
     return match result {
         Ok(x) => Ok(x),
         Err((UnwindReason::Trap(reason), backtrace)) => Err(Box::new(Trap { reason, backtrace })),
@@ -221,20 +205,159 @@ where
     }
 }
 
-/// Temporary state stored on the stack which is registered in the `tls` module
-/// below for calls into wasm.
-pub struct CallThreadState {
-    unwind: UnsafeCell<MaybeUninit<Result<(), (UnwindReason, Option<Backtrace>)>>>,
-    jmp_buf: Cell<*const u8>,
-    handling_trap: Cell<bool>,
-    signal_handler: Option<*const SignalHandler<'static>>,
-    prev: Cell<tls::Ptr>,
-    capture_backtrace: bool,
-    pub(crate) old_last_wasm_exit_fp: usize,
-    pub(crate) old_last_wasm_exit_pc: usize,
-    pub(crate) old_last_wasm_entry_sp: usize,
-    pub(crate) limits: *const VMRuntimeLimits,
+// Module to hide visibility of the `CallThreadState::prev` field and force
+// usage of its accessor methods.
+mod call_thread_state {
+    use super::*;
+    use std::mem;
+
+    /// Temporary state stored on the stack which is registered in the `tls` module
+    /// below for calls into wasm.
+    pub struct CallThreadState {
+        pub(super) unwind: UnsafeCell<MaybeUninit<Result<(), (UnwindReason, Option<Backtrace>)>>>,
+        pub(super) jmp_buf: Cell<*const u8>,
+        pub(super) handling_trap: Cell<bool>,
+        pub(super) signal_handler: Option<*const SignalHandler<'static>>,
+        pub(super) capture_backtrace: bool,
+
+        pub(crate) limits: *const VMRuntimeLimits,
+
+        prev: Cell<tls::Ptr>,
+
+        // The values of `VMRuntimeLimits::last_wasm_{exit_{pc,fp},entry_sp}` for
+        // the *previous* `CallThreadState`. Our *current* last wasm PC/FP/SP are
+        // saved in `self.limits`. We save a copy of the old registers here because
+        // the `VMRuntimeLimits` typically doesn't change across nested calls into
+        // Wasm (i.e. they are typically calls back into the same store and
+        // `self.limits == self.prev.limits`) and we must maintain the list of
+        // contiguous-Wasm-frames stack regions for backtracing purposes.
+        old_last_wasm_exit_fp: Cell<usize>,
+        old_last_wasm_exit_pc: Cell<usize>,
+        old_last_wasm_entry_sp: Cell<usize>,
+    }
+
+    impl CallThreadState {
+        #[inline]
+        pub(super) fn new(
+            signal_handler: Option<*const SignalHandler<'static>>,
+            capture_backtrace: bool,
+            limits: *const VMRuntimeLimits,
+        ) -> CallThreadState {
+            CallThreadState {
+                unwind: UnsafeCell::new(MaybeUninit::uninit()),
+                jmp_buf: Cell::new(ptr::null()),
+                handling_trap: Cell::new(false),
+                signal_handler,
+                capture_backtrace,
+                limits,
+                prev: Cell::new(ptr::null()),
+                old_last_wasm_exit_fp: Cell::new(0),
+                old_last_wasm_exit_pc: Cell::new(0),
+                old_last_wasm_entry_sp: Cell::new(0),
+            }
+        }
+
+        /// Get the saved FP upon exit from Wasm for the previous `CallThreadState`.
+        pub fn old_last_wasm_exit_fp(&self) -> usize {
+            self.old_last_wasm_exit_fp.get()
+        }
+
+        /// Get the saved PC upon exit from Wasm for the previous `CallThreadState`.
+        pub fn old_last_wasm_exit_pc(&self) -> usize {
+            self.old_last_wasm_exit_pc.get()
+        }
+
+        /// Get the saved SP upon entry into Wasm for the previous `CallThreadState`.
+        pub fn old_last_wasm_entry_sp(&self) -> usize {
+            self.old_last_wasm_entry_sp.get()
+        }
+
+        /// Get the previous `CallThreadState`.
+        pub fn prev(&self) -> tls::Ptr {
+            self.prev.get()
+        }
+
+        /// Connect the link to the previous `CallThreadState`.
+        ///
+        /// Synchronizes the last wasm FP, PC, and SP on `self` and the old
+        /// `self.prev` for the given new `prev`, and returns the old
+        /// `self.prev`.
+        pub unsafe fn set_prev(&self, prev: tls::Ptr) -> tls::Ptr {
+            let old_prev = self.prev.get();
+
+            // Restore the old `prev`'s saved registers in its
+            // `VMRuntimeLimits`. This is necessary for when we are async-
+            // suspending the top `CallThreadState` and doing `set_prev(null)`
+            // on it, so that any stack walking we do subsequently will start at
+            // the old `prev` and look at its `VMRuntimeLimits` to get the
+            // initial saved registers.
+            if let Some(old_prev) = old_prev.as_ref() {
+                *(*old_prev.limits).last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp();
+                *(*old_prev.limits).last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc();
+                *(*old_prev.limits).last_wasm_entry_sp.get() = self.old_last_wasm_entry_sp();
+            }
+
+            self.prev.set(prev);
+
+            let mut old_last_wasm_exit_fp = 0;
+            let mut old_last_wasm_exit_pc = 0;
+            let mut old_last_wasm_entry_sp = 0;
+            if let Some(prev) = prev.as_ref() {
+                // We are entering a new `CallThreadState` or resuming a
+                // previously suspended one. This means we will push new Wasm
+                // frames that save the new Wasm FP/SP/PC registers into
+                // `VMRuntimeLimits`, so we first need to save the old Wasm
+                // FP/SP/PC registers into this new `CallThreadState` to
+                // maintain our list of contiguous Wasm frame regions that we
+                // use when capturing stack traces.
+                //
+                // NB: the Wasm<--->host trampolines saved the Wasm FP/SP/PC
+                // registers in the active-at-that-time store's
+                // `VMRuntimeLimits`. For the most recent FP/PC/SP, that is
+                // `state.prev.limits` (since we haven't entered this
+                // `CallThreadState` yet). And that can be a different
+                // `VMRuntimeLimits` instance from the currently active
+                // `state.limits`, which will be used by the upcoming call into
+                // Wasm! Consider the case where we have multiple, nested calls
+                // across stores (with host code in between, by necessity, since
+                // only things in the same store can be linked directly
+                // together):
+                //
+                //     | ...             |
+                //     | Host            |  |
+                //     +-----------------+  | stack
+                //     | Wasm in store A |  | grows
+                //     +-----------------+  | down
+                //     | Host            |  |
+                //     +-----------------+  |
+                //     | Wasm in store B |  V
+                //     +-----------------+
+                //
+                // In this scenario `state.limits != state.prev.limits`,
+                // i.e. `B.limits != A.limits`! Therefore we must take care to
+                // read the old FP/SP/PC from `state.prev.limits`, rather than
+                // `state.limits`, and store those saved registers into the
+                // current `state`.
+                //
+                // See also the comment above the
+                // `CallThreadState::old_last_wasm_*` fields.
+                old_last_wasm_exit_fp =
+                    mem::replace(&mut *(*prev.limits).last_wasm_exit_fp.get(), 0);
+                old_last_wasm_exit_pc =
+                    mem::replace(&mut *(*prev.limits).last_wasm_exit_pc.get(), 0);
+                old_last_wasm_entry_sp =
+                    mem::replace(&mut *(*prev.limits).last_wasm_entry_sp.get(), 0);
+            }
+
+            self.old_last_wasm_exit_fp.set(old_last_wasm_exit_fp);
+            self.old_last_wasm_exit_pc.set(old_last_wasm_exit_pc);
+            self.old_last_wasm_entry_sp.set(old_last_wasm_entry_sp);
+
+            old_prev
+        }
+    }
+}
+pub use call_thread_state::*;
 
 enum UnwindReason {
     Panic(Box<dyn Any + Send>),
@@ -242,34 +365,11 @@ enum UnwindReason {
 }
 
 impl CallThreadState {
-    #[inline]
-    fn new(
-        signal_handler: Option<*const SignalHandler<'static>>,
-        capture_backtrace: bool,
-        old_last_wasm_exit_fp: usize,
-        old_last_wasm_exit_pc: usize,
-        old_last_wasm_entry_sp: usize,
-        limits: *const VMRuntimeLimits,
-    ) -> CallThreadState {
-        CallThreadState {
-            unwind: UnsafeCell::new(MaybeUninit::uninit()),
-            jmp_buf: Cell::new(ptr::null()),
-            handling_trap: Cell::new(false),
-            signal_handler,
-            prev: Cell::new(ptr::null()),
-            capture_backtrace,
-            old_last_wasm_exit_fp,
-            old_last_wasm_exit_pc,
-            old_last_wasm_entry_sp,
-            limits,
-        }
-    }
-
     fn with(
-        self,
+        mut self,
         closure: impl FnOnce(&CallThreadState) -> i32,
     ) -> Result<(), (UnwindReason, Option<Backtrace>)> {
-        let ret = tls::set(&self, || closure(&self));
+        let ret = tls::set(&mut self, |me| closure(me));
         if ret != 0 {
             Ok(())
         } else {
@@ -366,7 +466,7 @@ impl CallThreadState {
         let mut state = Some(self);
         std::iter::from_fn(move || {
             let this = state?;
-            state = unsafe { this.prev.get().as_ref() };
+            state = unsafe { this.prev().as_ref() };
             Some(this)
         })
     }
@@ -462,7 +562,9 @@ mod tls {
 
     /// Opaque state used to help control TLS state across stack switches for
     /// async support.
-    pub struct TlsRestore(raw::Ptr);
+    pub struct TlsRestore {
+        state: raw::Ptr,
+    }
 
     impl TlsRestore {
         /// Takes the TLS state that is currently configured and returns a
@@ -476,14 +578,16 @@ mod tls {
             // removing ourselves from the call-stack, and in the process we
             // null out our own previous field for safety in case it's
             // accidentally used later.
-            let raw = raw::get();
-            if !raw.is_null() {
-                let prev = (*raw).prev.replace(ptr::null());
-                raw::replace(prev);
+            let state = raw::get();
+            if let Some(state) = state.as_ref() {
+                let prev_state = state.set_prev(ptr::null());
+                raw::replace(prev_state);
+            } else {
+                // Null case: we aren't in a wasm context, so there's no tls to
+                // save for restoration.
             }
-            // Null case: we aren't in a wasm context, so there's no tls
-            // to save for restoration.
-            TlsRestore(raw)
+
+            TlsRestore { state }
         }
 
         /// Restores a previous tls state back into this thread's TLS.
@@ -493,40 +597,50 @@ mod tls {
         pub unsafe fn replace(self) {
             // Null case: we aren't in a wasm context, so there's no tls
             // to restore.
-            if self.0.is_null() {
+            if self.state.is_null() {
                 return;
             }
+
             // We need to configure our previous TLS pointer to whatever is in
             // TLS at this time, and then we set the current state to ourselves.
             let prev = raw::get();
-            assert!((*self.0).prev.get().is_null());
-            (*self.0).prev.set(prev);
-            raw::replace(self.0);
+            assert!((*self.state).prev().is_null());
+            (*self.state).set_prev(prev);
+            raw::replace(self.state);
         }
     }
 
     /// Configures thread local state such that for the duration of the
-    /// execution of `closure` any call to `with` will yield `ptr`, unless this
-    /// is recursively called again.
+    /// execution of `closure` any call to `with` will yield `state`, unless
+    /// this is recursively called again.
     #[inline]
-    pub fn set<R>(state: &CallThreadState, closure: impl FnOnce() -> R) -> R {
-        struct Reset<'a>(&'a CallThreadState);
+    pub fn set<R>(state: &mut CallThreadState, closure: impl FnOnce(&CallThreadState) -> R) -> R {
+        struct Reset<'a> {
+            state: &'a CallThreadState,
+        }
 
         impl Drop for Reset<'_> {
             #[inline]
             fn drop(&mut self) {
-                raw::replace(self.0.prev.replace(ptr::null()));
+                unsafe {
+                    let prev = self.state.set_prev(ptr::null());
+                    let old_state = raw::replace(prev);
+                    debug_assert!(std::ptr::eq(old_state, self.state));
+                }
             }
         }
 
         let prev = raw::replace(state);
-        state.prev.set(prev);
-        let _reset = Reset(state);
-        closure()
+
+        unsafe {
+            state.set_prev(prev);
+
+            let reset = Reset { state };
+            closure(reset.state)
+        }
     }
 
-    /// Returns the last pointer configured with `set` above. Panics if `set`
-    /// has not been previously called.
+    /// Returns the last pointer configured with `set` above, if any.
    pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState>) -> R) -> R {
         let p = raw::get();
         unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
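The `set_prev` protocol above is the heart of this patch, so here is a hedged, self-contained model of the register bookkeeping it performs. `Limits` and `State` are toy stand-ins for `VMRuntimeLimits` and `CallThreadState`, not Wasmtime's real types: unlinking gives the saved FP/PC/SP back to the previous state's limits, and linking steals the new previous state's registers into the current state.

```rust
use std::cell::Cell;

/// Toy stand-in for `VMRuntimeLimits`: the registers the trampolines save.
#[derive(Default)]
struct Limits {
    last_wasm_exit_fp: Cell<usize>,
    last_wasm_exit_pc: Cell<usize>,
    last_wasm_entry_sp: Cell<usize>,
}

/// Toy stand-in for one `CallThreadState` in the linked list.
struct State<'a> {
    limits: &'a Limits,
    prev: Cell<Option<&'a State<'a>>>,
    // Saved copy of the *previous* state's registers, the analogue of
    // `old_last_wasm_exit_fp` and friends.
    old: Cell<(usize, usize, usize)>,
}

impl<'a> State<'a> {
    fn new(limits: &'a Limits) -> Self {
        State {
            limits,
            prev: Cell::new(None),
            old: Cell::new((0, 0, 0)),
        }
    }

    /// Mirrors `CallThreadState::set_prev`: give the old `prev` its saved
    /// registers back, then steal the new `prev`'s registers into `self`.
    fn set_prev(&self, prev: Option<&'a State<'a>>) -> Option<&'a State<'a>> {
        let old_prev = self.prev.take();
        if let Some(p) = old_prev {
            let (fp, pc, sp) = self.old.get();
            p.limits.last_wasm_exit_fp.set(fp);
            p.limits.last_wasm_exit_pc.set(pc);
            p.limits.last_wasm_entry_sp.set(sp);
        }

        self.prev.set(prev);

        let mut saved = (0, 0, 0);
        if let Some(p) = prev {
            saved = (
                p.limits.last_wasm_exit_fp.replace(0),
                p.limits.last_wasm_exit_pc.replace(0),
                p.limits.last_wasm_entry_sp.replace(0),
            );
        }
        self.old.set(saved);

        old_prev
    }
}

fn main() {
    let limits = Limits::default();
    limits.last_wasm_exit_fp.set(0x1000);

    let outer = State::new(&limits);
    let inner = State::new(&limits);

    // Linking `inner` over `outer` steals `outer`'s saved registers...
    inner.set_prev(Some(&outer));
    assert_eq!(limits.last_wasm_exit_fp.get(), 0);

    // ...and unlinking, as during an async suspension, writes them back.
    inner.set_prev(None);
    assert_eq!(limits.last_wasm_exit_fp.get(), 0x1000);
}
```

The same give-back/steal pairing is what `TlsRestore::take` and `TlsRestore::replace` route through when a fiber suspends and resumes.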
diff --git a/crates/runtime/src/traphandlers/backtrace.rs b/crates/runtime/src/traphandlers/backtrace.rs
index 53cfcd93d4..0982f36b92 100644
--- a/crates/runtime/src/traphandlers/backtrace.rs
+++ b/crates/runtime/src/traphandlers/backtrace.rs
@@ -149,18 +149,18 @@ impl Backtrace {
         // trace through (since each `CallThreadState` saves the *previous*
         // call into Wasm's saved registers, and the youngest call into
         // Wasm's registers are saved in the `VMRuntimeLimits`)
-        if state.prev.get().is_null() {
-            debug_assert_eq!(state.old_last_wasm_exit_pc, 0);
-            debug_assert_eq!(state.old_last_wasm_exit_fp, 0);
-            debug_assert_eq!(state.old_last_wasm_entry_sp, 0);
+        if state.prev().is_null() {
+            debug_assert_eq!(state.old_last_wasm_exit_pc(), 0);
+            debug_assert_eq!(state.old_last_wasm_exit_fp(), 0);
+            debug_assert_eq!(state.old_last_wasm_entry_sp(), 0);
             log::trace!("====== Done Capturing Backtrace ======");
             return;
         }
 
         if let ControlFlow::Break(()) = Self::trace_through_wasm(
-            state.old_last_wasm_exit_pc,
-            state.old_last_wasm_exit_fp,
-            state.old_last_wasm_entry_sp,
+            state.old_last_wasm_exit_pc(),
+            state.old_last_wasm_exit_fp(),
+            state.old_last_wasm_entry_sp(),
             &mut f,
         ) {
             log::trace!("====== Done Capturing Backtrace ======");
@@ -266,7 +266,7 @@ impl Backtrace {
     }
 
     /// Iterate over the frames inside this backtrace.
-    pub fn frames<'a>(&'a self) -> impl Iterator<Item = &'a Frame> + 'a {
+    pub fn frames<'a>(&'a self) -> impl ExactSizeIterator<Item = &'a Frame> + 'a {
         self.0.iter()
     }
 }
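Strengthening `frames` from `Iterator` to `ExactSizeIterator` is what lets `TrapBacktrace::new` in the next file pre-size its vector with `frames().len()`. A small sketch of the pattern, with `Frame` and `Backtrace` as stand-ins for the runtime's types:

```rust
/// Stand-in for the runtime's frame type.
struct Frame {
    pc: usize,
}

struct Backtrace(Vec<Frame>);

impl Backtrace {
    /// `slice::iter` already knows its length, so advertising
    /// `ExactSizeIterator` costs nothing.
    fn frames<'a>(&'a self) -> impl ExactSizeIterator<Item = &'a Frame> + 'a {
        self.0.iter()
    }
}

fn main() {
    let bt = Backtrace(vec![Frame { pc: 0x10 }, Frame { pc: 0x20 }]);

    // Callers can now size allocations up front, as `TrapBacktrace::new` does:
    let mut pcs = Vec::with_capacity(bt.frames().len());
    pcs.extend(bt.frames().map(|f| f.pc));
    assert_eq!(pcs, [0x10, 0x20]);
}
```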
diff --git a/crates/wasmtime/src/trap.rs b/crates/wasmtime/src/trap.rs
index 1d7e0095b2..eb488554e1 100644
--- a/crates/wasmtime/src/trap.rs
+++ b/crates/wasmtime/src/trap.rs
@@ -5,7 +5,6 @@ use std::fmt;
 use std::sync::Arc;
 use wasmtime_environ::{EntityRef, FilePos, TrapCode as EnvTrapCode};
 use wasmtime_jit::{demangle_function_name, demangle_function_name_or_index};
-use wasmtime_runtime::Backtrace;
 
 /// A struct representing an aborted instruction execution, with a message
 /// indicating the cause.
@@ -140,19 +139,24 @@ impl fmt::Display for TrapCode {
 #[derive(Debug)]
 pub(crate) struct TrapBacktrace {
     wasm_trace: Vec<FrameInfo>,
-    native_trace: Backtrace,
+    runtime_trace: wasmtime_runtime::Backtrace,
     hint_wasm_backtrace_details_env: bool,
 }
 
 impl TrapBacktrace {
-    pub fn new(store: &StoreOpaque, native_trace: Backtrace, trap_pc: Option<usize>) -> Self {
-        let mut wasm_trace = Vec::<FrameInfo>::new();
+    pub fn new(
+        store: &StoreOpaque,
+        runtime_trace: wasmtime_runtime::Backtrace,
+        trap_pc: Option<usize>,
+    ) -> Self {
+        let mut wasm_trace = Vec::<FrameInfo>::with_capacity(runtime_trace.frames().len());
         let mut hint_wasm_backtrace_details_env = false;
         let wasm_backtrace_details_env_used =
             store.engine().config().wasm_backtrace_details_env_used;
 
-        for frame in native_trace.frames() {
+        for frame in runtime_trace.frames() {
             debug_assert!(frame.pc() != 0);
+
             // Note that we need to be careful about the pc we pass in
             // here to lookup frame information. This program counter is
             // used to translate back to an original source location in
@@ -168,6 +172,31 @@ impl TrapBacktrace {
             } else {
                 frame.pc() - 1
             };
+
+            // NB: The PC we are looking up _must_ be a Wasm PC since
+            // `wasmtime_runtime::Backtrace` only contains Wasm frames.
+            //
+            // However, consider the case where we have multiple, nested calls
+            // across stores (with host code in between, by necessity, since
+            // only things in the same store can be linked directly together):
+            //
+            //     | ...             |
+            //     | Host            |  |
+            //     +-----------------+  | stack
+            //     | Wasm in store A |  | grows
+            //     +-----------------+  | down
+            //     | Host            |  |
+            //     +-----------------+  |
+            //     | Wasm in store B |  V
+            //     +-----------------+
+            //
+            // In this scenario, the `wasmtime_runtime::Backtrace` will contain
+            // two frames: Wasm in store B followed by Wasm in store A. But
+            // `store.modules()` will only have the module information for
+            // modules instantiated within this store. Therefore, we use `if let
+            // Some(..)` instead of the `unwrap` you might otherwise expect and
+            // we ignore frames from modules that were not registered in this
+            // store's module registry.
             if let Some((info, module)) = store.modules().lookup_frame_info(pc_to_lookup) {
                 wasm_trace.push(info);
 
@@ -186,7 +215,7 @@ impl TrapBacktrace {
 
         Self {
             wasm_trace,
-            native_trace,
+            runtime_trace,
             hint_wasm_backtrace_details_env,
         }
     }
@@ -203,7 +232,9 @@ fn _assert_trap_is_sync_and_send(t: &Trap) -> (&dyn Sync, &dyn Send) {
 
 impl Trap {
     /// Creates a new `Trap` with `message`.
+    ///
     /// # Example
+    ///
     /// ```
     /// let trap = wasmtime::Trap::new("unexpected error");
     /// assert!(trap.to_string().contains("unexpected error"));
@@ -343,7 +374,7 @@ impl fmt::Debug for Trap {
         f.field("reason", &self.inner.reason);
         if let Some(backtrace) = self.inner.backtrace.get() {
             f.field("wasm_trace", &backtrace.wasm_trace)
-                .field("native_trace", &backtrace.native_trace);
+                .field("runtime_trace", &backtrace.runtime_trace);
         }
         f.finish()
     }
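The `pc_to_lookup` computation above relies on a standard unwinding detail: for every frame except the one that actually trapped, the recorded PC is a return address, which points one past the `call` instruction. A toy example of why the lookup therefore uses `frame.pc() - 1` (hypothetical PC ranges and names, not the real module registry API):

```rust
use std::ops::Range;

/// Hypothetical module table: half-open PC ranges mapped to function names.
fn lookup<'a>(table: &'a [(Range<usize>, &'a str)], pc: usize) -> Option<&'a str> {
    table
        .iter()
        .find(|(range, _)| range.contains(&pc))
        .map(|(_, name)| *name)
}

fn main() {
    // Suppose `f` occupies [0x10, 0x20) and ends in a `call` instruction.
    let table = [(0x10..0x20, "f"), (0x20..0x30, "g")];

    // The return address pushed by that call is 0x20, the first byte of
    // `g`. Looking it up directly mis-attributes the frame:
    assert_eq!(lookup(&table, 0x20), Some("g"));

    // Subtracting one lands back inside the call instruction, i.e. inside
    // `f`, which is why non-trapping frames use `frame.pc() - 1`:
    assert_eq!(lookup(&table, 0x20 - 1), Some("f"));
}
```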
diff --git a/tests/all/async_functions.rs b/tests/all/async_functions.rs
index 7ea40ec09c..f741d86e07 100644
--- a/tests/all/async_functions.rs
+++ b/tests/all/async_functions.rs
@@ -482,6 +482,8 @@ async fn resume_separate_thread2() {
 
 #[tokio::test]
 async fn resume_separate_thread3() {
+    let _ = env_logger::try_init();
+
     // This test doesn't actually do anything with cross-thread polls, but
     // instead it deals with scheduling futures at "odd" times.
     //
diff --git a/tests/all/traps.rs b/tests/all/traps.rs
index d80221a38f..ddbc657381 100644
--- a/tests/all/traps.rs
+++ b/tests/all/traps.rs
@@ -743,3 +743,252 @@ fn traps_without_address_map() -> Result<()> {
     assert_eq!(trace[1].module_offset(), None);
     Ok(())
 }
+
+#[test]
+fn catch_trap_calling_across_stores() -> Result<()> {
+    let _ = env_logger::try_init();
+
+    let engine = Engine::default();
+
+    let mut child_store = Store::new(&engine, ());
+    let child_module = Module::new(
+        child_store.engine(),
+        r#"
+            (module $child
+                (func $trap (export "trap")
+                    unreachable
+                )
+            )
+        "#,
+    )?;
+    let child_instance = Instance::new(&mut child_store, &child_module, &[])?;
+
+    struct ParentCtx {
+        child_store: Store<()>,
+        child_instance: Instance,
+    }
+
+    let mut linker = Linker::new(&engine);
+    linker.func_wrap(
+        "host",
+        "catch_child_trap",
+        move |mut caller: Caller<'_, ParentCtx>| {
+            let mut ctx = caller.as_context_mut();
+            let data = ctx.data_mut();
+            let func = data
+                .child_instance
+                .get_typed_func::<(), (), _>(&mut data.child_store, "trap")
+                .expect("trap function should be exported");
+
+            let trap = func
+                .call(&mut data.child_store, ())
+                .err()
+                .expect("should trap");
+            assert!(
+                trap.to_string().contains("unreachable"),
+                "trap should contain 'unreachable', got: {trap}"
+            );
+
+            let trace = trap.trace().unwrap();
+
+            assert_eq!(trace.len(), 1);
+            assert_eq!(trace[0].func_name(), Some("trap"));
+            // For now, we only get stack frames for Wasm in this store, not
+            // across all stores.
+            //
+            // assert_eq!(trace[1].func_name(), Some("run"));
+
+            Ok(())
+        },
+    )?;
+
+    let mut store = Store::new(
+        &engine,
+        ParentCtx {
+            child_store,
+            child_instance,
+        },
+    );
+
+    let parent_module = Module::new(
+        store.engine(),
+        r#"
+            (module $parent
+                (func $host.catch_child_trap (import "host" "catch_child_trap"))
+                (func $run (export "run")
+                    call $host.catch_child_trap
+                )
+            )
+        "#,
+    )?;
+
+    let parent_instance = linker.instantiate(&mut store, &parent_module)?;
+
+    let func = parent_instance.get_typed_func::<(), (), _>(&mut store, "run")?;
+    func.call(store, ())?;
+
+    Ok(())
+}
+
+#[tokio::test]
+async fn async_then_sync_trap() -> Result<()> {
+    // Test the trapping and capturing the stack with the following sequence of
+    // calls:
+    //
+    //     a[async] ---> b[host] ---> c[sync]
+
+    drop(env_logger::try_init());
+
+    let wat = r#"
+        (module
+            (import "" "b" (func $b))
+            (func $a (export "a")
+                call $b
+            )
+            (func $c (export "c")
+                unreachable
+            )
+        )
+    "#;
+
+    let mut sync_store = Store::new(&Engine::default(), ());
+
+    let sync_module = Module::new(sync_store.engine(), wat)?;
+
+    let mut sync_linker = Linker::new(sync_store.engine());
+    sync_linker.func_wrap("", "b", |_caller: Caller<_>| unreachable!())?;
+
+    let sync_instance = sync_linker.instantiate(&mut sync_store, &sync_module)?;
+
+    struct AsyncCtx {
+        sync_instance: Instance,
+        sync_store: Store<()>,
+    }
+
+    let mut async_store = Store::new(
+        &Engine::new(Config::new().async_support(true)).unwrap(),
+        AsyncCtx {
+            sync_instance,
+            sync_store,
+        },
+    );
+
+    let async_module = Module::new(async_store.engine(), wat)?;
+
+    let mut async_linker = Linker::new(async_store.engine());
+    async_linker.func_wrap("", "b", move |mut caller: Caller<'_, AsyncCtx>| {
+        log::info!("Called `b`...");
+        let sync_instance = caller.data().sync_instance;
+        let sync_store = &mut caller.data_mut().sync_store;
+
+        log::info!("Calling `c`...");
+        let c = sync_instance
+            .get_typed_func::<(), (), _>(&mut *sync_store, "c")
+            .unwrap();
+        c.call(sync_store, ())?;
+        Ok(())
+    })?;
+
+    let async_instance = async_linker
+        .instantiate_async(&mut async_store, &async_module)
+        .await?;
+
+    log::info!("Calling `a`...");
+    let a = async_instance
+        .get_typed_func::<(), (), _>(&mut async_store, "a")
+        .unwrap();
+    let trap = a.call_async(&mut async_store, ()).await.unwrap_err();
+
+    let trace = trap.trace().unwrap();
+    // We don't support cross-store or cross-engine symbolication currently, so
+    // the other frames are ignored.
+    assert_eq!(trace.len(), 1);
+    assert_eq!(trace[0].func_name(), Some("c"));
+
+    Ok(())
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn sync_then_async_trap() -> Result<()> {
+    // Test the trapping and capturing the stack with the following sequence of
+    // calls:
+    //
+    //     a[sync] ---> b[host] ---> c[async]
+
+    drop(env_logger::try_init());
+
+    let wat = r#"
+        (module
+            (import "" "b" (func $b))
+            (func $a (export "a")
+                call $b
+            )
+            (func $c (export "c")
+                unreachable
+            )
+        )
+    "#;
+
+    let mut async_store = Store::new(&Engine::new(Config::new().async_support(true)).unwrap(), ());
+
+    let async_module = Module::new(async_store.engine(), wat)?;
+
+    let mut async_linker = Linker::new(async_store.engine());
+    async_linker.func_wrap("", "b", |_caller: Caller<_>| unreachable!())?;
+
+    let async_instance = async_linker
+        .instantiate_async(&mut async_store, &async_module)
+        .await?;
+
+    struct SyncCtx {
+        async_instance: Instance,
+        async_store: Store<()>,
+    }
+
+    let mut sync_store = Store::new(
+        &Engine::default(),
+        SyncCtx {
+            async_instance,
+            async_store,
+        },
+    );
+
+    let sync_module = Module::new(sync_store.engine(), wat)?;
+
+    let mut sync_linker = Linker::new(sync_store.engine());
+    sync_linker.func_wrap(
+        "",
+        "b",
+        move |mut caller: Caller<'_, SyncCtx>| -> Result<(), Trap> {
+            log::info!("Called `b`...");
+            let async_instance = caller.data().async_instance;
+            let async_store = &mut caller.data_mut().async_store;
+
+            log::info!("Calling `c`...");
+            let c = async_instance
+                .get_typed_func::<(), (), _>(&mut *async_store, "c")
+                .unwrap();
+            tokio::task::block_in_place(|| {
+                tokio::runtime::Handle::current()
+                    .block_on(async move { c.call_async(async_store, ()).await })
+            })?;
+            Ok(())
+        },
+    )?;
+
+    let sync_instance = sync_linker.instantiate(&mut sync_store, &sync_module)?;
+
+    log::info!("Calling `a`...");
+    let a = sync_instance
+        .get_typed_func::<(), (), _>(&mut sync_store, "a")
+        .unwrap();
+    let trap = a.call(&mut sync_store, ()).unwrap_err();
+
+    let trace = trap.trace().unwrap();
+    // We don't support cross-store or cross-engine symbolication currently, so
+    // the other frames are ignored.
+    assert_eq!(trace.len(), 1);
+    assert_eq!(trace[0].func_name(), Some("c"));
+
+    Ok(())
+}
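One last note on the `tls::set` rewrite in `traphandlers.rs` above: it keeps the drop-guard shape of the original, so the previous TLS pointer is restored even when the closure unwinds from a trap or panic. A simplified, safe sketch of that guard pattern, with a `usize` standing in for the real `*const CallThreadState`:

```rust
use std::cell::Cell;

thread_local! {
    // A `usize` stands in for the real `*const CallThreadState` pointer.
    static CURRENT: Cell<usize> = Cell::new(0);
}

/// Install `value` as this thread's "current" state for the duration of
/// `closure`, restoring the previous value afterwards, even on unwind.
fn set<R>(value: usize, closure: impl FnOnce() -> R) -> R {
    struct Reset {
        prev: usize,
    }

    impl Drop for Reset {
        fn drop(&mut self) {
            // Runs on both normal return and panic-driven unwinding.
            CURRENT.with(|c| c.set(self.prev));
        }
    }

    let prev = CURRENT.with(|c| c.replace(value));
    let _reset = Reset { prev };
    closure()
}

fn main() {
    set(1, || {
        assert_eq!(CURRENT.with(|c| c.get()), 1);
        set(2, || assert_eq!(CURRENT.with(|c| c.get()), 2));
        // The inner `set` restored the outer value when its guard dropped.
        assert_eq!(CURRENT.with(|c| c.get()), 1);
    });
    assert_eq!(CURRENT.with(|c| c.get()), 0);
}
```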