Wasmtime: fix stack walking across frames from different stores (#4779)
We were previously implicitly assuming that every Wasm frame in a stack used the
same `VMRuntimeLimits` as the previous frame we walked, but that does not hold
when Wasm in store A calls into the host, which then calls into Wasm in store B:
    | ...             |
    | Host            |  |
    +-----------------+  | stack
    | Wasm in store A |  | grows
    +-----------------+  | down
    | Host            |  |
    +-----------------+  |
    | Wasm in store B |  V
    +-----------------+
Trying to walk this stack would previously result in a runtime panic.
The solution is to push the maintenance of our list of saved Wasm FP/SP/PC
registers, which lets us identify the contiguous regions of Wasm frames on the
stack, deeper into `CallThreadState`. The saved-register list is now kept up to
date whenever the `CallThreadState` linked list changes: the
`CallThreadState::prev` field is made private and is only accessible via a
getter and a setter, and the setter always upholds our invariants.
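The crucial detail is that when a new `CallThreadState` is linked onto the list, the
FP/PC/SP it saves must be read from the *previous* activation's `VMRuntimeLimits`
(which may belong to a different store), not from its own. Below is a minimal,
self-contained sketch of that bookkeeping using hypothetical stand-in types
(`Limits`, `Activation`, `set_prev`); it is illustrative only, not Wasmtime's actual
API, whose real implementation appears in the diff that follows.

// Sketch only: hypothetical stand-ins for `VMRuntimeLimits` / `CallThreadState`.
use std::cell::Cell;

/// Stand-in for the per-store register block (`VMRuntimeLimits`).
#[derive(Default)]
struct Limits {
    last_wasm_exit_fp: Cell<usize>,
    last_wasm_exit_pc: Cell<usize>,
    last_wasm_entry_sp: Cell<usize>,
}

/// Stand-in for one activation (`CallThreadState`) on the linked list.
struct Activation<'a> {
    limits: &'a Limits,
    // Registers of the *previous* activation, captured when this one is linked in.
    old_fp: Cell<usize>,
    old_pc: Cell<usize>,
    old_sp: Cell<usize>,
    prev: Cell<Option<&'a Activation<'a>>>,
}

impl<'a> Activation<'a> {
    fn new(limits: &'a Limits) -> Self {
        Activation {
            limits,
            old_fp: Cell::new(0),
            old_pc: Cell::new(0),
            old_sp: Cell::new(0),
            prev: Cell::new(None),
        }
    }

    /// The invariant: linking in a previous activation saves the registers out of
    /// the *previous* activation's limits (possibly a different store's), never
    /// out of `self.limits`.
    fn set_prev(&self, prev: Option<&'a Activation<'a>>) {
        self.prev.set(prev);
        if let Some(prev) = prev {
            self.old_fp.set(prev.limits.last_wasm_exit_fp.replace(0));
            self.old_pc.set(prev.limits.last_wasm_exit_pc.replace(0));
            self.old_sp.set(prev.limits.last_wasm_entry_sp.replace(0));
        }
    }
}

fn main() {
    // Two stores, hence two distinct register blocks.
    let limits_a = Limits::default();
    let limits_b = Limits::default();

    // Wasm in store A is running; its exit trampoline recorded FP/PC/SP.
    let a = Activation::new(&limits_a);
    limits_a.last_wasm_exit_fp.set(0x1000);
    limits_a.last_wasm_exit_pc.set(0x2000);
    limits_a.last_wasm_entry_sp.set(0x3000);

    // The host now calls into store B: the new activation must capture store A's
    // registers from `limits_a`, even though it will itself use `limits_b`.
    let b = Activation::new(&limits_b);
    b.set_prev(Some(&a));

    assert!(b.prev.get().is_some());
    assert_eq!(b.old_fp.get(), 0x1000);
    assert_eq!(b.old_pc.get(), 0x2000);
    assert_eq!(b.old_sp.get(), 0x3000);
    // Store A's registers were consumed, so the walker won't reuse them.
    assert_eq!(limits_a.last_wasm_exit_fp.get(), 0);
}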
Cargo.lock (generated): 1 line changed
@@ -3443,6 +3443,7 @@ dependencies = [
  "humantime 2.1.0",
  "libc",
  "listenfd",
+ "log",
  "memchr",
  "num_cpus",
  "once_cell",
@@ -45,6 +45,7 @@ rustix = { version = "0.35.6", features = ["mm", "param"] }
 # depend again on wasmtime to activate its default features for tests
 wasmtime = { path = "crates/wasmtime", version = "0.41.0", features = ['component-model'] }
 env_logger = "0.9.0"
+log = "0.4.8"
 filecheck = "0.5.0"
 tempfile = "3.1.0"
 test-programs = { path = "crates/test-programs" }
@@ -7,7 +7,7 @@ use crate::{VMContext, VMRuntimeLimits};
 use anyhow::Error;
 use std::any::Any;
 use std::cell::{Cell, UnsafeCell};
-use std::mem::{self, MaybeUninit};
+use std::mem::MaybeUninit;
 use std::ptr;
 use std::sync::Once;
 use wasmtime_environ::TrapCode;
@@ -182,19 +182,7 @@ where
 {
     let limits = (*caller).instance().runtime_limits();

-    let old_last_wasm_exit_fp = mem::replace(&mut *(**limits).last_wasm_exit_fp.get(), 0);
-    let old_last_wasm_exit_pc = mem::replace(&mut *(**limits).last_wasm_exit_pc.get(), 0);
-    let old_last_wasm_entry_sp = mem::replace(&mut *(**limits).last_wasm_entry_sp.get(), 0);
-
-    let result = CallThreadState::new(
-        signal_handler,
-        capture_backtrace,
-        old_last_wasm_exit_fp,
-        old_last_wasm_exit_pc,
-        old_last_wasm_entry_sp,
-        *limits,
-    )
-    .with(|cx| {
+    let result = CallThreadState::new(signal_handler, capture_backtrace, *limits).with(|cx| {
         wasmtime_setjmp(
             cx.jmp_buf.as_ptr(),
             call_closure::<F>,
@@ -203,10 +191,6 @@ where
         )
     });

-    *(**limits).last_wasm_exit_fp.get() = old_last_wasm_exit_fp;
-    *(**limits).last_wasm_exit_pc.get() = old_last_wasm_exit_pc;
-    *(**limits).last_wasm_entry_sp.get() = old_last_wasm_entry_sp;
-
     return match result {
         Ok(x) => Ok(x),
         Err((UnwindReason::Trap(reason), backtrace)) => Err(Box::new(Trap { reason, backtrace })),
@@ -221,20 +205,159 @@ where
     }
 }

-/// Temporary state stored on the stack which is registered in the `tls` module
-/// below for calls into wasm.
-pub struct CallThreadState {
-    unwind: UnsafeCell<MaybeUninit<(UnwindReason, Option<Backtrace>)>>,
-    jmp_buf: Cell<*const u8>,
-    handling_trap: Cell<bool>,
-    signal_handler: Option<*const SignalHandler<'static>>,
-    prev: Cell<tls::Ptr>,
-    capture_backtrace: bool,
-    pub(crate) old_last_wasm_exit_fp: usize,
-    pub(crate) old_last_wasm_exit_pc: usize,
-    pub(crate) old_last_wasm_entry_sp: usize,
-    pub(crate) limits: *const VMRuntimeLimits,
-}
+// Module to hide visibility of the `CallThreadState::prev` field and force
+// usage of its accessor methods.
+mod call_thread_state {
+    use super::*;
+    use std::mem;
+
+    /// Temporary state stored on the stack which is registered in the `tls` module
+    /// below for calls into wasm.
+    pub struct CallThreadState {
+        pub(super) unwind: UnsafeCell<MaybeUninit<(UnwindReason, Option<Backtrace>)>>,
+        pub(super) jmp_buf: Cell<*const u8>,
+        pub(super) handling_trap: Cell<bool>,
+        pub(super) signal_handler: Option<*const SignalHandler<'static>>,
+        pub(super) capture_backtrace: bool,
+
+        pub(crate) limits: *const VMRuntimeLimits,
+
+        prev: Cell<tls::Ptr>,
+
+        // The values of `VMRuntimeLimits::last_wasm_{exit_{pc,fp},entry_sp}` for
+        // the *previous* `CallThreadState`. Our *current* last wasm PC/FP/SP are
+        // saved in `self.limits`. We save a copy of the old registers here because
+        // the `VMRuntimeLimits` typically doesn't change across nested calls into
+        // Wasm (i.e. they are typically calls back into the same store and
+        // `self.limits == self.prev.limits`) and we must to maintain the list of
+        // contiguous-Wasm-frames stack regions for backtracing purposes.
+        old_last_wasm_exit_fp: Cell<usize>,
+        old_last_wasm_exit_pc: Cell<usize>,
+        old_last_wasm_entry_sp: Cell<usize>,
+    }
+
+    impl CallThreadState {
+        #[inline]
+        pub(super) fn new(
+            signal_handler: Option<*const SignalHandler<'static>>,
+            capture_backtrace: bool,
+            limits: *const VMRuntimeLimits,
+        ) -> CallThreadState {
+            CallThreadState {
+                unwind: UnsafeCell::new(MaybeUninit::uninit()),
+                jmp_buf: Cell::new(ptr::null()),
+                handling_trap: Cell::new(false),
+                signal_handler,
+                capture_backtrace,
+                limits,
+                prev: Cell::new(ptr::null()),
+                old_last_wasm_exit_fp: Cell::new(0),
+                old_last_wasm_exit_pc: Cell::new(0),
+                old_last_wasm_entry_sp: Cell::new(0),
+            }
+        }
+
+        /// Get the saved FP upon exit from Wasm for the previous `CallThreadState`.
+        pub fn old_last_wasm_exit_fp(&self) -> usize {
+            self.old_last_wasm_exit_fp.get()
+        }
+
+        /// Get the saved PC upon exit from Wasm for the previous `CallThreadState`.
+        pub fn old_last_wasm_exit_pc(&self) -> usize {
+            self.old_last_wasm_exit_pc.get()
+        }
+
+        /// Get the saved SP upon entry into Wasm for the previous `CallThreadState`.
+        pub fn old_last_wasm_entry_sp(&self) -> usize {
+            self.old_last_wasm_entry_sp.get()
+        }
+
+        /// Get the previous `CallThreadState`.
+        pub fn prev(&self) -> tls::Ptr {
+            self.prev.get()
+        }
+
+        /// Connect the link to the previous `CallThreadState`.
+        ///
+        /// Synchronizes the last wasm FP, PC, and SP on `self` and the old
+        /// `self.prev` for the given new `prev`, and returns the old
+        /// `self.prev`.
+        pub unsafe fn set_prev(&self, prev: tls::Ptr) -> tls::Ptr {
+            let old_prev = self.prev.get();
+
+            // Restore the old `prev`'s saved registers in its
+            // `VMRuntimeLimits`. This is necessary for when we are async
+            // suspending the top `CallThreadState` and doing `set_prev(null)`
+            // on it, and so any stack walking we do subsequently will start at
+            // the old `prev` and look at its `VMRuntimeLimits` to get the
+            // initial saved registers.
+            if let Some(old_prev) = old_prev.as_ref() {
+                *(*old_prev.limits).last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp();
+                *(*old_prev.limits).last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc();
+                *(*old_prev.limits).last_wasm_entry_sp.get() = self.old_last_wasm_entry_sp();
+            }
+
+            self.prev.set(prev);
+
+            let mut old_last_wasm_exit_fp = 0;
+            let mut old_last_wasm_exit_pc = 0;
+            let mut old_last_wasm_entry_sp = 0;
+            if let Some(prev) = prev.as_ref() {
+                // We are entering a new `CallThreadState` or resuming a
+                // previously suspended one. This means we will push new Wasm
+                // frames that save the new Wasm FP/SP/PC registers into
+                // `VMRuntimeLimits`, we need to first save the old Wasm
+                // FP/SP/PC registers into this new `CallThreadState` to
+                // maintain our list of contiguous Wasm frame regions that we
+                // use when capturing stack traces.
+                //
+                // NB: the Wasm<--->host trampolines saved the Wasm FP/SP/PC
+                // registers in the active-at-that-time store's
+                // `VMRuntimeLimits`. For the most recent FP/PC/SP that is the
+                // `state.prev.limits` (since we haven't entered this
+                // `CallThreadState` yet). And that can be a different
+                // `VMRuntimeLimits` instance from the currently active
+                // `state.limits`, which will be used by the upcoming call into
+                // Wasm! Consider the case where we have multiple, nested calls
+                // across stores (with host code in between, by necessity, since
+                // only things in the same store can be linked directly
+                // together):
+                //
+                //     | ...             |
+                //     | Host            |  |
+                //     +-----------------+  | stack
+                //     | Wasm in store A |  | grows
+                //     +-----------------+  | down
+                //     | Host            |  |
+                //     +-----------------+  |
+                //     | Wasm in store B |  V
+                //     +-----------------+
+                //
+                // In this scenario `state.limits != state.prev.limits`,
+                // i.e. `B.limits != A.limits`! Therefore we must take care to
+                // read the old FP/SP/PC from `state.prev.limits`, rather than
+                // `state.limits`, and store those saved registers into the
+                // current `state`.
+                //
+                // See also the comment above the
+                // `CallThreadState::old_last_wasm_*` fields.
+                old_last_wasm_exit_fp =
+                    mem::replace(&mut *(*prev.limits).last_wasm_exit_fp.get(), 0);
+                old_last_wasm_exit_pc =
+                    mem::replace(&mut *(*prev.limits).last_wasm_exit_pc.get(), 0);
+                old_last_wasm_entry_sp =
+                    mem::replace(&mut *(*prev.limits).last_wasm_entry_sp.get(), 0);
+            }
+
+            self.old_last_wasm_exit_fp.set(old_last_wasm_exit_fp);
+            self.old_last_wasm_exit_pc.set(old_last_wasm_exit_pc);
+            self.old_last_wasm_entry_sp.set(old_last_wasm_entry_sp);
+
+            old_prev
+        }
+    }
+}
+pub use call_thread_state::*;

 enum UnwindReason {
     Panic(Box<dyn Any + Send>),
@@ -242,34 +365,11 @@ enum UnwindReason {
 }

 impl CallThreadState {
-    #[inline]
-    fn new(
-        signal_handler: Option<*const SignalHandler<'static>>,
-        capture_backtrace: bool,
-        old_last_wasm_exit_fp: usize,
-        old_last_wasm_exit_pc: usize,
-        old_last_wasm_entry_sp: usize,
-        limits: *const VMRuntimeLimits,
-    ) -> CallThreadState {
-        CallThreadState {
-            unwind: UnsafeCell::new(MaybeUninit::uninit()),
-            jmp_buf: Cell::new(ptr::null()),
-            handling_trap: Cell::new(false),
-            signal_handler,
-            prev: Cell::new(ptr::null()),
-            capture_backtrace,
-            old_last_wasm_exit_fp,
-            old_last_wasm_exit_pc,
-            old_last_wasm_entry_sp,
-            limits,
-        }
-    }
-
     fn with(
-        self,
+        mut self,
         closure: impl FnOnce(&CallThreadState) -> i32,
     ) -> Result<(), (UnwindReason, Option<Backtrace>)> {
-        let ret = tls::set(&self, || closure(&self));
+        let ret = tls::set(&mut self, |me| closure(me));
         if ret != 0 {
             Ok(())
         } else {
@@ -366,7 +466,7 @@ impl CallThreadState {
         let mut state = Some(self);
         std::iter::from_fn(move || {
             let this = state?;
-            state = unsafe { this.prev.get().as_ref() };
+            state = unsafe { this.prev().as_ref() };
             Some(this)
         })
     }
@@ -462,7 +562,9 @@ mod tls {

     /// Opaque state used to help control TLS state across stack switches for
     /// async support.
-    pub struct TlsRestore(raw::Ptr);
+    pub struct TlsRestore {
+        state: raw::Ptr,
+    }

     impl TlsRestore {
         /// Takes the TLS state that is currently configured and returns a
@@ -476,14 +578,16 @@ mod tls {
             // removing ourselves from the call-stack, and in the process we
             // null out our own previous field for safety in case it's
             // accidentally used later.
-            let raw = raw::get();
-            if !raw.is_null() {
-                let prev = (*raw).prev.replace(ptr::null());
-                raw::replace(prev);
+            let state = raw::get();
+            if let Some(state) = state.as_ref() {
+                let prev_state = state.set_prev(ptr::null());
+                raw::replace(prev_state);
+            } else {
+                // Null case: we aren't in a wasm context, so theres no tls to
+                // save for restoration.
             }
-            // Null case: we aren't in a wasm context, so theres no tls
-            // to save for restoration.
-            TlsRestore(raw)
+
+            TlsRestore { state }
         }

         /// Restores a previous tls state back into this thread's TLS.
@@ -493,40 +597,50 @@ mod tls {
         pub unsafe fn replace(self) {
             // Null case: we aren't in a wasm context, so theres no tls
             // to restore.
-            if self.0.is_null() {
+            if self.state.is_null() {
                 return;
             }

             // We need to configure our previous TLS pointer to whatever is in
             // TLS at this time, and then we set the current state to ourselves.
             let prev = raw::get();
-            assert!((*self.0).prev.get().is_null());
-            (*self.0).prev.set(prev);
-            raw::replace(self.0);
+            assert!((*self.state).prev().is_null());
+            (*self.state).set_prev(prev);
+            raw::replace(self.state);
         }
     }

     /// Configures thread local state such that for the duration of the
-    /// execution of `closure` any call to `with` will yield `ptr`, unless this
-    /// is recursively called again.
+    /// execution of `closure` any call to `with` will yield `state`, unless
+    /// this is recursively called again.
     #[inline]
-    pub fn set<R>(state: &CallThreadState, closure: impl FnOnce() -> R) -> R {
-        struct Reset<'a>(&'a CallThreadState);
+    pub fn set<R>(state: &mut CallThreadState, closure: impl FnOnce(&CallThreadState) -> R) -> R {
+        struct Reset<'a> {
+            state: &'a CallThreadState,
+        }

         impl Drop for Reset<'_> {
             #[inline]
             fn drop(&mut self) {
-                raw::replace(self.0.prev.replace(ptr::null()));
+                unsafe {
+                    let prev = self.state.set_prev(ptr::null());
+                    let old_state = raw::replace(prev);
+                    debug_assert!(std::ptr::eq(old_state, self.state));
+                }
             }
         }

         let prev = raw::replace(state);
-        state.prev.set(prev);
-        let _reset = Reset(state);
-        closure()
+        unsafe {
+            state.set_prev(prev);
+
+            let reset = Reset { state };
+            closure(reset.state)
+        }
     }

-    /// Returns the last pointer configured with `set` above. Panics if `set`
-    /// has not been previously called.
+    /// Returns the last pointer configured with `set` above, if any.
     pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState>) -> R) -> R {
         let p = raw::get();
         unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
@@ -149,18 +149,18 @@ impl Backtrace {
         // trace through (since each `CallTheadState` saves the *previous*
         // call into Wasm's saved registers, and the youngest call into
         // Wasm's registers are saved in the `VMRuntimeLimits`)
-        if state.prev.get().is_null() {
-            debug_assert_eq!(state.old_last_wasm_exit_pc, 0);
-            debug_assert_eq!(state.old_last_wasm_exit_fp, 0);
-            debug_assert_eq!(state.old_last_wasm_entry_sp, 0);
+        if state.prev().is_null() {
+            debug_assert_eq!(state.old_last_wasm_exit_pc(), 0);
+            debug_assert_eq!(state.old_last_wasm_exit_fp(), 0);
+            debug_assert_eq!(state.old_last_wasm_entry_sp(), 0);
             log::trace!("====== Done Capturing Backtrace ======");
             return;
         }

         if let ControlFlow::Break(()) = Self::trace_through_wasm(
-            state.old_last_wasm_exit_pc,
-            state.old_last_wasm_exit_fp,
-            state.old_last_wasm_entry_sp,
+            state.old_last_wasm_exit_pc(),
+            state.old_last_wasm_exit_fp(),
+            state.old_last_wasm_entry_sp(),
             &mut f,
         ) {
             log::trace!("====== Done Capturing Backtrace ======");
@@ -266,7 +266,7 @@ impl Backtrace {
     }

     /// Iterate over the frames inside this backtrace.
-    pub fn frames<'a>(&'a self) -> impl Iterator<Item = &'a Frame> + 'a {
+    pub fn frames<'a>(&'a self) -> impl ExactSizeIterator<Item = &'a Frame> + 'a {
         self.0.iter()
     }
 }
@@ -5,7 +5,6 @@ use std::fmt;
 use std::sync::Arc;
 use wasmtime_environ::{EntityRef, FilePos, TrapCode as EnvTrapCode};
 use wasmtime_jit::{demangle_function_name, demangle_function_name_or_index};
-use wasmtime_runtime::Backtrace;

 /// A struct representing an aborted instruction execution, with a message
 /// indicating the cause.
@@ -140,19 +139,24 @@ impl fmt::Display for TrapCode {
 #[derive(Debug)]
 pub(crate) struct TrapBacktrace {
     wasm_trace: Vec<FrameInfo>,
-    native_trace: Backtrace,
+    runtime_trace: wasmtime_runtime::Backtrace,
     hint_wasm_backtrace_details_env: bool,
 }

 impl TrapBacktrace {
-    pub fn new(store: &StoreOpaque, native_trace: Backtrace, trap_pc: Option<usize>) -> Self {
-        let mut wasm_trace = Vec::<FrameInfo>::new();
+    pub fn new(
+        store: &StoreOpaque,
+        runtime_trace: wasmtime_runtime::Backtrace,
+        trap_pc: Option<usize>,
+    ) -> Self {
+        let mut wasm_trace = Vec::<FrameInfo>::with_capacity(runtime_trace.frames().len());
         let mut hint_wasm_backtrace_details_env = false;
         let wasm_backtrace_details_env_used =
             store.engine().config().wasm_backtrace_details_env_used;

-        for frame in native_trace.frames() {
+        for frame in runtime_trace.frames() {
             debug_assert!(frame.pc() != 0);

             // Note that we need to be careful about the pc we pass in
             // here to lookup frame information. This program counter is
             // used to translate back to an original source location in
@@ -168,6 +172,31 @@ impl TrapBacktrace {
             } else {
                 frame.pc() - 1
             };
+
+            // NB: The PC we are looking up _must_ be a Wasm PC since
+            // `wasmtime_runtime::Backtrace` only contains Wasm frames.
+            //
+            // However, consider the case where we have multiple, nested calls
+            // across stores (with host code in between, by necessity, since
+            // only things in the same store can be linked directly together):
+            //
+            //     | ...             |
+            //     | Host            |  |
+            //     +-----------------+  | stack
+            //     | Wasm in store A |  | grows
+            //     +-----------------+  | down
+            //     | Host            |  |
+            //     +-----------------+  |
+            //     | Wasm in store B |  V
+            //     +-----------------+
+            //
+            // In this scenario, the `wasmtime_runtime::Backtrace` will contain
+            // two frames: Wasm in store B followed by Wasm in store A. But
+            // `store.modules()` will only have the module information for
+            // modules instantiated within this store. Therefore, we use `if let
+            // Some(..)` instead of the `unwrap` you might otherwise expect and
+            // we ignore frames from modules that were not registered in this
+            // store's module registry.
             if let Some((info, module)) = store.modules().lookup_frame_info(pc_to_lookup) {
                 wasm_trace.push(info);

@@ -186,7 +215,7 @@ impl TrapBacktrace {

         Self {
             wasm_trace,
-            native_trace,
+            runtime_trace,
             hint_wasm_backtrace_details_env,
         }
     }
@@ -203,7 +232,9 @@ fn _assert_trap_is_sync_and_send(t: &Trap) -> (&dyn Sync, &dyn Send) {

 impl Trap {
     /// Creates a new `Trap` with `message`.
+    ///
     /// # Example
+    ///
     /// ```
     /// let trap = wasmtime::Trap::new("unexpected error");
     /// assert!(trap.to_string().contains("unexpected error"));
@@ -343,7 +374,7 @@ impl fmt::Debug for Trap {
         f.field("reason", &self.inner.reason);
         if let Some(backtrace) = self.inner.backtrace.get() {
             f.field("wasm_trace", &backtrace.wasm_trace)
-                .field("native_trace", &backtrace.native_trace);
+                .field("runtime_trace", &backtrace.runtime_trace);
         }
         f.finish()
     }
@@ -482,6 +482,8 @@ async fn resume_separate_thread2() {

 #[tokio::test]
 async fn resume_separate_thread3() {
+    let _ = env_logger::try_init();
+
     // This test doesn't actually do anything with cross-thread polls, but
     // instead it deals with scheduling futures at "odd" times.
     //
@@ -743,3 +743,252 @@ fn traps_without_address_map() -> Result<()> {
     assert_eq!(trace[1].module_offset(), None);
     Ok(())
 }
+
+#[test]
+fn catch_trap_calling_across_stores() -> Result<()> {
+    let _ = env_logger::try_init();
+
+    let engine = Engine::default();
+
+    let mut child_store = Store::new(&engine, ());
+    let child_module = Module::new(
+        child_store.engine(),
+        r#"
+            (module $child
+                (func $trap (export "trap")
+                    unreachable
+                )
+            )
+        "#,
+    )?;
+    let child_instance = Instance::new(&mut child_store, &child_module, &[])?;
+
+    struct ParentCtx {
+        child_store: Store<()>,
+        child_instance: Instance,
+    }
+
+    let mut linker = Linker::new(&engine);
+    linker.func_wrap(
+        "host",
+        "catch_child_trap",
+        move |mut caller: Caller<'_, ParentCtx>| {
+            let mut ctx = caller.as_context_mut();
+            let data = ctx.data_mut();
+            let func = data
+                .child_instance
+                .get_typed_func::<(), (), _>(&mut data.child_store, "trap")
+                .expect("trap function should be exported");
+
+            let trap = func
+                .call(&mut data.child_store, ())
+                .err()
+                .expect("should trap");
+            assert!(
+                trap.to_string().contains("unreachable"),
+                "trap should contain 'unreachable', got: {trap}"
+            );
+
+            let trace = trap.trace().unwrap();
+
+            assert_eq!(trace.len(), 1);
+            assert_eq!(trace[0].func_name(), Some("trap"));
+            // For now, we only get stack frames for Wasm in this store, not
+            // across all stores.
+            //
+            // assert_eq!(trace[1].func_name(), Some("run"));
+
+            Ok(())
+        },
+    )?;
+
+    let mut store = Store::new(
+        &engine,
+        ParentCtx {
+            child_store,
+            child_instance,
+        },
+    );
+
+    let parent_module = Module::new(
+        store.engine(),
+        r#"
+            (module $parent
+                (func $host.catch_child_trap (import "host" "catch_child_trap"))
+                (func $run (export "run")
+                    call $host.catch_child_trap
+                )
+            )
+        "#,
+    )?;
+
+    let parent_instance = linker.instantiate(&mut store, &parent_module)?;
+
+    let func = parent_instance.get_typed_func::<(), (), _>(&mut store, "run")?;
+    func.call(store, ())?;
+
+    Ok(())
+}
+
+#[tokio::test]
+async fn async_then_sync_trap() -> Result<()> {
+    // Test the trapping and capturing the stack with the following sequence of
+    // calls:
+    //
+    //     a[async] ---> b[host] ---> c[sync]
+
+    drop(env_logger::try_init());
+
+    let wat = r#"
+        (module
+            (import "" "b" (func $b))
+            (func $a (export "a")
+                call $b
+            )
+            (func $c (export "c")
+                unreachable
+            )
+        )
+    "#;
+
+    let mut sync_store = Store::new(&Engine::default(), ());
+
+    let sync_module = Module::new(sync_store.engine(), wat)?;
+
+    let mut sync_linker = Linker::new(sync_store.engine());
+    sync_linker.func_wrap("", "b", |_caller: Caller<_>| unreachable!())?;
+
+    let sync_instance = sync_linker.instantiate(&mut sync_store, &sync_module)?;
+
+    struct AsyncCtx {
+        sync_instance: Instance,
+        sync_store: Store<()>,
+    }
+
+    let mut async_store = Store::new(
+        &Engine::new(Config::new().async_support(true)).unwrap(),
+        AsyncCtx {
+            sync_instance,
+            sync_store,
+        },
+    );
+
+    let async_module = Module::new(async_store.engine(), wat)?;
+
+    let mut async_linker = Linker::new(async_store.engine());
+    async_linker.func_wrap("", "b", move |mut caller: Caller<AsyncCtx>| {
+        log::info!("Called `b`...");
+        let sync_instance = caller.data().sync_instance;
+        let sync_store = &mut caller.data_mut().sync_store;
+
+        log::info!("Calling `c`...");
+        let c = sync_instance
+            .get_typed_func::<(), (), _>(&mut *sync_store, "c")
+            .unwrap();
+        c.call(sync_store, ())?;
+        Ok(())
+    })?;
+
+    let async_instance = async_linker
+        .instantiate_async(&mut async_store, &async_module)
+        .await?;
+
+    log::info!("Calling `a`...");
+    let a = async_instance
+        .get_typed_func::<(), (), _>(&mut async_store, "a")
+        .unwrap();
+    let trap = a.call_async(&mut async_store, ()).await.unwrap_err();
+
+    let trace = trap.trace().unwrap();
+    // We don't support cross-store or cross-engine symbolication currently, so
+    // the other frames are ignored.
+    assert_eq!(trace.len(), 1);
+    assert_eq!(trace[0].func_name(), Some("c"));
+
+    Ok(())
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn sync_then_async_trap() -> Result<()> {
+    // Test the trapping and capturing the stack with the following sequence of
+    // calls:
+    //
+    //     a[sync] ---> b[host] ---> c[async]
+
+    drop(env_logger::try_init());
+
+    let wat = r#"
+        (module
+            (import "" "b" (func $b))
+            (func $a (export "a")
+                call $b
+            )
+            (func $c (export "c")
+                unreachable
+            )
+        )
+    "#;
+
+    let mut async_store = Store::new(&Engine::new(Config::new().async_support(true)).unwrap(), ());
+
+    let async_module = Module::new(async_store.engine(), wat)?;
+
+    let mut async_linker = Linker::new(async_store.engine());
+    async_linker.func_wrap("", "b", |_caller: Caller<_>| unreachable!())?;
+
+    let async_instance = async_linker
+        .instantiate_async(&mut async_store, &async_module)
+        .await?;
+
+    struct SyncCtx {
+        async_instance: Instance,
+        async_store: Store<()>,
+    }
+
+    let mut sync_store = Store::new(
+        &Engine::default(),
+        SyncCtx {
+            async_instance,
+            async_store,
+        },
+    );
+
+    let sync_module = Module::new(sync_store.engine(), wat)?;
+
+    let mut sync_linker = Linker::new(sync_store.engine());
+    sync_linker.func_wrap(
+        "",
+        "b",
+        move |mut caller: Caller<SyncCtx>| -> Result<(), Trap> {
+            log::info!("Called `b`...");
+            let async_instance = caller.data().async_instance;
+            let async_store = &mut caller.data_mut().async_store;
+
+            log::info!("Calling `c`...");
+            let c = async_instance
+                .get_typed_func::<(), (), _>(&mut *async_store, "c")
+                .unwrap();
+            tokio::task::block_in_place(|| {
+                tokio::runtime::Handle::current()
+                    .block_on(async move { c.call_async(async_store, ()).await })
+            })?;
+            Ok(())
+        },
+    )?;
+
+    let sync_instance = sync_linker.instantiate(&mut sync_store, &sync_module)?;
+
+    log::info!("Calling `a`...");
+    let a = sync_instance
+        .get_typed_func::<(), (), _>(&mut sync_store, "a")
+        .unwrap();
+    let trap = a.call(&mut sync_store, ()).unwrap_err();
+
+    let trace = trap.trace().unwrap();
+    // We don't support cross-store or cross-engine symbolication currently, so
+    // the other frames are ignored.
+    assert_eq!(trace.len(), 1);
+    assert_eq!(trace[0].func_name(), Some("c"));
+
+    Ok(())
+}
|||||||
Reference in New Issue
Block a user