Save exit Wasm FP and PC in component-to-host trampolines (#4601)

* Wasmtime: Add a pointer to `VMRuntimeLimits` in component contexts

* Save exit Wasm FP and PC in component-to-host trampolines

Fixes #4535

* Add comment about why we deref the trampoline's FP

* Update some tests to use new `vmruntime_limits_*` methods
Author: Nick Fitzgerald (committed by GitHub)
Date:   2022-08-04 08:27:30 -07:00
parent f69acd6187
commit 70ce288dc7
16 changed files with 206 additions and 149 deletions
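
To make the change concrete: each component-to-host trampoline now records where execution left Wasm before it calls into the host, by writing the caller's frame pointer and return address into the store's `VMRuntimeLimits` (reached through the new `limits` pointer stored in the component's vmctx, see the first hunk below). The sketch below is an illustration only, not the actual code: the real stores are emitted by Cranelift in `crates/cranelift/src/component/compiler.rs`, the struct and function names here are simplified stand-ins, and an x86-64 frame layout (saved FP at `[FP]`, return PC at `[FP + 8]`) is assumed.

```rust
use std::cell::UnsafeCell;

// Simplified stand-in for the relevant fields of VMRuntimeLimits; the real
// struct has more fields (stack limit, fuel, epoch deadline, entry SP, ...).
#[repr(C)]
struct VMRuntimeLimits {
    last_wasm_exit_fp: UnsafeCell<usize>,
    last_wasm_exit_pc: UnsafeCell<usize>,
}

/// Conceptual equivalent of what the generated component-to-host trampoline
/// does just before calling the host function (hypothetical helper name).
unsafe fn save_exit_wasm_fp_and_pc(limits: *const VMRuntimeLimits, trampoline_fp: usize) {
    // The slot at offset 0 from the trampoline's FP holds the Wasm caller's
    // FP; this is why the backtrace code below asserts that
    // NEXT_OLDER_FP_FROM_FP_OFFSET is zero on every supported architecture.
    *(*limits).last_wasm_exit_fp.get() = *(trampoline_fp as *const usize);
    // The slot just above it holds the return address back into Wasm code,
    // i.e. the last Wasm PC (x86-64 frame layout assumed here).
    *(*limits).last_wasm_exit_pc.get() = *(trampoline_fp as *const usize).add(1);
}
```

With the exit FP and PC recorded, the backtrace capture code can resume its frame-pointer walk at the point where Wasm called out through a component trampoline, which is the gap that #4535 describes.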


@@ -367,6 +367,8 @@ impl ComponentInstance {
unsafe fn initialize_vmctx(&mut self, store: *mut dyn Store) {
*self.vmctx_plus_offset(self.offsets.magic()) = VMCOMPONENT_MAGIC;
*self.vmctx_plus_offset(self.offsets.store()) = store;
+*self.vmctx_plus_offset(self.offsets.limits()) = (*store).vmruntime_limits();
for i in 0..self.offsets.num_runtime_component_instances {
let i = RuntimeComponentInstanceIndex::from_u32(i);
let mut def = VMGlobalDefinition::new();


@@ -35,7 +35,7 @@ asm_func!(
#[cfg(test)]
mod host_to_wasm_trampoline_offsets_tests {
-use wasmtime_environ::{Module, VMOffsets};
+use wasmtime_environ::{Module, PtrSize, VMOffsets};
#[test]
fn test() {
@@ -43,7 +43,7 @@ mod host_to_wasm_trampoline_offsets_tests {
let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
assert_eq!(8, offsets.vmctx_runtime_limits());
-assert_eq!(40, offsets.vmruntime_limits_last_wasm_entry_sp());
+assert_eq!(40, offsets.ptr.vmruntime_limits_last_wasm_entry_sp());
assert_eq!(16, offsets.vmctx_callee());
assert_eq!(0x65726f63, u32::from_le_bytes(*b"core"));
}
@@ -79,7 +79,7 @@ asm_func!(
mod wasm_to_host_trampoline_offsets_tests {
use crate::VMHostFuncContext;
use memoffset::offset_of;
-use wasmtime_environ::{Module, VMOffsets};
+use wasmtime_environ::{Module, PtrSize, VMOffsets};
#[test]
fn test() {
@@ -87,8 +87,8 @@ mod wasm_to_host_trampoline_offsets_tests {
let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
assert_eq!(8, offsets.vmctx_runtime_limits());
-assert_eq!(24, offsets.vmruntime_limits_last_wasm_exit_fp());
-assert_eq!(32, offsets.vmruntime_limits_last_wasm_exit_pc());
+assert_eq!(24, offsets.ptr.vmruntime_limits_last_wasm_exit_fp());
+assert_eq!(32, offsets.ptr.vmruntime_limits_last_wasm_exit_pc());
assert_eq!(8, offset_of!(VMHostFuncContext, host_func));
}
}


@@ -3,7 +3,7 @@
#[cfg(test)]
mod host_to_wasm_trampoline_offsets_tests {
-use wasmtime_environ::{Module, VMOffsets};
+use wasmtime_environ::{Module, PtrSize, VMOffsets};
#[test]
fn test() {
@@ -11,7 +11,7 @@ mod host_to_wasm_trampoline_offsets_tests {
let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
assert_eq!(8, offsets.vmctx_runtime_limits());
-assert_eq!(40, offsets.vmruntime_limits_last_wasm_entry_sp());
+assert_eq!(40, offsets.ptr.vmruntime_limits_last_wasm_entry_sp());
assert_eq!(16, offsets.vmctx_callee());
assert_eq!(0x65726f63, u32::from_le_bytes(*b"core"));
}
@@ -24,7 +24,7 @@ mod host_to_wasm_trampoline_offsets_tests {
mod wasm_to_host_trampoline_offsets_tests {
use crate::VMHostFuncContext;
use memoffset::offset_of;
-use wasmtime_environ::{Module, VMOffsets};
+use wasmtime_environ::{Module, PtrSize, VMOffsets};
#[test]
fn test() {
@@ -32,8 +32,8 @@ mod wasm_to_host_trampoline_offsets_tests {
let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
assert_eq!(8, offsets.vmctx_runtime_limits());
-assert_eq!(24, offsets.vmruntime_limits_last_wasm_exit_fp());
-assert_eq!(32, offsets.vmruntime_limits_last_wasm_exit_pc());
+assert_eq!(24, offsets.ptr.vmruntime_limits_last_wasm_exit_fp());
+assert_eq!(32, offsets.ptr.vmruntime_limits_last_wasm_exit_pc());
assert_eq!(8, offset_of!(VMHostFuncContext, host_func));
}
}


@@ -47,7 +47,7 @@ asm_func!(
#[cfg(test)]
mod host_to_wasm_trampoline_offsets_tests {
-use wasmtime_environ::{Module, VMOffsets};
+use wasmtime_environ::{Module, PtrSize, VMOffsets};
#[test]
fn test() {
@@ -55,7 +55,7 @@ mod host_to_wasm_trampoline_offsets_tests {
let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
assert_eq!(8, offsets.vmctx_runtime_limits());
-assert_eq!(40, offsets.vmruntime_limits_last_wasm_entry_sp());
+assert_eq!(40, offsets.ptr.vmruntime_limits_last_wasm_entry_sp());
assert_eq!(16, offsets.vmctx_callee());
assert_eq!(0x65726f63, u32::from_le_bytes(*b"core"));
}
@@ -92,7 +92,7 @@ asm_func!(
mod wasm_to_host_trampoline_offsets_tests {
use crate::VMHostFuncContext;
use memoffset::offset_of;
-use wasmtime_environ::{Module, VMOffsets};
+use wasmtime_environ::{Module, PtrSize, VMOffsets};
#[test]
fn test() {
@@ -100,8 +100,8 @@ mod wasm_to_host_trampoline_offsets_tests {
let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
assert_eq!(8, offsets.vmctx_runtime_limits());
-assert_eq!(24, offsets.vmruntime_limits_last_wasm_exit_fp());
-assert_eq!(32, offsets.vmruntime_limits_last_wasm_exit_pc());
+assert_eq!(24, offsets.ptr.vmruntime_limits_last_wasm_exit_fp());
+assert_eq!(32, offsets.ptr.vmruntime_limits_last_wasm_exit_pc());
assert_eq!(8, offset_of!(VMHostFuncContext, host_func));
}
}


@@ -114,6 +114,7 @@ impl Backtrace {
trap_pc_and_fp: Option<(usize, usize)>,
mut f: impl FnMut(Frame) -> ControlFlow<()>,
) {
log::trace!("====== Capturing Backtrace ======");
let (last_wasm_exit_pc, last_wasm_exit_fp) = match trap_pc_and_fp {
// If we exited Wasm by catching a trap, then the Wasm-to-host
// trampoline did not get a chance to save the last Wasm PC and FP,
@@ -137,6 +138,7 @@ impl Backtrace {
*(*state.limits).last_wasm_entry_sp.get(),
&mut f,
) {
log::trace!("====== Done Capturing Backtrace ======");
return;
}
@@ -151,6 +153,7 @@ impl Backtrace {
debug_assert_eq!(state.old_last_wasm_exit_pc, 0);
debug_assert_eq!(state.old_last_wasm_exit_fp, 0);
debug_assert_eq!(state.old_last_wasm_entry_sp, 0);
log::trace!("====== Done Capturing Backtrace ======");
return;
}
@@ -160,9 +163,12 @@ impl Backtrace {
state.old_last_wasm_entry_sp,
&mut f,
) {
log::trace!("====== Done Capturing Backtrace ======");
return;
}
}
unreachable!()
}
/// Walk through a contiguous sequence of Wasm frames starting with the
@@ -245,7 +251,13 @@ impl Backtrace {
pc = arch::get_next_older_pc_from_fp(fp);
-let next_older_fp = arch::get_next_older_fp_from_fp(fp);
+// We rely on this offset being zero for all supported architectures
+// in `crates/cranelift/src/component/compiler.rs` when we set the
+// Wasm exit FP. If this ever changes, we will need to update that
+// code as well!
+assert_eq!(arch::NEXT_OLDER_FP_FROM_FP_OFFSET, 0);
+let next_older_fp = *(fp as *mut usize).add(arch::NEXT_OLDER_FP_FROM_FP_OFFSET);
// Because the stack always grows down, the older FP must be greater
// than the current FP.
assert!(next_older_fp > fp, "{next_older_fp:#x} > {fp:#x}");
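
For orientation, the walk this hunk belongs to boils down to the loop sketched here. This is a simplification, not the walker itself: the real code uses the per-architecture helpers (`get_next_older_pc_from_fp`, `reached_entry_sp`) shown in the next few files and hands each frame to a `ControlFlow`-returning callback; the PC slot offset and the `fp >= first_wasm_sp` end check below assume an x86-64 frame record.

```rust
// Minimal sketch of a frame-pointer chain walk under the invariant asserted
// above: the saved older FP lives at offset 0 from the current FP.
unsafe fn walk_wasm_frames(
    mut pc: usize,
    mut fp: usize,
    first_wasm_sp: usize, // SP recorded by the host-to-wasm trampoline on entry
    mut visit: impl FnMut(usize, usize),
) {
    loop {
        // Stop once the FP chain has climbed back up to the stack pointer
        // recorded when the host first entered Wasm; frames above that belong
        // to the host, not Wasm.
        if fp >= first_wasm_sp {
            return;
        }

        visit(pc, fp);

        // Return address and saved FP both sit in the current frame record.
        pc = *(fp as *const usize).add(1); // x86-64 layout assumed
        let next_fp = *(fp as *const usize); // offset 0, per the assertion above
        assert!(next_fp > fp, "the stack grows down, so older frames sit higher");
        fp = next_fp;
    }
}
```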


@@ -30,9 +30,9 @@ pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {
pc
}
-pub unsafe fn get_next_older_fp_from_fp(fp: usize) -> usize {
-*(fp as *mut usize)
-}
+// And the current frame pointer points to the next older frame pointer.
+pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;
pub fn reached_entry_sp(fp: usize, first_wasm_sp: usize) -> bool {
// Calls in aarch64 push two i64s (old FP and return PC) so our entry SP is


@@ -5,11 +5,9 @@ pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {
*(fp as *mut usize).offset(14)
}
-pub unsafe fn get_next_older_fp_from_fp(fp: usize) -> usize {
-// The next older "FP" (backchain pointer) was saved in the slot pointed to
-// by the current "FP".
-*(fp as *mut usize)
-}
+// The next older "FP" (backchain pointer) was saved in the slot pointed to
+// by the current "FP".
+pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;
pub fn reached_entry_sp(fp: usize, first_wasm_sp: usize) -> bool {
// The "FP" (backchain pointer) holds the value of the stack pointer at


@@ -4,10 +4,8 @@ pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {
*(fp as *mut usize).offset(1)
}
-pub unsafe fn get_next_older_fp_from_fp(fp: usize) -> usize {
-// And the current frame pointer points to the next older frame pointer.
-*(fp as *mut usize)
-}
+// And the current frame pointer points to the next older frame pointer.
+pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;
pub fn reached_entry_sp(fp: usize, first_wasm_sp: usize) -> bool {
// When the FP is just below the SP (because we are in a function prologue


@@ -791,7 +791,7 @@ mod test_vmruntime_limits {
use super::VMRuntimeLimits;
use memoffset::offset_of;
use std::mem::size_of;
-use wasmtime_environ::{Module, VMOffsets};
+use wasmtime_environ::{Module, PtrSize, VMOffsets};
#[test]
fn field_offsets() {
@@ -799,27 +799,27 @@ mod test_vmruntime_limits {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
offset_of!(VMRuntimeLimits, stack_limit),
-usize::from(offsets.vmruntime_limits_stack_limit())
+usize::from(offsets.ptr.vmruntime_limits_stack_limit())
);
assert_eq!(
offset_of!(VMRuntimeLimits, fuel_consumed),
-usize::from(offsets.vmruntime_limits_fuel_consumed())
+usize::from(offsets.ptr.vmruntime_limits_fuel_consumed())
);
assert_eq!(
offset_of!(VMRuntimeLimits, epoch_deadline),
-usize::from(offsets.vmruntime_limits_epoch_deadline())
+usize::from(offsets.ptr.vmruntime_limits_epoch_deadline())
);
assert_eq!(
offset_of!(VMRuntimeLimits, last_wasm_exit_fp),
-usize::from(offsets.vmruntime_limits_last_wasm_exit_fp())
+usize::from(offsets.ptr.vmruntime_limits_last_wasm_exit_fp())
);
assert_eq!(
offset_of!(VMRuntimeLimits, last_wasm_exit_pc),
-usize::from(offsets.vmruntime_limits_last_wasm_exit_pc())
+usize::from(offsets.ptr.vmruntime_limits_last_wasm_exit_pc())
);
assert_eq!(
offset_of!(VMRuntimeLimits, last_wasm_entry_sp),
-usize::from(offsets.vmruntime_limits_last_wasm_entry_sp())
+usize::from(offsets.ptr.vmruntime_limits_last_wasm_entry_sp())
);
}
}
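
As a reading aid, the offsets this test pins down correspond to a pointer-width `#[repr(C)]` layout along the lines sketched below. This is not the real definition (which this test imports via `use super::VMRuntimeLimits` and which uses interior-mutable fields accessed with `.get()`, as seen in the backtrace hunk above); the struct name is invented for the sketch, field types are simplified, and 8-byte pointers are assumed. The 24/32/40 values match the trampoline offset tests earlier in this diff.

```rust
// Sketch only: the layout implied by the assertions above.
#[repr(C)]
struct VMRuntimeLimitsLayout {
    stack_limit: usize,        // offset 0
    fuel_consumed: i64,        // offset 8
    epoch_deadline: u64,       // offset 16
    last_wasm_exit_fp: usize,  // offset 24, written by wasm-to-host trampolines
    last_wasm_exit_pc: usize,  // offset 32, written by wasm-to-host trampolines
    last_wasm_entry_sp: usize, // offset 40, written by host-to-wasm trampolines
}
```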