Save exit Wasm FP and PC in component-to-host trampolines (#4601)
* Wasmtime: Add a pointer to `VMRuntimeLimits` in component contexts
* Save exit Wasm FP and PC in component-to-host trampolines

  Fixes #4535

* Add a comment about why we dereference the trampoline's FP
* Update some tests to use the new `vmruntime_limits_*` methods
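In outline, the change makes each component-to-host trampoline record where execution left Wasm, so the runtime can later start a stack walk from that point. A minimal sketch of the idea in plain Rust, with illustrative names (the real implementation emits Cranelift IR in `compile_lowered_trampoline`, shown in the diff below):

    // Illustrative sketch only; not Wasmtime's actual definitions.
    struct VMRuntimeLimits {
        last_wasm_exit_fp: usize,
        last_wasm_exit_pc: usize,
        // ... other fields elided ...
    }

    /// Conceptually what the generated trampoline does on entry, before
    /// calling the host function.
    ///
    /// Dereferencing the trampoline's own FP yields the calling Wasm
    /// function's FP, because all supported targets save the old FP at
    /// offset 0 from the new FP.
    unsafe fn record_exit_state(
        limits: *mut VMRuntimeLimits,
        trampoline_fp: *const usize,
        return_address: usize, // the PC in the Wasm caller
    ) {
        (*limits).last_wasm_exit_fp = *trampoline_fp;
        (*limits).last_wasm_exit_pc = return_address;
    }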
@@ -426,6 +426,12 @@ impl From<i32> for Offset32 {
     }
 }
 
+impl From<u8> for Offset32 {
+    fn from(val: u8) -> Offset32 {
+        Self(val.into())
+    }
+}
+
 impl Display for Offset32 {
     fn fmt(&self, f: &mut Formatter) -> fmt::Result {
         // 0 displays as an empty offset.

@@ -30,8 +30,8 @@ use std::mem;
 use std::sync::Mutex;
 use wasmtime_environ::{
     AddressMapSection, CompileError, FilePos, FlagValue, FunctionBodyData, FunctionInfo,
-    InstructionAddressMap, Module, ModuleTranslation, ModuleTypes, StackMapInformation, Trampoline,
-    TrapCode, TrapEncodingBuilder, TrapInformation, Tunables, VMOffsets,
+    InstructionAddressMap, Module, ModuleTranslation, ModuleTypes, PtrSize, StackMapInformation,
+    Trampoline, TrapCode, TrapEncodingBuilder, TrapInformation, Tunables, VMOffsets,
 };
 
 #[cfg(feature = "component-model")]

@@ -196,7 +196,7 @@ impl wasmtime_environ::Compiler for Compiler {
         });
         let stack_limit = context.func.create_global_value(ir::GlobalValueData::Load {
             base: interrupts_ptr,
-            offset: i32::try_from(func_env.offsets.vmruntime_limits_stack_limit())
+            offset: i32::try_from(func_env.offsets.ptr.vmruntime_limits_stack_limit())
                 .unwrap()
                 .into(),
             global_type: isa.pointer_type(),

@@ -13,7 +13,7 @@ use wasmtime_environ::component::{
     AlwaysTrapInfo, CanonicalOptions, Component, ComponentCompiler, ComponentTypes, FunctionInfo,
     LowerImport, LoweredIndex, RuntimeAlwaysTrapIndex, VMComponentOffsets,
 };
-use wasmtime_environ::{PrimaryMap, SignatureIndex, Trampoline, TrapCode, WasmFuncType};
+use wasmtime_environ::{PrimaryMap, PtrSize, SignatureIndex, Trampoline, TrapCode, WasmFuncType};
 
 impl ComponentCompiler for Compiler {
     fn compile_lowered_trampoline(

@@ -46,6 +46,44 @@ impl ComponentCompiler for Compiler {
         self.wasm_to_host_spill_args(ty, &mut builder, block0);
         let vmctx = builder.func.dfg.block_params(block0)[0];
 
+        // Save the exit FP and return address for stack walking purposes.
+        //
+        // First we need to get the `VMRuntimeLimits`.
+        let limits = builder.ins().load(
+            pointer_type,
+            MemFlags::trusted(),
+            vmctx,
+            i32::try_from(offsets.limits()).unwrap(),
+        );
+        // Then save the exit Wasm FP to the limits. We dereference the current
+        // FP to get the previous FP because the current FP is the trampoline's
+        // FP, and we want the Wasm function's FP, which is the caller of this
+        // trampoline.
+        let trampoline_fp = builder.ins().get_frame_pointer(pointer_type);
+        let wasm_fp = builder.ins().load(
+            pointer_type,
+            MemFlags::trusted(),
+            trampoline_fp,
+            // The FP always points to the next older FP for all supported
+            // targets. See assertion in
+            // `crates/runtime/src/traphandlers/backtrace.rs`.
+            0,
+        );
+        builder.ins().store(
+            MemFlags::trusted(),
+            wasm_fp,
+            limits,
+            offsets.ptr.vmruntime_limits_last_wasm_exit_fp(),
+        );
+        // Finally save the Wasm return address to the limits.
+        let wasm_pc = builder.ins().get_return_address(pointer_type);
+        builder.ins().store(
+            MemFlags::trusted(),
+            wasm_pc,
+            limits,
+            offsets.ptr.vmruntime_limits_last_wasm_exit_pc(),
+        );
+
         // Below this will incrementally build both the signature of the host
         // function we're calling as well as the list of arguments since the
         // list is somewhat long.

@@ -528,7 +528,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
     ) -> (ir::Value, ir::immediates::Offset32) {
         (
             builder.use_var(self.vmruntime_limits_ptr),
-            i32::from(self.offsets.vmruntime_limits_fuel_consumed()).into(),
+            i32::from(self.offsets.ptr.vmruntime_limits_fuel_consumed()).into(),
         )
     }
 
@@ -628,12 +628,15 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
 
     fn epoch_load_deadline_into_var(&mut self, builder: &mut FunctionBuilder<'_>) {
         let interrupts = builder.use_var(self.vmruntime_limits_ptr);
-        let deadline = builder.ins().load(
-            ir::types::I64,
-            ir::MemFlags::trusted(),
-            interrupts,
-            ir::immediates::Offset32::new(self.offsets.vmruntime_limits_epoch_deadline() as i32),
-        );
+        let deadline =
+            builder.ins().load(
+                ir::types::I64,
+                ir::MemFlags::trusted(),
+                interrupts,
+                ir::immediates::Offset32::new(
+                    self.offsets.ptr.vmruntime_limits_epoch_deadline() as i32
+                ),
+            );
         builder.def_var(self.epoch_deadline_var, deadline);
     }
 
@@ -3,6 +3,7 @@
 // struct VMComponentContext {
 //      magic: u32,
 //      store: *mut dyn Store,
+//      limits: *const VMRuntimeLimits,
 //      flags: [VMGlobalDefinition; component.num_runtime_component_instances],
 //      lowering_anyfuncs: [VMCallerCheckedAnyfunc; component.num_lowerings],
 //      always_trap_anyfuncs: [VMCallerCheckedAnyfunc; component.num_always_trap],

@@ -60,6 +61,7 @@ pub struct VMComponentOffsets<P> {
     // precalculated offsets of various member fields
     magic: u32,
     store: u32,
+    limits: u32,
    flags: u32,
     lowering_anyfuncs: u32,
     always_trap_anyfuncs: u32,

@@ -93,6 +95,7 @@ impl<P: PtrSize> VMComponentOffsets<P> {
             num_always_trap: component.num_always_trap,
             magic: 0,
             store: 0,
+            limits: 0,
             flags: 0,
             lowering_anyfuncs: 0,
             always_trap_anyfuncs: 0,

@@ -131,6 +134,7 @@ impl<P: PtrSize> VMComponentOffsets<P> {
         size(magic) = 4u32,
         align(u32::from(ret.ptr.size())),
         size(store) = cmul(2, ret.ptr.size()),
+        size(limits) = ret.ptr.size(),
         align(16),
         size(flags) = cmul(ret.num_runtime_component_instances, ret.ptr.size_of_vmglobal_definition()),
         align(u32::from(ret.ptr.size())),

@@ -177,6 +181,12 @@ impl<P: PtrSize> VMComponentOffsets<P> {
         self.store
     }
 
+    /// The offset of the `limits` field.
+    #[inline]
+    pub fn limits(&self) -> u32 {
+        self.limits
+    }
+
     /// The offset of the `lowering_anyfuncs` field.
     #[inline]
     pub fn lowering_anyfuncs(&self) -> u32 {

@@ -135,6 +135,39 @@ pub trait PtrSize {
     fn size_of_vmglobal_definition(&self) -> u8 {
         16
     }
+
+    /// Return the offset of the `stack_limit` field of `VMRuntimeLimits`
+    #[inline]
+    fn vmruntime_limits_stack_limit(&self) -> u8 {
+        0
+    }
+
+    /// Return the offset of the `fuel_consumed` field of `VMRuntimeLimits`
+    #[inline]
+    fn vmruntime_limits_fuel_consumed(&self) -> u8 {
+        self.size()
+    }
+
+    /// Return the offset of the `epoch_deadline` field of `VMRuntimeLimits`
+    #[inline]
+    fn vmruntime_limits_epoch_deadline(&self) -> u8 {
+        self.vmruntime_limits_fuel_consumed() + 8 // `stack_limit` is a pointer; `fuel_consumed` is an `i64`
+    }
+
+    /// Return the offset of the `last_wasm_exit_fp` field of `VMRuntimeLimits`.
+    fn vmruntime_limits_last_wasm_exit_fp(&self) -> u8 {
+        self.vmruntime_limits_epoch_deadline() + 8
+    }
+
+    /// Return the offset of the `last_wasm_exit_pc` field of `VMRuntimeLimits`.
+    fn vmruntime_limits_last_wasm_exit_pc(&self) -> u8 {
+        self.vmruntime_limits_last_wasm_exit_fp() + self.size()
+    }
+
+    /// Return the offset of the `last_wasm_entry_sp` field of `VMRuntimeLimits`.
+    fn vmruntime_limits_last_wasm_entry_sp(&self) -> u8 {
+        self.vmruntime_limits_last_wasm_exit_pc() + self.size()
+    }
 }
 
 /// Type representing the size of a pointer for the current compilation host

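For reference, the offset arithmetic in these default methods implies the following `VMRuntimeLimits` field layout on a 64-bit host. The `#[repr(C)]` struct below is an illustrative reconstruction from that arithmetic, not the actual Wasmtime definition (which wraps the mutable fields in cells); the 24/32/40 values are exactly what the trampoline offset tests further down assert:

    // Layout sketch derived from the `vmruntime_limits_*` methods above,
    // assuming an 8-byte pointer size.
    #[repr(C)]
    struct VMRuntimeLimitsLayout {
        stack_limit: usize,        // offset 0
        fuel_consumed: i64,        // offset 8  (= pointer size)
        epoch_deadline: u64,       // offset 16 (= 8 + 8)
        last_wasm_exit_fp: usize,  // offset 24 (= 16 + 8)
        last_wasm_exit_pc: usize,  // offset 32 (= 24 + pointer size)
        last_wasm_entry_sp: usize, // offset 40 (= 32 + pointer size)
    }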
@@ -545,42 +578,6 @@ impl<P: PtrSize> VMOffsets<P> {
     }
 }
 
-/// Offsets for `VMRuntimeLimits`.
-impl<P: PtrSize> VMOffsets<P> {
-    /// Return the offset of the `stack_limit` field of `VMRuntimeLimits`
-    #[inline]
-    pub fn vmruntime_limits_stack_limit(&self) -> u8 {
-        0
-    }
-
-    /// Return the offset of the `fuel_consumed` field of `VMRuntimeLimits`
-    #[inline]
-    pub fn vmruntime_limits_fuel_consumed(&self) -> u8 {
-        self.pointer_size()
-    }
-
-    /// Return the offset of the `epoch_deadline` field of `VMRuntimeLimits`
-    #[inline]
-    pub fn vmruntime_limits_epoch_deadline(&self) -> u8 {
-        self.vmruntime_limits_fuel_consumed() + 8 // `stack_limit` is a pointer; `fuel_consumed` is an `i64`
-    }
-
-    /// Return the offset of the `last_wasm_exit_fp` field of `VMRuntimeLimits`.
-    pub fn vmruntime_limits_last_wasm_exit_fp(&self) -> u8 {
-        self.vmruntime_limits_epoch_deadline() + 8
-    }
-
-    /// Return the offset of the `last_wasm_exit_pc` field of `VMRuntimeLimits`.
-    pub fn vmruntime_limits_last_wasm_exit_pc(&self) -> u8 {
-        self.vmruntime_limits_last_wasm_exit_fp() + self.pointer_size()
-    }
-
-    /// Return the offset of the `last_enty_sp` field of `VMRuntimeLimits`.
-    pub fn vmruntime_limits_last_wasm_entry_sp(&self) -> u8 {
-        self.vmruntime_limits_last_wasm_exit_pc() + self.pointer_size()
-    }
-}
-
 /// Offsets for `VMContext`.
 impl<P: PtrSize> VMOffsets<P> {
     /// Return the offset to the `magic` value in this `VMContext`.

@@ -367,6 +367,8 @@ impl ComponentInstance {
     unsafe fn initialize_vmctx(&mut self, store: *mut dyn Store) {
         *self.vmctx_plus_offset(self.offsets.magic()) = VMCOMPONENT_MAGIC;
         *self.vmctx_plus_offset(self.offsets.store()) = store;
+        *self.vmctx_plus_offset(self.offsets.limits()) = (*store).vmruntime_limits();
+
         for i in 0..self.offsets.num_runtime_component_instances {
             let i = RuntimeComponentInstanceIndex::from_u32(i);
             let mut def = VMGlobalDefinition::new();

@@ -35,7 +35,7 @@ asm_func!(
 
 #[cfg(test)]
 mod host_to_wasm_trampoline_offsets_tests {
-    use wasmtime_environ::{Module, VMOffsets};
+    use wasmtime_environ::{Module, PtrSize, VMOffsets};
 
     #[test]
     fn test() {
@@ -43,7 +43,7 @@ mod host_to_wasm_trampoline_offsets_tests {
         let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
 
         assert_eq!(8, offsets.vmctx_runtime_limits());
-        assert_eq!(40, offsets.vmruntime_limits_last_wasm_entry_sp());
+        assert_eq!(40, offsets.ptr.vmruntime_limits_last_wasm_entry_sp());
         assert_eq!(16, offsets.vmctx_callee());
         assert_eq!(0x65726f63, u32::from_le_bytes(*b"core"));
     }

@@ -79,7 +79,7 @@ asm_func!(
 mod wasm_to_host_trampoline_offsets_tests {
     use crate::VMHostFuncContext;
     use memoffset::offset_of;
-    use wasmtime_environ::{Module, VMOffsets};
+    use wasmtime_environ::{Module, PtrSize, VMOffsets};
 
     #[test]
     fn test() {
@@ -87,8 +87,8 @@ mod wasm_to_host_trampoline_offsets_tests {
         let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
 
         assert_eq!(8, offsets.vmctx_runtime_limits());
-        assert_eq!(24, offsets.vmruntime_limits_last_wasm_exit_fp());
-        assert_eq!(32, offsets.vmruntime_limits_last_wasm_exit_pc());
+        assert_eq!(24, offsets.ptr.vmruntime_limits_last_wasm_exit_fp());
+        assert_eq!(32, offsets.ptr.vmruntime_limits_last_wasm_exit_pc());
         assert_eq!(8, offset_of!(VMHostFuncContext, host_func));
     }
 }

@@ -3,7 +3,7 @@
 
 #[cfg(test)]
 mod host_to_wasm_trampoline_offsets_tests {
-    use wasmtime_environ::{Module, VMOffsets};
+    use wasmtime_environ::{Module, PtrSize, VMOffsets};
 
     #[test]
     fn test() {
@@ -11,7 +11,7 @@ mod host_to_wasm_trampoline_offsets_tests {
         let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
 
         assert_eq!(8, offsets.vmctx_runtime_limits());
-        assert_eq!(40, offsets.vmruntime_limits_last_wasm_entry_sp());
+        assert_eq!(40, offsets.ptr.vmruntime_limits_last_wasm_entry_sp());
         assert_eq!(16, offsets.vmctx_callee());
         assert_eq!(0x65726f63, u32::from_le_bytes(*b"core"));
     }

@@ -24,7 +24,7 @@ mod host_to_wasm_trampoline_offsets_tests {
 mod wasm_to_host_trampoline_offsets_tests {
     use crate::VMHostFuncContext;
     use memoffset::offset_of;
-    use wasmtime_environ::{Module, VMOffsets};
+    use wasmtime_environ::{Module, PtrSize, VMOffsets};
 
     #[test]
     fn test() {
@@ -32,8 +32,8 @@ mod wasm_to_host_trampoline_offsets_tests {
         let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
 
         assert_eq!(8, offsets.vmctx_runtime_limits());
-        assert_eq!(24, offsets.vmruntime_limits_last_wasm_exit_fp());
-        assert_eq!(32, offsets.vmruntime_limits_last_wasm_exit_pc());
+        assert_eq!(24, offsets.ptr.vmruntime_limits_last_wasm_exit_fp());
+        assert_eq!(32, offsets.ptr.vmruntime_limits_last_wasm_exit_pc());
         assert_eq!(8, offset_of!(VMHostFuncContext, host_func));
     }
 }

@@ -47,7 +47,7 @@ asm_func!(
 
 #[cfg(test)]
 mod host_to_wasm_trampoline_offsets_tests {
-    use wasmtime_environ::{Module, VMOffsets};
+    use wasmtime_environ::{Module, PtrSize, VMOffsets};
 
     #[test]
     fn test() {
@@ -55,7 +55,7 @@ mod host_to_wasm_trampoline_offsets_tests {
         let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
 
         assert_eq!(8, offsets.vmctx_runtime_limits());
-        assert_eq!(40, offsets.vmruntime_limits_last_wasm_entry_sp());
+        assert_eq!(40, offsets.ptr.vmruntime_limits_last_wasm_entry_sp());
         assert_eq!(16, offsets.vmctx_callee());
         assert_eq!(0x65726f63, u32::from_le_bytes(*b"core"));
     }

@@ -92,7 +92,7 @@ asm_func!(
 mod wasm_to_host_trampoline_offsets_tests {
     use crate::VMHostFuncContext;
     use memoffset::offset_of;
-    use wasmtime_environ::{Module, VMOffsets};
+    use wasmtime_environ::{Module, PtrSize, VMOffsets};
 
     #[test]
     fn test() {
@@ -100,8 +100,8 @@ mod wasm_to_host_trampoline_offsets_tests {
         let offsets = VMOffsets::new(std::mem::size_of::<*mut u8>() as u8, &module);
 
         assert_eq!(8, offsets.vmctx_runtime_limits());
-        assert_eq!(24, offsets.vmruntime_limits_last_wasm_exit_fp());
-        assert_eq!(32, offsets.vmruntime_limits_last_wasm_exit_pc());
+        assert_eq!(24, offsets.ptr.vmruntime_limits_last_wasm_exit_fp());
+        assert_eq!(32, offsets.ptr.vmruntime_limits_last_wasm_exit_pc());
         assert_eq!(8, offset_of!(VMHostFuncContext, host_func));
     }
 }

@@ -114,6 +114,7 @@ impl Backtrace {
         trap_pc_and_fp: Option<(usize, usize)>,
         mut f: impl FnMut(Frame) -> ControlFlow<()>,
     ) {
+        log::trace!("====== Capturing Backtrace ======");
         let (last_wasm_exit_pc, last_wasm_exit_fp) = match trap_pc_and_fp {
             // If we exited Wasm by catching a trap, then the Wasm-to-host
             // trampoline did not get a chance to save the last Wasm PC and FP,

@@ -137,6 +138,7 @@ impl Backtrace {
             *(*state.limits).last_wasm_entry_sp.get(),
             &mut f,
         ) {
+            log::trace!("====== Done Capturing Backtrace ======");
             return;
         }
 
@@ -151,6 +153,7 @@ impl Backtrace {
             debug_assert_eq!(state.old_last_wasm_exit_pc, 0);
             debug_assert_eq!(state.old_last_wasm_exit_fp, 0);
             debug_assert_eq!(state.old_last_wasm_entry_sp, 0);
+            log::trace!("====== Done Capturing Backtrace ======");
             return;
         }
 
@@ -160,9 +163,12 @@ impl Backtrace {
                 state.old_last_wasm_entry_sp,
                 &mut f,
             ) {
+                log::trace!("====== Done Capturing Backtrace ======");
                 return;
             }
         }
+
+        unreachable!()
     }
 
     /// Walk through a contiguous sequence of Wasm frames starting with the

@@ -245,7 +251,13 @@ impl Backtrace {
 
             pc = arch::get_next_older_pc_from_fp(fp);
 
-            let next_older_fp = arch::get_next_older_fp_from_fp(fp);
+            // We rely on this offset being zero for all supported architectures
+            // in `crates/cranelift/src/component/compiler.rs` when we set the
+            // Wasm exit FP. If this ever changes, we will need to update that
+            // code as well!
+            assert_eq!(arch::NEXT_OLDER_FP_FROM_FP_OFFSET, 0);
+
+            let next_older_fp = *(fp as *mut usize).add(arch::NEXT_OLDER_FP_FROM_FP_OFFSET);
             // Because the stack always grows down, the older FP must be greater
             // than the current FP.
             assert!(next_older_fp > fp, "{next_older_fp:#x} > {fp:#x}");

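A condensed sketch of the frame-walk step above, as a hypothetical standalone function (the PC recovery matches the x86-64 `arch` module shown below, where the return address sits one word above the saved FP):

    /// One step of the stack walk: given the current FP, recover the next
    /// older (caller's) PC and FP. Sketch only; not the actual Wasmtime API.
    unsafe fn next_older_frame(fp: usize) -> (usize, usize) {
        // On x86-64 the return address is one word above the saved FP.
        let pc = *(fp as *const usize).offset(1);
        // The saved FP lives at offset 0 from the current FP on all
        // supported targets (`NEXT_OLDER_FP_FROM_FP_OFFSET == 0`).
        let older_fp = *(fp as *const usize);
        // The stack grows down, so older frames live at higher addresses.
        assert!(older_fp > fp);
        (pc, older_fp)
    }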
@@ -30,9 +30,9 @@ pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {
 
     pc
 }
-pub unsafe fn get_next_older_fp_from_fp(fp: usize) -> usize {
-    *(fp as *mut usize)
-}
+
+// And the current frame pointer points to the next older frame pointer.
+pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;
 
 pub fn reached_entry_sp(fp: usize, first_wasm_sp: usize) -> bool {
     // Calls in aarch64 push two i64s (old FP and return PC) so our entry SP is

@@ -5,11 +5,9 @@ pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {
     *(fp as *mut usize).offset(14)
 }
 
-pub unsafe fn get_next_older_fp_from_fp(fp: usize) -> usize {
-    // The next older "FP" (backchain pointer) was saved in the slot pointed to
-    // by the current "FP".
-    *(fp as *mut usize)
-}
+// The next older "FP" (backchain pointer) was saved in the slot pointed to
+// by the current "FP".
+pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;
 
 pub fn reached_entry_sp(fp: usize, first_wasm_sp: usize) -> bool {
     // The "FP" (backchain pointer) holds the value of the stack pointer at

@@ -4,10 +4,8 @@ pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {
     *(fp as *mut usize).offset(1)
 }
 
-pub unsafe fn get_next_older_fp_from_fp(fp: usize) -> usize {
-    // And the current frame pointer points to the next older frame pointer.
-    *(fp as *mut usize)
-}
+// And the current frame pointer points to the next older frame pointer.
+pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;
 
 pub fn reached_entry_sp(fp: usize, first_wasm_sp: usize) -> bool {
     // When the FP is just below the SP (because we are in a function prologue

@@ -791,7 +791,7 @@ mod test_vmruntime_limits {
     use super::VMRuntimeLimits;
     use memoffset::offset_of;
     use std::mem::size_of;
-    use wasmtime_environ::{Module, VMOffsets};
+    use wasmtime_environ::{Module, PtrSize, VMOffsets};
 
     #[test]
     fn field_offsets() {
@@ -799,27 +799,27 @@ mod test_vmruntime_limits {
         let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
         assert_eq!(
             offset_of!(VMRuntimeLimits, stack_limit),
-            usize::from(offsets.vmruntime_limits_stack_limit())
+            usize::from(offsets.ptr.vmruntime_limits_stack_limit())
         );
         assert_eq!(
             offset_of!(VMRuntimeLimits, fuel_consumed),
-            usize::from(offsets.vmruntime_limits_fuel_consumed())
+            usize::from(offsets.ptr.vmruntime_limits_fuel_consumed())
         );
         assert_eq!(
             offset_of!(VMRuntimeLimits, epoch_deadline),
-            usize::from(offsets.vmruntime_limits_epoch_deadline())
+            usize::from(offsets.ptr.vmruntime_limits_epoch_deadline())
         );
         assert_eq!(
             offset_of!(VMRuntimeLimits, last_wasm_exit_fp),
-            usize::from(offsets.vmruntime_limits_last_wasm_exit_fp())
+            usize::from(offsets.ptr.vmruntime_limits_last_wasm_exit_fp())
         );
         assert_eq!(
             offset_of!(VMRuntimeLimits, last_wasm_exit_pc),
-            usize::from(offsets.vmruntime_limits_last_wasm_exit_pc())
+            usize::from(offsets.ptr.vmruntime_limits_last_wasm_exit_pc())
         );
         assert_eq!(
             offset_of!(VMRuntimeLimits, last_wasm_entry_sp),
-            usize::from(offsets.vmruntime_limits_last_wasm_entry_sp())
+            usize::from(offsets.ptr.vmruntime_limits_last_wasm_entry_sp())
         );
     }
 }

@@ -222,61 +222,57 @@ fn attempt_to_leave_during_malloc() -> Result<()> {
     let component = Component::new(&engine, component)?;
     let mut store = Store::new(&engine, ());
 
-    // TODO(#4535): we need to fold the Wasm<--->host trampoline functionality into
-    // component trampolines. Until then, we panic when getting a backtrace here.
-    if false {
-        // Assert that during a host import if we return values to wasm that a trap
-        // happens if we try to leave the instance.
-        let trap = linker
-            .instantiate(&mut store, &component)?
-            .get_typed_func::<(), (), _>(&mut store, "run")?
-            .call(&mut store, ())
-            .unwrap_err()
-            .downcast::<Trap>()?;
-        assert!(
-            trap.to_string().contains("cannot leave component instance"),
-            "bad trap: {}",
-            trap,
-        );
+    // Assert that during a host import if we return values to wasm that a trap
+    // happens if we try to leave the instance.
+    let trap = linker
+        .instantiate(&mut store, &component)?
+        .get_typed_func::<(), (), _>(&mut store, "run")?
+        .call(&mut store, ())
+        .unwrap_err()
+        .downcast::<Trap>()?;
+    assert!(
+        trap.to_string().contains("cannot leave component instance"),
+        "bad trap: {}",
+        trap,
+    );
 
-        let trace = trap.trace().unwrap();
-        assert_eq!(trace.len(), 4);
+    let trace = trap.trace().unwrap();
+    assert_eq!(trace.len(), 4);
 
-        // This was our entry point...
-        assert_eq!(trace[3].module_name(), Some("m"));
-        assert_eq!(trace[3].func_name(), Some("run"));
+    // This was our entry point...
+    assert_eq!(trace[3].module_name(), Some("m"));
+    assert_eq!(trace[3].func_name(), Some("run"));
 
-        // ... which called an imported function which ends up being originally
-        // defined by the shim instance. The shim instance then does an indirect
-        // call through a table which goes to the `canon.lower`'d host function
-        assert_eq!(trace[2].module_name(), Some("host_shim"));
-        assert_eq!(trace[2].func_name(), Some("shim_ret_string"));
+    // ... which called an imported function which ends up being originally
+    // defined by the shim instance. The shim instance then does an indirect
+    // call through a table which goes to the `canon.lower`'d host function
+    assert_eq!(trace[2].module_name(), Some("host_shim"));
+    assert_eq!(trace[2].func_name(), Some("shim_ret_string"));
 
-        // ... and the lowered host function will call realloc to allocate space for
-        // the result
-        assert_eq!(trace[1].module_name(), Some("m"));
-        assert_eq!(trace[1].func_name(), Some("realloc"));
+    // ... and the lowered host function will call realloc to allocate space for
+    // the result
+    assert_eq!(trace[1].module_name(), Some("m"));
+    assert_eq!(trace[1].func_name(), Some("realloc"));
 
-        // ... but realloc calls the shim instance and tries to exit the
-        // component, triggering a dynamic trap
-        assert_eq!(trace[0].module_name(), Some("host_shim"));
-        assert_eq!(trace[0].func_name(), Some("shim_thunk"));
+    // ... but realloc calls the shim instance and tries to exit the
+    // component, triggering a dynamic trap
+    assert_eq!(trace[0].module_name(), Some("host_shim"));
+    assert_eq!(trace[0].func_name(), Some("shim_thunk"));
 
-        // In addition to the above trap also ensure that when we enter a wasm
-        // component if we try to leave while lowering then that's also a dynamic
-        // trap.
-        let trap = linker
-            .instantiate(&mut store, &component)?
-            .get_typed_func::<(&str,), (), _>(&mut store, "take-string")?
-            .call(&mut store, ("x",))
-            .unwrap_err()
-            .downcast::<Trap>()?;
-        assert!(
-            trap.to_string().contains("cannot leave component instance"),
-            "bad trap: {}",
-            trap,
-        );
-    }
+    // In addition to the above trap also ensure that when we enter a wasm
+    // component if we try to leave while lowering then that's also a dynamic
+    // trap.
+    let trap = linker
+        .instantiate(&mut store, &component)?
+        .get_typed_func::<(&str,), (), _>(&mut store, "take-string")?
+        .call(&mut store, ("x",))
+        .unwrap_err()
+        .downcast::<Trap>()?;
+    assert!(
+        trap.to_string().contains("cannot leave component instance"),
+        "bad trap: {}",
+        trap,
+    );
     Ok(())
 }

@@ -605,24 +601,21 @@ fn bad_import_alignment() -> Result<()> {
     let component = Component::new(&engine, component)?;
     let mut store = Store::new(&engine, ());
 
-    // TODO(#4535): we need to fold the Wasm<--->host trampoline functionality into
-    // component trampolines. Until then, we panic when getting a backtrace here.
-    if false {
-        let trap = linker
-            .instantiate(&mut store, &component)?
-            .get_typed_func::<(), (), _>(&mut store, "unaligned-retptr")?
-            .call(&mut store, ())
-            .unwrap_err()
-            .downcast::<Trap>()?;
-        assert!(trap.to_string().contains("pointer not aligned"), "{}", trap);
-        let trap = linker
-            .instantiate(&mut store, &component)?
-            .get_typed_func::<(), (), _>(&mut store, "unaligned-argptr")?
-            .call(&mut store, ())
-            .unwrap_err()
-            .downcast::<Trap>()?;
-        assert!(trap.to_string().contains("pointer not aligned"), "{}", trap);
-    }
+    let trap = linker
+        .instantiate(&mut store, &component)?
+        .get_typed_func::<(), (), _>(&mut store, "unaligned-retptr")?
+        .call(&mut store, ())
+        .unwrap_err()
+        .downcast::<Trap>()?;
+    assert!(trap.to_string().contains("pointer not aligned"), "{}", trap);
+    let trap = linker
+        .instantiate(&mut store, &component)?
+        .get_typed_func::<(), (), _>(&mut store, "unaligned-argptr")?
+        .call(&mut store, ())
+        .unwrap_err()
+        .downcast::<Trap>()?;
+    assert!(trap.to_string().contains("pointer not aligned"), "{}", trap);
 
     Ok(())
 }