Backtrace WebAssembly function JIT frames (#759)

* Create backtrace

* Extend unwind information with FDE data.

* Expose backtrace via API/Trap

* wasmtime_call returns a non-string error type

* Return Arc<JITFrameTag>

* rename frame -> function

* Fix windows crashes and unwrap UNWIND_HISTORY_TABLE

* mmaps -> entries

* pass a backtrace in ActionOutcome

* add test_trap_stack_overflow

* Update cranelift version.
This commit is contained in:
Yury Delendik
2020-01-15 13:48:24 -06:00
committed by GitHub
parent 0848a7eaaa
commit 2a50701f0a
26 changed files with 803 additions and 149 deletions

View File

@@ -6,7 +6,10 @@ use std::cmp::max;
use std::{fmt, mem, ptr, slice};
use thiserror::Error;
use wasmtime_environ::ir;
use wasmtime_runtime::{wasmtime_call_trampoline, Export, InstanceHandle, VMInvokeArgument};
use wasmtime_runtime::{
wasmtime_call_trampoline, Backtrace, Export, InstanceHandle, TrapMessageAndStack,
VMInvokeArgument,
};
/// A runtime value.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
@@ -103,6 +106,8 @@ pub enum ActionOutcome {
Trapped {
/// The trap message.
message: String,
/// Backtrace.
trace: Backtrace,
},
}
@@ -191,7 +196,7 @@ pub fn invoke(
compiler.publish_compiled_code();
// Call the trampoline.
if let Err(message) = unsafe {
if let Err(TrapMessageAndStack(message, trace)) = unsafe {
instance.with_signals_on(|| {
wasmtime_call_trampoline(
callee_vmctx,
@@ -200,7 +205,7 @@ pub fn invoke(
)
})
} {
return Ok(ActionOutcome::Trapped { message });
return Ok(ActionOutcome::Trapped { message, trace });
}
// Load the return values out of `values_vec`.

View File

@@ -2,14 +2,45 @@
use crate::function_table::FunctionTable;
use region;
use std::mem::ManuallyDrop;
use std::{cmp, mem};
use wasmtime_environ::{Compilation, CompiledFunction};
use wasmtime_runtime::{Mmap, VMFunctionBody};
/// Pairs one executable memory mapping with the function table describing
/// the unwind information that lives inside that mapping.
struct CodeMemoryEntry {
    // Both fields are `ManuallyDrop` so `Drop for CodeMemoryEntry` can
    // control destruction order: the table must be torn down before the
    // mmap whose memory it references.
    mmap: ManuallyDrop<Mmap>,
    table: ManuallyDrop<FunctionTable>,
}
impl CodeMemoryEntry {
    /// Creates an empty entry with no backing memory reserved yet.
    fn new() -> Self {
        Self {
            table: ManuallyDrop::new(FunctionTable::new()),
            mmap: ManuallyDrop::new(Mmap::new()),
        }
    }

    /// Creates an entry whose mapping holds at least `cap` bytes.
    ///
    /// Returns an error if the memory mapping cannot be created.
    fn with_capacity(cap: usize) -> Result<Self, String> {
        let mmap = Mmap::with_at_least(cap)?;
        Ok(Self {
            table: ManuallyDrop::new(FunctionTable::new()),
            mmap: ManuallyDrop::new(mmap),
        })
    }
}
impl Drop for CodeMemoryEntry {
    fn drop(&mut self) {
        // SAFETY: each `ManuallyDrop` field is dropped exactly once, here.
        unsafe {
            // Table needs to be freed before mmap: the table's own Drop
            // deregisters unwind entries that point into the mmap's memory,
            // so the mapping must still be alive at that point.
            ManuallyDrop::drop(&mut self.table);
            ManuallyDrop::drop(&mut self.mmap);
        }
    }
}
/// Memory manager for executable code.
pub struct CodeMemory {
current: (Mmap, FunctionTable),
mmaps: Vec<(Mmap, FunctionTable)>,
current: CodeMemoryEntry,
entries: Vec<CodeMemoryEntry>,
position: usize,
published: usize,
}
@@ -23,8 +54,8 @@ impl CodeMemory {
/// Create a new `CodeMemory` instance.
pub fn new() -> Self {
Self {
current: (Mmap::new(), FunctionTable::new()),
mmaps: Vec::new(),
current: CodeMemoryEntry::new(),
entries: Vec::new(),
position: 0,
published: 0,
}
@@ -81,19 +112,20 @@ impl CodeMemory {
self.push_current(0)
.expect("failed to push current memory map");
for (m, t) in &mut self.mmaps[self.published..] {
for CodeMemoryEntry { mmap: m, table: t } in &mut self.entries[self.published..] {
// Remove write access to the pages due to the relocation fixups.
t.publish(m.as_ptr() as u64)
.expect("failed to publish function table");
if !m.is_empty() {
unsafe {
region::protect(m.as_mut_ptr(), m.len(), region::Protection::ReadExecute)
}
.expect("unable to make memory readonly and executable");
}
t.publish(m.as_ptr() as u64)
.expect("failed to publish function table");
}
self.published = self.mmaps.len();
self.published = self.entries.len();
}
/// Allocate `size` bytes of memory which can be made executable later by
@@ -103,7 +135,7 @@ impl CodeMemory {
///
/// TODO: Add an alignment flag.
fn allocate(&mut self, size: usize) -> Result<(&mut [u8], &mut FunctionTable), String> {
if self.current.0.len() - self.position < size {
if self.current.mmap.len() - self.position < size {
self.push_current(cmp::max(0x10000, size))?;
}
@@ -111,8 +143,8 @@ impl CodeMemory {
self.position += size;
Ok((
&mut self.current.0.as_mut_slice()[old_position..self.position],
&mut self.current.1,
&mut self.current.mmap.as_mut_slice()[old_position..self.position],
&mut self.current.table,
))
}
@@ -153,12 +185,19 @@ impl CodeMemory {
// Keep unwind information 32-bit aligned (round up to the nearest 4 byte boundary)
let padding = ((func.body.len() + 3) & !3) - func.body.len();
let (unwind, remainder) = remainder.split_at_mut(padding + func.unwind_info.len());
unwind[padding..].copy_from_slice(&func.unwind_info);
let mut relocs = Vec::new();
func.unwind_info
.serialize(&mut unwind[padding..], &mut relocs);
let unwind_start = func_end + (padding as u32);
let unwind_end = unwind_start + (func.unwind_info.len() as u32);
table.add_function(func_start, func_end, unwind_start);
relocs.iter_mut().for_each(move |r| {
r.offset += unwind_start;
r.addend += func_start;
});
table.add_function(func_start, func_end, unwind_start, &relocs);
(unwind_end, remainder, table, vmfunc)
}
@@ -174,20 +213,17 @@ impl CodeMemory {
fn push_current(&mut self, new_size: usize) -> Result<(), String> {
let previous = mem::replace(
&mut self.current,
(
if new_size == 0 {
Mmap::new()
} else {
Mmap::with_at_least(cmp::max(0x10000, new_size))?
},
FunctionTable::new(),
),
if new_size == 0 {
CodeMemoryEntry::new()
} else {
CodeMemoryEntry::with_capacity(cmp::max(0x10000, new_size))?
},
);
if !previous.0.is_empty() {
self.mmaps.push(previous);
if !previous.mmap.is_empty() {
self.entries.push(previous);
} else {
assert_eq!(previous.1.len(), 0);
assert_eq!(previous.table.len(), 0);
}
self.position = 0;

View File

@@ -16,12 +16,12 @@ use wasmtime_environ::entity::{EntityRef, PrimaryMap};
use wasmtime_environ::isa::{TargetFrontendConfig, TargetIsa};
use wasmtime_environ::wasm::{DefinedFuncIndex, DefinedMemoryIndex};
use wasmtime_environ::{
Compilation, CompileError, CompiledFunction, Compiler as _C, FunctionBodyData, Module,
ModuleVmctxInfo, Relocations, Traps, Tunables, VMOffsets,
Compilation, CompileError, CompiledFunction, CompiledFunctionUnwindInfo, Compiler as _C,
FunctionBodyData, Module, ModuleVmctxInfo, Relocations, Traps, Tunables, VMOffsets,
};
use wasmtime_runtime::{
get_mut_trap_registry, InstantiationError, SignatureRegistry, TrapRegistrationGuard,
VMFunctionBody,
get_mut_trap_registry, jit_function_registry, InstantiationError, SignatureRegistry,
TrapRegistrationGuard, VMFunctionBody,
};
/// Select which kind of compilation to use.
@@ -51,6 +51,7 @@ pub struct Compiler {
code_memory: CodeMemory,
trap_registration_guards: Vec<TrapRegistrationGuard>,
jit_function_ranges: Vec<(usize, usize)>,
trampoline_park: HashMap<*const VMFunctionBody, *const VMFunctionBody>,
signatures: SignatureRegistry,
strategy: CompilationStrategy,
@@ -66,6 +67,7 @@ impl Compiler {
isa,
code_memory: CodeMemory::new(),
trap_registration_guards: Vec::new(),
jit_function_ranges: Vec::new(),
trampoline_park: HashMap::new(),
signatures: SignatureRegistry::new(),
fn_builder_ctx: FunctionBuilderContext::new(),
@@ -85,6 +87,10 @@ impl Drop for Compiler {
// Having a custom drop implementation we are independent from the field order
// in the struct what reduces potential human error.
self.trap_registration_guards.clear();
for (start, end) in self.jit_function_ranges.iter() {
jit_function_registry::unregister(*start, *end);
}
}
}
@@ -155,6 +161,18 @@ impl Compiler {
&mut self.trap_registration_guards,
);
for (i, allocated) in allocated_functions.iter() {
let ptr = (*allocated) as *const VMFunctionBody;
let body_len = compilation.get(i).body.len();
self.jit_function_ranges
.push((ptr as usize, ptr as usize + body_len));
let tag = jit_function_registry::JITFunctionTag {
module_id: module.name.clone(),
func_index: i.index(),
};
jit_function_registry::register(ptr as usize, ptr as usize + body_len, tag);
}
let dbg = if let Some(debug_data) = debug_data {
let target_config = self.isa.frontend_config();
let ofs = VMOffsets::new(target_config.pointer_bytes(), &module);
@@ -215,6 +233,7 @@ impl Compiler {
signature,
value_size,
)?;
entry.insert(body);
body
}
@@ -266,6 +285,7 @@ fn make_trampoline(
let mut context = Context::new();
context.func = ir::Function::with_name_signature(ir::ExternalName::user(0, 0), wrapper_sig);
context.func.collect_frame_layout_info();
{
let mut builder = FunctionBuilder::new(&mut context.func, fn_builder_ctx);
@@ -326,7 +346,6 @@ fn make_trampoline(
}
let mut code_buf = Vec::new();
let mut unwind_info = Vec::new();
let mut reloc_sink = RelocSink {};
let mut trap_sink = binemit::NullTrapSink {};
let mut stackmap_sink = binemit::NullStackmapSink {};
@@ -346,7 +365,7 @@ fn make_trampoline(
)))
})?;
context.emit_unwind_info(isa, &mut unwind_info);
let unwind_info = CompiledFunctionUnwindInfo::new(isa, &context);
Ok(code_memory
.allocate_for_function(&CompiledFunction {

View File

@@ -2,42 +2,7 @@
//!
//! This module is primarily used to track JIT functions on Windows for stack walking and unwind.
/// Represents a runtime function table.
///
/// The runtime function table is not implemented for non-Windows target platforms.
///
/// This no-op stub lets `CodeMemory` use the same API on every platform.
#[cfg(not(target_os = "windows"))]
pub(crate) struct FunctionTable;

#[cfg(not(target_os = "windows"))]
impl FunctionTable {
    /// Creates a new function table.
    pub fn new() -> Self {
        Self
    }

    /// Returns the number of functions in the table, also referred to as its 'length'.
    ///
    /// For non-Windows platforms, the table will always be empty.
    pub fn len(&self) -> usize {
        0
    }

    /// Adds a function to the table based off of the start offset, end offset, and unwind offset.
    ///
    /// The offsets are from the "module base", which is provided when the table is published.
    ///
    /// For non-Windows platforms, this is a no-op.
    pub fn add_function(&mut self, _start: u32, _end: u32, _unwind: u32) {}

    /// Publishes the function table using the given base address.
    ///
    /// A published function table will automatically be deleted when it is dropped.
    ///
    /// For non-Windows platforms, this is a no-op; it always succeeds.
    pub fn publish(&mut self, _base_address: u64) -> Result<(), String> {
        Ok(())
    }
}
/// Relocation record applied to the serialized unwind info when a
/// function table is published (see `FunctionTable::publish`).
type FunctionTableReloc = wasmtime_environ::CompiledFunctionUnwindInfoReloc;
/// Represents a runtime function table.
///
@@ -66,7 +31,14 @@ impl FunctionTable {
/// Adds a function to the table based off of the start offset, end offset, and unwind offset.
///
/// The offsets are from the "module base", which is provided when the table is published.
pub fn add_function(&mut self, start: u32, end: u32, unwind: u32) {
pub fn add_function(
&mut self,
start: u32,
end: u32,
unwind: u32,
_relocs: &[FunctionTableReloc],
) {
assert_eq!(_relocs.len(), 0);
use winapi::um::winnt;
assert!(!self.published, "table has already been published");
@@ -133,3 +105,106 @@ impl Drop for FunctionTable {
}
}
}
/// Represents a runtime function table.
///
/// This is used to register JIT code with the operating system to enable stack walking and unwinding.
#[cfg(any(target_os = "macos", target_os = "linux"))]
pub(crate) struct FunctionTable {
    // Offsets (relative to the module base) of each function's serialized
    // unwind info, as passed to `add_function`.
    functions: Vec<u32>,
    // Relocations to patch into the unwind section at publish time.
    relocs: Vec<FunctionTableReloc>,
    // `None` until `publish` runs; afterwards holds the absolute addresses
    // of the FDEs handed to `__register_frame`, so `Drop` can deregister them.
    published: Option<Vec<usize>>,
}
#[cfg(any(target_os = "macos", target_os = "linux"))]
impl FunctionTable {
    /// Creates a new function table.
    pub fn new() -> Self {
        Self {
            functions: Vec::new(),
            relocs: Vec::new(),
            published: None,
        }
    }

    /// Returns the number of functions in the table, also referred to as its 'length'.
    pub fn len(&self) -> usize {
        self.functions.len()
    }

    /// Adds a function to the table based off of the start offset, end offset, and unwind offset.
    ///
    /// The offsets are from the "module base", which is provided when the table is published.
    ///
    /// # Panics
    ///
    /// Panics if the table has already been published.
    pub fn add_function(
        &mut self,
        _start: u32,
        _end: u32,
        unwind: u32,
        relocs: &[FunctionTableReloc],
    ) {
        assert!(self.published.is_none(), "table has already been published");
        self.functions.push(unwind);
        self.relocs.extend_from_slice(relocs);
    }

    /// Publishes the function table using the given base address.
    ///
    /// Applies the pending relocations to the serialized unwind info, then
    /// registers each function's FDE with the unwinder via `__register_frame`.
    ///
    /// A published function table will automatically be deleted when it is dropped.
    ///
    /// # Errors
    ///
    /// Returns an error if the table was already published.
    pub fn publish(&mut self, base_address: u64) -> Result<(), String> {
        if self.published.is_some() {
            return Err("function table was already published".into());
        }

        if self.functions.is_empty() {
            assert_eq!(self.relocs.len(), 0);
            self.published = Some(vec![]);
            return Ok(());
        }

        extern "C" {
            // libunwind import
            fn __register_frame(fde: *const u8);
        }

        for reloc in self.relocs.iter() {
            let addr = base_address + (reloc.offset as u64);
            let target = base_address + (reloc.addend as u64);
            // SAFETY: `addr` points into this table's still-writable unwind
            // section. The unwind info is only kept 4-byte aligned by the code
            // allocator, so an aligned 8-byte store here would be UB when the
            // address is not 8-byte aligned — use an unaligned write.
            unsafe {
                std::ptr::write_unaligned(addr as *mut u64, target);
            }
        }

        let mut fdes = Vec::with_capacity(self.functions.len());
        for unwind_offset in self.functions.iter() {
            let addr = base_address + (*unwind_offset as u64);
            // Skip the first length-prefixed record (presumably the CIE) to
            // reach the FDE: `length + 4` bytes past the start of the info.
            // The 4-byte alignment guarantee makes this u32 read aligned.
            let off = unsafe { std::ptr::read::<u32>(addr as *const u32) } as usize + 4;
            let fde = (addr + off as u64) as usize;
            // NOTE(review): libunwind's `__register_frame` takes a single FDE,
            // but glibc/libgcc's takes a whole `.eh_frame` section — confirm
            // which implementation is linked on the Linux targets.
            unsafe {
                __register_frame(fde as *const _);
            }
            fdes.push(fde);
        }

        self.published = Some(fdes);

        Ok(())
    }
}
#[cfg(any(target_os = "macos", target_os = "linux"))]
impl Drop for FunctionTable {
    /// Deregisters every FDE that `publish` handed to the unwinder.
    fn drop(&mut self) {
        extern "C" {
            // libunwind import
            fn __deregister_frame(fde: *const u8);
        }

        if let Some(fdes) = &self.published {
            for &fde in fdes {
                unsafe {
                    __deregister_frame(fde as *const _);
                }
            }
        }
    }
}