Merge pull request #1466 from peterhuene/fix-unwind-emit
Refactor unwind generation in Cranelift.
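The changes below drop the JIT's old per-platform FunctionTable in favor of a per-mmap UnwindRegistry: Cranelift now attaches UnwindInfo to each CompiledFunction, the registry records it function by function while code is copied into executable memory, and a single publish(isa) call hands it to the operating system. The sketch below is a self-contained, simplified stand-in (invented names, not wasmtime's actual API) for that register-then-publish lifecycle.

// Hypothetical stand-in types illustrating the lifecycle introduced by this commit:
// one registry per block of executable memory, `register` called per function while
// code is copied in, and a single `publish` that hands everything to the platform.
struct UnwindRegistrySketch {
    base_address: usize,
    functions: Vec<(u32, u32)>, // (start offset, length) relative to `base_address`
    published: bool,
}

impl UnwindRegistrySketch {
    fn new(base_address: usize) -> Self {
        Self { base_address, functions: Vec::new(), published: false }
    }

    fn register(&mut self, start: u32, len: u32) -> Result<(), String> {
        if self.published {
            return Err("unwind registry has already been published".into());
        }
        self.functions.push((start, len));
        Ok(())
    }

    fn publish(&mut self) -> Result<(), String> {
        // The real code builds an `.eh_frame` with gimli (System V) or calls
        // `RtlAddFunctionTable` (Windows x64) at this point.
        self.published = true;
        Ok(())
    }
}

fn main() -> Result<(), String> {
    let mut registry = UnwindRegistrySketch::new(0x1000);
    registry.register(0, 0x40)?;
    registry.register(0x40, 0x80)?;
    registry.publish()?;
    println!(
        "registered {} functions at base {:#x}",
        registry.functions.len(),
        registry.base_address
    );
    Ok(())
}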
@@ -1,37 +1,33 @@
 //! Memory management for executable code.
 
-use crate::function_table::FunctionTable;
+use crate::unwind::UnwindRegistry;
 use region;
 use std::mem::ManuallyDrop;
 use std::{cmp, mem};
-use wasmtime_environ::{Compilation, CompiledFunction};
+use wasmtime_environ::{
+    isa::{unwind::UnwindInfo, TargetIsa},
+    Compilation, CompiledFunction,
+};
 use wasmtime_runtime::{Mmap, VMFunctionBody};
 
 struct CodeMemoryEntry {
     mmap: ManuallyDrop<Mmap>,
-    table: ManuallyDrop<FunctionTable>,
+    registry: ManuallyDrop<UnwindRegistry>,
 }
 
 impl CodeMemoryEntry {
-    fn new() -> Self {
-        Self {
-            mmap: ManuallyDrop::new(Mmap::new()),
-            table: ManuallyDrop::new(FunctionTable::new()),
-        }
-    }
     fn with_capacity(cap: usize) -> Result<Self, String> {
-        Ok(Self {
-            mmap: ManuallyDrop::new(Mmap::with_at_least(cap)?),
-            table: ManuallyDrop::new(FunctionTable::new()),
-        })
+        let mmap = ManuallyDrop::new(Mmap::with_at_least(cap)?);
+        let registry = ManuallyDrop::new(UnwindRegistry::new(mmap.as_ptr() as usize));
+        Ok(Self { mmap, registry })
     }
 }
 
 impl Drop for CodeMemoryEntry {
     fn drop(&mut self) {
         unsafe {
-            // Table needs to be freed before mmap.
-            ManuallyDrop::drop(&mut self.table);
+            // The registry needs to be dropped before the mmap
+            ManuallyDrop::drop(&mut self.registry);
             ManuallyDrop::drop(&mut self.mmap);
         }
     }
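CodeMemoryEntry keeps both fields in ManuallyDrop so the Drop impl above can control destruction order: the registry refers into the mmap, so it must be dropped first. A minimal self-contained illustration of that pattern (stand-in types, not wasmtime's):

use std::mem::ManuallyDrop;

// Stand-ins for the mmap and the unwind registry that refers to addresses inside it.
struct Mapping(Vec<u8>);
struct Registry; // imagine this holds raw pointers into `Mapping`

struct Entry {
    mapping: ManuallyDrop<Mapping>,
    registry: ManuallyDrop<Registry>,
}

impl Drop for Entry {
    fn drop(&mut self) {
        // Rust drops fields in declaration order, which would free `mapping` while
        // `registry` still references it; ManuallyDrop makes the order explicit.
        unsafe {
            ManuallyDrop::drop(&mut self.registry);
            ManuallyDrop::drop(&mut self.mapping);
        }
    }
}

fn main() {
    let entry = Entry {
        mapping: ManuallyDrop::new(Mapping(vec![0; 4096])),
        registry: ManuallyDrop::new(Registry),
    };
    drop(entry); // runs the explicit ordering above
}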
@@ -39,7 +35,7 @@ impl Drop for CodeMemoryEntry {
 
 /// Memory manager for executable code.
 pub struct CodeMemory {
-    current: CodeMemoryEntry,
+    current: Option<CodeMemoryEntry>,
     entries: Vec<CodeMemoryEntry>,
     position: usize,
     published: usize,
@@ -54,7 +50,7 @@ impl CodeMemory {
     /// Create a new `CodeMemory` instance.
     pub fn new() -> Self {
         Self {
-            current: CodeMemoryEntry::new(),
+            current: None,
             entries: Vec::new(),
             position: 0,
             published: 0,
@@ -70,16 +66,14 @@ impl CodeMemory {
     ) -> Result<&mut [VMFunctionBody], String> {
         let size = Self::function_allocation_size(func);
 
-        let (buf, table, start) = self.allocate(size)?;
+        let (buf, registry, start) = self.allocate(size)?;
 
-        let (_, _, _, vmfunc) = Self::copy_function(func, start as u32, buf, table);
+        let (_, _, vmfunc) = Self::copy_function(func, start as u32, buf, registry);
 
         Ok(vmfunc)
     }
 
     /// Allocate a continuous memory block for a compilation.
     ///
     /// Allocates memory for both the function bodies as well as function unwind data.
     pub fn allocate_for_compilation(
         &mut self,
         compilation: &Compilation,
@@ -88,33 +82,35 @@ impl CodeMemory {
             .into_iter()
             .fold(0, |acc, func| acc + Self::function_allocation_size(func));
 
-        let (mut buf, mut table, start) = self.allocate(total_len)?;
+        let (mut buf, registry, start) = self.allocate(total_len)?;
         let mut result = Vec::with_capacity(compilation.len());
         let mut start = start as u32;
 
         for func in compilation.into_iter() {
-            let (next_start, next_buf, next_table, vmfunc) =
-                Self::copy_function(func, start, buf, table);
+            let (next_start, next_buf, vmfunc) = Self::copy_function(func, start, buf, registry);
 
             result.push(vmfunc);
 
             start = next_start;
             buf = next_buf;
-            table = next_table;
         }
 
         Ok(result.into_boxed_slice())
     }
 
     /// Make all allocated memory executable.
-    pub fn publish(&mut self) {
+    pub fn publish(&mut self, isa: &dyn TargetIsa) {
         self.push_current(0)
             .expect("failed to push current memory map");
 
-        for CodeMemoryEntry { mmap: m, table: t } in &mut self.entries[self.published..] {
+        for CodeMemoryEntry {
+            mmap: m,
+            registry: r,
+        } in &mut self.entries[self.published..]
+        {
             // Remove write access to the pages due to the relocation fixups.
-            t.publish(m.as_ptr() as u64)
-                .expect("failed to publish function table");
+            r.publish(isa)
+                .expect("failed to publish function unwind registry");
 
             if !m.is_empty() {
                 unsafe {
@@ -139,73 +135,79 @@ impl CodeMemory {
     /// * The offset within the current mmap that the slice starts at
     ///
     /// TODO: Add an alignment flag.
-    fn allocate(&mut self, size: usize) -> Result<(&mut [u8], &mut FunctionTable, usize), String> {
-        if self.current.mmap.len() - self.position < size {
+    fn allocate(&mut self, size: usize) -> Result<(&mut [u8], &mut UnwindRegistry, usize), String> {
+        assert!(size > 0);
+
+        if match &self.current {
+            Some(e) => e.mmap.len() - self.position < size,
+            None => true,
+        } {
             self.push_current(cmp::max(0x10000, size))?;
         }
 
         let old_position = self.position;
         self.position += size;
 
+        let e = self.current.as_mut().unwrap();
+
         Ok((
-            &mut self.current.mmap.as_mut_slice()[old_position..self.position],
-            &mut self.current.table,
+            &mut e.mmap.as_mut_slice()[old_position..self.position],
+            &mut e.registry,
            old_position,
         ))
     }
 
     /// Calculates the allocation size of the given compiled function.
     fn function_allocation_size(func: &CompiledFunction) -> usize {
-        if func.unwind_info.is_empty() {
-            func.body.len()
-        } else {
-            // Account for necessary unwind information alignment padding (32-bit)
-            ((func.body.len() + 3) & !3) + func.unwind_info.len()
+        match &func.unwind_info {
+            Some(UnwindInfo::WindowsX64(info)) => {
+                // Windows unwind information is required to be emitted into code memory
+                // This is because it must be a positive relative offset from the start of the memory
+                // Account for necessary unwind information alignment padding (32-bit alignment)
+                ((func.body.len() + 3) & !3) + info.emit_size()
+            }
+            _ => func.body.len(),
         }
     }
 
     /// Copies the data of the compiled function to the given buffer.
     ///
-    /// This will also add the function to the current function table.
+    /// This will also add the function to the current unwind registry.
     fn copy_function<'a>(
         func: &CompiledFunction,
         func_start: u32,
         buf: &'a mut [u8],
-        table: &'a mut FunctionTable,
-    ) -> (
-        u32,
-        &'a mut [u8],
-        &'a mut FunctionTable,
-        &'a mut [VMFunctionBody],
-    ) {
-        let func_end = func_start + (func.body.len() as u32);
+        registry: &mut UnwindRegistry,
+    ) -> (u32, &'a mut [u8], &'a mut [VMFunctionBody]) {
+        let func_len = func.body.len();
+        let mut func_end = func_start + (func_len as u32);
 
-        let (body, remainder) = buf.split_at_mut(func.body.len());
+        let (body, mut remainder) = buf.split_at_mut(func_len);
         body.copy_from_slice(&func.body);
         let vmfunc = Self::view_as_mut_vmfunc_slice(body);
 
-        if func.unwind_info.is_empty() {
-            return (func_end, remainder, table, vmfunc);
+        if let Some(UnwindInfo::WindowsX64(info)) = &func.unwind_info {
+            // Windows unwind information is written following the function body
+            // Keep unwind information 32-bit aligned (round up to the nearest 4 byte boundary)
+            let unwind_start = (func_end + 3) & !3;
+            let unwind_size = info.emit_size();
+            let padding = (unwind_start - func_end) as usize;
+
+            let (slice, r) = remainder.split_at_mut(padding + unwind_size);
+
+            info.emit(&mut slice[padding..]);
+
+            func_end = unwind_start + (unwind_size as u32);
+            remainder = r;
         }
 
-        // Keep unwind information 32-bit aligned (round up to the nearest 4 byte boundary)
-        let padding = ((func.body.len() + 3) & !3) - func.body.len();
-        let (unwind, remainder) = remainder.split_at_mut(padding + func.unwind_info.len());
-        let mut relocs = Vec::new();
-        func.unwind_info
-            .serialize(&mut unwind[padding..], &mut relocs);
+        if let Some(info) = &func.unwind_info {
+            registry
+                .register(func_start, func_len as u32, info)
+                .expect("failed to register unwind information");
+        }
 
-        let unwind_start = func_end + (padding as u32);
-        let unwind_end = unwind_start + (func.unwind_info.len() as u32);
-
-        relocs.iter_mut().for_each(move |r| {
-            r.offset += unwind_start;
-            r.addend += func_start;
-        });
-
-        table.add_function(func_start, func_end, unwind_start, &relocs);
-
-        (unwind_end, remainder, table, vmfunc)
+        (func_end, remainder, vmfunc)
     }
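On Windows x64, copy_function now places the emitted unwind info directly after the function body, rounded up to a 4-byte boundary with the same `(x + 3) & !3` expression that function_allocation_size uses to reserve space. A quick self-contained check of that arithmetic (the concrete sizes below are made up):

/// Rounds `offset` up to the next multiple of 4, as in the code above.
fn align4(offset: u32) -> u32 {
    (offset + 3) & !3
}

fn main() {
    // A 10-byte function body placed at offset 0x100, followed by 8 bytes of unwind info:
    let func_start = 0x100u32;
    let body_len = 10u32;
    let unwind_size = 8u32;

    let func_end = func_start + body_len; // 0x10a
    let unwind_start = align4(func_end); // padded up to 0x10c
    let next_offset = unwind_start + unwind_size; // 0x114

    assert_eq!(unwind_start, 0x10c);
    assert_eq!(next_offset, 0x114);

    // Allocation size for this function: padded body plus unwind info.
    let alloc_size = (align4(body_len) + unwind_size) as usize;
    assert_eq!(alloc_size, 20); // 12 + 8

    println!("unwind info at {:#x}, {} bytes reserved", unwind_start, alloc_size);
}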
     /// Convert mut a slice from u8 to VMFunctionBody.
@@ -215,21 +217,19 @@ impl CodeMemory {
         unsafe { &mut *body_ptr }
     }
 
-    /// Pushes the current Mmap (and function table) and allocates a new Mmap of the given size.
+    /// Pushes the current entry and allocates a new one with the given size.
     fn push_current(&mut self, new_size: usize) -> Result<(), String> {
         let previous = mem::replace(
             &mut self.current,
             if new_size == 0 {
-                CodeMemoryEntry::new()
+                None
             } else {
-                CodeMemoryEntry::with_capacity(cmp::max(0x10000, new_size))?
+                Some(CodeMemoryEntry::with_capacity(cmp::max(0x10000, new_size))?)
             },
         );
 
-        if !previous.mmap.is_empty() {
-            self.entries.push(previous);
-        } else {
-            assert_eq!(previous.table.len(), 0);
+        if let Some(e) = previous {
+            self.entries.push(e);
         }
 
         self.position = 0;
@@ -14,9 +14,9 @@ use wasmtime_environ::entity::{EntityRef, PrimaryMap};
 use wasmtime_environ::isa::{TargetFrontendConfig, TargetIsa};
 use wasmtime_environ::wasm::{DefinedFuncIndex, DefinedMemoryIndex, MemoryIndex};
 use wasmtime_environ::{
-    CacheConfig, CompileError, CompiledFunction, CompiledFunctionUnwindInfo, Compiler as _C,
-    ModuleAddressMap, ModuleMemoryOffset, ModuleTranslation, ModuleVmctxInfo, Relocation,
-    RelocationTarget, Relocations, Traps, Tunables, VMOffsets,
+    CacheConfig, CompileError, CompiledFunction, Compiler as _C, ModuleAddressMap,
+    ModuleMemoryOffset, ModuleTranslation, ModuleVmctxInfo, Relocation, RelocationTarget,
+    Relocations, Traps, Tunables, VMOffsets,
 };
 use wasmtime_runtime::{
     InstantiationError, SignatureRegistry, VMFunctionBody, VMSharedSignatureIndex, VMTrampoline,
@@ -46,7 +46,6 @@ pub enum CompilationStrategy {
 /// TODO: Consider using cranelift-module.
 pub struct Compiler {
     isa: Box<dyn TargetIsa>,
 
     code_memory: CodeMemory,
     signatures: SignatureRegistry,
     strategy: CompilationStrategy,
@@ -102,34 +101,27 @@ impl Compiler {
         translation: &ModuleTranslation,
         debug_data: Option<DebugInfoData>,
     ) -> Result<Compilation, SetupError> {
-        let (
-            compilation,
-            relocations,
-            address_transform,
-            value_ranges,
-            stack_slots,
-            traps,
-            frame_layouts,
-        ) = match self.strategy {
-            // For now, interpret `Auto` as `Cranelift` since that's the most stable
-            // implementation.
-            CompilationStrategy::Auto | CompilationStrategy::Cranelift => {
-                wasmtime_environ::cranelift::Cranelift::compile_module(
-                    translation,
-                    &*self.isa,
-                    &self.cache_config,
-                )
-            }
-            #[cfg(feature = "lightbeam")]
-            CompilationStrategy::Lightbeam => {
-                wasmtime_environ::lightbeam::Lightbeam::compile_module(
-                    translation,
-                    &*self.isa,
-                    &self.cache_config,
-                )
-            }
-        }
-        .map_err(SetupError::Compile)?;
+        let (compilation, relocations, address_transform, value_ranges, stack_slots, traps) =
+            match self.strategy {
+                // For now, interpret `Auto` as `Cranelift` since that's the most stable
+                // implementation.
+                CompilationStrategy::Auto | CompilationStrategy::Cranelift => {
+                    wasmtime_environ::cranelift::Cranelift::compile_module(
+                        translation,
+                        &*self.isa,
+                        &self.cache_config,
+                    )
+                }
+                #[cfg(feature = "lightbeam")]
+                CompilationStrategy::Lightbeam => {
+                    wasmtime_environ::lightbeam::Lightbeam::compile_module(
+                        translation,
+                        &*self.isa,
+                        &self.cache_config,
+                    )
+                }
+            }
+            .map_err(SetupError::Compile)?;
 
         // Allocate all of the compiled functions into executable memory,
         // copying over their contents.
@@ -202,8 +194,8 @@ impl Compiler {
                &module_vmctx_info,
                &address_transform,
                &value_ranges,
-                &frame_layouts,
                &funcs,
+                &compilation,
            )
            .map_err(SetupError::DebugInfo)?;
            Some(bytes)
@@ -227,7 +219,7 @@ impl Compiler {
 
     /// Make memory containing compiled code executable.
     pub(crate) fn publish_compiled_code(&mut self) {
-        self.code_memory.publish();
+        self.code_memory.publish(self.isa.as_ref());
     }
 
     /// Shared signature registry.
@@ -264,7 +256,6 @@ pub fn make_trampoline(
 
     let mut context = Context::new();
     context.func = ir::Function::with_name_signature(ir::ExternalName::user(0, 0), wrapper_sig);
-    context.func.collect_frame_layout_info();
 
     {
         let mut builder = FunctionBuilder::new(&mut context.func, fn_builder_ctx);
@@ -343,7 +334,7 @@ pub fn make_trampoline(
         )))
     })?;
 
-    let unwind_info = CompiledFunctionUnwindInfo::new(isa, &context).map_err(|error| {
+    let unwind_info = context.create_unwind_info(isa).map_err(|error| {
         SetupError::Compile(CompileError::Codegen(pretty_error(
             &context.func,
             Some(isa),
@@ -369,6 +360,10 @@ fn allocate_functions(
     code_memory: &mut CodeMemory,
     compilation: &wasmtime_environ::Compilation,
 ) -> Result<PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>, String> {
+    if compilation.is_empty() {
+        return Ok(PrimaryMap::new());
+    }
+
     let fat_ptrs = code_memory.allocate_for_compilation(compilation)?;
 
     // Second, create a PrimaryMap from result vector of pointers.
@@ -377,6 +372,7 @@ fn allocate_functions(
         let fat_ptr: *mut [VMFunctionBody] = fat_ptrs[i];
         result.push(fat_ptr);
     }
 
     Ok(result)
 }
@@ -1,224 +0,0 @@
//! Runtime function table.
//!
//! This module is primarily used to track JIT functions on Windows for stack walking and unwind.

type FunctionTableReloc = wasmtime_environ::CompiledFunctionUnwindInfoReloc;

/// Represents a runtime function table.
///
/// This is used to register JIT code with the operating system to enable stack walking and unwinding.
#[cfg(all(target_os = "windows", target_arch = "x86_64"))]
pub(crate) struct FunctionTable {
    functions: Vec<winapi::um::winnt::RUNTIME_FUNCTION>,
    published: bool,
}

#[cfg(all(target_os = "windows", target_arch = "x86_64"))]
impl FunctionTable {
    /// Creates a new function table.
    pub fn new() -> Self {
        Self {
            functions: Vec::new(),
            published: false,
        }
    }

    /// Returns the number of functions in the table, also referred to as its 'length'.
    pub fn len(&self) -> usize {
        self.functions.len()
    }

    /// Adds a function to the table based off of the start offset, end offset, and unwind offset.
    ///
    /// The offsets are from the "module base", which is provided when the table is published.
    pub fn add_function(
        &mut self,
        start: u32,
        end: u32,
        unwind: u32,
        _relocs: &[FunctionTableReloc],
    ) {
        assert_eq!(_relocs.len(), 0);
        use winapi::um::winnt;

        assert!(!self.published, "table has already been published");

        let mut entry = winnt::RUNTIME_FUNCTION::default();

        entry.BeginAddress = start;
        entry.EndAddress = end;

        unsafe {
            *entry.u.UnwindInfoAddress_mut() = unwind;
        }

        self.functions.push(entry);
    }

    /// Publishes the function table using the given base address.
    ///
    /// A published function table will automatically be deleted when it is dropped.
    pub fn publish(&mut self, base_address: u64) -> Result<(), String> {
        use winapi::um::winnt;

        if self.published {
            return Err("function table was already published".into());
        }

        self.published = true;

        if self.functions.is_empty() {
            return Ok(());
        }

        unsafe {
            // Windows heap allocations are 32-bit aligned, but assert just in case
            assert_eq!(
                (self.functions.as_mut_ptr() as u64) % 4,
                0,
                "function table allocation was not aligned"
            );

            if winnt::RtlAddFunctionTable(
                self.functions.as_mut_ptr(),
                self.functions.len() as u32,
                base_address,
            ) == 0
            {
                return Err("failed to add function table".into());
            }
        }

        Ok(())
    }
}

#[cfg(target_os = "windows")]
impl Drop for FunctionTable {
    fn drop(&mut self) {
        use winapi::um::winnt;

        if self.published {
            unsafe {
                winnt::RtlDeleteFunctionTable(self.functions.as_mut_ptr());
            }
        }
    }
}

/// Represents a runtime function table.
///
/// This is used to register JIT code with the operating system to enable stack walking and unwinding.
#[cfg(unix)]
pub(crate) struct FunctionTable {
    functions: Vec<u32>,
    relocs: Vec<FunctionTableReloc>,
    published: Option<Vec<usize>>,
}

#[cfg(unix)]
impl FunctionTable {
    /// Creates a new function table.
    pub fn new() -> Self {
        Self {
            functions: Vec::new(),
            relocs: Vec::new(),
            published: None,
        }
    }

    /// Returns the number of functions in the table, also referred to as its 'length'.
    pub fn len(&self) -> usize {
        self.functions.len()
    }

    /// Adds a function to the table based off of the start offset, end offset, and unwind offset.
    ///
    /// The offsets are from the "module base", which is provided when the table is published.
    pub fn add_function(
        &mut self,
        _start: u32,
        _end: u32,
        unwind: u32,
        relocs: &[FunctionTableReloc],
    ) {
        assert!(self.published.is_none(), "table has already been published");
        self.functions.push(unwind);
        self.relocs.extend_from_slice(relocs);
    }

    /// Publishes the function table using the given base address.
    ///
    /// A published function table will automatically be deleted when it is dropped.
    pub fn publish(&mut self, base_address: u64) -> Result<(), String> {
        if self.published.is_some() {
            return Err("function table was already published".into());
        }

        if self.functions.is_empty() {
            assert_eq!(self.relocs.len(), 0);
            self.published = Some(vec![]);
            return Ok(());
        }

        extern "C" {
            // libunwind import
            fn __register_frame(fde: *const u8);
        }

        for reloc in self.relocs.iter() {
            let addr = base_address + (reloc.offset as u64);
            let target = base_address + (reloc.addend as u64);
            unsafe {
                std::ptr::write(addr as *mut u64, target);
            }
        }

        let mut fdes = Vec::with_capacity(self.functions.len());
        for unwind_offset in self.functions.iter() {
            let addr = base_address + (*unwind_offset as u64);
            let off = unsafe { std::ptr::read::<u32>(addr as *const u32) } as usize + 4;

            let fde = (addr + off as u64) as usize;
            unsafe {
                __register_frame(fde as *const _);
            }
            fdes.push(fde);
        }

        self.published = Some(fdes);
        Ok(())
    }
}

#[cfg(unix)]
impl Drop for FunctionTable {
    fn drop(&mut self) {
        extern "C" {
            // libunwind import
            fn __deregister_frame(fde: *const u8);
        }

        if let Some(published) = &self.published {
            unsafe {
                // I'm not really sure why, but it appears to be way faster to
                // unregister frames in reverse order rather than in-order. This
                // way we're deregistering in LIFO order, and maybe there's some
                // vec shifting or something like that in libgcc?
                //
                // Locally on Ubuntu 18.04 a wasm module with 40k empty
                // functions takes 0.1s to compile and drop with reverse
                // iteration. With forward iteration it takes 3s to compile and
                // drop!
                //
                // Poking around libgcc sources seems to indicate that some sort
                // of linked list is being traversed... We may need to figure
                // out something else for backtraces in the future since this
                // API may not be long-lived to keep calling.
                for fde in published.iter().rev() {
                    __deregister_frame(*fde as *const _);
                }
            }
        }
    }
}
@@ -23,11 +23,11 @@
 
 mod code_memory;
 mod compiler;
-mod function_table;
 mod imports;
 mod instantiate;
 mod link;
 mod resolver;
+mod unwind;
 
 pub mod native;
 pub mod trampoline;
crates/jit/src/unwind.rs (new file)
@@ -0,0 +1,11 @@
cfg_if::cfg_if! {
    if #[cfg(all(windows, target_arch = "x86_64"))] {
        mod winx64;
        pub use self::winx64::*;
    } else if #[cfg(unix)] {
        mod systemv;
        pub use self::systemv::*;
    } else {
        compile_error!("unsupported target platform for unwind");
    }
}
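The module above selects an unwind backend at compile time while exposing the same UnwindRegistry name to the rest of the crate. A self-contained sketch of that cfg_if dispatch pattern (it assumes the cfg_if crate as a dependency; the module and function names are invented for illustration):

mod backend {
    cfg_if::cfg_if! {
        if #[cfg(windows)] {
            mod imp {
                pub fn name() -> &'static str { "Windows x64 (RtlAddFunctionTable)" }
            }
            pub use self::imp::*;
        } else if #[cfg(unix)] {
            mod imp {
                pub fn name() -> &'static str { "System V (.eh_frame + __register_frame)" }
            }
            pub use self::imp::*;
        } else {
            compile_error!("unsupported target platform for this sketch");
        }
    }
}

fn main() {
    // Callers never name the platform; the matching module was chosen when compiling.
    println!("unwind backend: {}", backend::name());
}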
crates/jit/src/unwind/systemv.rs (new file)
@@ -0,0 +1,150 @@
//! Module for System V ABI unwind registry.

use anyhow::{bail, Result};
use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
use gimli::{
    write::{Address, EhFrame, EndianVec, FrameTable, Writer},
    RunTimeEndian,
};

/// Represents a registry of function unwind information for System V ABI.
pub struct UnwindRegistry {
    base_address: usize,
    functions: Vec<gimli::write::FrameDescriptionEntry>,
    frame_table: Vec<u8>,
    registrations: Vec<usize>,
    published: bool,
}

extern "C" {
    // libunwind import
    fn __register_frame(fde: *const u8);
    fn __deregister_frame(fde: *const u8);
}

impl UnwindRegistry {
    /// Creates a new unwind registry with the given base address.
    pub fn new(base_address: usize) -> Self {
        Self {
            base_address,
            functions: Vec::new(),
            frame_table: Vec::new(),
            registrations: Vec::new(),
            published: false,
        }
    }

    /// Registers a function given the start offset, length, and unwind information.
    pub fn register(&mut self, func_start: u32, _func_len: u32, info: &UnwindInfo) -> Result<()> {
        if self.published {
            bail!("unwind registry has already been published");
        }

        match info {
            UnwindInfo::SystemV(info) => {
                self.functions.push(info.to_fde(Address::Constant(
                    self.base_address as u64 + func_start as u64,
                )));
            }
            _ => bail!("unsupported unwind information"),
        }

        Ok(())
    }

    /// Publishes all registered functions.
    pub fn publish(&mut self, isa: &dyn TargetIsa) -> Result<()> {
        if self.published {
            bail!("unwind registry has already been published");
        }

        if self.functions.is_empty() {
            self.published = true;
            return Ok(());
        }

        self.set_frame_table(isa)?;

        unsafe {
            self.register_frames();
        }

        self.published = true;

        Ok(())
    }

    fn set_frame_table(&mut self, isa: &dyn TargetIsa) -> Result<()> {
        let mut table = FrameTable::default();
        let cie_id = table.add_cie(match isa.create_systemv_cie() {
            Some(cie) => cie,
            None => bail!("ISA does not support System V unwind information"),
        });

        let functions = std::mem::replace(&mut self.functions, Vec::new());

        for func in functions {
            table.add_fde(cie_id, func);
        }

        let mut eh_frame = EhFrame(EndianVec::new(RunTimeEndian::default()));
        table.write_eh_frame(&mut eh_frame).unwrap();

        // GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
        eh_frame.0.write_u32(0).unwrap();

        self.frame_table = eh_frame.0.into_vec();

        Ok(())
    }

    unsafe fn register_frames(&mut self) {
        cfg_if::cfg_if! {
            if #[cfg(target_os = "macos")] {
                // On macOS, `__register_frame` takes a pointer to a single FDE
                let start = self.frame_table.as_ptr();
                let end = start.add(self.frame_table.len());
                let mut current = start;

                // Walk all of the entries in the frame table and register them
                while current < end {
                    let len = std::ptr::read::<u32>(current as *const u32) as usize;

                    // Skip over the CIE
                    if current != start {
                        __register_frame(current);
                        self.registrations.push(current as usize);
                    }

                    // Move to the next table entry (+4 because the length itself is not inclusive)
                    current = current.add(len + 4);
                }
            } else {
                // On other platforms, `__register_frame` will walk the FDEs until an entry of length 0
                let ptr = self.frame_table.as_ptr();
                __register_frame(ptr);
                self.registrations.push(ptr as usize);
            }
        }
    }
}

impl Drop for UnwindRegistry {
    fn drop(&mut self) {
        if self.published {
            unsafe {
                // libgcc stores the frame entries as a linked list in decreasing sort order
                // based on the PC value of the registered entry.
                //
                // As we store the registrations in increasing order, it would be O(N^2) to
                // deregister in that order.
                //
                // To ensure that we just pop off the first element in the list upon every
                // deregistration, walk our list of registrations backwards.
                for fde in self.registrations.iter().rev() {
                    __deregister_frame(*fde as *const _);
                }
            }
        }
    }
}
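On macOS, register_frames above walks the generated table entry by entry because `__register_frame` there expects a single FDE rather than a whole `.eh_frame`. Each record starts with a 4-byte length that excludes the length field itself, and the table ends with a zero length. A self-contained sketch of that walk over a fake, native-endian table (the real code also skips the leading CIE):

// Returns the byte offsets of every length-prefixed record in a fake frame table.
fn record_offsets(frame_table: &[u8]) -> Vec<usize> {
    let mut offsets = Vec::new();
    let mut pos = 0;
    while pos + 4 <= frame_table.len() {
        let len_bytes: [u8; 4] = frame_table[pos..pos + 4].try_into().unwrap();
        let len = u32::from_ne_bytes(len_bytes) as usize;
        if len == 0 {
            break; // the terminating "empty" length written by `set_frame_table`
        }
        offsets.push(pos);
        pos += 4 + len; // +4 because the length field itself is not counted
    }
    offsets
}

fn main() {
    // Two fake records of 8 and 4 bytes, then the zero terminator.
    let mut table = Vec::new();
    table.extend_from_slice(&8u32.to_ne_bytes());
    table.extend_from_slice(&[0u8; 8]);
    table.extend_from_slice(&4u32.to_ne_bytes());
    table.extend_from_slice(&[0u8; 4]);
    table.extend_from_slice(&0u32.to_ne_bytes());

    assert_eq!(record_offsets(&table), vec![0, 12]);
    println!("records at offsets {:?}", record_offsets(&table));
}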
crates/jit/src/unwind/winx64.rs (new file)
@@ -0,0 +1,91 @@
//! Module for Windows x64 ABI unwind registry.

use anyhow::{bail, Result};
use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
use winapi::um::winnt;

/// Represents a registry of function unwind information for Windows x64 ABI.
pub struct UnwindRegistry {
    base_address: usize,
    functions: Vec<winnt::RUNTIME_FUNCTION>,
    published: bool,
}

impl UnwindRegistry {
    /// Creates a new unwind registry with the given base address.
    pub fn new(base_address: usize) -> Self {
        Self {
            base_address,
            functions: Vec::new(),
            published: false,
        }
    }

    /// Registers a function given the start offset, length, and unwind information.
    pub fn register(&mut self, func_start: u32, func_len: u32, info: &UnwindInfo) -> Result<()> {
        if self.published {
            bail!("unwind registry has already been published");
        }

        match info {
            UnwindInfo::WindowsX64(_) => {
                let mut entry = winnt::RUNTIME_FUNCTION::default();

                entry.BeginAddress = func_start;
                entry.EndAddress = func_start + func_len;

                // The unwind information should be immediately following the function
                // with padding for 4 byte alignment
                unsafe {
                    *entry.u.UnwindInfoAddress_mut() = (entry.EndAddress + 3) & !3;
                }

                self.functions.push(entry);

                Ok(())
            }
            _ => bail!("unsupported unwind information"),
        }
    }

    /// Publishes all registered functions.
    pub fn publish(&mut self, _isa: &dyn TargetIsa) -> Result<()> {
        if self.published {
            bail!("unwind registry has already been published");
        }

        self.published = true;

        if !self.functions.is_empty() {
            // Windows heap allocations are 32-bit aligned, but assert just in case
            assert_eq!(
                (self.functions.as_mut_ptr() as u64) % 4,
                0,
                "function table allocation was not aligned"
            );

            unsafe {
                if winnt::RtlAddFunctionTable(
                    self.functions.as_mut_ptr(),
                    self.functions.len() as u32,
                    self.base_address as u64,
                ) == 0
                {
                    bail!("failed to register function table");
                }
            }
        }

        Ok(())
    }
}

impl Drop for UnwindRegistry {
    fn drop(&mut self) {
        if self.published {
            unsafe {
                winnt::RtlDeleteFunctionTable(self.functions.as_mut_ptr());
            }
        }
    }
}