Merge pull request #1466 from peterhuene/fix-unwind-emit

Refactor unwind generation in Cranelift.
Author: Peter Huene (committed by GitHub)
Date: 2020-04-16 13:34:23 -07:00
44 changed files with 2700 additions and 3161 deletions
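
For orientation (editor's sketch, not part of the diff): the PR drops the hand-rolled CompiledFunctionUnwindInfo type and has each compiled function carry an Option<UnwindInfo> produced directly by Cranelift. The new flow, pieced together from the hunks below and assuming the surrounding names they use (context, isa, code_memory, code_buf), looks roughly like this:

    let unwind_info = context
        .create_unwind_info(isa)
        .map_err(|error| pretty_error(&context.func, Some(isa), error))
        .expect("create unwind information");

    code_memory
        .allocate_for_function(&CompiledFunction {
            body: code_buf,
            jt_offsets: context.func.jt_offsets.clone(),
            unwind_info, // Option<UnwindInfo>, None when the target emits no unwind data
        })
        .expect("allocate code memory");

At publish time, wasmtime-jit hands the registered information to a new per-platform UnwindRegistry (System V .eh_frame on Unix, RUNTIME_FUNCTION tables on Windows x64), replacing the old FunctionTable.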

View File

@@ -10,9 +10,7 @@ use std::mem;
use std::panic::{self, AssertUnwindSafe};
use wasmtime_environ::entity::PrimaryMap;
use wasmtime_environ::isa::TargetIsa;
use wasmtime_environ::{
ir, settings, CompiledFunction, CompiledFunctionUnwindInfo, Export, Module,
};
use wasmtime_environ::{ir, settings, CompiledFunction, Export, Module};
use wasmtime_jit::trampoline::ir::{
ExternalName, Function, InstBuilder, MemFlags, StackSlotData, StackSlotKind,
};
@@ -112,7 +110,6 @@ fn make_trampoline(
let mut context = Context::new();
context.func = Function::with_name_signature(ExternalName::user(0, 0), signature.clone());
context.func.collect_frame_layout_info();
let ss = context.func.create_stack_slot(StackSlotData::new(
StackSlotKind::ExplicitSlot,
@@ -188,9 +185,10 @@ fn make_trampoline(
.map_err(|error| pretty_error(&context.func, Some(isa), error))
.expect("compile_and_emit");
let unwind_info = CompiledFunctionUnwindInfo::new(isa, &context)
let unwind_info = context
.create_unwind_info(isa)
.map_err(|error| pretty_error(&context.func, Some(isa), error))
.expect("emit unwind info");
.expect("create unwind information");
code_memory
.allocate_for_function(&CompiledFunction {
@@ -251,7 +249,7 @@ pub fn create_handle_with_function(
// Next up we wrap everything up into an `InstanceHandle` by publishing our
// code memory (makes it executable) and ensuring all our various bits of
// state make it into the instance constructors.
code_memory.publish();
code_memory.publish(isa.as_ref());
let trampoline_state = TrampolineState { func, code_memory };
create_handle(
module,

View File

@@ -1,139 +0,0 @@
use std::collections::HashMap;
use wasmtime_environ::entity::EntityRef;
use wasmtime_environ::isa::{CallConv, TargetIsa};
use wasmtime_environ::wasm::DefinedFuncIndex;
use wasmtime_environ::{FrameLayoutChange, FrameLayouts};
use gimli::write::{
Address, CallFrameInstruction, CommonInformationEntry as CIEEntry, Error,
FrameDescriptionEntry as FDEEntry, FrameTable,
};
use gimli::{Encoding, Format, Register, X86_64};
fn to_cfi(
isa: &dyn TargetIsa,
change: &FrameLayoutChange,
cfa_def_reg: &mut Register,
cfa_def_offset: &mut i32,
) -> Option<CallFrameInstruction> {
Some(match change {
FrameLayoutChange::CallFrameAddressAt { reg, offset } => {
let mapped = match isa.map_dwarf_register(*reg) {
Ok(r) => Register(r),
Err(_) => return None,
};
let offset = (*offset) as i32;
if mapped != *cfa_def_reg && offset != *cfa_def_offset {
*cfa_def_reg = mapped;
*cfa_def_offset = offset;
CallFrameInstruction::Cfa(mapped, offset)
} else if offset != *cfa_def_offset {
*cfa_def_offset = offset;
CallFrameInstruction::CfaOffset(offset)
} else if mapped != *cfa_def_reg {
*cfa_def_reg = mapped;
CallFrameInstruction::CfaRegister(mapped)
} else {
return None;
}
}
FrameLayoutChange::RegAt { reg, cfa_offset } => {
assert!(cfa_offset % -8 == 0);
let cfa_offset = *cfa_offset as i32;
let mapped = match isa.map_dwarf_register(*reg) {
Ok(r) => Register(r),
Err(_) => return None,
};
CallFrameInstruction::Offset(mapped, cfa_offset)
}
FrameLayoutChange::ReturnAddressAt { cfa_offset } => {
assert!(cfa_offset % -8 == 0);
let cfa_offset = *cfa_offset as i32;
CallFrameInstruction::Offset(X86_64::RA, cfa_offset)
}
FrameLayoutChange::Preserve => CallFrameInstruction::RememberState,
FrameLayoutChange::Restore => CallFrameInstruction::RestoreState,
})
}
pub fn get_debug_frame_bytes(
funcs: &[(*const u8, usize)],
isa: &dyn TargetIsa,
layouts: &FrameLayouts,
) -> Result<Option<FrameTable>, Error> {
// FIXME Only x86-64 at this moment.
if isa.name() != "x86" || isa.pointer_bits() != 64 {
return Ok(None);
}
let address_size = isa.pointer_bytes();
let encoding = Encoding {
format: Format::Dwarf64,
version: 4,
address_size,
};
let mut frames = FrameTable::default();
let mut cached_cies = HashMap::new();
for (i, f) in funcs.into_iter().enumerate() {
let layout = &layouts[DefinedFuncIndex::new(i)];
// FIXME Can only process functions with SystemV-like prologue.
if layout.call_conv != CallConv::Fast
&& layout.call_conv != CallConv::Cold
&& layout.call_conv != CallConv::SystemV
{
continue;
}
// Caching CIE with similar initial_commands.
let (cie_id, mut cfa_def_reg, mut cfa_def_offset) = {
use std::collections::hash_map::Entry;
match cached_cies.entry(&layout.initial_commands) {
Entry::Occupied(o) => *o.get(),
Entry::Vacant(v) => {
// cfa_def_reg and cfa_def_offset initialized with some random values.
let mut cfa_def_reg = X86_64::RA;
let mut cfa_def_offset = 0i32;
// TODO adjust code_alignment_factor and data_alignment_factor based on ISA.
let mut cie = CIEEntry::new(
encoding,
/* code_alignment_factor = */ 1,
/* data_alignment_factor = */ -8,
/* return_address_register = */ X86_64::RA,
);
for cmd in layout.initial_commands.iter() {
if let Some(instr) = to_cfi(isa, cmd, &mut cfa_def_reg, &mut cfa_def_offset)
{
cie.add_instruction(instr);
}
}
let cie_id = frames.add_cie(cie);
*v.insert((cie_id, cfa_def_reg, cfa_def_offset))
}
}
};
let f_len = f.1 as u32;
let mut fde = FDEEntry::new(
Address::Symbol {
symbol: i,
addend: 0,
},
f_len,
);
for (offset, cmd) in layout.commands.into_iter() {
if let Some(instr) = to_cfi(isa, cmd, &mut cfa_def_reg, &mut cfa_def_offset) {
fde.add_instruction(*offset as u32, instr);
}
}
frames.add_fde(cie_id, fde);
}
Ok(Some(frames))
}

View File

@@ -2,19 +2,18 @@
#![allow(clippy::cast_ptr_alignment)]
use crate::frame::get_debug_frame_bytes;
use anyhow::Error;
use faerie::{Artifact, Decl};
use gimli::write::{Address, FrameTable};
use more_asserts::assert_gt;
use target_lexicon::BinaryFormat;
use wasmtime_environ::isa::TargetIsa;
use wasmtime_environ::{FrameLayouts, ModuleAddressMap, ModuleVmctxInfo, ValueLabelsRanges};
use wasmtime_environ::isa::{unwind::UnwindInfo, TargetIsa};
use wasmtime_environ::{Compilation, ModuleAddressMap, ModuleVmctxInfo, ValueLabelsRanges};
pub use crate::read_debuginfo::{read_debuginfo, DebugInfoData, WasmFileInfo};
pub use crate::transform::transform_dwarf;
pub use crate::write_debuginfo::{emit_dwarf, ResolvedSymbol, SymbolResolver};
mod frame;
mod gc;
mod read_debuginfo;
mod transform;
@@ -28,6 +27,29 @@ impl SymbolResolver for FunctionRelocResolver {
}
}
fn create_frame_table<'a>(
isa: &dyn TargetIsa,
infos: impl Iterator<Item = &'a Option<UnwindInfo>>,
) -> Option<FrameTable> {
let mut table = FrameTable::default();
let cie_id = table.add_cie(isa.create_systemv_cie()?);
for (i, info) in infos.enumerate() {
if let Some(UnwindInfo::SystemV(info)) = info {
table.add_fde(
cie_id,
info.to_fde(Address::Symbol {
symbol: i,
addend: 0,
}),
);
}
}
Some(table)
}
pub fn emit_debugsections(
obj: &mut Artifact,
vmctx_info: &ModuleVmctxInfo,
@@ -35,21 +57,13 @@ pub fn emit_debugsections(
debuginfo_data: &DebugInfoData,
at: &ModuleAddressMap,
ranges: &ValueLabelsRanges,
frame_layouts: &FrameLayouts,
compilation: &Compilation,
) -> Result<(), Error> {
let resolver = FunctionRelocResolver {};
let dwarf = transform_dwarf(isa, debuginfo_data, at, vmctx_info, ranges)?;
let frame_table = create_frame_table(isa, compilation.into_iter().map(|f| &f.unwind_info));
let max = at.values().map(|v| v.body_len).fold(0, usize::max);
let mut funcs_bodies = Vec::with_capacity(max as usize);
funcs_bodies.resize(max as usize, 0);
let funcs = at
.values()
.map(|v| (::std::ptr::null(), v.body_len))
.collect::<Vec<(*const u8, usize)>>();
let frames = get_debug_frame_bytes(&funcs, isa, frame_layouts)?;
emit_dwarf(obj, dwarf, &resolver, frames)?;
emit_dwarf(obj, dwarf, &resolver, frame_table)?;
Ok(())
}
@@ -70,8 +84,8 @@ pub fn emit_debugsections_image(
vmctx_info: &ModuleVmctxInfo,
at: &ModuleAddressMap,
ranges: &ValueLabelsRanges,
frame_layouts: &FrameLayouts,
funcs: &[(*const u8, usize)],
compilation: &Compilation,
) -> Result<Vec<u8>, Error> {
let func_offsets = &funcs
.iter()
@@ -93,8 +107,8 @@ pub fn emit_debugsections_image(
let body = unsafe { std::slice::from_raw_parts(segment_body.0, segment_body.1) };
obj.declare_with("all", Decl::function(), body.to_vec())?;
let frames = get_debug_frame_bytes(funcs, isa, frame_layouts)?;
emit_dwarf(&mut obj, dwarf, &resolver, frames)?;
let frame_table = create_frame_table(isa, compilation.into_iter().map(|f| &f.unwind_info));
emit_dwarf(&mut obj, dwarf, &resolver, frame_table)?;
// LLDB is too "magical" about mach-o, generating elf
let mut bytes = obj.emit_as(BinaryFormat::Elf)?;

View File

@@ -1,6 +1,5 @@
use crate::address_map::{ModuleAddressMap, ValueLabelsRanges};
use crate::compilation::{Compilation, Relocations, Traps};
use crate::frame_layout::FrameLayouts;
use cranelift_codegen::ir;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::DefinedFuncIndex;
@@ -36,7 +35,6 @@ pub struct ModuleCacheData {
value_ranges: ValueLabelsRanges,
stack_slots: PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
traps: Traps,
frame_layouts: FrameLayouts,
}
/// A type alias over the module cache data as a tuple.
@@ -47,7 +45,6 @@ pub type ModuleCacheDataTupleType = (
ValueLabelsRanges,
PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
Traps,
FrameLayouts,
);
struct Sha256Hasher(Sha256);
@@ -207,7 +204,6 @@ impl ModuleCacheData {
value_ranges: data.3,
stack_slots: data.4,
traps: data.5,
frame_layouts: data.6,
}
}
@@ -219,7 +215,6 @@ impl ModuleCacheData {
self.value_ranges,
self.stack_slots,
self.traps,
self.frame_layouts,
)
}
}

View File

@@ -100,6 +100,5 @@ fn new_module_cache_data() -> Result<ModuleCacheDataTupleType, ()> {
PrimaryMap::new(),
PrimaryMap::new(),
PrimaryMap::new(),
PrimaryMap::new(),
))
}

View File

@@ -4,138 +4,13 @@
use crate::cache::ModuleCacheDataTupleType;
use crate::CacheConfig;
use crate::ModuleTranslation;
use cranelift_codegen::{binemit, ir, isa, CodegenResult, Context};
use cranelift_codegen::{binemit, ir, isa, isa::unwind::UnwindInfo};
use cranelift_entity::PrimaryMap;
use cranelift_wasm::{DefinedFuncIndex, FuncIndex, WasmError};
use serde::{Deserialize, Serialize};
use std::ops::Range;
use thiserror::Error;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct FDERelocEntry(pub i64, pub usize, pub u8);
/// Relocation entry for unwind info.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct CompiledFunctionUnwindInfoReloc {
/// Entry offset in the code block.
pub offset: u32,
/// Entry addend relative to the code block.
pub addend: u32,
}
/// Compiled function unwind information.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub enum CompiledFunctionUnwindInfo {
/// No info.
None,
/// Windows UNWIND_INFO.
Windows(Vec<u8>),
/// Frame layout info.
FrameLayout(Vec<u8>, usize, Vec<FDERelocEntry>),
}
impl CompiledFunctionUnwindInfo {
/// Constructs unwind info object.
pub fn new(isa: &dyn isa::TargetIsa, context: &Context) -> CodegenResult<Self> {
use cranelift_codegen::binemit::{
FrameUnwindKind, FrameUnwindOffset, FrameUnwindSink, Reloc,
};
use cranelift_codegen::isa::CallConv;
struct Sink(Vec<u8>, usize, Vec<FDERelocEntry>);
impl FrameUnwindSink for Sink {
fn len(&self) -> FrameUnwindOffset {
self.0.len()
}
fn bytes(&mut self, b: &[u8]) {
self.0.extend_from_slice(b);
}
fn reserve(&mut self, len: usize) {
self.0.reserve(len)
}
fn reloc(&mut self, r: Reloc, off: FrameUnwindOffset) {
self.2.push(FDERelocEntry(
0,
off,
match r {
Reloc::Abs4 => 4,
Reloc::Abs8 => 8,
_ => {
panic!("unexpected reloc type");
}
},
))
}
fn set_entry_offset(&mut self, off: FrameUnwindOffset) {
self.1 = off;
}
}
let kind = match context.func.signature.call_conv {
CallConv::SystemV | CallConv::Fast | CallConv::Cold => FrameUnwindKind::Libunwind,
CallConv::WindowsFastcall => FrameUnwindKind::Fastcall,
_ => {
return Ok(CompiledFunctionUnwindInfo::None);
}
};
let mut sink = Sink(Vec::new(), 0, Vec::new());
context.emit_unwind_info(isa, kind, &mut sink)?;
let Sink(data, offset, relocs) = sink;
if data.is_empty() {
return Ok(CompiledFunctionUnwindInfo::None);
}
let info = match kind {
FrameUnwindKind::Fastcall => CompiledFunctionUnwindInfo::Windows(data),
FrameUnwindKind::Libunwind => {
CompiledFunctionUnwindInfo::FrameLayout(data, offset, relocs)
}
};
Ok(info)
}
/// Returns true if there is no unwind info data.
pub fn is_empty(&self) -> bool {
match self {
CompiledFunctionUnwindInfo::None => true,
CompiledFunctionUnwindInfo::Windows(d) => d.is_empty(),
CompiledFunctionUnwindInfo::FrameLayout(c, _, _) => c.is_empty(),
}
}
/// Returns the size of the serialized unwind info.
pub fn len(&self) -> usize {
match self {
CompiledFunctionUnwindInfo::None => 0,
CompiledFunctionUnwindInfo::Windows(d) => d.len(),
CompiledFunctionUnwindInfo::FrameLayout(c, _, _) => c.len(),
}
}
/// Serializes data into byte array.
pub fn serialize(&self, dest: &mut [u8], relocs: &mut Vec<CompiledFunctionUnwindInfoReloc>) {
match self {
CompiledFunctionUnwindInfo::None => (),
CompiledFunctionUnwindInfo::Windows(d) => {
dest.copy_from_slice(d);
}
CompiledFunctionUnwindInfo::FrameLayout(code, _fde_offset, r) => {
dest.copy_from_slice(code);
r.iter().for_each(move |r| {
assert_eq!(r.2, 8);
relocs.push(CompiledFunctionUnwindInfoReloc {
offset: r.1 as u32,
addend: r.0 as u32,
})
});
}
}
}
}
/// Compiled function: machine code body, jump table offsets, and unwind information.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct CompiledFunction {
@@ -146,7 +21,7 @@ pub struct CompiledFunction {
pub jt_offsets: ir::JumpTableOffsets,
/// The unwind information.
pub unwind_info: CompiledFunctionUnwindInfo,
pub unwind_info: Option<UnwindInfo>,
}
type Functions = PrimaryMap<DefinedFuncIndex, CompiledFunction>;
@@ -167,15 +42,15 @@ impl Compilation {
/// Allocates the compilation result with the given function bodies.
pub fn from_buffer(
buffer: Vec<u8>,
functions: impl IntoIterator<Item = (Range<usize>, ir::JumpTableOffsets, Range<usize>)>,
functions: impl IntoIterator<Item = (Range<usize>, ir::JumpTableOffsets)>,
) -> Self {
Self::new(
functions
.into_iter()
.map(|(body_range, jt_offsets, unwind_range)| CompiledFunction {
.map(|(body_range, jt_offsets)| CompiledFunction {
body: buffer[body_range].to_vec(),
jt_offsets,
unwind_info: CompiledFunctionUnwindInfo::Windows(buffer[unwind_range].to_vec()),
unwind_info: None, // not implemented for lightbeam currently
})
.collect(),
)

View File

@@ -3,10 +3,8 @@
use crate::address_map::{FunctionAddressMap, InstructionAddressMap};
use crate::cache::{ModuleCacheDataTupleType, ModuleCacheEntry};
use crate::compilation::{
Compilation, CompileError, CompiledFunction, CompiledFunctionUnwindInfo, Relocation,
RelocationTarget, TrapInformation,
Compilation, CompileError, CompiledFunction, Relocation, RelocationTarget, TrapInformation,
};
use crate::frame_layout::FrameLayout;
use crate::func_environ::{get_func_name, FuncEnvironment};
use crate::{CacheConfig, FunctionBodyData, ModuleLocal, ModuleTranslation, Tunables};
use cranelift_codegen::ir::{self, ExternalName};
@@ -154,38 +152,6 @@ fn get_function_address_map<'data>(
}
}
fn get_frame_layout(
context: &Context,
isa: &dyn isa::TargetIsa,
) -> (
Box<[ir::FrameLayoutChange]>,
Box<[(usize, ir::FrameLayoutChange)]>,
) {
let func = &context.func;
assert!(func.frame_layout.is_some(), "expected func.frame_layout");
let mut blocks = func.layout.blocks().collect::<Vec<_>>();
blocks.sort_by_key(|b| func.offsets[*b]); // Ensure inst offsets always increase
let encinfo = isa.encoding_info();
let mut last_offset = 0;
let mut commands = Vec::new();
for b in blocks {
for (offset, inst, size) in func.inst_offsets(b, &encinfo) {
if let Some(cmds) = func.frame_layout.as_ref().unwrap().instructions.get(&inst) {
let address_offset = (offset + size) as usize;
assert!(last_offset < address_offset);
for cmd in cmds.iter() {
commands.push((address_offset, cmd.clone()));
}
last_offset = address_offset;
}
}
}
let initial = func.frame_layout.as_ref().unwrap().initial.clone();
(initial, commands.into_boxed_slice())
}
/// A compiler that compiles a WebAssembly module with Cranelift, translating the Wasm to Cranelift IR,
/// optimizing it and then translating to assembly.
pub struct Cranelift;
@@ -224,7 +190,6 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
let mut value_ranges = PrimaryMap::with_capacity(env.function_body_inputs.len());
let mut stack_slots = PrimaryMap::with_capacity(env.function_body_inputs.len());
let mut traps = PrimaryMap::with_capacity(env.function_body_inputs.len());
let mut frame_layouts = PrimaryMap::with_capacity(env.function_body_inputs.len());
env.function_body_inputs
.into_iter()
@@ -235,7 +200,6 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
let mut context = Context::new();
context.func.name = get_func_name(func_index);
context.func.signature = env.local.signatures[env.local.functions[func_index]].clone();
context.func.collect_frame_layout_info();
if env.tunables.debug_info {
context.func.collect_debug_info();
}
@@ -264,23 +228,12 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
CompileError::Codegen(pretty_error(&context.func, Some(isa), error))
})?;
let unwind_info = CompiledFunctionUnwindInfo::new(isa, &context).map_err(|error| {
let unwind_info = context.create_unwind_info(isa).map_err(|error| {
CompileError::Codegen(pretty_error(&context.func, Some(isa), error))
})?;
let address_transform = get_function_address_map(&context, input, code_buf.len(), isa);
let frame_layout = if env.tunables.debug_info {
let (initial_commands, commands) = get_frame_layout(&context, isa);
Some(FrameLayout {
call_conv: context.func.signature.call_conv,
initial_commands,
commands,
})
} else {
None
};
let ranges = if env.tunables.debug_info {
let ranges = context.build_value_labels_ranges(isa).map_err(|error| {
CompileError::Codegen(pretty_error(&context.func, Some(isa), error))
@@ -295,7 +248,6 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
context.func.jt_offsets,
reloc_sink.func_relocs,
address_transform,
frame_layout,
ranges,
context.func.stack_slots,
trap_sink.traps,
@@ -310,7 +262,6 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
func_jt_offsets,
relocs,
address_transform,
frame_layout,
ranges,
sss,
function_traps,
@@ -326,9 +277,6 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
value_ranges.push(ranges.unwrap_or_default());
stack_slots.push(sss);
traps.push(function_traps);
if let Some(frame_layout) = frame_layout {
frame_layouts.push(frame_layout);
}
},
);
@@ -341,7 +289,6 @@ fn compile(env: CompileEnv<'_>) -> Result<ModuleCacheDataTupleType, CompileError
value_ranges,
stack_slots,
traps,
frame_layouts,
))
}

View File

@@ -13,7 +13,7 @@ pub mod settings {
}
pub mod isa {
pub use cranelift_codegen::isa::{CallConv, RegUnit, TargetFrontendConfig, TargetIsa};
pub use cranelift_codegen::isa::{unwind, CallConv, RegUnit, TargetFrontendConfig, TargetIsa};
}
pub mod entity {

View File

@@ -1,21 +0,0 @@
use cranelift_codegen::isa::CallConv;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::DefinedFuncIndex;
use serde::{Deserialize, Serialize};
pub use cranelift_codegen::ir::FrameLayoutChange;
/// Frame layout information: call convention and
/// registers save/restore commands.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct FrameLayout {
/// Call convention.
pub call_conv: CallConv,
/// Frame default/initial commands.
pub initial_commands: Box<[FrameLayoutChange]>,
/// Frame commands at specific offset.
pub commands: Box<[(usize, FrameLayoutChange)]>,
}
/// Functions frame layouts.
pub type FrameLayouts = PrimaryMap<DefinedFuncIndex, FrameLayout>;

View File

@@ -27,7 +27,6 @@
mod address_map;
mod compilation;
mod data_structures;
mod frame_layout;
mod func_environ;
mod module;
mod module_environ;
@@ -47,13 +46,11 @@ pub use crate::address_map::{
pub use crate::cache::create_new_config as cache_create_new_config;
pub use crate::cache::CacheConfig;
pub use crate::compilation::{
Compilation, CompileError, CompiledFunction, CompiledFunctionUnwindInfo,
CompiledFunctionUnwindInfoReloc, Compiler, Relocation, RelocationTarget, Relocations,
TrapInformation, Traps,
Compilation, CompileError, CompiledFunction, Compiler, Relocation, RelocationTarget,
Relocations, TrapInformation, Traps,
};
pub use crate::cranelift::Cranelift;
pub use crate::data_structures::*;
pub use crate::frame_layout::{FrameLayout, FrameLayoutChange, FrameLayouts};
pub use crate::func_environ::BuiltinFunctionIndex;
#[cfg(feature = "lightbeam")]
pub use crate::lightbeam::Lightbeam;

View File

@@ -55,7 +55,7 @@ impl crate::compilation::Compiler for Lightbeam {
let code_section_ranges_and_jt = code_section
.funcs()
.into_iter()
.map(|r| (r, SecondaryMap::new(), 0..0));
.map(|r| (r, SecondaryMap::new()));
Ok((
Compilation::from_buffer(code_section.buffer().to_vec(), code_section_ranges_and_jt),
@@ -64,7 +64,6 @@ impl crate::compilation::Compiler for Lightbeam {
ValueLabelsRanges::new(),
PrimaryMap::new(),
Traps::new(),
PrimaryMap::new(),
))
}
}

View File

@@ -29,6 +29,7 @@ more-asserts = "0.2.1"
anyhow = "1.0"
cfg-if = "0.1.9"
log = "0.4"
gimli = { version = "0.20.0", default-features = false, features = ["write"] }
[target.'cfg(target_os = "windows")'.dependencies]
winapi = { version = "0.3.8", features = ["winnt", "impl-default"] }

View File

@@ -1,37 +1,33 @@
//! Memory management for executable code.
use crate::function_table::FunctionTable;
use crate::unwind::UnwindRegistry;
use region;
use std::mem::ManuallyDrop;
use std::{cmp, mem};
use wasmtime_environ::{Compilation, CompiledFunction};
use wasmtime_environ::{
isa::{unwind::UnwindInfo, TargetIsa},
Compilation, CompiledFunction,
};
use wasmtime_runtime::{Mmap, VMFunctionBody};
struct CodeMemoryEntry {
mmap: ManuallyDrop<Mmap>,
table: ManuallyDrop<FunctionTable>,
registry: ManuallyDrop<UnwindRegistry>,
}
impl CodeMemoryEntry {
fn new() -> Self {
Self {
mmap: ManuallyDrop::new(Mmap::new()),
table: ManuallyDrop::new(FunctionTable::new()),
}
}
fn with_capacity(cap: usize) -> Result<Self, String> {
Ok(Self {
mmap: ManuallyDrop::new(Mmap::with_at_least(cap)?),
table: ManuallyDrop::new(FunctionTable::new()),
})
let mmap = ManuallyDrop::new(Mmap::with_at_least(cap)?);
let registry = ManuallyDrop::new(UnwindRegistry::new(mmap.as_ptr() as usize));
Ok(Self { mmap, registry })
}
}
impl Drop for CodeMemoryEntry {
fn drop(&mut self) {
unsafe {
// Table needs to be freed before mmap.
ManuallyDrop::drop(&mut self.table);
// The registry needs to be dropped before the mmap
ManuallyDrop::drop(&mut self.registry);
ManuallyDrop::drop(&mut self.mmap);
}
}
@@ -39,7 +35,7 @@ impl Drop for CodeMemoryEntry {
/// Memory manager for executable code.
pub struct CodeMemory {
current: CodeMemoryEntry,
current: Option<CodeMemoryEntry>,
entries: Vec<CodeMemoryEntry>,
position: usize,
published: usize,
@@ -54,7 +50,7 @@ impl CodeMemory {
/// Create a new `CodeMemory` instance.
pub fn new() -> Self {
Self {
current: CodeMemoryEntry::new(),
current: None,
entries: Vec::new(),
position: 0,
published: 0,
@@ -70,16 +66,14 @@ impl CodeMemory {
) -> Result<&mut [VMFunctionBody], String> {
let size = Self::function_allocation_size(func);
let (buf, table, start) = self.allocate(size)?;
let (buf, registry, start) = self.allocate(size)?;
let (_, _, _, vmfunc) = Self::copy_function(func, start as u32, buf, table);
let (_, _, vmfunc) = Self::copy_function(func, start as u32, buf, registry);
Ok(vmfunc)
}
/// Allocate a contiguous memory block for a compilation.
///
/// Allocates memory for both the function bodies as well as function unwind data.
pub fn allocate_for_compilation(
&mut self,
compilation: &Compilation,
@@ -88,33 +82,35 @@ impl CodeMemory {
.into_iter()
.fold(0, |acc, func| acc + Self::function_allocation_size(func));
let (mut buf, mut table, start) = self.allocate(total_len)?;
let (mut buf, registry, start) = self.allocate(total_len)?;
let mut result = Vec::with_capacity(compilation.len());
let mut start = start as u32;
for func in compilation.into_iter() {
let (next_start, next_buf, next_table, vmfunc) =
Self::copy_function(func, start, buf, table);
let (next_start, next_buf, vmfunc) = Self::copy_function(func, start, buf, registry);
result.push(vmfunc);
start = next_start;
buf = next_buf;
table = next_table;
}
Ok(result.into_boxed_slice())
}
/// Make all allocated memory executable.
pub fn publish(&mut self) {
pub fn publish(&mut self, isa: &dyn TargetIsa) {
self.push_current(0)
.expect("failed to push current memory map");
for CodeMemoryEntry { mmap: m, table: t } in &mut self.entries[self.published..] {
for CodeMemoryEntry {
mmap: m,
registry: r,
} in &mut self.entries[self.published..]
{
// Remove write access to the pages due to the relocation fixups.
t.publish(m.as_ptr() as u64)
.expect("failed to publish function table");
r.publish(isa)
.expect("failed to publish function unwind registry");
if !m.is_empty() {
unsafe {
@@ -139,73 +135,79 @@ impl CodeMemory {
/// * The offset within the current mmap that the slice starts at
///
/// TODO: Add an alignment flag.
fn allocate(&mut self, size: usize) -> Result<(&mut [u8], &mut FunctionTable, usize), String> {
if self.current.mmap.len() - self.position < size {
fn allocate(&mut self, size: usize) -> Result<(&mut [u8], &mut UnwindRegistry, usize), String> {
assert!(size > 0);
if match &self.current {
Some(e) => e.mmap.len() - self.position < size,
None => true,
} {
self.push_current(cmp::max(0x10000, size))?;
}
let old_position = self.position;
self.position += size;
let e = self.current.as_mut().unwrap();
Ok((
&mut self.current.mmap.as_mut_slice()[old_position..self.position],
&mut self.current.table,
&mut e.mmap.as_mut_slice()[old_position..self.position],
&mut e.registry,
old_position,
))
}
/// Calculates the allocation size of the given compiled function.
fn function_allocation_size(func: &CompiledFunction) -> usize {
if func.unwind_info.is_empty() {
func.body.len()
} else {
// Account for necessary unwind information alignment padding (32-bit)
((func.body.len() + 3) & !3) + func.unwind_info.len()
match &func.unwind_info {
Some(UnwindInfo::WindowsX64(info)) => {
// Windows unwind information is required to be emitted into code memory
// This is because it must be a positive relative offset from the start of the memory
// Account for necessary unwind information alignment padding (32-bit alignment)
((func.body.len() + 3) & !3) + info.emit_size()
}
_ => func.body.len(),
}
}
/// Copies the data of the compiled function to the given buffer.
///
/// This will also add the function to the current function table.
/// This will also add the function to the current unwind registry.
fn copy_function<'a>(
func: &CompiledFunction,
func_start: u32,
buf: &'a mut [u8],
table: &'a mut FunctionTable,
) -> (
u32,
&'a mut [u8],
&'a mut FunctionTable,
&'a mut [VMFunctionBody],
) {
let func_end = func_start + (func.body.len() as u32);
registry: &mut UnwindRegistry,
) -> (u32, &'a mut [u8], &'a mut [VMFunctionBody]) {
let func_len = func.body.len();
let mut func_end = func_start + (func_len as u32);
let (body, remainder) = buf.split_at_mut(func.body.len());
let (body, mut remainder) = buf.split_at_mut(func_len);
body.copy_from_slice(&func.body);
let vmfunc = Self::view_as_mut_vmfunc_slice(body);
if func.unwind_info.is_empty() {
return (func_end, remainder, table, vmfunc);
if let Some(UnwindInfo::WindowsX64(info)) = &func.unwind_info {
// Windows unwind information is written following the function body
// Keep unwind information 32-bit aligned (round up to the nearest 4 byte boundary)
let unwind_start = (func_end + 3) & !3;
let unwind_size = info.emit_size();
let padding = (unwind_start - func_end) as usize;
let (slice, r) = remainder.split_at_mut(padding + unwind_size);
info.emit(&mut slice[padding..]);
func_end = unwind_start + (unwind_size as u32);
remainder = r;
}
// Keep unwind information 32-bit aligned (round up to the nearest 4 byte boundary)
let padding = ((func.body.len() + 3) & !3) - func.body.len();
let (unwind, remainder) = remainder.split_at_mut(padding + func.unwind_info.len());
let mut relocs = Vec::new();
func.unwind_info
.serialize(&mut unwind[padding..], &mut relocs);
if let Some(info) = &func.unwind_info {
registry
.register(func_start, func_len as u32, info)
.expect("failed to register unwind information");
}
let unwind_start = func_end + (padding as u32);
let unwind_end = unwind_start + (func.unwind_info.len() as u32);
relocs.iter_mut().for_each(move |r| {
r.offset += unwind_start;
r.addend += func_start;
});
table.add_function(func_start, func_end, unwind_start, &relocs);
(unwind_end, remainder, table, vmfunc)
(func_end, remainder, vmfunc)
}
/// Converts a mutable slice from u8 to VMFunctionBody.
@@ -215,21 +217,19 @@ impl CodeMemory {
unsafe { &mut *body_ptr }
}
/// Pushes the current Mmap (and function table) and allocates a new Mmap of the given size.
/// Pushes the current entry and allocates a new one with the given size.
fn push_current(&mut self, new_size: usize) -> Result<(), String> {
let previous = mem::replace(
&mut self.current,
if new_size == 0 {
CodeMemoryEntry::new()
None
} else {
CodeMemoryEntry::with_capacity(cmp::max(0x10000, new_size))?
Some(CodeMemoryEntry::with_capacity(cmp::max(0x10000, new_size))?)
},
);
if !previous.mmap.is_empty() {
self.entries.push(previous);
} else {
assert_eq!(previous.table.len(), 0);
if let Some(e) = previous {
self.entries.push(e);
}
self.position = 0;
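
As a worked example of the 4-byte alignment used by function_allocation_size and copy_function above (illustrative numbers only, not taken from the diff):

    fn main() {
        // A 13-byte function body rounds up to offset 16 for its Windows
        // unwind info, leaving 3 bytes of padding, because (len + 3) & !3
        // rounds len up to the next 4-byte boundary.
        let func_len: usize = 13;
        let unwind_start = (func_len + 3) & !3; // 16
        let padding = unwind_start - func_len; // 3
        assert_eq!((unwind_start, padding), (16, 3));
    }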

View File

@@ -14,9 +14,9 @@ use wasmtime_environ::entity::{EntityRef, PrimaryMap};
use wasmtime_environ::isa::{TargetFrontendConfig, TargetIsa};
use wasmtime_environ::wasm::{DefinedFuncIndex, DefinedMemoryIndex, MemoryIndex};
use wasmtime_environ::{
CacheConfig, CompileError, CompiledFunction, CompiledFunctionUnwindInfo, Compiler as _C,
ModuleAddressMap, ModuleMemoryOffset, ModuleTranslation, ModuleVmctxInfo, Relocation,
RelocationTarget, Relocations, Traps, Tunables, VMOffsets,
CacheConfig, CompileError, CompiledFunction, Compiler as _C, ModuleAddressMap,
ModuleMemoryOffset, ModuleTranslation, ModuleVmctxInfo, Relocation, RelocationTarget,
Relocations, Traps, Tunables, VMOffsets,
};
use wasmtime_runtime::{
InstantiationError, SignatureRegistry, VMFunctionBody, VMSharedSignatureIndex, VMTrampoline,
@@ -46,7 +46,6 @@ pub enum CompilationStrategy {
/// TODO: Consider using cranelift-module.
pub struct Compiler {
isa: Box<dyn TargetIsa>,
code_memory: CodeMemory,
signatures: SignatureRegistry,
strategy: CompilationStrategy,
@@ -102,34 +101,27 @@ impl Compiler {
translation: &ModuleTranslation,
debug_data: Option<DebugInfoData>,
) -> Result<Compilation, SetupError> {
let (
compilation,
relocations,
address_transform,
value_ranges,
stack_slots,
traps,
frame_layouts,
) = match self.strategy {
// For now, interpret `Auto` as `Cranelift` since that's the most stable
// implementation.
CompilationStrategy::Auto | CompilationStrategy::Cranelift => {
wasmtime_environ::cranelift::Cranelift::compile_module(
translation,
&*self.isa,
&self.cache_config,
)
let (compilation, relocations, address_transform, value_ranges, stack_slots, traps) =
match self.strategy {
// For now, interpret `Auto` as `Cranelift` since that's the most stable
// implementation.
CompilationStrategy::Auto | CompilationStrategy::Cranelift => {
wasmtime_environ::cranelift::Cranelift::compile_module(
translation,
&*self.isa,
&self.cache_config,
)
}
#[cfg(feature = "lightbeam")]
CompilationStrategy::Lightbeam => {
wasmtime_environ::lightbeam::Lightbeam::compile_module(
translation,
&*self.isa,
&self.cache_config,
)
}
}
#[cfg(feature = "lightbeam")]
CompilationStrategy::Lightbeam => {
wasmtime_environ::lightbeam::Lightbeam::compile_module(
translation,
&*self.isa,
&self.cache_config,
)
}
}
.map_err(SetupError::Compile)?;
.map_err(SetupError::Compile)?;
// Allocate all of the compiled functions into executable memory,
// copying over their contents.
@@ -202,8 +194,8 @@ impl Compiler {
&module_vmctx_info,
&address_transform,
&value_ranges,
&frame_layouts,
&funcs,
&compilation,
)
.map_err(SetupError::DebugInfo)?;
Some(bytes)
@@ -227,7 +219,7 @@ impl Compiler {
/// Make memory containing compiled code executable.
pub(crate) fn publish_compiled_code(&mut self) {
self.code_memory.publish();
self.code_memory.publish(self.isa.as_ref());
}
/// Shared signature registry.
@@ -264,7 +256,6 @@ pub fn make_trampoline(
let mut context = Context::new();
context.func = ir::Function::with_name_signature(ir::ExternalName::user(0, 0), wrapper_sig);
context.func.collect_frame_layout_info();
{
let mut builder = FunctionBuilder::new(&mut context.func, fn_builder_ctx);
@@ -343,7 +334,7 @@ pub fn make_trampoline(
)))
})?;
let unwind_info = CompiledFunctionUnwindInfo::new(isa, &context).map_err(|error| {
let unwind_info = context.create_unwind_info(isa).map_err(|error| {
SetupError::Compile(CompileError::Codegen(pretty_error(
&context.func,
Some(isa),
@@ -369,6 +360,10 @@ fn allocate_functions(
code_memory: &mut CodeMemory,
compilation: &wasmtime_environ::Compilation,
) -> Result<PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>, String> {
if compilation.is_empty() {
return Ok(PrimaryMap::new());
}
let fat_ptrs = code_memory.allocate_for_compilation(compilation)?;
// Second, create a PrimaryMap from result vector of pointers.
@@ -377,6 +372,7 @@ fn allocate_functions(
let fat_ptr: *mut [VMFunctionBody] = fat_ptrs[i];
result.push(fat_ptr);
}
Ok(result)
}

View File

@@ -1,224 +0,0 @@
//! Runtime function table.
//!
//! This module is primarily used to track JIT functions on Windows for stack walking and unwind.
type FunctionTableReloc = wasmtime_environ::CompiledFunctionUnwindInfoReloc;
/// Represents a runtime function table.
///
/// This is used to register JIT code with the operating system to enable stack walking and unwinding.
#[cfg(all(target_os = "windows", target_arch = "x86_64"))]
pub(crate) struct FunctionTable {
functions: Vec<winapi::um::winnt::RUNTIME_FUNCTION>,
published: bool,
}
#[cfg(all(target_os = "windows", target_arch = "x86_64"))]
impl FunctionTable {
/// Creates a new function table.
pub fn new() -> Self {
Self {
functions: Vec::new(),
published: false,
}
}
/// Returns the number of functions in the table, also referred to as its 'length'.
pub fn len(&self) -> usize {
self.functions.len()
}
/// Adds a function to the table based off of the start offset, end offset, and unwind offset.
///
/// The offsets are from the "module base", which is provided when the table is published.
pub fn add_function(
&mut self,
start: u32,
end: u32,
unwind: u32,
_relocs: &[FunctionTableReloc],
) {
assert_eq!(_relocs.len(), 0);
use winapi::um::winnt;
assert!(!self.published, "table has already been published");
let mut entry = winnt::RUNTIME_FUNCTION::default();
entry.BeginAddress = start;
entry.EndAddress = end;
unsafe {
*entry.u.UnwindInfoAddress_mut() = unwind;
}
self.functions.push(entry);
}
/// Publishes the function table using the given base address.
///
/// A published function table will automatically be deleted when it is dropped.
pub fn publish(&mut self, base_address: u64) -> Result<(), String> {
use winapi::um::winnt;
if self.published {
return Err("function table was already published".into());
}
self.published = true;
if self.functions.is_empty() {
return Ok(());
}
unsafe {
// Windows heap allocations are 32-bit aligned, but assert just in case
assert_eq!(
(self.functions.as_mut_ptr() as u64) % 4,
0,
"function table allocation was not aligned"
);
if winnt::RtlAddFunctionTable(
self.functions.as_mut_ptr(),
self.functions.len() as u32,
base_address,
) == 0
{
return Err("failed to add function table".into());
}
}
Ok(())
}
}
#[cfg(target_os = "windows")]
impl Drop for FunctionTable {
fn drop(&mut self) {
use winapi::um::winnt;
if self.published {
unsafe {
winnt::RtlDeleteFunctionTable(self.functions.as_mut_ptr());
}
}
}
}
/// Represents a runtime function table.
///
/// This is used to register JIT code with the operating system to enable stack walking and unwinding.
#[cfg(unix)]
pub(crate) struct FunctionTable {
functions: Vec<u32>,
relocs: Vec<FunctionTableReloc>,
published: Option<Vec<usize>>,
}
#[cfg(unix)]
impl FunctionTable {
/// Creates a new function table.
pub fn new() -> Self {
Self {
functions: Vec::new(),
relocs: Vec::new(),
published: None,
}
}
/// Returns the number of functions in the table, also referred to as its 'length'.
pub fn len(&self) -> usize {
self.functions.len()
}
/// Adds a function to the table based off of the start offset, end offset, and unwind offset.
///
/// The offsets are from the "module base", which is provided when the table is published.
pub fn add_function(
&mut self,
_start: u32,
_end: u32,
unwind: u32,
relocs: &[FunctionTableReloc],
) {
assert!(self.published.is_none(), "table has already been published");
self.functions.push(unwind);
self.relocs.extend_from_slice(relocs);
}
/// Publishes the function table using the given base address.
///
/// A published function table will automatically be deleted when it is dropped.
pub fn publish(&mut self, base_address: u64) -> Result<(), String> {
if self.published.is_some() {
return Err("function table was already published".into());
}
if self.functions.is_empty() {
assert_eq!(self.relocs.len(), 0);
self.published = Some(vec![]);
return Ok(());
}
extern "C" {
// libunwind import
fn __register_frame(fde: *const u8);
}
for reloc in self.relocs.iter() {
let addr = base_address + (reloc.offset as u64);
let target = base_address + (reloc.addend as u64);
unsafe {
std::ptr::write(addr as *mut u64, target);
}
}
let mut fdes = Vec::with_capacity(self.functions.len());
for unwind_offset in self.functions.iter() {
let addr = base_address + (*unwind_offset as u64);
let off = unsafe { std::ptr::read::<u32>(addr as *const u32) } as usize + 4;
let fde = (addr + off as u64) as usize;
unsafe {
__register_frame(fde as *const _);
}
fdes.push(fde);
}
self.published = Some(fdes);
Ok(())
}
}
#[cfg(unix)]
impl Drop for FunctionTable {
fn drop(&mut self) {
extern "C" {
// libunwind import
fn __deregister_frame(fde: *const u8);
}
if let Some(published) = &self.published {
unsafe {
// I'm not really sure why, but it appears to be way faster to
// unregister frames in reverse order rather than in-order. This
// way we're deregistering in LIFO order, and maybe there's some
// vec shifting or something like that in libgcc?
//
// Locally on Ubuntu 18.04 a wasm module with 40k empty
// functions takes 0.1s to compile and drop with reverse
// iteration. With forward iteration it takes 3s to compile and
// drop!
//
// Poking around libgcc sources seems to indicate that some sort
// of linked list is being traversed... We may need to figure
// out something else for backtraces in the future since this
// API may not be long-lived to keep calling.
for fde in published.iter().rev() {
__deregister_frame(*fde as *const _);
}
}
}
}
}

View File

@@ -23,11 +23,11 @@
mod code_memory;
mod compiler;
mod function_table;
mod imports;
mod instantiate;
mod link;
mod resolver;
mod unwind;
pub mod native;
pub mod trampoline;

crates/jit/src/unwind.rs (new file, 11 additions)
View File

@@ -0,0 +1,11 @@
cfg_if::cfg_if! {
if #[cfg(all(windows, target_arch = "x86_64"))] {
mod winx64;
pub use self::winx64::*;
} else if #[cfg(unix)] {
mod systemv;
pub use self::systemv::*;
} else {
compile_error!("unsupported target platform for unwind");
}
}

View File

@@ -0,0 +1,150 @@
//! Module for System V ABI unwind registry.
use anyhow::{bail, Result};
use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
use gimli::{
write::{Address, EhFrame, EndianVec, FrameTable, Writer},
RunTimeEndian,
};
/// Represents a registry of function unwind information for System V ABI.
pub struct UnwindRegistry {
base_address: usize,
functions: Vec<gimli::write::FrameDescriptionEntry>,
frame_table: Vec<u8>,
registrations: Vec<usize>,
published: bool,
}
extern "C" {
// libunwind import
fn __register_frame(fde: *const u8);
fn __deregister_frame(fde: *const u8);
}
impl UnwindRegistry {
/// Creates a new unwind registry with the given base address.
pub fn new(base_address: usize) -> Self {
Self {
base_address,
functions: Vec::new(),
frame_table: Vec::new(),
registrations: Vec::new(),
published: false,
}
}
/// Registers a function given the start offset, length, and unwind information.
pub fn register(&mut self, func_start: u32, _func_len: u32, info: &UnwindInfo) -> Result<()> {
if self.published {
bail!("unwind registry has already been published");
}
match info {
UnwindInfo::SystemV(info) => {
self.functions.push(info.to_fde(Address::Constant(
self.base_address as u64 + func_start as u64,
)));
}
_ => bail!("unsupported unwind information"),
}
Ok(())
}
/// Publishes all registered functions.
pub fn publish(&mut self, isa: &dyn TargetIsa) -> Result<()> {
if self.published {
bail!("unwind registry has already been published");
}
if self.functions.is_empty() {
self.published = true;
return Ok(());
}
self.set_frame_table(isa)?;
unsafe {
self.register_frames();
}
self.published = true;
Ok(())
}
fn set_frame_table(&mut self, isa: &dyn TargetIsa) -> Result<()> {
let mut table = FrameTable::default();
let cie_id = table.add_cie(match isa.create_systemv_cie() {
Some(cie) => cie,
None => bail!("ISA does not support System V unwind information"),
});
let functions = std::mem::replace(&mut self.functions, Vec::new());
for func in functions {
table.add_fde(cie_id, func);
}
let mut eh_frame = EhFrame(EndianVec::new(RunTimeEndian::default()));
table.write_eh_frame(&mut eh_frame).unwrap();
// GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
eh_frame.0.write_u32(0).unwrap();
self.frame_table = eh_frame.0.into_vec();
Ok(())
}
unsafe fn register_frames(&mut self) {
cfg_if::cfg_if! {
if #[cfg(target_os = "macos")] {
// On macOS, `__register_frame` takes a pointer to a single FDE
let start = self.frame_table.as_ptr();
let end = start.add(self.frame_table.len());
let mut current = start;
// Walk all of the entries in the frame table and register them
while current < end {
let len = std::ptr::read::<u32>(current as *const u32) as usize;
// Skip over the CIE
if current != start {
__register_frame(current);
self.registrations.push(current as usize);
}
// Move to the next table entry (+4 because the length itself is not inclusive)
current = current.add(len + 4);
}
} else {
// On other platforms, `__register_frame` will walk the FDEs until an entry of length 0
let ptr = self.frame_table.as_ptr();
__register_frame(ptr);
self.registrations.push(ptr as usize);
}
}
}
}
impl Drop for UnwindRegistry {
fn drop(&mut self) {
if self.published {
unsafe {
// libgcc stores the frame entries as a linked list in decreasing sort order
// based on the PC value of the registered entry.
//
// As we store the registrations in increasing order, it would be O(N^2) to
// deregister in that order.
//
// To ensure that we just pop off the first element in the list upon every
// deregistration, walk our list of registrations backwards.
for fde in self.registrations.iter().rev() {
__deregister_frame(*fde as *const _);
}
}
}
}
}

View File

@@ -0,0 +1,91 @@
//! Module for Windows x64 ABI unwind registry.
use anyhow::{bail, Result};
use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
use winapi::um::winnt;
/// Represents a registry of function unwind information for Windows x64 ABI.
pub struct UnwindRegistry {
base_address: usize,
functions: Vec<winnt::RUNTIME_FUNCTION>,
published: bool,
}
impl UnwindRegistry {
/// Creates a new unwind registry with the given base address.
pub fn new(base_address: usize) -> Self {
Self {
base_address,
functions: Vec::new(),
published: false,
}
}
/// Registers a function given the start offset, length, and unwind information.
pub fn register(&mut self, func_start: u32, func_len: u32, info: &UnwindInfo) -> Result<()> {
if self.published {
bail!("unwind registry has already been published");
}
match info {
UnwindInfo::WindowsX64(_) => {
let mut entry = winnt::RUNTIME_FUNCTION::default();
entry.BeginAddress = func_start;
entry.EndAddress = func_start + func_len;
// The unwind information should be immediately following the function
// with padding for 4 byte alignment
unsafe {
*entry.u.UnwindInfoAddress_mut() = (entry.EndAddress + 3) & !3;
}
self.functions.push(entry);
Ok(())
}
_ => bail!("unsupported unwind information"),
}
}
/// Publishes all registered functions.
pub fn publish(&mut self, _isa: &dyn TargetIsa) -> Result<()> {
if self.published {
bail!("unwind registry has already been published");
}
self.published = true;
if !self.functions.is_empty() {
// Windows heap allocations are 32-bit aligned, but assert just in case
assert_eq!(
(self.functions.as_mut_ptr() as u64) % 4,
0,
"function table allocation was not aligned"
);
unsafe {
if winnt::RtlAddFunctionTable(
self.functions.as_mut_ptr(),
self.functions.len() as u32,
self.base_address as u64,
) == 0
{
bail!("failed to register function table");
}
}
}
Ok(())
}
}
impl Drop for UnwindRegistry {
fn drop(&mut self) {
if self.published {
unsafe {
winnt::RtlDeleteFunctionTable(self.functions.as_mut_ptr());
}
}
}
}
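
Putting the pieces together, the lifecycle CodeMemory drives for either registry looks roughly like this (editor's sketch reusing names from the hunks above, with mmap, func, func_start, and isa assumed from that context):

    // Created per memory map, keyed to its base address (see code_memory.rs above).
    let mut registry = UnwindRegistry::new(mmap.as_ptr() as usize);

    // Registered once per copied function that carries unwind information.
    if let Some(info) = &func.unwind_info {
        registry
            .register(func_start, func.body.len() as u32, info)
            .expect("failed to register unwind information");
    }

    // Published once, after the functions are copied in and the pages are made executable.
    registry
        .publish(isa)
        .expect("failed to publish function unwind registry");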