Refactor unwind generation in Cranelift.
This commit makes the following changes to unwind information generation in Cranelift:

* Remove the frame layout change implementation in favor of processing the prologue and epilogue instructions when unwind information is requested. This work is no longer performed for Windows, which didn't utilize it, and it simplifies the prologue and epilogue generation code.
* Remove the unwind sink implementation that required each piece of unwind information to be represented in its final form. For FDEs, this meant writing a complete frame table per function, wasting roughly 20 bytes per function on duplicate CIEs. Cranelift users can now collect the unwind information and write it as a single frame table.
* For the System V calling convention, stop storing the unwind information in code memory (only the Windows ABI requires that). This allows for more compact code memory for modules with many functions.
* Delete some duplicate code relating to frame table generation. Users can now simply use gimli to create a frame table from each function's unwind information (a sketch of that workflow follows below).

Fixes #1181.
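A minimal sketch of that single-frame-table workflow, assuming the embedder has already compiled its functions and knows their final code addresses: collect each function's System V unwind information, convert it to an FDE, and let gimli write one frame table with a single shared CIE. The gimli calls (FrameTable, add_cie/add_fde, write_eh_frame) are gimli's write API; the Cranelift items (the UnwindInfo::SystemV variant, to_fde, and the create_cie helper added in systemv.rs below) come from this commit, and the exact public paths an embedder imports them from are an assumption here.

use cranelift_codegen::ir::Function;
use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa}; // assumed re-export paths
use gimli::write::{Address, CommonInformationEntry, EhFrame, EndianVec, FrameTable};
use gimli::LittleEndian;

/// Writes one `.eh_frame`-style frame table for a whole module, sharing a
/// single CIE across every function's FDE instead of duplicating it.
fn build_frame_table(
    isa: &dyn TargetIsa,
    cie: CommonInformationEntry, // e.g. obtained from `create_cie()` in systemv.rs below
    funcs: &[(Function, u64)],   // compiled functions and their final code addresses
) -> Vec<u8> {
    let mut table = FrameTable::default();
    let cie_id = table.add_cie(cie);

    for (func, code_addr) in funcs {
        // Per-function unwind information; System V functions yield the `SystemV` variant.
        if let Some(UnwindInfo::SystemV(info)) = isa
            .create_unwind_info(func)
            .expect("can create unwind info")
        {
            table.add_fde(cie_id, info.to_fde(Address::Constant(*code_addr)));
        }
    }

    let mut eh_frame = EhFrame(EndianVec::new(LittleEndian));
    table
        .write_eh_frame(&mut eh_frame)
        .expect("can write the frame table");
    eh_frame.0.into_vec()
}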
cranelift/codegen/src/isa/x86/unwind/systemv.rs (new file, 485 lines)
@@ -0,0 +1,485 @@
//! Unwind information for System V ABI (x86-64).

use crate::ir::{Function, Inst, InstructionData, Opcode, Value};
use crate::isa::{
    unwind::systemv::{CallFrameInstruction, RegisterMappingError, UnwindInfo},
    x86::registers::RU,
    CallConv, RegUnit, TargetIsa,
};
use crate::result::CodegenResult;
use alloc::vec::Vec;
use gimli::{write::CommonInformationEntry, Encoding, Format, Register, X86_64};

/// Creates a new x86-64 common information entry (CIE).
pub fn create_cie() -> CommonInformationEntry {
    use gimli::write::CallFrameInstruction;

    let mut entry = CommonInformationEntry::new(
        Encoding {
            address_size: 8,
            format: Format::Dwarf32,
            version: 1,
        },
        1,  // Code alignment factor
        -8, // Data alignment factor
        X86_64::RA,
    );

    // Every frame will start with the call frame address (CFA) at RSP+8
    // It is +8 to account for the push of the return address by the call instruction
    entry.add_instruction(CallFrameInstruction::Cfa(X86_64::RSP, 8));

    // Every frame will start with the return address at RSP (CFA-8 = RSP+8-8 = RSP)
    entry.add_instruction(CallFrameInstruction::Offset(X86_64::RA, -8));

    entry
}

/// Map Cranelift registers to their corresponding Gimli registers.
pub fn map_reg(isa: &dyn TargetIsa, reg: RegUnit) -> Result<Register, RegisterMappingError> {
    if isa.name() != "x86" || isa.pointer_bits() != 64 {
        return Err(RegisterMappingError::UnsupportedArchitecture);
    }

    // Mapping from https://github.com/bytecodealliance/cranelift/pull/902 by @iximeow
    const X86_GP_REG_MAP: [gimli::Register; 16] = [
        X86_64::RAX,
        X86_64::RCX,
        X86_64::RDX,
        X86_64::RBX,
        X86_64::RSP,
        X86_64::RBP,
        X86_64::RSI,
        X86_64::RDI,
        X86_64::R8,
        X86_64::R9,
        X86_64::R10,
        X86_64::R11,
        X86_64::R12,
        X86_64::R13,
        X86_64::R14,
        X86_64::R15,
    ];
    const X86_XMM_REG_MAP: [gimli::Register; 16] = [
        X86_64::XMM0,
        X86_64::XMM1,
        X86_64::XMM2,
        X86_64::XMM3,
        X86_64::XMM4,
        X86_64::XMM5,
        X86_64::XMM6,
        X86_64::XMM7,
        X86_64::XMM8,
        X86_64::XMM9,
        X86_64::XMM10,
        X86_64::XMM11,
        X86_64::XMM12,
        X86_64::XMM13,
        X86_64::XMM14,
        X86_64::XMM15,
    ];

    let reg_info = isa.register_info();
    let bank = reg_info
        .bank_containing_regunit(reg)
        .ok_or_else(|| RegisterMappingError::MissingBank)?;
    match bank.name {
        "IntRegs" => {
            // x86 GP registers have a weird mapping to DWARF registers, so we use a
            // lookup table.
            Ok(X86_GP_REG_MAP[(reg - bank.first_unit) as usize])
        }
        "FloatRegs" => Ok(X86_XMM_REG_MAP[(reg - bank.first_unit) as usize]),
        _ => Err(RegisterMappingError::UnsupportedRegisterBank(bank.name)),
    }
}

struct InstructionBuilder<'a> {
    func: &'a Function,
    isa: &'a dyn TargetIsa,
    cfa_offset: i32,
    frame_register: Option<RegUnit>,
    instructions: Vec<(u32, CallFrameInstruction)>,
    stack_size: Option<i32>,
    epilogue_pop_offsets: Vec<u32>,
}

impl<'a> InstructionBuilder<'a> {
    fn new(func: &'a Function, isa: &'a dyn TargetIsa, frame_register: Option<RegUnit>) -> Self {
        Self {
            func,
            isa,
            cfa_offset: 8, // CFA offset starts at 8 to account for the return address on the stack
            frame_register,
            instructions: Vec::new(),
            stack_size: None,
            epilogue_pop_offsets: Vec::new(),
        }
    }

    fn push_reg(&mut self, offset: u32, arg: Value) {
        self.cfa_offset += 8;

        let reg = self.func.locations[arg].unwrap_reg();

        // Update the CFA if this is the save of the frame pointer register or if a frame pointer isn't being used
        // When using a frame pointer, we only need to update the CFA to account for the push of the frame pointer itself
        if match self.frame_register {
            Some(fp) => reg == fp,
            None => true,
        } {
            self.instructions
                .push((offset, CallFrameInstruction::CfaOffset(self.cfa_offset)));
        }

        // Pushes in the prologue are register saves, so record an offset of the save
        self.instructions.push((
            offset,
            CallFrameInstruction::Offset(
                map_reg(self.isa, reg)
                    .expect("a register mapping from cranelift to gimli")
                    .0,
                -self.cfa_offset,
            ),
        ));
    }

    fn adjust_sp_down(&mut self, offset: u32) {
        // Don't adjust the CFA if we're using a frame pointer
        if self.frame_register.is_some() {
            return;
        }

        self.cfa_offset += self
            .stack_size
            .expect("expected a previous stack size instruction");
        self.instructions
            .push((offset, CallFrameInstruction::CfaOffset(self.cfa_offset)));
    }

    fn adjust_sp_down_imm(&mut self, offset: u32, imm: i64) {
        assert!(imm <= core::u32::MAX as i64);

        // Don't adjust the CFA if we're using a frame pointer
        if self.frame_register.is_some() {
            return;
        }

        self.cfa_offset += imm as i32;
        self.instructions
            .push((offset, CallFrameInstruction::CfaOffset(self.cfa_offset)));
    }

    fn adjust_sp_up_imm(&mut self, offset: u32, imm: i64) {
        assert!(imm <= core::u32::MAX as i64);

        // Don't adjust the CFA if we're using a frame pointer
        if self.frame_register.is_some() {
            return;
        }

        self.cfa_offset -= imm as i32;
        self.instructions
            .push((offset, CallFrameInstruction::CfaOffset(self.cfa_offset)));
    }

    fn move_reg(&mut self, offset: u32, src: RegUnit, dst: RegUnit) {
        if let Some(fp) = self.frame_register {
            // Check for change in CFA register (RSP is always the starting CFA)
            if src == (RU::rsp as RegUnit) && dst == fp {
                self.instructions.push((
                    offset,
                    CallFrameInstruction::CfaRegister(
                        map_reg(self.isa, dst)
                            .expect("a register mapping from cranelift to gimli")
                            .0,
                    ),
                ));
            }
        }
    }

    fn prologue_imm_const(&mut self, imm: i64) {
        assert!(imm <= core::u32::MAX as i64);
        assert!(self.stack_size.is_none());

        // This instruction should only appear in a prologue to pass an
        // argument of the stack size to a stack check function.
        // Record the stack size so we know what it is when we encounter the adjustment
        // instruction (which will adjust via the register assigned to this instruction).
        self.stack_size = Some(imm as i32);
    }

    fn ret(&mut self, inst: Inst) {
        let args = self.func.dfg.inst_args(inst);

        for (i, arg) in args.iter().rev().enumerate() {
            // Only walk back the args for the pop instructions encountered
            if i >= self.epilogue_pop_offsets.len() {
                break;
            }

            self.cfa_offset -= 8;
            let reg = self.func.locations[*arg].unwrap_reg();

            // Update the CFA if this is the restore of the frame pointer register or if a frame pointer isn't being used
            match self.frame_register {
                Some(fp) => {
                    if reg == fp {
                        self.instructions.push((
                            self.epilogue_pop_offsets[i],
                            CallFrameInstruction::Cfa(
                                map_reg(self.isa, RU::rsp as RegUnit)
                                    .expect("a register mapping from cranelift to gimli")
                                    .0,
                                self.cfa_offset,
                            ),
                        ));
                    }
                }
                None => {
                    self.instructions.push((
                        self.epilogue_pop_offsets[i],
                        CallFrameInstruction::CfaOffset(self.cfa_offset),
                    ));

                    // Pops in the epilogue are register restores, so record a "same value" for the register
                    // This isn't necessary when using a frame pointer as the CFA doesn't change for CSR restores
                    self.instructions.push((
                        self.epilogue_pop_offsets[i],
                        CallFrameInstruction::SameValue(
                            map_reg(self.isa, reg)
                                .expect("a register mapping from cranelift to gimli")
                                .0,
                        ),
                    ));
                }
            };
        }

        self.epilogue_pop_offsets.clear();
    }

    fn insert_pop_offset(&mut self, offset: u32) {
        self.epilogue_pop_offsets.push(offset);
    }

    fn remember_state(&mut self, offset: u32) {
        self.instructions
            .push((offset, CallFrameInstruction::RememberState));
    }

    fn restore_state(&mut self, offset: u32) {
        self.instructions
            .push((offset, CallFrameInstruction::RestoreState));
    }

    fn is_prologue_end(&self, inst: Inst) -> bool {
        self.func.prologue_end == Some(inst)
    }

    fn is_epilogue_start(&self, inst: Inst) -> bool {
        self.func.epilogues_start.contains(&inst)
    }
}

pub(crate) fn create_unwind_info(
    func: &Function,
    isa: &dyn TargetIsa,
    frame_register: Option<RegUnit>,
) -> CodegenResult<Option<UnwindInfo>> {
    // Only System V-like calling conventions are supported
    match func.signature.call_conv {
        CallConv::Fast | CallConv::Cold | CallConv::SystemV => {}
        _ => return Ok(None),
    }

    if func.prologue_end.is_none() || isa.name() != "x86" || isa.pointer_bits() != 64 {
        return Ok(None);
    }

    let mut builder = InstructionBuilder::new(func, isa, frame_register);
    let mut in_prologue = true;
    let mut in_epilogue = false;
    let mut len = 0;

    let mut blocks = func.layout.blocks().collect::<Vec<_>>();
    blocks.sort_by_key(|b| func.offsets[*b]);

    for (i, block) in blocks.iter().enumerate() {
        for (offset, inst, size) in func.inst_offsets(*block, &isa.encoding_info()) {
            let offset = offset + size;
            assert!(len <= offset);
            len = offset;

            let is_last_block = i == blocks.len() - 1;

            if in_prologue {
                // Check for prologue end (inclusive)
                in_prologue = !builder.is_prologue_end(inst);
            } else if !in_epilogue && builder.is_epilogue_start(inst) {
                // Now in an epilogue, emit a remember state instruction if not last block
                in_epilogue = true;

                if !is_last_block {
                    builder.remember_state(offset);
                }
            } else if !in_epilogue {
                // Ignore normal instructions
                continue;
            }

            match builder.func.dfg[inst] {
                InstructionData::Unary { opcode, arg } => match opcode {
                    Opcode::X86Push => {
                        builder.push_reg(offset, arg);
                    }
                    Opcode::AdjustSpDown => {
                        builder.adjust_sp_down(offset);
                    }
                    _ => {}
                },
                InstructionData::CopySpecial { src, dst, .. } => {
                    builder.move_reg(offset, src, dst);
                }
                InstructionData::NullAry { opcode } => match opcode {
                    Opcode::X86Pop => {
                        builder.insert_pop_offset(offset);
                    }
                    _ => {}
                },
                InstructionData::UnaryImm { opcode, imm } => match opcode {
                    Opcode::Iconst => {
                        builder.prologue_imm_const(imm.into());
                    }
                    Opcode::AdjustSpDownImm => {
                        builder.adjust_sp_down_imm(offset, imm.into());
                    }
                    Opcode::AdjustSpUpImm => {
                        builder.adjust_sp_up_imm(offset, imm.into());
                    }
                    _ => {}
                },
                InstructionData::MultiAry { opcode, .. } => match opcode {
                    Opcode::Return => {
                        builder.ret(inst);

                        if !is_last_block {
                            builder.restore_state(offset);
                        }

                        in_epilogue = false;
                    }
                    _ => {}
                },
                _ => {}
            };
        }
    }

    Ok(Some(UnwindInfo::new(builder.instructions, len)))
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::{
        types, AbiParam, ExternalName, InstBuilder, Signature, StackSlotData, StackSlotKind,
    };
    use crate::isa::{lookup, CallConv};
    use crate::settings::{builder, Flags};
    use crate::Context;
    use gimli::write::Address;
    use std::str::FromStr;
    use target_lexicon::triple;

    #[test]
    fn test_simple_func() {
        let isa = lookup(triple!("x86_64"))
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_function(
            CallConv::SystemV,
            Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 64)),
        ));

        context.compile(&*isa).expect("expected compilation");

        let fde = match isa
            .create_unwind_info(&context.func)
            .expect("can create unwind info")
        {
            Some(crate::isa::unwind::UnwindInfo::SystemV(info)) => {
                info.to_fde(Address::Constant(1234))
            }
            _ => panic!("expected unwind information"),
        };

        assert_eq!(format!("{:?}", fde), "FrameDescriptionEntry { address: Constant(1234), length: 16, lsda: None, instructions: [(2, CfaOffset(16)), (2, Offset(Register(6), -16)), (5, CfaRegister(Register(6))), (15, Cfa(Register(7), 8))] }");
    }

    fn create_function(call_conv: CallConv, stack_slot: Option<StackSlotData>) -> Function {
        let mut func =
            Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));

        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        pos.ins().return_(&[]);

        if let Some(stack_slot) = stack_slot {
            func.stack_slots.push(stack_slot);
        }

        func
    }

    #[test]
    fn test_multi_return_func() {
        let isa = lookup(triple!("x86_64"))
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_multi_return_function(CallConv::SystemV));

        context.compile(&*isa).expect("expected compilation");

        let fde = match isa
            .create_unwind_info(&context.func)
            .expect("can create unwind info")
        {
            Some(crate::isa::unwind::UnwindInfo::SystemV(info)) => {
                info.to_fde(Address::Constant(4321))
            }
            _ => panic!("expected unwind information"),
        };

        assert_eq!(format!("{:?}", fde), "FrameDescriptionEntry { address: Constant(4321), length: 16, lsda: None, instructions: [(2, CfaOffset(16)), (2, Offset(Register(6), -16)), (5, CfaRegister(Register(6))), (12, RememberState), (12, Cfa(Register(7), 8)), (13, RestoreState), (15, Cfa(Register(7), 0))] }");
    }

    fn create_multi_return_function(call_conv: CallConv) -> Function {
        let mut sig = Signature::new(call_conv);
        sig.params.push(AbiParam::new(types::I32));
        let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);

        let block0 = func.dfg.make_block();
        let v0 = func.dfg.append_block_param(block0, types::I32);
        let block1 = func.dfg.make_block();
        let block2 = func.dfg.make_block();

        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        pos.ins().brnz(v0, block2, &[]);
        pos.ins().jump(block1, &[]);

        pos.insert_block(block1);
        pos.ins().return_(&[]);

        pos.insert_block(block2);
        pos.ins().return_(&[]);

        func
    }
}
cranelift/codegen/src/isa/x86/unwind/windows.rs (new file, 677 lines)
@@ -0,0 +1,677 @@
//! Unwind information for Windows x64 ABI.

use crate::ir::{Function, InstructionData, Opcode, ValueLoc};
use crate::isa::x86::registers::{FPR, GPR, RU};
use crate::isa::{CallConv, RegUnit, TargetIsa};
use crate::result::{CodegenError, CodegenResult};
use alloc::vec::Vec;
use byteorder::{ByteOrder, LittleEndian};
use log::warn;

#[cfg(feature = "enable-serde")]
use serde::{Deserialize, Serialize};

/// Maximum (inclusive) size of a "small" stack allocation
const SMALL_ALLOC_MAX_SIZE: u32 = 128;
/// Maximum (inclusive) size of a "large" stack allocation that can be represented in 16 bits
const LARGE_ALLOC_16BIT_MAX_SIZE: u32 = 524280;

struct Writer<'a> {
    buf: &'a mut [u8],
    offset: usize,
}

impl<'a> Writer<'a> {
    pub fn new(buf: &'a mut [u8]) -> Self {
        Self { buf, offset: 0 }
    }

    fn write_u8(&mut self, v: u8) {
        self.buf[self.offset] = v;
        self.offset += 1;
    }

    fn write_u16<T: ByteOrder>(&mut self, v: u16) {
        T::write_u16(&mut self.buf[self.offset..(self.offset + 2)], v);
        self.offset += 2;
    }

    fn write_u32<T: ByteOrder>(&mut self, v: u32) {
        T::write_u32(&mut self.buf[self.offset..(self.offset + 4)], v);
        self.offset += 4;
    }
}

/// The supported unwind codes for the x64 Windows ABI.
///
/// See: https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64
/// Only what is needed to describe the prologues generated by the Cranelift x86 ISA is represented here.
/// Note: the Cranelift x86 ISA RU enum matches the Windows unwind GPR encoding values.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
enum UnwindCode {
    PushRegister {
        offset: u8,
        reg: u8,
    },
    SaveXmm {
        offset: u8,
        reg: u8,
        stack_offset: u32,
    },
    StackAlloc {
        offset: u8,
        size: u32,
    },
    SetFramePointer {
        offset: u8,
        sp_offset: u8,
    },
}

impl UnwindCode {
    fn emit(&self, writer: &mut Writer) {
        enum UnwindOperation {
            PushNonvolatileRegister = 0,
            LargeStackAlloc = 1,
            SmallStackAlloc = 2,
            SetFramePointer = 3,
            SaveXmm128 = 8,
            SaveXmm128Far = 9,
        }

        match self {
            Self::PushRegister { offset, reg } => {
                writer.write_u8(*offset);
                writer.write_u8((*reg << 4) | (UnwindOperation::PushNonvolatileRegister as u8));
            }
            Self::SaveXmm {
                offset,
                reg,
                stack_offset,
            } => {
                writer.write_u8(*offset);
                let stack_offset = stack_offset / 16;
                if stack_offset <= core::u16::MAX as u32 {
                    writer.write_u8((*reg << 4) | (UnwindOperation::SaveXmm128 as u8));
                    writer.write_u16::<LittleEndian>(stack_offset as u16);
                } else {
                    writer.write_u8((*reg << 4) | (UnwindOperation::SaveXmm128Far as u8));
                    writer.write_u16::<LittleEndian>(stack_offset as u16);
                    writer.write_u16::<LittleEndian>((stack_offset >> 16) as u16);
                }
            }
            Self::StackAlloc { offset, size } => {
                // Stack allocations on Windows must be a multiple of 8 and be at least 1 slot
                assert!(*size >= 8);
                assert!((*size % 8) == 0);

                writer.write_u8(*offset);
                if *size <= SMALL_ALLOC_MAX_SIZE {
                    writer.write_u8(
                        ((((*size - 8) / 8) as u8) << 4) | UnwindOperation::SmallStackAlloc as u8,
                    );
                } else if *size <= LARGE_ALLOC_16BIT_MAX_SIZE {
                    writer.write_u8(UnwindOperation::LargeStackAlloc as u8);
                    writer.write_u16::<LittleEndian>((*size / 8) as u16);
                } else {
                    writer.write_u8((1 << 4) | (UnwindOperation::LargeStackAlloc as u8));
                    writer.write_u32::<LittleEndian>(*size);
                }
            }
            Self::SetFramePointer { offset, sp_offset } => {
                writer.write_u8(*offset);
                writer.write_u8((*sp_offset << 4) | (UnwindOperation::SetFramePointer as u8));
            }
        };
    }

    fn node_count(&self) -> usize {
        match self {
            Self::StackAlloc { size, .. } => {
                if *size <= SMALL_ALLOC_MAX_SIZE {
                    1
                } else if *size <= LARGE_ALLOC_16BIT_MAX_SIZE {
                    2
                } else {
                    3
                }
            }
            Self::SaveXmm { stack_offset, .. } => {
                if *stack_offset <= core::u16::MAX as u32 {
                    2
                } else {
                    3
                }
            }
            _ => 1,
        }
    }
}

pub(crate) fn create_unwind_info(
    func: &Function,
    isa: &dyn TargetIsa,
    frame_register: Option<RegUnit>,
) -> CodegenResult<Option<UnwindInfo>> {
    // Only Windows fastcall is supported for unwind information
    if func.signature.call_conv != CallConv::WindowsFastcall || func.prologue_end.is_none() {
        return Ok(None);
    }

    let prologue_end = func.prologue_end.unwrap();
    let entry_block = func.layout.entry_block().expect("missing entry block");

    // Stores the stack size when SP is not adjusted via an immediate value
    let mut stack_size = None;
    let mut prologue_size = 0;
    let mut unwind_codes = Vec::new();
    let mut found_end = false;

    // Have we saved at least one FPR? If so, we might have to check additional constraints.
    let mut saved_fpr = false;

    // In addition to the min offset for a callee-save, we need to know the offset from the
    // frame base to the stack pointer, so that we can record an unwind offset that spans only
    // to the end of callee-save space.
    let mut static_frame_allocation_size = 0u32;

    // For the time being, FPR preservation is split into a stack_addr and later store/load.
    // Store the register used for stack store and ensure it is the same register with no
    // intervening changes to the frame size.
    let mut callee_save_region_reg = None;
    // Also record the callee-save region's offset from RSP, because it must be added to FPR
    // save offsets to compute an offset from the frame base.
    let mut callee_save_offset = None;

    for (offset, inst, size) in func.inst_offsets(entry_block, &isa.encoding_info()) {
        // x64 ABI prologues cannot exceed 255 bytes in length
        if (offset + size) > 255 {
            warn!("function prologues cannot exceed 255 bytes in size for Windows x64");
            return Err(CodegenError::CodeTooLarge);
        }

        prologue_size += size;

        let unwind_offset = (offset + size) as u8;

        match func.dfg[inst] {
            InstructionData::Unary { opcode, arg } => {
                match opcode {
                    Opcode::X86Push => {
                        static_frame_allocation_size += 8;

                        unwind_codes.push(UnwindCode::PushRegister {
                            offset: unwind_offset,
                            reg: GPR.index_of(func.locations[arg].unwrap_reg()) as u8,
                        });
                    }
                    Opcode::AdjustSpDown => {
                        let stack_size =
                            stack_size.expect("expected a previous stack size instruction");
                        static_frame_allocation_size += stack_size;

                        // This is used when calling a stack check function
                        // We need to track the assignment to RAX which has the size of the stack
                        unwind_codes.push(UnwindCode::StackAlloc {
                            offset: unwind_offset,
                            size: stack_size,
                        });
                    }
                    _ => {}
                }
            }
            InstructionData::CopySpecial { src, dst, .. } => {
                if let Some(frame_register) = frame_register {
                    if src == (RU::rsp as RegUnit) && dst == frame_register {
                        // Constructing an rbp-based stack frame, so the static frame
                        // allocation restarts at 0 from here.
                        static_frame_allocation_size = 0;

                        unwind_codes.push(UnwindCode::SetFramePointer {
                            offset: unwind_offset,
                            sp_offset: 0,
                        });
                    }
                }
            }
            InstructionData::UnaryImm { opcode, imm } => {
                match opcode {
                    Opcode::Iconst => {
                        let imm: i64 = imm.into();
                        assert!(imm <= core::u32::MAX as i64);
                        assert!(stack_size.is_none());

                        // This instruction should only appear in a prologue to pass an
                        // argument of the stack size to a stack check function.
                        // Record the stack size so we know what it is when we encounter the adjustment
                        // instruction (which will adjust via the register assigned to this instruction).
                        stack_size = Some(imm as u32);
                    }
                    Opcode::AdjustSpDownImm => {
                        let imm: i64 = imm.into();
                        assert!(imm <= core::u32::MAX as i64);

                        static_frame_allocation_size += imm as u32;

                        unwind_codes.push(UnwindCode::StackAlloc {
                            offset: unwind_offset,
                            size: imm as u32,
                        });
                    }
                    _ => {}
                }
            }
            InstructionData::StackLoad {
                opcode: Opcode::StackAddr,
                stack_slot,
                offset: _,
            } => {
                let result = func.dfg.inst_results(inst).get(0).unwrap();
                if let ValueLoc::Reg(frame_reg) = func.locations[*result] {
                    callee_save_region_reg = Some(frame_reg);

                    // Figure out the offset in the call frame that `frame_reg` will have.
                    let frame_size = func
                        .stack_slots
                        .layout_info
                        .expect("func's stack slots have layout info if stack operations exist")
                        .frame_size;
                    // Because we're well after the prologue has been constructed, stack slots
                    // must have been laid out...
                    let slot_offset = func.stack_slots[stack_slot]
                        .offset
                        .expect("callee-save slot has an offset computed");
                    let frame_offset = frame_size as i32 + slot_offset;

                    callee_save_offset = Some(frame_offset as u32);
                }
            }
            InstructionData::Store {
                opcode: Opcode::Store,
                args: [arg1, arg2],
                flags: _flags,
                offset,
            } => {
                if let (ValueLoc::Reg(ru), ValueLoc::Reg(base_ru)) =
                    (func.locations[arg1], func.locations[arg2])
                {
                    if Some(base_ru) == callee_save_region_reg {
                        let offset_int: i32 = offset.into();
                        assert!(offset_int >= 0, "negative fpr offset would store outside the stack frame, and is almost certainly an error");
                        let offset_int: u32 = offset_int as u32 + callee_save_offset.expect("FPR preservation requires an FPR save region, which has some stack offset");
                        if FPR.contains(ru) {
                            saved_fpr = true;
                            unwind_codes.push(UnwindCode::SaveXmm {
                                offset: unwind_offset,
                                reg: ru as u8,
                                stack_offset: offset_int,
                            });
                        }
                    }
                }
            }
            _ => {}
        };

        if inst == prologue_end {
            found_end = true;
            break;
        }
    }

    assert!(found_end);

    if saved_fpr {
        if static_frame_allocation_size > 240 && saved_fpr {
            warn!("stack frame is too large ({} bytes) to use with Windows x64 SEH when preserving FPRs. \
                This is a Cranelift implementation limit, see \
                https://github.com/bytecodealliance/wasmtime/issues/1475",
                static_frame_allocation_size);
            return Err(CodegenError::ImplLimitExceeded);
        }
        // Only test that the static frame size is 16-byte aligned when an FPR is saved to avoid
        // panicking when alignment is elided because no FPRs are saved and no child calls are
        // made.
        assert!(
            static_frame_allocation_size % 16 == 0,
            "static frame allocation must be a multiple of 16"
        );
    }

    // Hack to avoid panicking unnecessarily. Because Cranelift generates prologues with RBP at
    // one end of the call frame, and RSP at the other, required offsets are arbitrarily large.
    // Windows x64 SEH only allows this offset to be up to 240 bytes, however, meaning large
    // frames are inexpressible, and we cannot actually compile the function. In case there are
    // no preserved FPRs, we can lie without error and claim the offset to RBP is 0 - nothing
    // will actually check it. This, then, avoids panics when compiling functions with large
    // call frames.
    let reported_frame_offset = if saved_fpr {
        (static_frame_allocation_size / 16) as u8
    } else {
        0
    };

    Ok(Some(UnwindInfo {
        flags: 0, // this assumes cranelift functions have no SEH handlers
        prologue_size: prologue_size as u8,
        frame_register: frame_register.map(|r| GPR.index_of(r) as u8),
        frame_register_offset: reported_frame_offset,
        unwind_codes,
    }))
}

/// Represents Windows x64 unwind information.
///
/// For information about Windows x64 unwind info, see:
/// https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct UnwindInfo {
    flags: u8,
    prologue_size: u8,
    frame_register: Option<u8>,
    frame_register_offset: u8,
    unwind_codes: Vec<UnwindCode>,
}

impl UnwindInfo {
    /// Gets the emit size of the unwind information, in bytes.
    pub fn emit_size(&self) -> usize {
        let node_count = self.node_count();

        // Calculation of the size requires no SEH handler or chained info
        assert!(self.flags == 0);

        // Size of fixed part of UNWIND_INFO is 4 bytes
        // Then comes the UNWIND_CODE nodes (2 bytes each)
        // Then comes 2 bytes of padding for the unwind codes if necessary
        // Next would come the SEH data, but we assert above that the function doesn't have SEH data

        4 + (node_count * 2) + if (node_count & 1) == 1 { 2 } else { 0 }
    }

    /// Emits the unwind information into the given mutable byte slice.
    ///
    /// This function will panic if the slice is not at least `emit_size` in length.
    pub fn emit(&self, buf: &mut [u8]) {
        const UNWIND_INFO_VERSION: u8 = 1;

        let node_count = self.node_count();
        assert!(node_count <= 256);

        let mut writer = Writer::new(buf);

        writer.write_u8((self.flags << 3) | UNWIND_INFO_VERSION);
        writer.write_u8(self.prologue_size);
        writer.write_u8(node_count as u8);

        if let Some(reg) = self.frame_register {
            writer.write_u8((self.frame_register_offset << 4) | reg);
        } else {
            writer.write_u8(0);
        }

        // Unwind codes are written in reverse order (prologue offset descending)
        for code in self.unwind_codes.iter().rev() {
            code.emit(&mut writer);
        }

        // To keep a 32-bit alignment, emit 2 bytes of padding if there's an odd number of 16-bit nodes
        if (node_count & 1) == 1 {
            writer.write_u16::<LittleEndian>(0);
        }

        // Ensure the correct number of bytes was emitted
        assert_eq!(writer.offset, self.emit_size());
    }

    fn node_count(&self) -> usize {
        self.unwind_codes
            .iter()
            .fold(0, |nodes, c| nodes + c.node_count())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::{ExternalName, InstBuilder, Signature, StackSlotData, StackSlotKind};
    use crate::isa::{lookup, CallConv};
    use crate::settings::{builder, Flags};
    use crate::Context;
    use std::str::FromStr;
    use target_lexicon::triple;

    #[test]
    fn test_wrong_calling_convention() {
        let isa = lookup(triple!("x86_64"))
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_function(CallConv::SystemV, None));

        context.compile(&*isa).expect("expected compilation");

        assert_eq!(
            create_unwind_info(&context.func, &*isa, None).expect("can create unwind info"),
            None
        );
    }

    #[test]
    fn test_small_alloc() {
        let isa = lookup(triple!("x86_64"))
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_function(
            CallConv::WindowsFastcall,
            Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 64)),
        ));

        context.compile(&*isa).expect("expected compilation");

        let unwind = create_unwind_info(&context.func, &*isa, Some(RU::rbp.into()))
            .expect("can create unwind info")
            .expect("expected unwind info");

        assert_eq!(
            unwind,
            UnwindInfo {
                flags: 0,
                prologue_size: 9,
                frame_register: Some(GPR.index_of(RU::rbp.into()) as u8),
                frame_register_offset: 0,
                unwind_codes: vec![
                    UnwindCode::PushRegister {
                        offset: 2,
                        reg: GPR.index_of(RU::rbp.into()) as u8
                    },
                    UnwindCode::SetFramePointer {
                        offset: 5,
                        sp_offset: 0
                    },
                    UnwindCode::StackAlloc {
                        offset: 9,
                        size: 64 + 32
                    }
                ]
            }
        );

        assert_eq!(unwind.emit_size(), 12);

        let mut buf = [0u8; 12];
        unwind.emit(&mut buf);

        assert_eq!(
            buf,
            [
                0x01, // Version and flags (version 1, no flags)
                0x09, // Prologue size
                0x03, // Unwind code count (1 for stack alloc, 1 for save frame reg, 1 for push reg)
                0x05, // Frame register + offset (RBP with 0 offset)
                0x09, // Prolog offset
                0xB2, // Operation 2 (small stack alloc), size = 0xB slots (e.g. (0xB * 8) + 8 = 96 (64 + 32) bytes)
                0x05, // Prolog offset
                0x03, // Operation 3 (save frame register), stack pointer offset = 0
                0x02, // Prolog offset
                0x50, // Operation 0 (save nonvolatile register), reg = 5 (RBP)
                0x00, // Padding byte
                0x00, // Padding byte
            ]
        );
    }

    #[test]
    fn test_medium_alloc() {
        let isa = lookup(triple!("x86_64"))
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_function(
            CallConv::WindowsFastcall,
            Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 10000)),
        ));

        context.compile(&*isa).expect("expected compilation");

        let unwind = create_unwind_info(&context.func, &*isa, Some(RU::rbp.into()))
            .expect("can create unwind info")
            .expect("expected unwind info");

        assert_eq!(
            unwind,
            UnwindInfo {
                flags: 0,
                prologue_size: 27,
                frame_register: Some(GPR.index_of(RU::rbp.into()) as u8),
                frame_register_offset: 0,
                unwind_codes: vec![
                    UnwindCode::PushRegister {
                        offset: 2,
                        reg: GPR.index_of(RU::rbp.into()) as u8
                    },
                    UnwindCode::SetFramePointer {
                        offset: 5,
                        sp_offset: 0
                    },
                    UnwindCode::StackAlloc {
                        offset: 27,
                        size: 10000 + 32
                    }
                ]
            }
        );

        assert_eq!(unwind.emit_size(), 12);

        let mut buf = [0u8; 12];
        unwind.emit(&mut buf);

        assert_eq!(
            buf,
            [
                0x01, // Version and flags (version 1, no flags)
                0x1B, // Prologue size
                0x04, // Unwind code count (2 for stack alloc, 1 for save frame reg, 1 for push reg)
                0x05, // Frame register + offset (RBP with 0 offset)
                0x1B, // Prolog offset
                0x01, // Operation 1 (large stack alloc), size is scaled 16-bits (info = 0)
                0xE6, // Low size byte
                0x04, // High size byte (e.g. 0x04E6 * 8 = 100032 (10000 + 32) bytes)
                0x05, // Prolog offset
                0x03, // Operation 3 (save frame register), stack pointer offset = 0
                0x02, // Prolog offset
                0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP)
            ]
        );
    }

    #[test]
    fn test_large_alloc() {
        let isa = lookup(triple!("x86_64"))
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_function(
            CallConv::WindowsFastcall,
            Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 1000000)),
        ));

        context.compile(&*isa).expect("expected compilation");

        let unwind = create_unwind_info(&context.func, &*isa, Some(RU::rbp.into()))
            .expect("can create unwind info")
            .expect("expected unwind info");

        assert_eq!(
            unwind,
            UnwindInfo {
                flags: 0,
                prologue_size: 27,
                frame_register: Some(GPR.index_of(RU::rbp.into()) as u8),
                frame_register_offset: 0,
                unwind_codes: vec![
                    UnwindCode::PushRegister {
                        offset: 2,
                        reg: GPR.index_of(RU::rbp.into()) as u8
                    },
                    UnwindCode::SetFramePointer {
                        offset: 5,
                        sp_offset: 0
                    },
                    UnwindCode::StackAlloc {
                        offset: 27,
                        size: 1000000 + 32
                    }
                ]
            }
        );

        assert_eq!(unwind.emit_size(), 16);

        let mut buf = [0u8; 16];
        unwind.emit(&mut buf);

        assert_eq!(
            buf,
            [
                0x01, // Version and flags (version 1, no flags)
                0x1B, // Prologue size
                0x05, // Unwind code count (3 for stack alloc, 1 for save frame reg, 1 for push reg)
                0x05, // Frame register + offset (RBP with 0 offset)
                0x1B, // Prolog offset
                0x11, // Operation 1 (large stack alloc), size is unscaled 32-bits (info = 1)
                0x60, // Byte 1 of size
                0x42, // Byte 2 of size
                0x0F, // Byte 3 of size
                0x00, // Byte 4 of size (size is 0xF4260 = 1000032 (1000000 + 32) bytes)
                0x05, // Prolog offset
                0x03, // Operation 3 (save frame register), stack pointer offset = 0
                0x02, // Prolog offset
                0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP)
                0x00, // Padding byte
                0x00, // Padding byte
            ]
        );
    }

    fn create_function(call_conv: CallConv, stack_slot: Option<StackSlotData>) -> Function {
        let mut func =
            Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));

        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        pos.ins().return_(&[]);

        if let Some(stack_slot) = stack_slot {
            func.stack_slots.push(stack_slot);
        }

        func
    }
}
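The Windows ABI, unlike System V, still requires the unwind information to be placed in memory alongside the code. A minimal sketch of serializing the `UnwindInfo` produced above, mirroring the tests; how the resulting buffer is placed in code memory and registered with the OS is left to the embedder, and reaching this `UnwindInfo` type from outside the crate is assumed to go through the ISA's unwind-info hook.

/// Serializes a Windows x64 `UnwindInfo` into a byte buffer.
/// `emit` asserts that exactly `emit_size()` bytes are written and panics if
/// the buffer is too small.
fn serialize_unwind_info(info: &UnwindInfo) -> Vec<u8> {
    let mut buf = vec![0u8; info.emit_size()];
    info.emit(&mut buf);
    buf
}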