cranelift-codegen: move old x86 and RISC-V backends to isa/legacy/.

These backends will be removed in the future (see
bytecodealliance/rfcs#12 and the pending #3009 in this repo).

In the meantime, to more clearly communicate that they are using
"legacy" APIs and will eventually be removed, this PR places them in an
`isa/legacy/` subdirectory. No functional changes otherwise.
Author: Chris Fallin
Date: 2021-06-24 11:03:47 -07:00
Parent: 8172620763
Commit: 4b2723abb0
17 changed files with 23 additions and 13 deletions


@@ -0,0 +1,235 @@
//! Unwind information for System V ABI (x86-64).

use crate::ir::Function;
use crate::isa::{
    unwind::systemv::{RegisterMappingError, UnwindInfo},
    RegUnit, TargetIsa,
};
use crate::result::CodegenResult;
use gimli::{write::CommonInformationEntry, Encoding, Format, Register, X86_64};

/// Creates a new x86-64 common information entry (CIE).
pub fn create_cie() -> CommonInformationEntry {
    use gimli::write::CallFrameInstruction;

    let mut entry = CommonInformationEntry::new(
        Encoding {
            address_size: 8,
            format: Format::Dwarf32,
            version: 1,
        },
        1,  // Code alignment factor
        -8, // Data alignment factor
        X86_64::RA,
    );

    // Every frame will start with the call frame address (CFA) at RSP+8
    // It is +8 to account for the push of the return address by the call instruction
    entry.add_instruction(CallFrameInstruction::Cfa(X86_64::RSP, 8));

    // Every frame will start with the return address at RSP (CFA-8 = RSP+8-8 = RSP)
    entry.add_instruction(CallFrameInstruction::Offset(X86_64::RA, -8));

    entry
}
/// Map Cranelift registers to their corresponding Gimli registers.
pub fn map_reg(isa: &dyn TargetIsa, reg: RegUnit) -> Result<Register, RegisterMappingError> {
    if isa.name() != "x86" || isa.pointer_bits() != 64 {
        return Err(RegisterMappingError::UnsupportedArchitecture);
    }

    // Mapping from https://github.com/bytecodealliance/cranelift/pull/902 by @iximeow
    const X86_GP_REG_MAP: [gimli::Register; 16] = [
        X86_64::RAX,
        X86_64::RCX,
        X86_64::RDX,
        X86_64::RBX,
        X86_64::RSP,
        X86_64::RBP,
        X86_64::RSI,
        X86_64::RDI,
        X86_64::R8,
        X86_64::R9,
        X86_64::R10,
        X86_64::R11,
        X86_64::R12,
        X86_64::R13,
        X86_64::R14,
        X86_64::R15,
    ];
    const X86_XMM_REG_MAP: [gimli::Register; 16] = [
        X86_64::XMM0,
        X86_64::XMM1,
        X86_64::XMM2,
        X86_64::XMM3,
        X86_64::XMM4,
        X86_64::XMM5,
        X86_64::XMM6,
        X86_64::XMM7,
        X86_64::XMM8,
        X86_64::XMM9,
        X86_64::XMM10,
        X86_64::XMM11,
        X86_64::XMM12,
        X86_64::XMM13,
        X86_64::XMM14,
        X86_64::XMM15,
    ];

    let reg_info = isa.register_info();
    let bank = reg_info
        .bank_containing_regunit(reg)
        .ok_or_else(|| RegisterMappingError::MissingBank)?;
    match bank.name {
        "IntRegs" => {
            // x86 GP registers have a weird mapping to DWARF registers (the System V
            // psABI numbers them rax, rdx, rcx, rbx, rsi, rdi, rbp, rsp, r8..r15, which
            // does not match Cranelift's bank order), so we use a lookup table.
            Ok(X86_GP_REG_MAP[(reg - bank.first_unit) as usize])
        }
        "FloatRegs" => Ok(X86_XMM_REG_MAP[(reg - bank.first_unit) as usize]),
        _ => Err(RegisterMappingError::UnsupportedRegisterBank(bank.name)),
    }
}
pub(crate) fn create_unwind_info(
    func: &Function,
    isa: &dyn TargetIsa,
) -> CodegenResult<Option<UnwindInfo>> {
    // Only System V-like calling conventions are supported
    match isa.unwind_info_kind() {
        crate::machinst::UnwindInfoKind::SystemV => {}
        _ => return Ok(None),
    }

    if func.prologue_end.is_none() || isa.name() != "x86" || isa.pointer_bits() != 64 {
        return Ok(None);
    }

    let unwind = match super::create_unwind_info(func, isa)? {
        Some(u) => u,
        None => {
            return Ok(None);
        }
    };

    struct RegisterMapper<'a, 'b>(&'a (dyn TargetIsa + 'b));
    impl<'a, 'b> crate::isa::unwind::systemv::RegisterMapper<RegUnit> for RegisterMapper<'a, 'b> {
        fn map(&self, reg: RegUnit) -> Result<u16, RegisterMappingError> {
            Ok(map_reg(self.0, reg)?.0)
        }
        fn sp(&self) -> u16 {
            X86_64::RSP.0
        }
        fn fp(&self) -> Option<u16> {
            Some(X86_64::RBP.0)
        }
    }
    let map = RegisterMapper(isa);

    Ok(Some(UnwindInfo::build(unwind, &map)?))
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::{
        types, AbiParam, ExternalName, InstBuilder, Signature, StackSlotData, StackSlotKind,
    };
    use crate::isa::{lookup_variant, BackendVariant, CallConv};
    use crate::settings::{builder, Flags};
    use crate::Context;
    use gimli::write::Address;
    use std::str::FromStr;
    use target_lexicon::triple;

    #[test]
    fn test_simple_func() {
        let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_function(
            CallConv::SystemV,
            Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 64)),
        ));

        context.compile(&*isa).expect("expected compilation");

        let fde = match isa
            .create_unwind_info(&context.func)
            .expect("can create unwind info")
        {
            Some(crate::isa::unwind::UnwindInfo::SystemV(info)) => {
                info.to_fde(Address::Constant(1234))
            }
            _ => panic!("expected unwind information"),
        };

        assert_eq!(format!("{:?}", fde), "FrameDescriptionEntry { address: Constant(1234), length: 16, lsda: None, instructions: [(2, CfaOffset(16)), (2, Offset(Register(6), -16)), (5, CfaRegister(Register(6))), (15, SameValue(Register(6))), (15, Cfa(Register(7), 8))] }");
    }

    fn create_function(call_conv: CallConv, stack_slot: Option<StackSlotData>) -> Function {
        let mut func =
            Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));

        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        pos.ins().return_(&[]);

        if let Some(stack_slot) = stack_slot {
            func.stack_slots.push(stack_slot);
        }

        func
    }

    #[test]
    fn test_multi_return_func() {
        let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_multi_return_function(CallConv::SystemV));

        context.compile(&*isa).expect("expected compilation");

        let fde = match isa
            .create_unwind_info(&context.func)
            .expect("can create unwind info")
        {
            Some(crate::isa::unwind::UnwindInfo::SystemV(info)) => {
                info.to_fde(Address::Constant(4321))
            }
            _ => panic!("expected unwind information"),
        };

        assert_eq!(format!("{:?}", fde), "FrameDescriptionEntry { address: Constant(4321), length: 16, lsda: None, instructions: [(2, CfaOffset(16)), (2, Offset(Register(6), -16)), (5, CfaRegister(Register(6))), (12, RememberState), (12, SameValue(Register(6))), (12, Cfa(Register(7), 8)), (13, RestoreState), (15, SameValue(Register(6))), (15, Cfa(Register(7), 8))] }");
    }

    fn create_multi_return_function(call_conv: CallConv) -> Function {
        let mut sig = Signature::new(call_conv);
        sig.params.push(AbiParam::new(types::I32));
        let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);

        let block0 = func.dfg.make_block();
        let v0 = func.dfg.append_block_param(block0, types::I32);
        let block1 = func.dfg.make_block();
        let block2 = func.dfg.make_block();

        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        pos.ins().brnz(v0, block2, &[]);
        pos.ins().jump(block1, &[]);

        pos.insert_block(block1);
        pos.ins().return_(&[]);

        pos.insert_block(block2);
        pos.ins().return_(&[]);

        func
    }
}
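For context, `create_cie` and the per-function `UnwindInfo` above are the two inputs a consumer needs to build an `.eh_frame` section with gimli's write API. The sketch below is illustrative only and not part of this commit: the function name `emit_eh_frame` is made up for the example, and it assumes an FDE such as the one produced by `info.to_fde(Address::Constant(..))` in the tests.

use gimli::write::{EhFrame, EndianVec, FrameDescriptionEntry, FrameTable};
use gimli::LittleEndian;

/// Illustrative sketch: serialize one shared CIE plus one FDE into raw `.eh_frame` bytes.
fn emit_eh_frame(fde: FrameDescriptionEntry) -> Vec<u8> {
    let mut table = FrameTable::default();
    // A single CIE is shared by every x86-64 frame description entry.
    let cie_id = table.add_cie(create_cie());
    // One FDE per compiled function.
    table.add_fde(cie_id, fde);

    let mut eh_frame = EhFrame(EndianVec::new(LittleEndian));
    table.write_eh_frame(&mut eh_frame).expect("can serialize .eh_frame");
    eh_frame.0.into_vec()
}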


@@ -0,0 +1,265 @@
//! Unwind information for Windows x64 ABI.

use crate::ir::Function;
use crate::isa::x86::registers::{FPR, GPR};
use crate::isa::{unwind::winx64::UnwindInfo, RegUnit, TargetIsa};
use crate::result::CodegenResult;

pub(crate) fn create_unwind_info(
    func: &Function,
    isa: &dyn TargetIsa,
) -> CodegenResult<Option<UnwindInfo>> {
    // Only Windows fastcall is supported for unwind information
    if !func.signature.call_conv.extends_windows_fastcall() || func.prologue_end.is_none() {
        return Ok(None);
    }

    let unwind = match super::create_unwind_info(func, isa)? {
        Some(u) => u,
        None => {
            return Ok(None);
        }
    };

    Ok(Some(UnwindInfo::build::<RegUnit, RegisterMapper>(unwind)?))
}

struct RegisterMapper;

impl crate::isa::unwind::winx64::RegisterMapper<RegUnit> for RegisterMapper {
    fn map(reg: RegUnit) -> crate::isa::unwind::winx64::MappedRegister {
        use crate::isa::unwind::winx64::MappedRegister;
        if GPR.contains(reg) {
            MappedRegister::Int(GPR.index_of(reg) as u8)
        } else if FPR.contains(reg) {
            MappedRegister::Xmm(reg as u8)
        } else {
            panic!()
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::cursor::{Cursor, FuncCursor};
    use crate::ir::{ExternalName, InstBuilder, Signature, StackSlotData, StackSlotKind};
    use crate::isa::unwind::winx64::UnwindCode;
    use crate::isa::x86::registers::RU;
    use crate::isa::{lookup_variant, BackendVariant, CallConv};
    use crate::settings::{builder, Flags};
    use crate::Context;
    use std::str::FromStr;
    use target_lexicon::triple;

    #[test]
    fn test_wrong_calling_convention() {
        let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_function(CallConv::SystemV, None));

        context.compile(&*isa).expect("expected compilation");

        assert_eq!(
            create_unwind_info(&context.func, &*isa).expect("can create unwind info"),
            None
        );
    }

    #[test]
    fn test_small_alloc() {
        let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_function(
            CallConv::WindowsFastcall,
            Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 64)),
        ));

        context.compile(&*isa).expect("expected compilation");

        let unwind = create_unwind_info(&context.func, &*isa)
            .expect("can create unwind info")
            .expect("expected unwind info");

        assert_eq!(
            unwind,
            UnwindInfo {
                flags: 0,
                prologue_size: 9,
                frame_register: None,
                frame_register_offset: 0,
                unwind_codes: vec![
                    UnwindCode::PushRegister {
                        instruction_offset: 2,
                        reg: GPR.index_of(RU::rbp.into()) as u8
                    },
                    UnwindCode::StackAlloc {
                        instruction_offset: 9,
                        size: 64
                    }
                ]
            }
        );

        assert_eq!(unwind.emit_size(), 8);

        let mut buf = [0u8; 8];
        unwind.emit(&mut buf);

        assert_eq!(
            buf,
            [
                0x01, // Version and flags (version 1, no flags)
                0x09, // Prologue size
                0x02, // Unwind code count (1 for stack alloc, 1 for push reg)
                0x00, // Frame register + offset (no frame register)
                0x09, // Prolog offset
                0x72, // Operation 2 (small stack alloc), size = 0x7 slots ((0x7 * 8) + 8 = 64 bytes)
                0x02, // Prolog offset
                0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP)
            ]
        );
    }
    #[test]
    fn test_medium_alloc() {
        let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_function(
            CallConv::WindowsFastcall,
            Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 10000)),
        ));

        context.compile(&*isa).expect("expected compilation");

        let unwind = create_unwind_info(&context.func, &*isa)
            .expect("can create unwind info")
            .expect("expected unwind info");

        assert_eq!(
            unwind,
            UnwindInfo {
                flags: 0,
                prologue_size: 27,
                frame_register: None,
                frame_register_offset: 0,
                unwind_codes: vec![
                    UnwindCode::PushRegister {
                        instruction_offset: 2,
                        reg: GPR.index_of(RU::rbp.into()) as u8
                    },
                    UnwindCode::StackAlloc {
                        instruction_offset: 27,
                        size: 10000
                    }
                ]
            }
        );

        assert_eq!(unwind.emit_size(), 12);

        let mut buf = [0u8; 12];
        unwind.emit(&mut buf);

        assert_eq!(
            buf,
            [
                0x01, // Version and flags (version 1, no flags)
                0x1B, // Prologue size
                0x03, // Unwind code count (2 for stack alloc, 1 for push reg)
                0x00, // Frame register + offset (no frame register)
                0x1B, // Prolog offset
                0x01, // Operation 1 (large stack alloc), size is scaled 16-bits (info = 0)
                0xE2, // Low size byte
                0x04, // High size byte (e.g. 0x04E2 * 8 = 10000 bytes)
                0x02, // Prolog offset
                0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP)
                0x00, // Padding
                0x00, // Padding
            ]
        );
    }

    #[test]
    fn test_large_alloc() {
        let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
            .expect("expect x86 ISA")
            .finish(Flags::new(builder()));

        let mut context = Context::for_function(create_function(
            CallConv::WindowsFastcall,
            Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 1000000)),
        ));

        context.compile(&*isa).expect("expected compilation");

        let unwind = create_unwind_info(&context.func, &*isa)
            .expect("can create unwind info")
            .expect("expected unwind info");

        assert_eq!(
            unwind,
            UnwindInfo {
                flags: 0,
                prologue_size: 27,
                frame_register: None,
                frame_register_offset: 0,
                unwind_codes: vec![
                    UnwindCode::PushRegister {
                        instruction_offset: 2,
                        reg: GPR.index_of(RU::rbp.into()) as u8
                    },
                    UnwindCode::StackAlloc {
                        instruction_offset: 27,
                        size: 1000000
                    }
                ]
            }
        );

        assert_eq!(unwind.emit_size(), 12);

        let mut buf = [0u8; 12];
        unwind.emit(&mut buf);

        assert_eq!(
            buf,
            [
                0x01, // Version and flags (version 1, no flags)
                0x1B, // Prologue size
                0x04, // Unwind code count (3 for stack alloc, 1 for push reg)
                0x00, // Frame register + offset (no frame register)
                0x1B, // Prolog offset
                0x11, // Operation 1 (large stack alloc), size is unscaled 32-bits (info = 1)
                0x40, // Byte 1 of size
                0x42, // Byte 2 of size
                0x0F, // Byte 3 of size
                0x00, // Byte 4 of size (size is 0xF4240 = 1000000 bytes)
                0x02, // Prolog offset
                0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP)
            ]
        );
    }

    fn create_function(call_conv: CallConv, stack_slot: Option<StackSlotData>) -> Function {
        let mut func =
            Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));

        let block0 = func.dfg.make_block();
        let mut pos = FuncCursor::new(&mut func);
        pos.insert_block(block0);
        pos.ins().return_(&[]);

        if let Some(stack_slot) = stack_slot {
            func.stack_slots.push(stack_slot);
        }

        func
    }
}
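The byte-level expectations in these tests follow the Windows x64 UNWIND_CODE layout: each code is a byte pair, the prolog offset followed by a byte whose low nibble is the operation and whose high nibble is the op info. As a rough illustration only (not part of this commit; `describe_unwind_code` is a hypothetical helper covering just the operations exercised above), the emitted pairs could be decoded like this:

/// Illustrative sketch: decode one UNWIND_CODE pair as checked in the tests above.
fn describe_unwind_code(prolog_offset: u8, op_byte: u8) -> String {
    let op = op_byte & 0x0f; // operation code lives in the low nibble
    let info = op_byte >> 4; // op info lives in the high nibble
    match op {
        // UWOP_PUSH_NONVOL: op info is the pushed register number (5 = RBP).
        0 => format!("offset {}: push nonvolatile register {}", prolog_offset, info),
        // UWOP_ALLOC_LARGE: op info selects a scaled 16-bit size (0, one trailing slot)
        // or an unscaled 32-bit size (1, two trailing slots).
        1 => format!(
            "offset {}: large stack alloc ({} trailing size slot(s))",
            prolog_offset,
            if info == 0 { 1 } else { 2 }
        ),
        // UWOP_ALLOC_SMALL: allocation size is op info * 8 + 8 bytes (8..=128).
        2 => format!(
            "offset {}: small stack alloc of {} bytes",
            prolog_offset,
            u32::from(info) * 8 + 8
        ),
        _ => format!("offset {}: unhandled operation {}", prolog_offset, op),
    }
}

For example, `describe_unwind_code(0x09, 0x72)` yields the 64-byte small allocation checked in `test_small_alloc`, and `describe_unwind_code(0x02, 0x50)` yields the RBP push present in all three tests.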