cranelift-codegen: move old x86 and RISC-V backends to isa/legacy/.

These backends will be removed in the future (see
bytecodealliance/rfcs#12 and the pending #3009 in this repo).

In the meantime, to more clearly communicate that they are using
"legacy" APIs and will eventually be removed, this PR places them in an
`isa/legacy/` subdirectory. No functional changes otherwise.
Chris Fallin
2021-06-24 11:03:47 -07:00
parent 8172620763
commit 4b2723abb0
17 changed files with 23 additions and 13 deletions

@@ -0,0 +1,12 @@
//! Legacy ("old-style") backends that will be removed in the future.
// N.B.: the old x86-64 backend (`x86`) and the new one (`x64`) are both
// included whenever building with x86 support. The new backend is the default,
// but the old one can be requested with `BackendVariant::Legacy`. However, if this
// crate is built with the `old-x86-backend` feature, then the old backend is the
// default instead.
#[cfg(feature = "x86")]
pub(crate) mod x86;
#[cfg(feature = "riscv")]
pub(crate) mod riscv;
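
For orientation (not part of this diff), a minimal sketch of how a client can still request the legacy backend explicitly. It assumes `isa::lookup_variant` and `BackendVariant` are publicly exposed, as the unwind tests later in this diff use them from within the crate, and the helper name is hypothetical:

use cranelift_codegen::isa::{self, BackendVariant, TargetIsa};
use cranelift_codegen::settings::{builder, Flags};
use std::str::FromStr;
use target_lexicon::triple;

// Hypothetical helper: build an x86-64 ISA that uses the legacy backend,
// regardless of whether the `old-x86-backend` feature made it the default.
fn legacy_x64_isa() -> Box<dyn TargetIsa> {
    isa::lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
        .expect("x86 support must be compiled in")
        .finish(Flags::new(builder()))
}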

@@ -0,0 +1,149 @@
//! RISC-V ABI implementation.
//!
//! This module implements the RISC-V calling convention through the primary `legalize_signature()`
//! entry point.
//!
//! This doesn't support the soft-float ABI at the moment.
use super::registers::{FPR, GPR};
use super::settings;
use crate::abi::{legalize_args, ArgAction, ArgAssigner, ValueConversion};
use crate::ir::{self, AbiParam, ArgumentExtension, ArgumentLoc, ArgumentPurpose, Type};
use crate::isa::RegClass;
use crate::regalloc::RegisterSet;
use alloc::borrow::Cow;
use core::i32;
use target_lexicon::Triple;
struct Args {
pointer_bits: u8,
pointer_bytes: u8,
pointer_type: Type,
regs: u32,
reg_limit: u32,
offset: u32,
}
impl Args {
fn new(bits: u8, enable_e: bool) -> Self {
Self {
pointer_bits: bits,
pointer_bytes: bits / 8,
pointer_type: Type::int(u16::from(bits)).unwrap(),
regs: 0,
reg_limit: if enable_e { 6 } else { 8 },
offset: 0,
}
}
}
impl ArgAssigner for Args {
fn assign(&mut self, arg: &AbiParam) -> ArgAction {
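// Round `value` up to the next multiple of `to`; `to` must be a power of two.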
fn align(value: u32, to: u32) -> u32 {
(value + to - 1) & !(to - 1)
}
let ty = arg.value_type;
// Check for a legal type.
// RISC-V doesn't have SIMD at all, so break all vectors down.
if ty.is_vector() {
return ValueConversion::VectorSplit.into();
}
// Large integers and booleans are broken down to fit in a register.
if !ty.is_float() && ty.bits() > u16::from(self.pointer_bits) {
// Align registers and stack to a multiple of two pointers.
self.regs = align(self.regs, 2);
self.offset = align(self.offset, 2 * u32::from(self.pointer_bytes));
return ValueConversion::IntSplit.into();
}
// Small integers are extended to the size of a pointer register.
if ty.is_int() && ty.bits() < u16::from(self.pointer_bits) {
match arg.extension {
ArgumentExtension::None => {}
ArgumentExtension::Uext => return ValueConversion::Uext(self.pointer_type).into(),
ArgumentExtension::Sext => return ValueConversion::Sext(self.pointer_type).into(),
}
}
if self.regs < self.reg_limit {
// Assign to a register.
let reg = if ty.is_float() {
FPR.unit(10 + self.regs as usize)
} else {
GPR.unit(10 + self.regs as usize)
};
self.regs += 1;
ArgumentLoc::Reg(reg).into()
} else {
// Assign a stack location.
let loc = ArgumentLoc::Stack(self.offset as i32);
self.offset += u32::from(self.pointer_bytes);
debug_assert!(self.offset <= i32::MAX as u32);
loc.into()
}
}
}
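// Illustrative note (not in the original source): with this assigner on RV64, a signature
// such as `(i32, f32, i64)` appears to be assigned x10, f11, and x12, because a single
// `regs` counter is shared between the integer and floating-point argument registers.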
/// Legalize `sig` for RISC-V.
pub fn legalize_signature(
sig: &mut Cow<ir::Signature>,
triple: &Triple,
isa_flags: &settings::Flags,
current: bool,
) {
let bits = triple.pointer_width().unwrap().bits();
let mut args = Args::new(bits, isa_flags.enable_e());
if let Some(new_params) = legalize_args(&sig.params, &mut args) {
sig.to_mut().params = new_params;
}
let mut rets = Args::new(bits, isa_flags.enable_e());
if let Some(new_returns) = legalize_args(&sig.returns, &mut rets) {
sig.to_mut().returns = new_returns;
}
if current {
let ptr = Type::int(u16::from(bits)).unwrap();
// Add the link register as an argument and return value.
//
// The `jalr` instruction implementing a return can technically accept the return address
// in any register, but a micro-architecture with a return address predictor will only
// recognize it as a return if the address is in `x1`.
let link = AbiParam::special_reg(ptr, ArgumentPurpose::Link, GPR.unit(1));
sig.to_mut().params.push(link);
sig.to_mut().returns.push(link);
}
}
/// Get register class for a type appearing in a legalized signature.
pub fn regclass_for_abi_type(ty: Type) -> RegClass {
if ty.is_float() {
FPR
} else {
GPR
}
}
pub fn allocatable_registers(_func: &ir::Function, isa_flags: &settings::Flags) -> RegisterSet {
let mut regs = RegisterSet::new();
regs.take(GPR, GPR.unit(0)); // Hard-wired 0.
// %x1 is the link register which is available for allocation.
regs.take(GPR, GPR.unit(2)); // Stack pointer.
regs.take(GPR, GPR.unit(3)); // Global pointer.
regs.take(GPR, GPR.unit(4)); // Thread pointer.
// TODO: %x8 is the frame pointer. Reserve it?
// Remove %x16 and up for RV32E.
if isa_flags.enable_e() {
for u in 16..32 {
regs.take(GPR, GPR.unit(u));
}
}
regs
}

@@ -0,0 +1,182 @@
//! Emitting binary RISC-V machine code.
use crate::binemit::{bad_encoding, CodeSink, Reloc};
use crate::ir::{Function, Inst, InstructionData};
use crate::isa::{RegUnit, StackBaseMask, StackRef, TargetIsa};
use crate::predicates::is_signed_int;
use crate::regalloc::RegDiversions;
use core::u32;
include!(concat!(env!("OUT_DIR"), "/binemit-riscv.rs"));
/// R-type instructions.
///
/// 31     24  19  14     11 6
/// funct7 rs2 rs1 funct3 rd opcode
/// 25     20  15  12     7  0
///
/// Encoding bits: `opcode[6:2] | (funct3 << 5) | (funct7 << 8)`.
fn put_r<CS: CodeSink + ?Sized>(bits: u16, rs1: RegUnit, rs2: RegUnit, rd: RegUnit, sink: &mut CS) {
let bits = u32::from(bits);
let opcode5 = bits & 0x1f;
let funct3 = (bits >> 5) & 0x7;
let funct7 = (bits >> 8) & 0x7f;
let rs1 = u32::from(rs1) & 0x1f;
let rs2 = u32::from(rs2) & 0x1f;
let rd = u32::from(rd) & 0x1f;
// 0-6: opcode
let mut i = 0x3;
i |= opcode5 << 2;
i |= rd << 7;
i |= funct3 << 12;
i |= rs1 << 15;
i |= rs2 << 20;
i |= funct7 << 25;
sink.put4(i);
}
/// R-type instructions with a shift amount instead of rs2.
///
/// 31     25    19  14     11 6
/// funct7 shamt rs1 funct3 rd opcode
/// 25     20    15  12     7  0
///
/// Both funct7 and shamt contribute to bit 25. In RV64, shamt uses it for shifts > 31.
///
/// Encoding bits: `opcode[6:2] | (funct3 << 5) | (funct7 << 8)`.
fn put_rshamt<CS: CodeSink + ?Sized>(
bits: u16,
rs1: RegUnit,
shamt: i64,
rd: RegUnit,
sink: &mut CS,
) {
let bits = u32::from(bits);
let opcode5 = bits & 0x1f;
let funct3 = (bits >> 5) & 0x7;
let funct7 = (bits >> 8) & 0x7f;
let rs1 = u32::from(rs1) & 0x1f;
let shamt = shamt as u32 & 0x3f;
let rd = u32::from(rd) & 0x1f;
// 0-6: opcode
let mut i = 0x3;
i |= opcode5 << 2;
i |= rd << 7;
i |= funct3 << 12;
i |= rs1 << 15;
i |= shamt << 20;
i |= funct7 << 25;
sink.put4(i);
}
/// I-type instructions.
///
/// 31  19  14     11 6
/// imm rs1 funct3 rd opcode
/// 20  15  12     7  0
///
/// Encoding bits: `opcode[6:2] | (funct3 << 5)`
fn put_i<CS: CodeSink + ?Sized>(bits: u16, rs1: RegUnit, imm: i64, rd: RegUnit, sink: &mut CS) {
let bits = u32::from(bits);
let opcode5 = bits & 0x1f;
let funct3 = (bits >> 5) & 0x7;
let rs1 = u32::from(rs1) & 0x1f;
let rd = u32::from(rd) & 0x1f;
// 0-6: opcode
let mut i = 0x3;
i |= opcode5 << 2;
i |= rd << 7;
i |= funct3 << 12;
i |= rs1 << 15;
i |= (imm << 20) as u32;
sink.put4(i);
}
/// U-type instructions.
///
/// 31  11 6
/// imm rd opcode
/// 12  7  0
///
/// Encoding bits: `opcode[6:2] | (funct3 << 5)`
fn put_u<CS: CodeSink + ?Sized>(bits: u16, imm: i64, rd: RegUnit, sink: &mut CS) {
let bits = u32::from(bits);
let opcode5 = bits & 0x1f;
let rd = u32::from(rd) & 0x1f;
// 0-6: opcode
let mut i = 0x3;
i |= opcode5 << 2;
i |= rd << 7;
i |= imm as u32 & 0xfffff000;
sink.put4(i);
}
/// SB-type branch instructions.
///
/// 31  24  19  14     11  6
/// imm rs2 rs1 funct3 imm opcode
/// 25  20  15  12     7   0
///
/// Encoding bits: `opcode[6:2] | (funct3 << 5)`
fn put_sb<CS: CodeSink + ?Sized>(bits: u16, imm: i64, rs1: RegUnit, rs2: RegUnit, sink: &mut CS) {
let bits = u32::from(bits);
let opcode5 = bits & 0x1f;
let funct3 = (bits >> 5) & 0x7;
let rs1 = u32::from(rs1) & 0x1f;
let rs2 = u32::from(rs2) & 0x1f;
debug_assert!(is_signed_int(imm, 13, 1), "SB out of range {:#x}", imm);
let imm = imm as u32;
// 0-6: opcode
let mut i = 0x3;
i |= opcode5 << 2;
i |= funct3 << 12;
i |= rs1 << 15;
i |= rs2 << 20;
// The displacement bits are scattered non-contiguously across the instruction word.
i |= ((imm >> 11) & 0x1) << 7;
i |= ((imm >> 1) & 0xf) << 8;
i |= ((imm >> 5) & 0x3f) << 25;
i |= ((imm >> 12) & 0x1) << 31;
sink.put4(i);
}
/// UJ-type jump instructions.
///
/// 31  11 6
/// imm rd opcode
/// 12  7  0
///
/// Encoding bits: `opcode[6:2]`
fn put_uj<CS: CodeSink + ?Sized>(bits: u16, imm: i64, rd: RegUnit, sink: &mut CS) {
let bits = u32::from(bits);
let opcode5 = bits & 0x1f;
let rd = u32::from(rd) & 0x1f;
debug_assert!(is_signed_int(imm, 21, 1), "UJ out of range {:#x}", imm);
let imm = imm as u32;
// 0-6: opcode
let mut i = 0x3;
i |= opcode5 << 2;
i |= rd << 7;
// The displacement bits are scattered non-contiguously across the instruction word.
i |= imm & 0xff000;
i |= ((imm >> 11) & 0x1) << 20;
i |= ((imm >> 1) & 0x3ff) << 21;
i |= ((imm >> 20) & 0x1) << 31;
sink.put4(i);
}
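
As a cross-check on the field layouts documented above (not part of this diff; the helper names are hypothetical), a small self-contained sketch that packs R-type and I-type fields directly and reproduces two well-known encodings:

fn encode_r(funct7: u32, rs2: u32, rs1: u32, funct3: u32, rd: u32, opcode: u32) -> u32 {
    (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) | opcode
}

fn encode_i(imm: i32, rs1: u32, funct3: u32, rd: u32, opcode: u32) -> u32 {
    (((imm as u32) & 0xfff) << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) | opcode
}

fn main() {
    // add x5, x6, x7 -> funct7=0, rs2=7, rs1=6, funct3=0, rd=5, opcode=0b0110011.
    assert_eq!(encode_r(0, 7, 6, 0, 5, 0b0110011), 0x0073_02b3);
    // addi x5, x6, -10 -> imm=-10, rs1=6, funct3=0, rd=5, opcode=0b0010011.
    assert_eq!(encode_i(-10, 6, 0, 5, 0b0010011), 0xff63_0293);
}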

@@ -0,0 +1,18 @@
//! Encoding tables for RISC-V.
use super::registers::*;
use crate::ir;
use crate::isa;
use crate::isa::constraints::*;
use crate::isa::enc_tables::*;
use crate::isa::encoding::{base_size, RecipeSizing};
use crate::predicates;
// Include the generated encoding tables:
// - `LEVEL1_RV32`
// - `LEVEL1_RV64`
// - `LEVEL2`
// - `ENCLISTS`
// - `INFO`
include!(concat!(env!("OUT_DIR"), "/encoding-riscv.rs"));
include!(concat!(env!("OUT_DIR"), "/legalize-riscv.rs"));

@@ -0,0 +1,304 @@
//! RISC-V Instruction Set Architecture.
mod abi;
mod binemit;
mod enc_tables;
mod registers;
pub mod settings;
use super::super::settings as shared_settings;
#[cfg(feature = "testing_hooks")]
use crate::binemit::CodeSink;
use crate::binemit::{emit_function, MemoryCodeSink};
use crate::ir;
use crate::isa::enc_tables::{self as shared_enc_tables, lookup_enclist, Encodings};
use crate::isa::Builder as IsaBuilder;
use crate::isa::{EncInfo, RegClass, RegInfo, TargetIsa};
use crate::regalloc;
use alloc::{borrow::Cow, boxed::Box, vec::Vec};
use core::any::Any;
use core::fmt;
use core::hash::{Hash, Hasher};
use target_lexicon::{PointerWidth, Triple};
#[allow(dead_code)]
struct Isa {
triple: Triple,
shared_flags: shared_settings::Flags,
isa_flags: settings::Flags,
cpumode: &'static [shared_enc_tables::Level1Entry<u16>],
}
/// Get an ISA builder for creating RISC-V targets.
pub fn isa_builder(triple: Triple) -> IsaBuilder {
IsaBuilder {
triple,
setup: settings::builder(),
constructor: isa_constructor,
}
}
fn isa_constructor(
triple: Triple,
shared_flags: shared_settings::Flags,
builder: shared_settings::Builder,
) -> Box<dyn TargetIsa> {
let level1 = match triple.pointer_width().unwrap() {
PointerWidth::U16 => panic!("16-bit RISC-V unrecognized"),
PointerWidth::U32 => &enc_tables::LEVEL1_RV32[..],
PointerWidth::U64 => &enc_tables::LEVEL1_RV64[..],
};
Box::new(Isa {
triple,
isa_flags: settings::Flags::new(&shared_flags, builder),
shared_flags,
cpumode: level1,
})
}
impl TargetIsa for Isa {
fn name(&self) -> &'static str {
"riscv"
}
fn triple(&self) -> &Triple {
&self.triple
}
fn flags(&self) -> &shared_settings::Flags {
&self.shared_flags
}
fn isa_flags(&self) -> Vec<shared_settings::Value> {
self.isa_flags.iter().collect()
}
fn hash_all_flags(&self, mut hasher: &mut dyn Hasher) {
self.shared_flags.hash(&mut hasher);
self.isa_flags.hash(&mut hasher);
}
fn register_info(&self) -> RegInfo {
registers::INFO.clone()
}
fn encoding_info(&self) -> EncInfo {
enc_tables::INFO.clone()
}
fn legal_encodings<'a>(
&'a self,
func: &'a ir::Function,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type,
) -> Encodings<'a> {
lookup_enclist(
ctrl_typevar,
inst,
func,
self.cpumode,
&enc_tables::LEVEL2[..],
&enc_tables::ENCLISTS[..],
&enc_tables::LEGALIZE_ACTIONS[..],
&enc_tables::RECIPE_PREDICATES[..],
&enc_tables::INST_PREDICATES[..],
self.isa_flags.predicate_view(),
)
}
fn legalize_signature(&self, sig: &mut Cow<ir::Signature>, current: bool) {
abi::legalize_signature(sig, &self.triple, &self.isa_flags, current)
}
fn regclass_for_abi_type(&self, ty: ir::Type) -> RegClass {
abi::regclass_for_abi_type(ty)
}
fn allocatable_registers(&self, func: &ir::Function) -> regalloc::RegisterSet {
abi::allocatable_registers(func, &self.isa_flags)
}
#[cfg(feature = "testing_hooks")]
fn emit_inst(
&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut dyn CodeSink,
) {
binemit::emit_inst(func, inst, divert, sink, self)
}
fn emit_function_to_memory(&self, func: &ir::Function, sink: &mut MemoryCodeSink) {
emit_function(func, binemit::emit_inst, sink, self)
}
fn unsigned_add_overflow_condition(&self) -> ir::condcodes::IntCC {
unimplemented!()
}
fn unsigned_sub_overflow_condition(&self) -> ir::condcodes::IntCC {
unimplemented!()
}
fn as_any(&self) -> &dyn Any {
self as &dyn Any
}
}
#[cfg(test)]
mod tests {
use crate::ir::{immediates, types};
use crate::ir::{Function, InstructionData, Opcode};
use crate::isa;
use crate::settings::{self, Configurable};
use alloc::string::{String, ToString};
use core::str::FromStr;
use target_lexicon::triple;
fn encstr(isa: &dyn isa::TargetIsa, enc: Result<isa::Encoding, isa::Legalize>) -> String {
match enc {
Ok(e) => isa.encoding_info().display(e).to_string(),
Err(_) => "no encoding".to_string(),
}
}
#[test]
fn test_64bitenc() {
let shared_builder = settings::builder();
let shared_flags = settings::Flags::new(shared_builder);
let isa = isa::lookup(triple!("riscv64"))
.unwrap()
.finish(shared_flags);
let mut func = Function::new();
let block = func.dfg.make_block();
let arg64 = func.dfg.append_block_param(block, types::I64);
let arg32 = func.dfg.append_block_param(block, types::I32);
// Try to encode iadd_imm.i64 v1, -10.
let inst64 = InstructionData::BinaryImm64 {
opcode: Opcode::IaddImm,
arg: arg64,
imm: immediates::Imm64::new(-10),
};
// ADDI is I/0b00100
assert_eq!(
encstr(&*isa, isa.encode(&func, &inst64, types::I64)),
"Ii#04"
);
// Try to encode iadd_imm.i64 v1, -10000.
let inst64_large = InstructionData::BinaryImm64 {
opcode: Opcode::IaddImm,
arg: arg64,
imm: immediates::Imm64::new(-10000),
};
// Immediate is out of range for ADDI.
assert!(isa.encode(&func, &inst64_large, types::I64).is_err());
// Create an iadd_imm.i32 which is encodable in RV64.
let inst32 = InstructionData::BinaryImm64 {
opcode: Opcode::IaddImm,
arg: arg32,
imm: immediates::Imm64::new(10),
};
// ADDIW is I/0b00110
assert_eq!(
encstr(&*isa, isa.encode(&func, &inst32, types::I32)),
"Ii#06"
);
}
// Same as above, but for RV32.
#[test]
fn test_32bitenc() {
let shared_builder = settings::builder();
let shared_flags = settings::Flags::new(shared_builder);
let isa = isa::lookup(triple!("riscv32"))
.unwrap()
.finish(shared_flags);
let mut func = Function::new();
let block = func.dfg.make_block();
let arg64 = func.dfg.append_block_param(block, types::I64);
let arg32 = func.dfg.append_block_param(block, types::I32);
// Try to encode iadd_imm.i64 v1, -10.
let inst64 = InstructionData::BinaryImm64 {
opcode: Opcode::IaddImm,
arg: arg64,
imm: immediates::Imm64::new(-10),
};
// In 32-bit mode, an i64 add should be narrowed.
assert!(isa.encode(&func, &inst64, types::I64).is_err());
// Try to encode iadd_imm.i64 v1, -10000.
let inst64_large = InstructionData::BinaryImm64 {
opcode: Opcode::IaddImm,
arg: arg64,
imm: immediates::Imm64::new(-10000),
};
// In 32-bit mode, an i64 add should be narrowed.
assert!(isa.encode(&func, &inst64_large, types::I64).is_err());
// Create an iadd_imm.i32 which is encodable in RV32.
let inst32 = InstructionData::BinaryImm64 {
opcode: Opcode::IaddImm,
arg: arg32,
imm: immediates::Imm64::new(10),
};
// ADDI is I/0b00100
assert_eq!(
encstr(&*isa, isa.encode(&func, &inst32, types::I32)),
"Ii#04"
);
// Create an imul.i32 which is encodable in RV32, but only when use_m is true.
let mul32 = InstructionData::Binary {
opcode: Opcode::Imul,
args: [arg32, arg32],
};
assert!(isa.encode(&func, &mul32, types::I32).is_err());
}
#[test]
fn test_rv32m() {
let shared_builder = settings::builder();
let shared_flags = settings::Flags::new(shared_builder);
// Set the supports_m setting, which in turn enables the use_m predicate that unlocks
// encodings for imul.
let mut isa_builder = isa::lookup(triple!("riscv32")).unwrap();
isa_builder.enable("supports_m").unwrap();
let isa = isa_builder.finish(shared_flags);
let mut func = Function::new();
let block = func.dfg.make_block();
let arg32 = func.dfg.append_block_param(block, types::I32);
// Create an imul.i32 which is encodable in RV32M.
let mul32 = InstructionData::Binary {
opcode: Opcode::Imul,
args: [arg32, arg32],
};
assert_eq!(
encstr(&*isa, isa.encode(&func, &mul32, types::I32)),
"R#10c"
);
}
}
impl fmt::Display for Isa {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}\n{}", self.shared_flags, self.isa_flags)
}
}

@@ -0,0 +1,50 @@
//! RISC-V register descriptions.
use crate::isa::registers::{RegBank, RegClass, RegClassData, RegInfo, RegUnit};
include!(concat!(env!("OUT_DIR"), "/registers-riscv.rs"));
#[cfg(test)]
mod tests {
use super::{FPR, GPR, INFO};
use crate::isa::RegUnit;
use alloc::string::{String, ToString};
#[test]
fn unit_encodings() {
assert_eq!(INFO.parse_regunit("x0"), Some(0));
assert_eq!(INFO.parse_regunit("x31"), Some(31));
assert_eq!(INFO.parse_regunit("f0"), Some(32));
assert_eq!(INFO.parse_regunit("f31"), Some(63));
assert_eq!(INFO.parse_regunit("x32"), None);
assert_eq!(INFO.parse_regunit("f32"), None);
}
#[test]
fn unit_names() {
fn uname(ru: RegUnit) -> String {
INFO.display_regunit(ru).to_string()
}
assert_eq!(uname(0), "%x0");
assert_eq!(uname(1), "%x1");
assert_eq!(uname(31), "%x31");
assert_eq!(uname(32), "%f0");
assert_eq!(uname(33), "%f1");
assert_eq!(uname(63), "%f31");
assert_eq!(uname(64), "%INVALID64");
}
#[test]
fn classes() {
assert!(GPR.contains(GPR.unit(0)));
assert!(GPR.contains(GPR.unit(31)));
assert!(!FPR.contains(GPR.unit(0)));
assert!(!FPR.contains(GPR.unit(31)));
assert!(!GPR.contains(FPR.unit(0)));
assert!(!GPR.contains(FPR.unit(31)));
assert!(FPR.contains(FPR.unit(0)));
assert!(FPR.contains(FPR.unit(31)));
}
}

@@ -0,0 +1,56 @@
//! RISC-V Settings.
use crate::settings::{self, detail, Builder, Value};
use core::fmt;
// Include code generated by `cranelift-codegen/meta/src/gen_settings.rs`. This file contains a
// public `Flags` struct with an impl for all of the settings defined in
// `cranelift-codegen/meta/src/isa/riscv/mod.rs`.
include!(concat!(env!("OUT_DIR"), "/settings-riscv.rs"));
#[cfg(test)]
mod tests {
use super::{builder, Flags};
use crate::settings::{self, Configurable};
use alloc::string::ToString;
#[test]
fn display_default() {
let shared = settings::Flags::new(settings::builder());
let b = builder();
let f = Flags::new(&shared, b);
assert_eq!(
f.to_string(),
"[riscv]\n\
supports_m = false\n\
supports_a = false\n\
supports_f = false\n\
supports_d = false\n\
enable_m = true\n\
enable_e = false\n"
);
// Predicates are not part of the Display output.
assert_eq!(f.full_float(), false);
}
#[test]
fn predicates() {
let mut sb = settings::builder();
sb.set("enable_simd", "true").unwrap();
let shared = settings::Flags::new(sb);
let mut b = builder();
b.enable("supports_f").unwrap();
b.enable("supports_d").unwrap();
let f = Flags::new(&shared, b);
assert_eq!(f.full_float(), true);
let mut sb = settings::builder();
sb.set("enable_simd", "false").unwrap();
let shared = settings::Flags::new(sb);
let mut b = builder();
b.enable("supports_f").unwrap();
b.enable("supports_d").unwrap();
let f = Flags::new(&shared, b);
assert_eq!(f.full_float(), false);
}
}

File diff suppressed because it is too large.

@@ -0,0 +1,578 @@
//! Emitting binary x86 machine code.
use super::enc_tables::{needs_offset, needs_sib_byte};
use super::registers::RU;
use crate::binemit::{bad_encoding, CodeSink, Reloc};
use crate::ir::condcodes::{CondCode, FloatCC, IntCC};
use crate::ir::{
Block, Constant, ExternalName, Function, Inst, InstructionData, JumpTable, LibCall, Opcode,
TrapCode,
};
use crate::isa::{RegUnit, StackBase, StackBaseMask, StackRef, TargetIsa};
use crate::regalloc::RegDiversions;
use cranelift_codegen_shared::isa::x86::EncodingBits;
include!(concat!(env!("OUT_DIR"), "/binemit-x86.rs"));
// Convert a stack base to the corresponding register.
fn stk_base(base: StackBase) -> RegUnit {
let ru = match base {
StackBase::SP => RU::rsp,
StackBase::FP => RU::rbp,
StackBase::Zone => unimplemented!(),
};
ru as RegUnit
}
// Mandatory prefix bytes for Mp* opcodes.
const PREFIX: [u8; 3] = [0x66, 0xf3, 0xf2];
// Second byte for three-byte opcodes for mm=0b10 and mm=0b11.
const OP3_BYTE2: [u8; 2] = [0x38, 0x3a];
// A REX prefix with no bits set: 0b0100WRXB.
const BASE_REX: u8 = 0b0100_0000;
// Create a single-register REX prefix, setting the B bit to bit 3 of the register.
// This is used for instructions that encode a register in the low 3 bits of the opcode and for
// instructions that use the ModR/M `reg` field for something else.
fn rex1(reg_b: RegUnit) -> u8 {
let b = ((reg_b >> 3) & 1) as u8;
BASE_REX | b
}
// Create a dual-register REX prefix, setting:
//
// REX.B = bit 3 of r/m register, or SIB base register when a SIB byte is present.
// REX.R = bit 3 of reg register.
fn rex2(rm: RegUnit, reg: RegUnit) -> u8 {
let b = ((rm >> 3) & 1) as u8;
let r = ((reg >> 3) & 1) as u8;
BASE_REX | b | (r << 2)
}
// Create a three-register REX prefix, setting:
//
// REX.B = bit 3 of r/m register, or SIB base register when a SIB byte is present.
// REX.R = bit 3 of reg register.
// REX.X = bit 3 of SIB index register.
fn rex3(rm: RegUnit, reg: RegUnit, index: RegUnit) -> u8 {
let b = ((rm >> 3) & 1) as u8;
let r = ((reg >> 3) & 1) as u8;
let x = ((index >> 3) & 1) as u8;
BASE_REX | b | (x << 1) | (r << 2)
}
/// Encode the RXBR' bits of the EVEX P0 byte. For an explanation of these bits, see section 2.6.1
/// in the Intel Software Development Manual, volume 2A. These bits can be used by different
/// addressing modes (see section 2.6.2), requiring different `vex*` functions than this one.
fn evex2(rm: RegUnit, reg: RegUnit) -> u8 {
let b = (!(rm >> 3) & 1) as u8;
let x = (!(rm >> 4) & 1) as u8;
let r = (!(reg >> 3) & 1) as u8;
let r_ = (!(reg >> 4) & 1) as u8;
0x00 | r_ | (b << 1) | (x << 2) | (r << 3)
}
/// Determines whether a REX prefix should be emitted. A REX byte always has 0100 in bits 7:4; bits
/// 3:0 correspond to WRXB. W allows certain instructions to declare a 64-bit operand size; because
/// [needs_rex] is only used by [infer_rex] and we prevent [infer_rex] from using [w] in
/// [Template::build], we do not need to check again whether [w] forces an inferred REX prefix--it
/// always does and should be encoded like `.rex().w()`. The R, X, and B bits are extensions of the
/// ModR/M or SIB fields; see section 2.2.1.2 in the Intel Software Development Manual.
#[inline]
fn needs_rex(rex: u8) -> bool {
rex != BASE_REX
}
// Emit a REX prefix.
//
// The R, X, and B bits are computed from registers using the functions above. The W bit is
// extracted from `bits`.
fn rex_prefix<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(rex & 0xf8, BASE_REX);
let w = EncodingBits::from(bits).rex_w();
sink.put1(rex | (w << 3));
}
// Emit a single-byte opcode with no REX prefix.
fn put_op1<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x8f00, 0, "Invalid encoding bits for Op1*");
debug_assert_eq!(rex, BASE_REX, "Invalid registers for REX-less Op1 encoding");
sink.put1(bits as u8);
}
// Emit a single-byte opcode with REX prefix.
fn put_rexop1<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0f00, 0, "Invalid encoding bits for RexOp1*");
rex_prefix(bits, rex, sink);
sink.put1(bits as u8);
}
/// Emit a single-byte opcode with inferred REX prefix.
fn put_dynrexop1<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0f00, 0, "Invalid encoding bits for DynRexOp1*");
if needs_rex(rex) {
rex_prefix(bits, rex, sink);
}
sink.put1(bits as u8);
}
// Emit two-byte opcode: 0F XX
fn put_op2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x8f00, 0x0400, "Invalid encoding bits for Op2*");
debug_assert_eq!(rex, BASE_REX, "Invalid registers for REX-less Op2 encoding");
sink.put1(0x0f);
sink.put1(bits as u8);
}
// Emit two-byte opcode: 0F XX with REX prefix.
fn put_rexop2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0f00, 0x0400, "Invalid encoding bits for RexOp2*");
rex_prefix(bits, rex, sink);
sink.put1(0x0f);
sink.put1(bits as u8);
}
/// Emit two-byte opcode: 0F XX with inferred REX prefix.
fn put_dynrexop2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(
bits & 0x0f00,
0x0400,
"Invalid encoding bits for DynRexOp2*"
);
if needs_rex(rex) {
rex_prefix(bits, rex, sink);
}
sink.put1(0x0f);
sink.put1(bits as u8);
}
// Emit single-byte opcode with mandatory prefix.
fn put_mp1<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x8c00, 0, "Invalid encoding bits for Mp1*");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
debug_assert_eq!(rex, BASE_REX, "Invalid registers for REX-less Mp1 encoding");
sink.put1(bits as u8);
}
// Emit single-byte opcode with mandatory prefix and REX.
fn put_rexmp1<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0c00, 0, "Invalid encoding bits for RexMp1*");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
rex_prefix(bits, rex, sink);
sink.put1(bits as u8);
}
// Emit two-byte opcode (0F XX) with mandatory prefix.
fn put_mp2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x8c00, 0x0400, "Invalid encoding bits for Mp2*");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
debug_assert_eq!(rex, BASE_REX, "Invalid registers for REX-less Mp2 encoding");
sink.put1(0x0f);
sink.put1(bits as u8);
}
// Emit two-byte opcode (0F XX) with mandatory prefix and REX.
fn put_rexmp2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0c00, 0x0400, "Invalid encoding bits for RexMp2*");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
rex_prefix(bits, rex, sink);
sink.put1(0x0f);
sink.put1(bits as u8);
}
/// Emit two-byte opcode (0F XX) with mandatory prefix and inferred REX.
fn put_dynrexmp2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(
bits & 0x0c00,
0x0400,
"Invalid encoding bits for DynRexMp2*"
);
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
if needs_rex(rex) {
rex_prefix(bits, rex, sink);
}
sink.put1(0x0f);
sink.put1(bits as u8);
}
/// Emit three-byte opcode (0F 3[8A] XX) with mandatory prefix.
fn put_mp3<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x8800, 0x0800, "Invalid encoding bits for Mp3*");
debug_assert_eq!(rex, BASE_REX, "Invalid registers for REX-less Mp3 encoding");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
sink.put1(0x0f);
sink.put1(OP3_BYTE2[(enc.mm() - 2) as usize]);
sink.put1(bits as u8);
}
/// Emit three-byte opcode (0F 3[8A] XX) with mandatory prefix and REX
fn put_rexmp3<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0800, 0x0800, "Invalid encoding bits for RexMp3*");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
rex_prefix(bits, rex, sink);
sink.put1(0x0f);
sink.put1(OP3_BYTE2[(enc.mm() - 2) as usize]);
sink.put1(bits as u8);
}
/// Emit three-byte opcode (0F 3[8A] XX) with mandatory prefix and an inferred REX prefix.
fn put_dynrexmp3<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(
bits & 0x0800,
0x0800,
"Invalid encoding bits for DynRexMp3*"
);
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
if needs_rex(rex) {
rex_prefix(bits, rex, sink);
}
sink.put1(0x0f);
sink.put1(OP3_BYTE2[(enc.mm() - 2) as usize]);
sink.put1(bits as u8);
}
/// Defines the EVEX context for the `L'`, `L`, and `b` bits (bits 6:4 of EVEX P2 byte). Table 2-36 in
/// section 2.6.10 (Intel Software Development Manual, volume 2A) describes how these bits can be
/// used together for certain classes of instructions; special care should be taken to ensure
/// that each instruction uses an applicable `EvexContext`. Table 2-39 contains cases where
/// opcodes can result in an #UD.
#[allow(dead_code)]
enum EvexContext {
RoundingRegToRegFP {
rc: EvexRoundingControl,
},
NoRoundingFP {
sae: bool,
length: EvexVectorLength,
},
MemoryOp {
broadcast: bool,
length: EvexVectorLength,
},
Other {
length: EvexVectorLength,
},
}
impl EvexContext {
/// Encode the `L'`, `L`, and `b` bits (bits 6:4 of EVEX P2 byte) for merging with the P2 byte.
fn bits(&self) -> u8 {
match self {
Self::RoundingRegToRegFP { rc } => 0b001 | rc.bits() << 1,
Self::NoRoundingFP { sae, length } => (*sae as u8) | length.bits() << 1,
Self::MemoryOp { broadcast, length } => (*broadcast as u8) | length.bits() << 1,
Self::Other { length } => length.bits() << 1,
}
}
}
/// The EVEX format allows choosing a vector length in the `L'` and `L` bits; see `EvexContext`.
#[allow(dead_code)]
enum EvexVectorLength {
V128,
V256,
V512,
}
impl EvexVectorLength {
/// Encode the `L'` and `L` bits for merging with the P2 byte.
fn bits(&self) -> u8 {
match self {
Self::V128 => 0b00,
Self::V256 => 0b01,
Self::V512 => 0b10,
// 0b11 is reserved (#UD).
}
}
}
/// The EVEX format allows defining rounding control in the `L'` and `L` bits; see `EvexContext`.
#[allow(dead_code)]
enum EvexRoundingControl {
RNE,
RD,
RU,
RZ,
}
impl EvexRoundingControl {
/// Encode the `L'` and `L` bits for merging with the P2 byte.
fn bits(&self) -> u8 {
match self {
Self::RNE => 0b00,
Self::RD => 0b01,
Self::RU => 0b10,
Self::RZ => 0b11,
}
}
}
/// Defines the EVEX masking behavior; masking support is described in section 2.6.4 of the Intel
/// Software Development Manual, volume 2A.
#[allow(dead_code)]
enum EvexMasking {
None,
Merging { k: u8 },
Zeroing { k: u8 },
}
impl EvexMasking {
/// Encode the `z` bit for merging with the P2 byte.
fn z_bit(&self) -> u8 {
match self {
Self::None | Self::Merging { .. } => 0,
Self::Zeroing { .. } => 1,
}
}
/// Encode the `aaa` bits for merging with the P2 byte.
fn aaa_bits(&self) -> u8 {
match self {
Self::None => 0b000,
Self::Merging { k } | Self::Zeroing { k } => {
debug_assert!(*k <= 7);
*k
}
}
}
}
/// Encode an EVEX prefix, including the instruction opcode. To match the current recipe
/// convention, the ModR/M byte is written separately in the recipe. This EVEX encoding function
/// only encodes the `reg` (operand 1), `vvvv` (operand 2), `rm` (operand 3) form; other forms are
/// possible (see section 2.6.2, Intel Software Development Manual, volume 2A), requiring
/// refactoring of this function or separate functions for each form (e.g. as for the REX prefix).
fn put_evex<CS: CodeSink + ?Sized>(
bits: u16,
reg: RegUnit,
vvvvv: RegUnit,
rm: RegUnit,
context: EvexContext,
masking: EvexMasking,
sink: &mut CS,
) {
let enc = EncodingBits::from(bits);
// EVEX prefix.
sink.put1(0x62);
debug_assert!(enc.mm() < 0b100);
let mut p0 = enc.mm() & 0b11;
p0 |= evex2(rm, reg) << 4; // bits 3:2 are always unset
sink.put1(p0);
let mut p1 = enc.pp() | 0b100; // bit 2 is always set
p1 |= (!(vvvvv as u8) & 0b1111) << 3;
p1 |= (enc.rex_w() & 0b1) << 7;
sink.put1(p1);
let mut p2 = masking.aaa_bits();
p2 |= (!(vvvvv as u8 >> 4) & 0b1) << 3;
p2 |= context.bits() << 4;
p2 |= masking.z_bit() << 7;
sink.put1(p2);
// Opcode
sink.put1(enc.opcode_byte());
// ModR/M byte placed in recipe
}
/// Emit a ModR/M byte for reg-reg operands.
fn modrm_rr<CS: CodeSink + ?Sized>(rm: RegUnit, reg: RegUnit, sink: &mut CS) {
let reg = reg as u8 & 7;
let rm = rm as u8 & 7;
let mut b = 0b11000000;
b |= reg << 3;
b |= rm;
sink.put1(b);
}
/// Emit a ModR/M byte where the reg bits are part of the opcode.
fn modrm_r_bits<CS: CodeSink + ?Sized>(rm: RegUnit, bits: u16, sink: &mut CS) {
let reg = (bits >> 12) as u8 & 7;
let rm = rm as u8 & 7;
let mut b = 0b11000000;
b |= reg << 3;
b |= rm;
sink.put1(b);
}
/// Emit a mode 00 ModR/M byte. This is a register-indirect addressing mode with no offset.
/// Registers %rsp and %rbp are invalid for `rm`: %rsp indicates a SIB byte, and %rbp indicates an
/// absolute 32-bit immediate address.
fn modrm_rm<CS: CodeSink + ?Sized>(rm: RegUnit, reg: RegUnit, sink: &mut CS) {
let reg = reg as u8 & 7;
let rm = rm as u8 & 7;
let mut b = 0b00000000;
b |= reg << 3;
b |= rm;
sink.put1(b);
}
/// Emit a mode 00 ModR/M byte with a RIP-relative displacement in 64-bit mode. The effective
/// address is calculated by adding the displacement to the 64-bit RIP of the next instruction.
/// See the Intel Software Development Manual, section 2.2.1.6.
fn modrm_riprel<CS: CodeSink + ?Sized>(reg: RegUnit, sink: &mut CS) {
modrm_rm(0b101, reg, sink)
}
/// Emit a mode 01 ModR/M byte. This is a register-indirect addressing mode with 8-bit
/// displacement.
/// Register %rsp is invalid for `rm`. It indicates the presence of a SIB byte.
fn modrm_disp8<CS: CodeSink + ?Sized>(rm: RegUnit, reg: RegUnit, sink: &mut CS) {
let reg = reg as u8 & 7;
let rm = rm as u8 & 7;
let mut b = 0b01000000;
b |= reg << 3;
b |= rm;
sink.put1(b);
}
/// Emit a mode 10 ModR/M byte. This is a register-indirect addressing mode with 32-bit
/// displacement.
/// Register %rsp is invalid for `rm`. It indicates the presence of a SIB byte.
fn modrm_disp32<CS: CodeSink + ?Sized>(rm: RegUnit, reg: RegUnit, sink: &mut CS) {
let reg = reg as u8 & 7;
let rm = rm as u8 & 7;
let mut b = 0b10000000;
b |= reg << 3;
b |= rm;
sink.put1(b);
}
/// Emit a mode 00 ModR/M with a 100 RM indicating a SIB byte is present.
fn modrm_sib<CS: CodeSink + ?Sized>(reg: RegUnit, sink: &mut CS) {
modrm_rm(0b100, reg, sink);
}
/// Emit a mode 01 ModR/M with a 100 RM indicating a SIB byte and 8-bit
/// displacement are present.
fn modrm_sib_disp8<CS: CodeSink + ?Sized>(reg: RegUnit, sink: &mut CS) {
modrm_disp8(0b100, reg, sink);
}
/// Emit a mode 10 ModR/M with a 100 RM indicating a SIB byte and 32-bit
/// displacement are present.
fn modrm_sib_disp32<CS: CodeSink + ?Sized>(reg: RegUnit, sink: &mut CS) {
modrm_disp32(0b100, reg, sink);
}
/// Emit a SIB byte with a base register and no scale+index.
fn sib_noindex<CS: CodeSink + ?Sized>(base: RegUnit, sink: &mut CS) {
let base = base as u8 & 7;
// SIB SS_III_BBB.
let mut b = 0b00_100_000;
b |= base;
sink.put1(b);
}
/// Emit a SIB byte with a scale, base, and index.
fn sib<CS: CodeSink + ?Sized>(scale: u8, index: RegUnit, base: RegUnit, sink: &mut CS) {
// SIB SS_III_BBB.
debug_assert_eq!(scale & !0x03, 0, "Scale out of range");
let scale = scale & 3;
let index = index as u8 & 7;
let base = base as u8 & 7;
let b: u8 = (scale << 6) | (index << 3) | base;
sink.put1(b);
}
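// For illustration (hypothetical operands): `sib(2, RU::rcx as RegUnit, RU::rax as RegUnit, sink)`
// emits 0b10_001_000 = 0x88, the SIB byte for an effective address of [rax + rcx*4].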
/// Get the low 4 bits of an opcode for an integer condition code.
///
/// Add this offset to a base opcode for:
///
/// ---- 0x70: Short conditional branch.
/// 0x0f 0x80: Long conditional branch.
/// 0x0f 0x90: SetCC.
///
fn icc2opc(cond: IntCC) -> u16 {
use crate::ir::condcodes::IntCC::*;
match cond {
Overflow => 0x0,
NotOverflow => 0x1,
UnsignedLessThan => 0x2,
UnsignedGreaterThanOrEqual => 0x3,
Equal => 0x4,
NotEqual => 0x5,
UnsignedLessThanOrEqual => 0x6,
UnsignedGreaterThan => 0x7,
// 0x8 = Sign.
// 0x9 = !Sign.
// 0xa = Parity even.
// 0xb = Parity odd.
SignedLessThan => 0xc,
SignedGreaterThanOrEqual => 0xd,
SignedLessThanOrEqual => 0xe,
SignedGreaterThan => 0xf,
}
}
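// For example (not in the original source): 0x70 + icc2opc(IntCC::Equal) = 0x74, the short `je`
// encoding, and 0x0f 0x90 + icc2opc(IntCC::Equal) gives 0x0f 0x94, i.e. `sete`.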
/// Get the low 4 bits of an opcode for a floating point condition code.
///
/// The ucomiss/ucomisd instructions set the FLAGS bits ZF/PF/CF like this:
///
///    ZPC OSA
/// UN 111 000
/// GT 000 000
/// LT 001 000
/// EQ 100 000
///
/// Not all floating point condition codes are supported.
fn fcc2opc(cond: FloatCC) -> u16 {
use crate::ir::condcodes::FloatCC::*;
match cond {
Ordered => 0xb, // EQ|LT|GT => *np (P=0)
Unordered => 0xa, // UN => *p (P=1)
OrderedNotEqual => 0x5, // LT|GT => *ne (Z=0),
UnorderedOrEqual => 0x4, // UN|EQ => *e (Z=1)
GreaterThan => 0x7, // GT => *a (C=0&Z=0)
GreaterThanOrEqual => 0x3, // GT|EQ => *ae (C=0)
UnorderedOrLessThan => 0x2, // UN|LT => *b (C=1)
UnorderedOrLessThanOrEqual => 0x6, // UN|LT|EQ => *be (Z=1|C=1)
Equal | // EQ
NotEqual | // UN|LT|GT
LessThan | // LT
LessThanOrEqual | // LT|EQ
UnorderedOrGreaterThan | // UN|GT
UnorderedOrGreaterThanOrEqual // UN|GT|EQ
=> panic!("{} not supported", cond),
}
}
/// Emit a single-byte branch displacement to `destination`.
fn disp1<CS: CodeSink + ?Sized>(destination: Block, func: &Function, sink: &mut CS) {
let delta = func.offsets[destination].wrapping_sub(sink.offset() + 1);
sink.put1(delta as u8);
}
/// Emit a four-byte branch displacement to `destination`.
fn disp4<CS: CodeSink + ?Sized>(destination: Block, func: &Function, sink: &mut CS) {
let delta = func.offsets[destination].wrapping_sub(sink.offset() + 4);
sink.put4(delta);
}
/// Emit a four-byte displacement to jump table `jt`.
fn jt_disp4<CS: CodeSink + ?Sized>(jt: JumpTable, func: &Function, sink: &mut CS) {
let delta = func.jt_offsets[jt].wrapping_sub(sink.offset() + 4);
sink.put4(delta);
sink.reloc_jt(Reloc::X86PCRelRodata4, jt);
}
/// Emit a four-byte displacement to `constant`.
fn const_disp4<CS: CodeSink + ?Sized>(constant: Constant, func: &Function, sink: &mut CS) {
let offset = func.dfg.constants.get_offset(constant);
let delta = offset.wrapping_sub(sink.offset() + 4);
sink.put4(delta);
sink.reloc_constant(Reloc::X86PCRelRodata4, offset);
}
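
For orientation (not part of this diff; the helper names are hypothetical), a small self-contained sketch of the REX and ModR/M byte layouts used by the helpers above; `add rax, rcx` (REX.W + 01 /r) assembles to 48 01 C8:

fn rex_byte(w: u8, r: u8, x: u8, b: u8) -> u8 {
    0b0100_0000 | (w << 3) | (r << 2) | (x << 1) | b
}

fn modrm_reg_reg(rm: u8, reg: u8) -> u8 {
    0b1100_0000 | (reg << 3) | rm
}

fn main() {
    // rcx is the ModR/M `reg` operand (source); rax is the `rm` operand (destination).
    let bytes = [rex_byte(1, 0, 0, 0), 0x01, modrm_reg_reg(0, 1)];
    assert_eq!(bytes, [0x48, 0x01, 0xc8]);
}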

File diff suppressed because it is too large.

@@ -0,0 +1,199 @@
//! x86 Instruction Set Architectures.
mod abi;
mod binemit;
mod enc_tables;
mod registers;
pub mod settings;
#[cfg(feature = "unwind")]
pub mod unwind;
use super::super::settings as shared_settings;
#[cfg(feature = "testing_hooks")]
use crate::binemit::CodeSink;
use crate::binemit::{emit_function, MemoryCodeSink};
use crate::ir;
use crate::isa::enc_tables::{self as shared_enc_tables, lookup_enclist, Encodings};
use crate::isa::Builder as IsaBuilder;
#[cfg(feature = "unwind")]
use crate::isa::{unwind::systemv::RegisterMappingError, RegUnit};
use crate::isa::{EncInfo, RegClass, RegInfo, TargetIsa};
use crate::regalloc;
use crate::result::CodegenResult;
use crate::timing;
use alloc::{borrow::Cow, boxed::Box, vec::Vec};
use core::any::Any;
use core::fmt;
use core::hash::{Hash, Hasher};
use target_lexicon::{PointerWidth, Triple};
#[allow(dead_code)]
struct Isa {
triple: Triple,
shared_flags: shared_settings::Flags,
isa_flags: settings::Flags,
cpumode: &'static [shared_enc_tables::Level1Entry<u16>],
}
/// Get an ISA builder for creating x86 targets.
pub fn isa_builder(triple: Triple) -> IsaBuilder {
IsaBuilder {
triple,
setup: settings::builder(),
constructor: isa_constructor,
}
}
fn isa_constructor(
triple: Triple,
shared_flags: shared_settings::Flags,
builder: shared_settings::Builder,
) -> Box<dyn TargetIsa> {
let level1 = match triple.pointer_width().unwrap() {
PointerWidth::U16 => unimplemented!("x86-16"),
PointerWidth::U32 => &enc_tables::LEVEL1_I32[..],
PointerWidth::U64 => &enc_tables::LEVEL1_I64[..],
};
let isa_flags = settings::Flags::new(&shared_flags, builder);
Box::new(Isa {
triple,
isa_flags,
shared_flags,
cpumode: level1,
})
}
impl TargetIsa for Isa {
fn name(&self) -> &'static str {
"x86"
}
fn triple(&self) -> &Triple {
&self.triple
}
fn flags(&self) -> &shared_settings::Flags {
&self.shared_flags
}
fn isa_flags(&self) -> Vec<shared_settings::Value> {
self.isa_flags.iter().collect()
}
fn hash_all_flags(&self, mut hasher: &mut dyn Hasher) {
self.shared_flags.hash(&mut hasher);
self.isa_flags.hash(&mut hasher);
}
fn uses_cpu_flags(&self) -> bool {
true
}
fn uses_complex_addresses(&self) -> bool {
true
}
fn register_info(&self) -> RegInfo {
registers::INFO.clone()
}
#[cfg(feature = "unwind")]
fn map_dwarf_register(&self, reg: RegUnit) -> Result<u16, RegisterMappingError> {
unwind::systemv::map_reg(self, reg).map(|r| r.0)
}
fn encoding_info(&self) -> EncInfo {
enc_tables::INFO.clone()
}
fn legal_encodings<'a>(
&'a self,
func: &'a ir::Function,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type,
) -> Encodings<'a> {
lookup_enclist(
ctrl_typevar,
inst,
func,
self.cpumode,
&enc_tables::LEVEL2[..],
&enc_tables::ENCLISTS[..],
&enc_tables::LEGALIZE_ACTIONS[..],
&enc_tables::RECIPE_PREDICATES[..],
&enc_tables::INST_PREDICATES[..],
self.isa_flags.predicate_view(),
)
}
fn legalize_signature(&self, sig: &mut Cow<ir::Signature>, current: bool) {
abi::legalize_signature(
sig,
&self.triple,
current,
&self.shared_flags,
&self.isa_flags,
)
}
fn regclass_for_abi_type(&self, ty: ir::Type) -> RegClass {
abi::regclass_for_abi_type(ty)
}
fn allocatable_registers(&self, _func: &ir::Function) -> regalloc::RegisterSet {
abi::allocatable_registers(&self.triple, &self.shared_flags)
}
#[cfg(feature = "testing_hooks")]
fn emit_inst(
&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut dyn CodeSink,
) {
binemit::emit_inst(func, inst, divert, sink, self)
}
fn emit_function_to_memory(&self, func: &ir::Function, sink: &mut MemoryCodeSink) {
emit_function(func, binemit::emit_inst, sink, self)
}
fn prologue_epilogue(&self, func: &mut ir::Function) -> CodegenResult<()> {
let _tt = timing::prologue_epilogue();
abi::prologue_epilogue(func, self)
}
fn unsigned_add_overflow_condition(&self) -> ir::condcodes::IntCC {
ir::condcodes::IntCC::UnsignedLessThan
}
fn unsigned_sub_overflow_condition(&self) -> ir::condcodes::IntCC {
ir::condcodes::IntCC::UnsignedLessThan
}
#[cfg(feature = "unwind")]
fn create_unwind_info(
&self,
func: &ir::Function,
) -> CodegenResult<Option<super::super::unwind::UnwindInfo>> {
abi::create_unwind_info(func, self)
}
#[cfg(feature = "unwind")]
fn create_systemv_cie(&self) -> Option<gimli::write::CommonInformationEntry> {
Some(unwind::systemv::create_cie())
}
fn as_any(&self) -> &dyn Any {
self as &dyn Any
}
}
impl fmt::Display for Isa {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}\n{}", self.shared_flags, self.isa_flags)
}
}

@@ -0,0 +1,86 @@
//! x86 register descriptions.
use crate::isa::registers::{RegBank, RegClass, RegClassData, RegInfo, RegUnit};
include!(concat!(env!("OUT_DIR"), "/registers-x86.rs"));
#[cfg(test)]
mod tests {
use super::*;
use crate::isa::RegUnit;
use alloc::string::{String, ToString};
#[test]
fn unit_encodings() {
fn gpr(unit: usize) -> Option<u16> {
Some(GPR.unit(unit))
}
// The encoding of integer registers is not alphabetical.
assert_eq!(INFO.parse_regunit("rax"), gpr(0));
assert_eq!(INFO.parse_regunit("rbx"), gpr(3));
assert_eq!(INFO.parse_regunit("rcx"), gpr(1));
assert_eq!(INFO.parse_regunit("rdx"), gpr(2));
assert_eq!(INFO.parse_regunit("rsi"), gpr(6));
assert_eq!(INFO.parse_regunit("rdi"), gpr(7));
assert_eq!(INFO.parse_regunit("rbp"), gpr(5));
assert_eq!(INFO.parse_regunit("rsp"), gpr(4));
assert_eq!(INFO.parse_regunit("r8"), gpr(8));
assert_eq!(INFO.parse_regunit("r15"), gpr(15));
fn fpr(unit: usize) -> Option<u16> {
Some(FPR.unit(unit))
}
assert_eq!(INFO.parse_regunit("xmm0"), fpr(0));
assert_eq!(INFO.parse_regunit("xmm15"), fpr(15));
// FIXME(#1306) Add these tests back in when FPR32 is re-added.
// fn fpr32(unit: usize) -> Option<u16> {
// Some(FPR32.unit(unit))
// }
// assert_eq!(INFO.parse_regunit("xmm0"), fpr32(0));
// assert_eq!(INFO.parse_regunit("xmm31"), fpr32(31));
}
#[test]
fn unit_names() {
fn gpr(ru: RegUnit) -> String {
INFO.display_regunit(GPR.first + ru).to_string()
}
assert_eq!(gpr(0), "%rax");
assert_eq!(gpr(3), "%rbx");
assert_eq!(gpr(1), "%rcx");
assert_eq!(gpr(2), "%rdx");
assert_eq!(gpr(6), "%rsi");
assert_eq!(gpr(7), "%rdi");
assert_eq!(gpr(5), "%rbp");
assert_eq!(gpr(4), "%rsp");
assert_eq!(gpr(8), "%r8");
assert_eq!(gpr(15), "%r15");
fn fpr(ru: RegUnit) -> String {
INFO.display_regunit(FPR.first + ru).to_string()
}
assert_eq!(fpr(0), "%xmm0");
assert_eq!(fpr(15), "%xmm15");
// FIXME(#1306) Add these tests back in when FPR32 is re-added.
// fn fpr32(ru: RegUnit) -> String {
// INFO.display_regunit(FPR32.first + ru).to_string()
// }
// assert_eq!(fpr32(0), "%xmm0");
// assert_eq!(fpr32(31), "%xmm31");
}
#[test]
fn regclasses() {
assert_eq!(GPR.intersect_index(GPR), Some(GPR.into()));
assert_eq!(GPR.intersect_index(ABCD), Some(ABCD.into()));
assert_eq!(GPR.intersect_index(FPR), None);
assert_eq!(ABCD.intersect_index(GPR), Some(ABCD.into()));
assert_eq!(ABCD.intersect_index(ABCD), Some(ABCD.into()));
assert_eq!(ABCD.intersect_index(FPR), None);
assert_eq!(FPR.intersect_index(FPR), Some(FPR.into()));
assert_eq!(FPR.intersect_index(GPR), None);
assert_eq!(FPR.intersect_index(ABCD), None);
}
}

@@ -0,0 +1,52 @@
//! x86 Settings.
use crate::settings::{self, detail, Builder, Value};
use core::fmt;
// Include code generated by `cranelift-codegen/meta/src/gen_settings.rs`. This file contains a
// public `Flags` struct with an impl for all of the settings defined in
// `cranelift-codegen/meta/src/isa/x86/settings.rs`.
include!(concat!(env!("OUT_DIR"), "/settings-x86.rs"));
#[cfg(test)]
mod tests {
use super::{builder, Flags};
use crate::settings::{self, Configurable};
#[test]
fn presets() {
let shared = settings::Flags::new(settings::builder());
// Nehalem has SSE4.1 but not BMI1.
let mut b0 = builder();
b0.enable("nehalem").unwrap();
let f0 = Flags::new(&shared, b0);
assert_eq!(f0.has_sse41(), true);
assert_eq!(f0.has_bmi1(), false);
let mut b1 = builder();
b1.enable("haswell").unwrap();
let f1 = Flags::new(&shared, b1);
assert_eq!(f1.has_sse41(), true);
assert_eq!(f1.has_bmi1(), true);
}
#[test]
fn display_presets() {
// Spot check that the flags Display impl does not cause a panic
let shared = settings::Flags::new(settings::builder());
let b0 = builder();
let f0 = Flags::new(&shared, b0);
let _ = format!("{}", f0);
let mut b1 = builder();
b1.enable("nehalem").unwrap();
let f1 = Flags::new(&shared, b1);
let _ = format!("{}", f1);
let mut b2 = builder();
b2.enable("haswell").unwrap();
let f2 = Flags::new(&shared, b2);
let _ = format!("{}", f2);
}
}

@@ -0,0 +1,531 @@
//! Module for x86 unwind generation for supported ABIs.
pub mod systemv;
pub mod winx64;
use crate::ir::{Function, InstructionData, Opcode, ValueLoc};
use crate::isa::x86::registers::{FPR, RU};
use crate::isa::{RegUnit, TargetIsa};
use crate::result::CodegenResult;
use alloc::vec::Vec;
use std::collections::HashMap;
use crate::isa::unwind::input::{UnwindCode, UnwindInfo};
pub(crate) fn create_unwind_info(
func: &Function,
isa: &dyn TargetIsa,
) -> CodegenResult<Option<UnwindInfo<RegUnit>>> {
// Find last block based on max offset.
let last_block = func
.layout
.blocks()
.max_by_key(|b| func.offsets[*b])
.expect("at least a block");
// Find the last instruction's offset + size and use it as the function size.
let function_size = func
.inst_offsets(last_block, &isa.encoding_info())
.fold(0, |_, (offset, _, size)| offset + size);
let entry_block = func.layout.entry_block().expect("missing entry block");
let prologue_end = func.prologue_end.unwrap();
let epilogues_start = func
.epilogues_start
.iter()
.map(|(i, b)| (*b, *i))
.collect::<HashMap<_, _>>();
let word_size = isa.pointer_bytes();
let mut stack_size = None;
let mut prologue_size = 0;
let mut prologue_unwind_codes = Vec::new();
let mut epilogues_unwind_codes = Vec::new();
let mut frame_register: Option<RegUnit> = None;
// Process only entry block and blocks with epilogues.
let mut blocks = func
.epilogues_start
.iter()
.map(|(_, b)| *b)
.collect::<Vec<_>>();
if !blocks.contains(&entry_block) {
blocks.push(entry_block);
}
blocks.sort_by_key(|b| func.offsets[*b]);
for block in blocks.iter() {
let mut in_prologue = block == &entry_block;
let mut in_epilogue = false;
let mut epilogue_pop_offsets = Vec::new();
let epilogue_start = epilogues_start.get(block);
let is_last_block = block == &last_block;
for (offset, inst, size) in func.inst_offsets(*block, &isa.encoding_info()) {
let offset = offset + size;
let unwind_codes;
if in_prologue {
// Check for prologue end (inclusive)
if prologue_end == inst {
in_prologue = false;
}
prologue_size += size;
unwind_codes = &mut prologue_unwind_codes;
} else if !in_epilogue && epilogue_start == Some(&inst) {
// Now in an epilogue, emit a remember state instruction if not last block
in_epilogue = true;
epilogues_unwind_codes.push(Vec::new());
unwind_codes = epilogues_unwind_codes.last_mut().unwrap();
if !is_last_block {
unwind_codes.push((offset, UnwindCode::RememberState));
}
} else if in_epilogue {
unwind_codes = epilogues_unwind_codes.last_mut().unwrap();
} else {
// Ignore normal instructions
continue;
}
match func.dfg[inst] {
InstructionData::Unary { opcode, arg } => {
match opcode {
Opcode::X86Push => {
let reg = func.locations[arg].unwrap_reg();
unwind_codes.push((
offset,
UnwindCode::StackAlloc {
size: word_size.into(),
},
));
unwind_codes.push((
offset,
UnwindCode::SaveRegister {
reg,
stack_offset: 0,
},
));
}
Opcode::AdjustSpDown => {
let stack_size =
stack_size.expect("expected a previous stack size instruction");
// This is used when calling a stack check function
// We need to track the assignment to RAX which has the size of the stack
unwind_codes
.push((offset, UnwindCode::StackAlloc { size: stack_size }));
}
_ => {}
}
}
InstructionData::UnaryImm { opcode, imm } => {
match opcode {
Opcode::Iconst => {
let imm: i64 = imm.into();
assert!(imm <= core::u32::MAX as i64);
assert!(stack_size.is_none());
// This instruction should only appear in a prologue to pass an
// argument of the stack size to a stack check function.
// Record the stack size so we know what it is when we encounter the adjustment
// instruction (which will adjust via the register assigned to this instruction).
stack_size = Some(imm as u32);
}
Opcode::AdjustSpDownImm => {
let imm: i64 = imm.into();
assert!(imm <= core::u32::MAX as i64);
stack_size = Some(imm as u32);
unwind_codes
.push((offset, UnwindCode::StackAlloc { size: imm as u32 }));
}
Opcode::AdjustSpUpImm => {
let imm: i64 = imm.into();
assert!(imm <= core::u32::MAX as i64);
stack_size = Some(imm as u32);
unwind_codes
.push((offset, UnwindCode::StackDealloc { size: imm as u32 }));
}
_ => {}
}
}
InstructionData::Store {
opcode: Opcode::Store,
args: [arg1, arg2],
offset: stack_offset,
..
} => {
if let (ValueLoc::Reg(src), ValueLoc::Reg(dst)) =
(func.locations[arg1], func.locations[arg2])
{
// If this is a save of an FPR, record an unwind operation
// Note: the stack_offset here is relative to an adjusted SP
if dst == (RU::rsp as RegUnit) && FPR.contains(src) {
let stack_offset: i32 = stack_offset.into();
unwind_codes.push((
offset,
UnwindCode::SaveRegister {
reg: src,
stack_offset: stack_offset as u32,
},
));
}
}
}
InstructionData::CopySpecial { src, dst, .. } if frame_register.is_none() => {
// Check for change in CFA register (RSP is always the starting CFA)
if src == (RU::rsp as RegUnit) {
unwind_codes.push((offset, UnwindCode::SetFramePointer { reg: dst }));
frame_register = Some(dst);
}
}
InstructionData::NullAry { opcode } => match opcode {
Opcode::X86Pop => {
epilogue_pop_offsets.push(offset);
}
_ => {}
},
InstructionData::MultiAry { opcode, .. } if in_epilogue => match opcode {
Opcode::Return => {
let args = func.dfg.inst_args(inst);
for (i, arg) in args.iter().rev().enumerate() {
// Only walk back the args for the pop instructions encountered
if i >= epilogue_pop_offsets.len() {
break;
}
let offset = epilogue_pop_offsets[i];
let reg = func.locations[*arg].unwrap_reg();
unwind_codes.push((offset, UnwindCode::RestoreRegister { reg }));
unwind_codes.push((
offset,
UnwindCode::StackDealloc {
size: word_size.into(),
},
));
if Some(reg) == frame_register {
unwind_codes.push((offset, UnwindCode::RestoreFramePointer));
// Keep frame_register assigned for next epilogue.
}
}
epilogue_pop_offsets.clear();
// TODO: ensure unwind codes are sorted by offset?
if !is_last_block {
unwind_codes.push((offset, UnwindCode::RestoreState));
}
in_epilogue = false;
}
_ => {}
},
_ => {}
};
}
}
Ok(Some(UnwindInfo {
prologue_size,
prologue_unwind_codes,
epilogues_unwind_codes,
function_size,
word_size,
initial_sp_offset: word_size,
}))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cursor::{Cursor, FuncCursor};
use crate::ir::{
types, AbiParam, ExternalName, InstBuilder, Signature, StackSlotData, StackSlotKind,
};
use crate::isa::{lookup_variant, BackendVariant, CallConv};
use crate::settings::{builder, Flags};
use crate::Context;
use std::str::FromStr;
use target_lexicon::triple;
#[test]
fn test_small_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 64)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
prologue_size: 9,
prologue_unwind_codes: vec![
(2, UnwindCode::StackAlloc { size: 8 }),
(
2,
UnwindCode::SaveRegister {
reg: RU::rbp.into(),
stack_offset: 0,
}
),
(
5,
UnwindCode::SetFramePointer {
reg: RU::rbp.into(),
}
),
(9, UnwindCode::StackAlloc { size: 64 })
],
epilogues_unwind_codes: vec![vec![
(13, UnwindCode::StackDealloc { size: 64 }),
(
15,
UnwindCode::RestoreRegister {
reg: RU::rbp.into()
}
),
(15, UnwindCode::StackDealloc { size: 8 }),
(15, UnwindCode::RestoreFramePointer)
]],
function_size: 16,
word_size: 8,
initial_sp_offset: 8,
}
);
}
#[test]
fn test_medium_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 10000)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
prologue_size: 27,
prologue_unwind_codes: vec![
(2, UnwindCode::StackAlloc { size: 8 }),
(
2,
UnwindCode::SaveRegister {
reg: RU::rbp.into(),
stack_offset: 0,
}
),
(
5,
UnwindCode::SetFramePointer {
reg: RU::rbp.into(),
}
),
(27, UnwindCode::StackAlloc { size: 10000 })
],
epilogues_unwind_codes: vec![vec![
(34, UnwindCode::StackDealloc { size: 10000 }),
(
36,
UnwindCode::RestoreRegister {
reg: RU::rbp.into()
}
),
(36, UnwindCode::StackDealloc { size: 8 }),
(36, UnwindCode::RestoreFramePointer)
]],
function_size: 37,
word_size: 8,
initial_sp_offset: 8,
}
);
}
#[test]
fn test_large_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 1000000)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
prologue_size: 27,
prologue_unwind_codes: vec![
(2, UnwindCode::StackAlloc { size: 8 }),
(
2,
UnwindCode::SaveRegister {
reg: RU::rbp.into(),
stack_offset: 0,
}
),
(
5,
UnwindCode::SetFramePointer {
reg: RU::rbp.into(),
}
),
(27, UnwindCode::StackAlloc { size: 1000000 })
],
epilogues_unwind_codes: vec![vec![
(34, UnwindCode::StackDealloc { size: 1000000 }),
(
36,
UnwindCode::RestoreRegister {
reg: RU::rbp.into()
}
),
(36, UnwindCode::StackDealloc { size: 8 }),
(36, UnwindCode::RestoreFramePointer)
]],
function_size: 37,
word_size: 8,
initial_sp_offset: 8,
}
);
}
fn create_function(call_conv: CallConv, stack_slot: Option<StackSlotData>) -> Function {
let mut func =
Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));
let block0 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_block(block0);
pos.ins().return_(&[]);
if let Some(stack_slot) = stack_slot {
func.stack_slots.push(stack_slot);
}
func
}
#[test]
fn test_multi_return_func() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_multi_return_function(CallConv::SystemV));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
prologue_size: 5,
prologue_unwind_codes: vec![
(2, UnwindCode::StackAlloc { size: 8 }),
(
2,
UnwindCode::SaveRegister {
reg: RU::rbp.into(),
stack_offset: 0,
}
),
(
5,
UnwindCode::SetFramePointer {
reg: RU::rbp.into()
}
)
],
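// Two epilogues are expected: the first return is not the last instruction of
// the function, so its codes are bracketed by RememberState/RestoreState so
// that the code after it (the second return path) is still described by the
// prologue's saved unwind state.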
epilogues_unwind_codes: vec![
vec![
(12, UnwindCode::RememberState),
(
12,
UnwindCode::RestoreRegister {
reg: RU::rbp.into()
}
),
(12, UnwindCode::StackDealloc { size: 8 }),
(12, UnwindCode::RestoreFramePointer),
(13, UnwindCode::RestoreState)
],
vec![
(
15,
UnwindCode::RestoreRegister {
reg: RU::rbp.into()
}
),
(15, UnwindCode::StackDealloc { size: 8 }),
(15, UnwindCode::RestoreFramePointer)
]
],
function_size: 16,
word_size: 8,
initial_sp_offset: 8,
}
);
}
fn create_multi_return_function(call_conv: CallConv) -> Function {
let mut sig = Signature::new(call_conv);
sig.params.push(AbiParam::new(types::I32));
let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);
let block0 = func.dfg.make_block();
let v0 = func.dfg.append_block_param(block0, types::I32);
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_block(block0);
pos.ins().brnz(v0, block2, &[]);
pos.ins().jump(block1, &[]);
pos.insert_block(block1);
pos.ins().return_(&[]);
pos.insert_block(block2);
pos.ins().return_(&[]);
func
}
}

View File

@@ -0,0 +1,235 @@
//! Unwind information for System V ABI (x86-64).
use crate::ir::Function;
use crate::isa::{
unwind::systemv::{RegisterMappingError, UnwindInfo},
RegUnit, TargetIsa,
};
use crate::result::CodegenResult;
use gimli::{write::CommonInformationEntry, Encoding, Format, Register, X86_64};
/// Creates a new x86-64 common information entry (CIE).
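/// The CIE records the frame rules shared by every function (the return-address
/// column, the alignment factors, and the initial CFA/RA instructions below);
/// per-function FDEs then only describe how their prologues and epilogues
/// deviate from this common starting state.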
pub fn create_cie() -> CommonInformationEntry {
use gimli::write::CallFrameInstruction;
let mut entry = CommonInformationEntry::new(
Encoding {
address_size: 8,
format: Format::Dwarf32,
version: 1,
},
1,  // Code alignment factor (code offsets in the CFI are expressed in bytes)
-8, // Data alignment factor (data offsets are factored in 8-byte slots; negative because the stack grows down)
X86_64::RA,
);
// Every frame will start with the call frame address (CFA) at RSP+8
// It is +8 to account for the push of the return address by the call instruction
entry.add_instruction(CallFrameInstruction::Cfa(X86_64::RSP, 8));
// Every frame will start with the return address at RSP (CFA-8 = RSP+8-8 = RSP)
entry.add_instruction(CallFrameInstruction::Offset(X86_64::RA, -8));
entry
}
/// Map Cranelift registers to their corresponding Gimli registers.
pub fn map_reg(isa: &dyn TargetIsa, reg: RegUnit) -> Result<Register, RegisterMappingError> {
if isa.name() != "x86" || isa.pointer_bits() != 64 {
return Err(RegisterMappingError::UnsupportedArchitecture);
}
// Mapping from https://github.com/bytecodealliance/cranelift/pull/902 by @iximeow
const X86_GP_REG_MAP: [gimli::Register; 16] = [
X86_64::RAX,
X86_64::RCX,
X86_64::RDX,
X86_64::RBX,
X86_64::RSP,
X86_64::RBP,
X86_64::RSI,
X86_64::RDI,
X86_64::R8,
X86_64::R9,
X86_64::R10,
X86_64::R11,
X86_64::R12,
X86_64::R13,
X86_64::R14,
X86_64::R15,
];
const X86_XMM_REG_MAP: [gimli::Register; 16] = [
X86_64::XMM0,
X86_64::XMM1,
X86_64::XMM2,
X86_64::XMM3,
X86_64::XMM4,
X86_64::XMM5,
X86_64::XMM6,
X86_64::XMM7,
X86_64::XMM8,
X86_64::XMM9,
X86_64::XMM10,
X86_64::XMM11,
X86_64::XMM12,
X86_64::XMM13,
X86_64::XMM14,
X86_64::XMM15,
];
let reg_info = isa.register_info();
let bank = reg_info
.bank_containing_regunit(reg)
.ok_or(RegisterMappingError::MissingBank)?;
match bank.name {
"IntRegs" => {
// x86 GP registers have a weird mapping to DWARF registers, so we use a
// lookup table.
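// (Cranelift's GP units follow the x86 machine-encoding order rax, rcx, rdx,
// rbx, rsp, rbp, rsi, rdi, r8..r15, while DWARF numbers them rax, rdx, rcx,
// rbx, rsi, rdi, rbp, rsp, r8..r15, so indexing the X86_64 constants directly
// by unit number would map several registers incorrectly.)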
Ok(X86_GP_REG_MAP[(reg - bank.first_unit) as usize])
}
"FloatRegs" => Ok(X86_XMM_REG_MAP[(reg - bank.first_unit) as usize]),
_ => Err(RegisterMappingError::UnsupportedRegisterBank(bank.name)),
}
}
pub(crate) fn create_unwind_info(
func: &Function,
isa: &dyn TargetIsa,
) -> CodegenResult<Option<UnwindInfo>> {
// Only System V-like calling conventions are supported
match isa.unwind_info_kind() {
crate::machinst::UnwindInfoKind::SystemV => {}
_ => return Ok(None),
}
if func.prologue_end.is_none() || isa.name() != "x86" || isa.pointer_bits() != 64 {
return Ok(None);
}
let unwind = match super::create_unwind_info(func, isa)? {
Some(u) => u,
None => {
return Ok(None);
}
};
struct RegisterMapper<'a, 'b>(&'a (dyn TargetIsa + 'b));
impl<'a, 'b> crate::isa::unwind::systemv::RegisterMapper<RegUnit> for RegisterMapper<'a, 'b> {
fn map(&self, reg: RegUnit) -> Result<u16, RegisterMappingError> {
Ok(map_reg(self.0, reg)?.0)
}
fn sp(&self) -> u16 {
X86_64::RSP.0
}
fn fp(&self) -> Option<u16> {
Some(X86_64::RBP.0)
}
}
let map = RegisterMapper(isa);
Ok(Some(UnwindInfo::build(unwind, &map)?))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cursor::{Cursor, FuncCursor};
use crate::ir::{
types, AbiParam, ExternalName, InstBuilder, Signature, StackSlotData, StackSlotKind,
};
use crate::isa::{lookup_variant, BackendVariant, CallConv};
use crate::settings::{builder, Flags};
use crate::Context;
use gimli::write::Address;
use std::str::FromStr;
use target_lexicon::triple;
#[test]
fn test_simple_func() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::SystemV,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 64)),
));
context.compile(&*isa).expect("expected compilation");
let fde = match isa
.create_unwind_info(&context.func)
.expect("can create unwind info")
{
Some(crate::isa::unwind::UnwindInfo::SystemV(info)) => {
info.to_fde(Address::Constant(1234))
}
_ => panic!("expected unwind information"),
};
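// In the expected FDE below, DWARF register 6 is RBP and register 7 is RSP:
// after the frame-pointer push (offset 2) the CFA is RSP+16 and RBP is saved at
// CFA-16; once the frame pointer is established (offset 5) the CFA is tracked
// through RBP; at offset 15 the epilogue restores RBP and sets the CFA back to
// RSP+8.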
assert_eq!(format!("{:?}", fde), "FrameDescriptionEntry { address: Constant(1234), length: 16, lsda: None, instructions: [(2, CfaOffset(16)), (2, Offset(Register(6), -16)), (5, CfaRegister(Register(6))), (15, SameValue(Register(6))), (15, Cfa(Register(7), 8))] }");
}
fn create_function(call_conv: CallConv, stack_slot: Option<StackSlotData>) -> Function {
let mut func =
Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));
let block0 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_block(block0);
pos.ins().return_(&[]);
if let Some(stack_slot) = stack_slot {
func.stack_slots.push(stack_slot);
}
func
}
#[test]
fn test_multi_return_func() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_multi_return_function(CallConv::SystemV));
context.compile(&*isa).expect("expected compilation");
let fde = match isa
.create_unwind_info(&context.func)
.expect("can create unwind info")
{
Some(crate::isa::unwind::UnwindInfo::SystemV(info)) => {
info.to_fde(Address::Constant(4321))
}
_ => panic!("expected unwind information"),
};
assert_eq!(format!("{:?}", fde), "FrameDescriptionEntry { address: Constant(4321), length: 16, lsda: None, instructions: [(2, CfaOffset(16)), (2, Offset(Register(6), -16)), (5, CfaRegister(Register(6))), (12, RememberState), (12, SameValue(Register(6))), (12, Cfa(Register(7), 8)), (13, RestoreState), (15, SameValue(Register(6))), (15, Cfa(Register(7), 8))] }");
}
fn create_multi_return_function(call_conv: CallConv) -> Function {
let mut sig = Signature::new(call_conv);
sig.params.push(AbiParam::new(types::I32));
let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);
let block0 = func.dfg.make_block();
let v0 = func.dfg.append_block_param(block0, types::I32);
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_block(block0);
pos.ins().brnz(v0, block2, &[]);
pos.ins().jump(block1, &[]);
pos.insert_block(block1);
pos.ins().return_(&[]);
pos.insert_block(block2);
pos.ins().return_(&[]);
func
}
}

View File

@@ -0,0 +1,265 @@
//! Unwind information for Windows x64 ABI.
use crate::ir::Function;
use crate::isa::x86::registers::{FPR, GPR};
use crate::isa::{unwind::winx64::UnwindInfo, RegUnit, TargetIsa};
use crate::result::CodegenResult;
pub(crate) fn create_unwind_info(
func: &Function,
isa: &dyn TargetIsa,
) -> CodegenResult<Option<UnwindInfo>> {
// Only Windows fastcall is supported for unwind information
if !func.signature.call_conv.extends_windows_fastcall() || func.prologue_end.is_none() {
return Ok(None);
}
let unwind = match super::create_unwind_info(func, isa)? {
Some(u) => u,
None => {
return Ok(None);
}
};
Ok(Some(UnwindInfo::build::<RegUnit, RegisterMapper>(unwind)?))
}
struct RegisterMapper;
impl crate::isa::unwind::winx64::RegisterMapper<RegUnit> for RegisterMapper {
fn map(reg: RegUnit) -> crate::isa::unwind::winx64::MappedRegister {
use crate::isa::unwind::winx64::MappedRegister;
if GPR.contains(reg) {
MappedRegister::Int(GPR.index_of(reg) as u8)
} else if FPR.contains(reg) {
// Convert the register unit to its index within the FPR bank so that
// XMM registers map to 0..=15 rather than their raw unit numbers.
MappedRegister::Xmm(FPR.index_of(reg) as u8)
} else {
panic!("unexpected register unit {} passed to the Windows x64 unwind register mapper", reg)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cursor::{Cursor, FuncCursor};
use crate::ir::{ExternalName, InstBuilder, Signature, StackSlotData, StackSlotKind};
use crate::isa::unwind::winx64::UnwindCode;
use crate::isa::x86::registers::RU;
use crate::isa::{lookup_variant, BackendVariant, CallConv};
use crate::settings::{builder, Flags};
use crate::Context;
use std::str::FromStr;
use target_lexicon::triple;
#[test]
fn test_wrong_calling_convention() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(CallConv::SystemV, None));
context.compile(&*isa).expect("expected compilation");
assert_eq!(
create_unwind_info(&context.func, &*isa).expect("can create unwind info"),
None
);
}
#[test]
fn test_small_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 64)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
flags: 0,
prologue_size: 9,
frame_register: None,
frame_register_offset: 0,
unwind_codes: vec![
UnwindCode::PushRegister {
instruction_offset: 2,
reg: GPR.index_of(RU::rbp.into()) as u8
},
UnwindCode::StackAlloc {
instruction_offset: 9,
size: 64
}
]
}
);
assert_eq!(unwind.emit_size(), 8);
let mut buf = [0u8; 8];
unwind.emit(&mut buf);
assert_eq!(
buf,
[
0x01, // Version and flags (version 1, no flags)
0x09, // Prologue size
0x02, // Unwind code count (1 for stack alloc, 1 for push reg)
0x00, // Frame register + offset (no frame register)
0x09, // Prolog offset
0x72, // Operation 2 (small stack alloc), info = 0x7 (size = (0x7 * 8) + 8 = 64 bytes)
0x02, // Prolog offset
0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP)
]
);
}
#[test]
fn test_medium_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 10000)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
flags: 0,
prologue_size: 27,
frame_register: None,
frame_register_offset: 0,
unwind_codes: vec![
UnwindCode::PushRegister {
instruction_offset: 2,
reg: GPR.index_of(RU::rbp.into()) as u8
},
UnwindCode::StackAlloc {
instruction_offset: 27,
size: 10000
}
]
}
);
assert_eq!(unwind.emit_size(), 12);
let mut buf = [0u8; 12];
unwind.emit(&mut buf);
assert_eq!(
buf,
[
0x01, // Version and flags (version 1, no flags)
0x1B, // Prologue size
0x03, // Unwind code count (2 for stack alloc, 1 for push reg)
0x00, // Frame register + offset (no frame register)
0x1B, // Prolog offset
0x01, // Operation 1 (large stack alloc), size is scaled 16-bits (info = 0)
0xE2, // Low size byte
0x04, // High size byte (e.g. 0x04E2 * 8 = 10000 bytes)
0x02, // Prolog offset
0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP)
0x00, // Padding
0x00, // Padding
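// The unwind code array must contain an even number of 2-byte slots, so the
// three code slots above are padded out to four.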
]
);
}
#[test]
fn test_large_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 1000000)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
flags: 0,
prologue_size: 27,
frame_register: None,
frame_register_offset: 0,
unwind_codes: vec![
UnwindCode::PushRegister {
instruction_offset: 2,
reg: GPR.index_of(RU::rbp.into()) as u8
},
UnwindCode::StackAlloc {
instruction_offset: 27,
size: 1000000
}
]
}
);
assert_eq!(unwind.emit_size(), 12);
let mut buf = [0u8; 12];
unwind.emit(&mut buf);
assert_eq!(
buf,
[
0x01, // Version and flags (version 1, no flags)
0x1B, // Prologue size
0x04, // Unwind code count (3 for stack alloc, 1 for push reg)
0x00, // Frame register + offset (no frame register)
0x1B, // Prolog offset
0x11, // Operation 1 (large stack alloc), size is unscaled 32-bits (info = 1)
0x40, // Byte 1 of size
0x42, // Byte 2 of size
0x0F, // Byte 3 of size
0x00, // Byte 4 of size (size is 0xF4240 = 1000000 bytes)
0x02, // Prolog offset
0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP)
]
);
}
fn create_function(call_conv: CallConv, stack_slot: Option<StackSlotData>) -> Function {
let mut func =
Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));
let block0 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_block(block0);
pos.ins().return_(&[]);
if let Some(stack_slot) = stack_slot {
func.stack_slots.push(stack_slot);
}
func
}
}