Remove the old x86 backend

bjorn3
2021-06-18 17:28:55 +02:00
parent e989caf337
commit 9e34df33b9
246 changed files with 76 additions and 28804 deletions


@@ -3565,45 +3565,6 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
panic!("ALU+imm and ALU+carry ops should not appear here!");
}
#[cfg(feature = "x86")]
Opcode::X86Udivmodx
| Opcode::X86Sdivmodx
| Opcode::X86Umulx
| Opcode::X86Smulx
| Opcode::X86Cvtt2si
| Opcode::X86Fmin
| Opcode::X86Fmax
| Opcode::X86Push
| Opcode::X86Pop
| Opcode::X86Bsr
| Opcode::X86Bsf
| Opcode::X86Pblendw
| Opcode::X86Pshufd
| Opcode::X86Pshufb
| Opcode::X86Pextr
| Opcode::X86Pinsr
| Opcode::X86Insertps
| Opcode::X86Movsd
| Opcode::X86Movlhps
| Opcode::X86Palignr
| Opcode::X86Psll
| Opcode::X86Psrl
| Opcode::X86Psra
| Opcode::X86Ptest
| Opcode::X86Pmaxs
| Opcode::X86Pmaxu
| Opcode::X86Pmins
| Opcode::X86Pminu
| Opcode::X86Pmullq
| Opcode::X86Pmuludq
| Opcode::X86Punpckh
| Opcode::X86Punpckl
| Opcode::X86Vcvtudq2ps
| Opcode::X86ElfTlsGetAddr
| Opcode::X86MachoTlsGetAddr => {
panic!("x86-specific opcode in supposedly arch-neutral IR!");
}
Opcode::DummySargT => unreachable!(),
Opcode::Iabs => {


@@ -1,12 +1,4 @@
//! Legacy ("old-style") backends that will be removed in the future.
// N.B.: the old x86-64 backend (`x86`) and the new one (`x64`) are both
// included whenever building with x86 support. The new backend is the default,
// but the old can be requested with `BackendVariant::Legacy`. However, if this
// crate is built with the `old-x86-backend` feature, then the old backend is
// default instead.
#[cfg(feature = "x86")]
pub(crate) mod x86;
#[cfg(feature = "riscv")]
pub(crate) mod riscv;

File diff suppressed because it is too large.


@@ -1,578 +0,0 @@
//! Emitting binary x86 machine code.
use super::enc_tables::{needs_offset, needs_sib_byte};
use super::registers::RU;
use crate::binemit::{bad_encoding, CodeSink, Reloc};
use crate::ir::condcodes::{CondCode, FloatCC, IntCC};
use crate::ir::{
Block, Constant, ExternalName, Function, Inst, InstructionData, JumpTable, LibCall, Opcode,
TrapCode,
};
use crate::isa::{RegUnit, StackBase, StackBaseMask, StackRef, TargetIsa};
use crate::regalloc::RegDiversions;
use cranelift_codegen_shared::isa::x86::EncodingBits;
include!(concat!(env!("OUT_DIR"), "/binemit-x86.rs"));
// Convert a stack base to the corresponding register.
fn stk_base(base: StackBase) -> RegUnit {
let ru = match base {
StackBase::SP => RU::rsp,
StackBase::FP => RU::rbp,
StackBase::Zone => unimplemented!(),
};
ru as RegUnit
}
// Mandatory prefix bytes for Mp* opcodes, indexed by `pp - 1` (pp = 0b00 means no mandatory
// prefix and never reaches this table).
const PREFIX: [u8; 3] = [0x66, 0xf3, 0xf2];
// Second byte for three-byte opcodes for mm=0b10 and mm=0b11.
const OP3_BYTE2: [u8; 2] = [0x38, 0x3a];
// A REX prefix with no bits set: 0b0100WRXB.
const BASE_REX: u8 = 0b0100_0000;
// Create a single-register REX prefix, setting the B bit to bit 3 of the register.
// This is used for instructions that encode a register in the low 3 bits of the opcode and for
// instructions that use the ModR/M `reg` field for something else.
fn rex1(reg_b: RegUnit) -> u8 {
let b = ((reg_b >> 3) & 1) as u8;
BASE_REX | b
}
// Create a dual-register REX prefix, setting:
//
// REX.B = bit 3 of r/m register, or SIB base register when a SIB byte is present.
// REX.R = bit 3 of reg register.
fn rex2(rm: RegUnit, reg: RegUnit) -> u8 {
let b = ((rm >> 3) & 1) as u8;
let r = ((reg >> 3) & 1) as u8;
BASE_REX | b | (r << 2)
}
// Create a three-register REX prefix, setting:
//
// REX.B = bit 3 of r/m register, or SIB base register when a SIB byte is present.
// REX.R = bit 3 of reg register.
// REX.X = bit 3 of SIB index register.
fn rex3(rm: RegUnit, reg: RegUnit, index: RegUnit) -> u8 {
let b = ((rm >> 3) & 1) as u8;
let r = ((reg >> 3) & 1) as u8;
let x = ((index >> 3) & 1) as u8;
BASE_REX | b | (x << 1) | (r << 2)
}
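// Illustrative check (added for this walkthrough, not part of the original source): with
// `reg` = r9 (unit 9) and `rm` = r11 (unit 11), both registers need their extension bit, so
// `rex2` yields REX.B | REX.R on top of the base prefix:
// 0b0100_0000 | 0b001 | (0b001 << 2) = 0x45. `rex_prefix` below then merges in the W bit.
#[test]
fn rex2_sets_b_and_r_for_extended_registers() {
    assert_eq!(rex2(11, 9), 0x45);
}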
/// Encode the R, X, B, and R' bits of the EVEX P0 byte. For an explanation of these bits, see
/// section 2.6.1 in the Intel Software Developer's Manual, volume 2A. Other addressing modes (see
/// section 2.6.2) use these bits differently and would require `evex*` functions other than this one.
fn evex2(rm: RegUnit, reg: RegUnit) -> u8 {
let b = (!(rm >> 3) & 1) as u8;
let x = (!(rm >> 4) & 1) as u8;
let r = (!(reg >> 3) & 1) as u8;
let r_ = (!(reg >> 4) & 1) as u8;
0x00 | r_ | (b << 1) | (x << 2) | (r << 3)
}
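// Illustrative check (added for this walkthrough, not in the original file): unlike REX, EVEX
// stores its extension bits inverted, so low registers produce all-ones. With xmm0 (unit 0) in
// both positions, B = X = R = R' = 1:
#[test]
fn evex2_inverts_extension_bits() {
    assert_eq!(evex2(0, 0), 0b1111);
}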
/// Determines whether a REX prefix should be emitted. A REX byte always has 0100 in bits 7:4; bits
/// 3:0 correspond to WRXB. W allows certain instructions to declare a 64-bit operand size; because
/// [needs_rex] is only used by [infer_rex] and we prevent [infer_rex] from using [w] in
/// [Template::build], we do not need to check again whether [w] forces an inferred REX prefix; it
/// always does, and should be encoded as `.rex().w()`. The R, X, and B bits extend the ModR/M and
/// SIB fields; see section 2.2.1.2 in the Intel Software Developer's Manual.
#[inline]
fn needs_rex(rex: u8) -> bool {
rex != BASE_REX
}
// Emit a REX prefix.
//
// The R, X, and B bits are computed from registers using the functions above. The W bit is
// extracted from `bits`.
fn rex_prefix<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(rex & 0xf8, BASE_REX);
let w = EncodingBits::from(bits).rex_w();
sink.put1(rex | (w << 3));
}
// Emit a single-byte opcode with no REX prefix.
fn put_op1<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x8f00, 0, "Invalid encoding bits for Op1*");
debug_assert_eq!(rex, BASE_REX, "Invalid registers for REX-less Op1 encoding");
sink.put1(bits as u8);
}
// Emit a single-byte opcode with REX prefix.
fn put_rexop1<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0f00, 0, "Invalid encoding bits for RexOp1*");
rex_prefix(bits, rex, sink);
sink.put1(bits as u8);
}
/// Emit a single-byte opcode with inferred REX prefix.
fn put_dynrexop1<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0f00, 0, "Invalid encoding bits for DynRexOp1*");
if needs_rex(rex) {
rex_prefix(bits, rex, sink);
}
sink.put1(bits as u8);
}
// Emit two-byte opcode: 0F XX
fn put_op2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x8f00, 0x0400, "Invalid encoding bits for Op2*");
debug_assert_eq!(rex, BASE_REX, "Invalid registers for REX-less Op2 encoding");
sink.put1(0x0f);
sink.put1(bits as u8);
}
// Emit two-byte opcode: 0F XX with REX prefix.
fn put_rexop2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0f00, 0x0400, "Invalid encoding bits for RexOp2*");
rex_prefix(bits, rex, sink);
sink.put1(0x0f);
sink.put1(bits as u8);
}
/// Emit two-byte opcode: 0F XX with inferred REX prefix.
fn put_dynrexop2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(
bits & 0x0f00,
0x0400,
"Invalid encoding bits for DynRexOp2*"
);
if needs_rex(rex) {
rex_prefix(bits, rex, sink);
}
sink.put1(0x0f);
sink.put1(bits as u8);
}
// Emit single-byte opcode with mandatory prefix.
fn put_mp1<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x8c00, 0, "Invalid encoding bits for Mp1*");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
debug_assert_eq!(rex, BASE_REX, "Invalid registers for REX-less Mp1 encoding");
sink.put1(bits as u8);
}
// Emit single-byte opcode with mandatory prefix and REX.
fn put_rexmp1<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0c00, 0, "Invalid encoding bits for RexMp1*");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
rex_prefix(bits, rex, sink);
sink.put1(bits as u8);
}
// Emit two-byte opcode (0F XX) with mandatory prefix.
fn put_mp2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x8c00, 0x0400, "Invalid encoding bits for Mp2*");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
debug_assert_eq!(rex, BASE_REX, "Invalid registers for REX-less Mp2 encoding");
sink.put1(0x0f);
sink.put1(bits as u8);
}
// Emit two-byte opcode (0F XX) with mandatory prefix and REX.
fn put_rexmp2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0c00, 0x0400, "Invalid encoding bits for RexMp2*");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
rex_prefix(bits, rex, sink);
sink.put1(0x0f);
sink.put1(bits as u8);
}
/// Emit two-byte opcode (0F XX) with mandatory prefix and inferred REX.
fn put_dynrexmp2<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(
bits & 0x0c00,
0x0400,
"Invalid encoding bits for DynRexMp2*"
);
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
if needs_rex(rex) {
rex_prefix(bits, rex, sink);
}
sink.put1(0x0f);
sink.put1(bits as u8);
}
/// Emit three-byte opcode (0F 3[8A] XX) with mandatory prefix.
fn put_mp3<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x8800, 0x0800, "Invalid encoding bits for Mp3*");
debug_assert_eq!(rex, BASE_REX, "Invalid registers for REX-less Mp3 encoding");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
sink.put1(0x0f);
sink.put1(OP3_BYTE2[(enc.mm() - 2) as usize]);
sink.put1(bits as u8);
}
/// Emit three-byte opcode (0F 3[8A] XX) with mandatory prefix and REX
fn put_rexmp3<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(bits & 0x0800, 0x0800, "Invalid encoding bits for RexMp3*");
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
rex_prefix(bits, rex, sink);
sink.put1(0x0f);
sink.put1(OP3_BYTE2[(enc.mm() - 2) as usize]);
sink.put1(bits as u8);
}
/// Emit three-byte opcode (0F 3[8A] XX) with mandatory prefix and an inferred REX prefix.
fn put_dynrexmp3<CS: CodeSink + ?Sized>(bits: u16, rex: u8, sink: &mut CS) {
debug_assert_eq!(
bits & 0x0800,
0x0800,
"Invalid encoding bits for DynRexMp3*"
);
let enc = EncodingBits::from(bits);
sink.put1(PREFIX[(enc.pp() - 1) as usize]);
if needs_rex(rex) {
rex_prefix(bits, rex, sink);
}
sink.put1(0x0f);
sink.put1(OP3_BYTE2[(enc.mm() - 2) as usize]);
sink.put1(bits as u8);
}
/// Defines the EVEX context for the `L'`, `L`, and `b` bits (bits 6:4 of the EVEX P2 byte). Table
/// 2-36 in section 2.6.10 (Intel Software Developer's Manual, volume 2A) describes how these bits
/// are used together for certain classes of instructions; i.e., special care should be taken to
/// ensure that instructions use a correct, applicable `EvexContext`. Table 2-39 contains cases
/// where opcodes can result in #UD.
#[allow(dead_code)]
enum EvexContext {
RoundingRegToRegFP {
rc: EvexRoundingControl,
},
NoRoundingFP {
sae: bool,
length: EvexVectorLength,
},
MemoryOp {
broadcast: bool,
length: EvexVectorLength,
},
Other {
length: EvexVectorLength,
},
}
impl EvexContext {
/// Encode the `L'`, `L`, and `b` bits (bits 6:4 of EVEX P2 byte) for merging with the P2 byte.
fn bits(&self) -> u8 {
match self {
Self::RoundingRegToRegFP { rc } => 0b001 | rc.bits() << 1,
Self::NoRoundingFP { sae, length } => (*sae as u8) | length.bits() << 1,
Self::MemoryOp { broadcast, length } => (*broadcast as u8) | length.bits() << 1,
Self::Other { length } => length.bits() << 1,
}
}
}
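// Illustrative check (added for this walkthrough, not part of the original source): a plain
// 256-bit operation has L'L = 0b01 and no broadcast/rounding, so `Other` with `V256`
// contributes 0b010 to bits 6:4 of P2:
#[test]
fn evex_context_bits_for_v256() {
    let ctx = EvexContext::Other {
        length: EvexVectorLength::V256,
    };
    assert_eq!(ctx.bits(), 0b010);
}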
/// The EVEX format allows choosing a vector length in the `L'` and `L` bits; see `EvexContext`.
#[allow(dead_code)]
enum EvexVectorLength {
V128,
V256,
V512,
}
impl EvexVectorLength {
/// Encode the `L'` and `L` bits for merging with the P2 byte.
fn bits(&self) -> u8 {
match self {
Self::V128 => 0b00,
Self::V256 => 0b01,
Self::V512 => 0b10,
// 0b11 is reserved (#UD).
}
}
}
/// The EVEX format allows defining rounding control in the `L'` and `L` bits; see `EvexContext`.
#[allow(dead_code)]
enum EvexRoundingControl {
RNE,
RD,
RU,
RZ,
}
impl EvexRoundingControl {
/// Encode the `L'` and `L` bits for merging with the P2 byte.
fn bits(&self) -> u8 {
match self {
Self::RNE => 0b00,
Self::RD => 0b01,
Self::RU => 0b10,
Self::RZ => 0b11,
}
}
}
/// Defines the EVEX masking behavior; masking support is described in section 2.6.4 of the Intel
/// Software Developer's Manual, volume 2A.
#[allow(dead_code)]
enum EvexMasking {
None,
Merging { k: u8 },
Zeroing { k: u8 },
}
impl EvexMasking {
/// Encode the `z` bit for merging with the P2 byte.
fn z_bit(&self) -> u8 {
match self {
Self::None | Self::Merging { .. } => 0,
Self::Zeroing { .. } => 1,
}
}
/// Encode the `aaa` bits for merging with the P2 byte.
fn aaa_bits(&self) -> u8 {
match self {
Self::None => 0b000,
Self::Merging { k } | Self::Zeroing { k } => {
debug_assert!(*k <= 7);
*k
}
}
}
}
/// Encode an EVEX prefix, including the instruction opcode. To match the current recipe
/// convention, the ModR/M byte is written separately in the recipe. This EVEX encoding function
/// only encodes the `reg` (operand 1), `vvvv` (operand 2), `rm` (operand 3) form; other forms are
/// possible (see section 2.6.2, Intel Software Developer's Manual, volume 2A), requiring a
/// refactoring of this function or separate functions for each form (e.g. as for the REX prefix).
fn put_evex<CS: CodeSink + ?Sized>(
bits: u16,
reg: RegUnit,
vvvvv: RegUnit,
rm: RegUnit,
context: EvexContext,
masking: EvexMasking,
sink: &mut CS,
) {
let enc = EncodingBits::from(bits);
// EVEX prefix.
sink.put1(0x62);
debug_assert!(enc.mm() < 0b100);
let mut p0 = enc.mm() & 0b11;
p0 |= evex2(rm, reg) << 4; // bits 3:2 are always unset
sink.put1(p0);
let mut p1 = enc.pp() | 0b100; // bit 2 is always set
p1 |= (!(vvvvv as u8) & 0b1111) << 3;
p1 |= (enc.rex_w() & 0b1) << 7;
sink.put1(p1);
let mut p2 = masking.aaa_bits();
p2 |= (!(vvvvv as u8 >> 4) & 0b1) << 3;
p2 |= context.bits() << 4;
p2 |= masking.z_bit() << 7;
sink.put1(p2);
// Opcode
sink.put1(enc.opcode_byte());
// ModR/M byte placed in recipe
}
/// Emit a ModR/M byte for reg-reg operands.
fn modrm_rr<CS: CodeSink + ?Sized>(rm: RegUnit, reg: RegUnit, sink: &mut CS) {
let reg = reg as u8 & 7;
let rm = rm as u8 & 7;
let mut b = 0b11000000;
b |= reg << 3;
b |= rm;
sink.put1(b);
}
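// Worked example (added for illustration, using plain arithmetic rather than a `CodeSink`):
// with mod = 0b11 (register direct), reg = rax (0), and rm = rbx (3), the byte is 0xC3 — the
// ModR/M byte of `add rbx, rax` (48 01 C3).
#[test]
fn modrm_rr_byte_layout() {
    let b = 0b1100_0000u8 | (0 << 3) | 3;
    assert_eq!(b, 0xC3);
}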
/// Emit a ModR/M byte where the reg bits are part of the opcode.
fn modrm_r_bits<CS: CodeSink + ?Sized>(rm: RegUnit, bits: u16, sink: &mut CS) {
let reg = (bits >> 12) as u8 & 7;
let rm = rm as u8 & 7;
let mut b = 0b11000000;
b |= reg << 3;
b |= rm;
sink.put1(b);
}
/// Emit a mode 00 ModR/M byte. This is a register-indirect addressing mode with no offset.
/// Registers %rsp and %rbp are invalid for `rm`: %rsp indicates a SIB byte, and %rbp indicates an
/// absolute immediate 32-bit address.
fn modrm_rm<CS: CodeSink + ?Sized>(rm: RegUnit, reg: RegUnit, sink: &mut CS) {
let reg = reg as u8 & 7;
let rm = rm as u8 & 7;
let mut b = 0b00000000;
b |= reg << 3;
b |= rm;
sink.put1(b);
}
/// Emit a mode 00 ModR/M byte with a RIP-relative displacement (64-bit mode). The effective
/// address is calculated by adding the displacement to the 64-bit RIP of the next instruction.
/// See the Intel Software Developer's Manual, section 2.2.1.6.
fn modrm_riprel<CS: CodeSink + ?Sized>(reg: RegUnit, sink: &mut CS) {
modrm_rm(0b101, reg, sink)
}
/// Emit a mode 01 ModR/M byte. This is a register-indirect addressing mode with 8-bit
/// displacement.
/// Register %rsp is invalid for `rm`. It indicates the presence of a SIB byte.
fn modrm_disp8<CS: CodeSink + ?Sized>(rm: RegUnit, reg: RegUnit, sink: &mut CS) {
let reg = reg as u8 & 7;
let rm = rm as u8 & 7;
let mut b = 0b01000000;
b |= reg << 3;
b |= rm;
sink.put1(b);
}
/// Emit a mode 10 ModR/M byte. This is a register-indirect addressing mode with 32-bit
/// displacement.
/// Register %rsp is invalid for `rm`. It indicates the presence of a SIB byte.
fn modrm_disp32<CS: CodeSink + ?Sized>(rm: RegUnit, reg: RegUnit, sink: &mut CS) {
let reg = reg as u8 & 7;
let rm = rm as u8 & 7;
let mut b = 0b10000000;
b |= reg << 3;
b |= rm;
sink.put1(b);
}
/// Emit a mode 00 ModR/M with a 100 RM indicating a SIB byte is present.
fn modrm_sib<CS: CodeSink + ?Sized>(reg: RegUnit, sink: &mut CS) {
modrm_rm(0b100, reg, sink);
}
/// Emit a mode 01 ModR/M with a 100 RM indicating a SIB byte and 8-bit
/// displacement are present.
fn modrm_sib_disp8<CS: CodeSink + ?Sized>(reg: RegUnit, sink: &mut CS) {
modrm_disp8(0b100, reg, sink);
}
/// Emit a mode 10 ModR/M with a 100 RM indicating a SIB byte and 32-bit
/// displacement are present.
fn modrm_sib_disp32<CS: CodeSink + ?Sized>(reg: RegUnit, sink: &mut CS) {
modrm_disp32(0b100, reg, sink);
}
/// Emit a SIB byte with a base register and no scale+index.
fn sib_noindex<CS: CodeSink + ?Sized>(base: RegUnit, sink: &mut CS) {
let base = base as u8 & 7;
// SIB SS_III_BBB.
let mut b = 0b00_100_000;
b |= base;
sink.put1(b);
}
/// Emit a SIB byte with a scale, base, and index.
fn sib<CS: CodeSink + ?Sized>(scale: u8, index: RegUnit, base: RegUnit, sink: &mut CS) {
// SIB SS_III_BBB.
debug_assert_eq!(scale & !0x03, 0, "Scale out of range");
let scale = scale & 3;
let index = index as u8 & 7;
let base = base as u8 & 7;
let b: u8 = (scale << 6) | (index << 3) | base;
sink.put1(b);
}
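// Worked example (added for illustration): the SIB byte for the address [rax + rcx*8] uses
// scale = 0b11 (2^3 = 8), index = rcx (1), base = rax (0):
#[test]
fn sib_byte_layout() {
    let (scale, index, base) = (0b11u8, 1u8, 0u8);
    assert_eq!((scale << 6) | (index << 3) | base, 0xC8);
}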
/// Get the low 4 bits of an opcode for an integer condition code.
///
/// Add this offset to a base opcode for:
///
/// ---- 0x70: Short conditional branch.
/// 0x0f 0x80: Long conditional branch.
/// 0x0f 0x90: SetCC.
///
fn icc2opc(cond: IntCC) -> u16 {
use crate::ir::condcodes::IntCC::*;
match cond {
Overflow => 0x0,
NotOverflow => 0x1,
UnsignedLessThan => 0x2,
UnsignedGreaterThanOrEqual => 0x3,
Equal => 0x4,
NotEqual => 0x5,
UnsignedLessThanOrEqual => 0x6,
UnsignedGreaterThan => 0x7,
// 0x8 = Sign.
// 0x9 = !Sign.
// 0xa = Parity even.
// 0xb = Parity odd.
SignedLessThan => 0xc,
SignedGreaterThanOrEqual => 0xd,
SignedLessThanOrEqual => 0xe,
SignedGreaterThan => 0xf,
}
}
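// Worked example (added for illustration): composing the base opcodes listed above with
// `icc2opc`. `Equal` maps to 0x4, giving 0x74 (JE rel8) for a short branch; `SignedLessThan`
// maps to 0xc, giving 0x0F 0x9C (SETL) for a SetCC:
#[test]
fn icc2opc_composes_with_base_opcodes() {
    assert_eq!(0x70 + icc2opc(IntCC::Equal), 0x74);
    assert_eq!(0x90 + icc2opc(IntCC::SignedLessThan), 0x9c);
}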
/// Get the low 4 bits of an opcode for a floating point condition code.
///
/// The ucomiss/ucomisd instructions set the EFLAGS bits ZF/PF/CF like this:
///
/// ZPC OSA
/// UN 111 000
/// GT 000 000
/// LT 001 000
/// EQ 100 000
///
/// Not all floating point condition codes are supported.
fn fcc2opc(cond: FloatCC) -> u16 {
use crate::ir::condcodes::FloatCC::*;
match cond {
Ordered => 0xb, // EQ|LT|GT => *np (P=0)
Unordered => 0xa, // UN => *p (P=1)
OrderedNotEqual => 0x5, // LT|GT => *ne (Z=0),
UnorderedOrEqual => 0x4, // UN|EQ => *e (Z=1)
GreaterThan => 0x7, // GT => *a (C=0&Z=0)
GreaterThanOrEqual => 0x3, // GT|EQ => *ae (C=0)
UnorderedOrLessThan => 0x2, // UN|LT => *b (C=1)
UnorderedOrLessThanOrEqual => 0x6, // UN|LT|EQ => *be (Z=1|C=1)
Equal | // EQ
NotEqual | // UN|LT|GT
LessThan | // LT
LessThanOrEqual | // LT|EQ
UnorderedOrGreaterThan | // UN|GT
UnorderedOrGreaterThanOrEqual // UN|GT|EQ
=> panic!("{} not supported", cond),
}
}
/// Emit a single-byte branch displacement to `destination`.
fn disp1<CS: CodeSink + ?Sized>(destination: Block, func: &Function, sink: &mut CS) {
let delta = func.offsets[destination].wrapping_sub(sink.offset() + 1);
sink.put1(delta as u8);
}
/// Emit a four-byte branch displacement to `destination`.
fn disp4<CS: CodeSink + ?Sized>(destination: Block, func: &Function, sink: &mut CS) {
let delta = func.offsets[destination].wrapping_sub(sink.offset() + 4);
sink.put4(delta);
}
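// Worked example (added for illustration): displacements are measured from the end of the
// displacement field, i.e. the address of the next instruction. If the 4-byte field is written
// at offset 0x10 and the target block sits at 0x30, the encoded delta is 0x30 - (0x10 + 4) = 0x1C:
#[test]
fn disp4_is_relative_to_the_next_instruction() {
    let (field_offset, target) = (0x10u32, 0x30u32);
    assert_eq!(target.wrapping_sub(field_offset + 4), 0x1C);
}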
/// Emit a four-byte displacement to jump table `jt`.
fn jt_disp4<CS: CodeSink + ?Sized>(jt: JumpTable, func: &Function, sink: &mut CS) {
let delta = func.jt_offsets[jt].wrapping_sub(sink.offset() + 4);
sink.put4(delta);
sink.reloc_jt(Reloc::X86PCRelRodata4, jt);
}
/// Emit a four-byte displacement to `constant`.
fn const_disp4<CS: CodeSink + ?Sized>(constant: Constant, func: &Function, sink: &mut CS) {
let offset = func.dfg.constants.get_offset(constant);
let delta = offset.wrapping_sub(sink.offset() + 4);
sink.put4(delta);
sink.reloc_constant(Reloc::X86PCRelRodata4, offset);
}

File diff suppressed because it is too large.


@@ -1,199 +0,0 @@
//! x86 Instruction Set Architectures.
mod abi;
mod binemit;
mod enc_tables;
mod registers;
pub mod settings;
#[cfg(feature = "unwind")]
pub mod unwind;
use super::super::settings as shared_settings;
#[cfg(feature = "testing_hooks")]
use crate::binemit::CodeSink;
use crate::binemit::{emit_function, MemoryCodeSink};
use crate::ir;
use crate::isa::enc_tables::{self as shared_enc_tables, lookup_enclist, Encodings};
use crate::isa::Builder as IsaBuilder;
#[cfg(feature = "unwind")]
use crate::isa::{unwind::systemv::RegisterMappingError, RegUnit};
use crate::isa::{EncInfo, RegClass, RegInfo, TargetIsa};
use crate::regalloc;
use crate::result::CodegenResult;
use crate::timing;
use alloc::{borrow::Cow, boxed::Box, vec::Vec};
use core::any::Any;
use core::fmt;
use core::hash::{Hash, Hasher};
use target_lexicon::{PointerWidth, Triple};
#[allow(dead_code)]
struct Isa {
triple: Triple,
shared_flags: shared_settings::Flags,
isa_flags: settings::Flags,
cpumode: &'static [shared_enc_tables::Level1Entry<u16>],
}
/// Get an ISA builder for creating x86 targets.
pub fn isa_builder(triple: Triple) -> IsaBuilder {
IsaBuilder {
triple,
setup: settings::builder(),
constructor: isa_constructor,
}
}
fn isa_constructor(
triple: Triple,
shared_flags: shared_settings::Flags,
builder: shared_settings::Builder,
) -> Box<dyn TargetIsa> {
let level1 = match triple.pointer_width().unwrap() {
PointerWidth::U16 => unimplemented!("x86-16"),
PointerWidth::U32 => &enc_tables::LEVEL1_I32[..],
PointerWidth::U64 => &enc_tables::LEVEL1_I64[..],
};
let isa_flags = settings::Flags::new(&shared_flags, builder);
Box::new(Isa {
triple,
isa_flags,
shared_flags,
cpumode: level1,
})
}
impl TargetIsa for Isa {
fn name(&self) -> &'static str {
"x86"
}
fn triple(&self) -> &Triple {
&self.triple
}
fn flags(&self) -> &shared_settings::Flags {
&self.shared_flags
}
fn isa_flags(&self) -> Vec<shared_settings::Value> {
self.isa_flags.iter().collect()
}
fn hash_all_flags(&self, mut hasher: &mut dyn Hasher) {
self.shared_flags.hash(&mut hasher);
self.isa_flags.hash(&mut hasher);
}
fn uses_cpu_flags(&self) -> bool {
true
}
fn uses_complex_addresses(&self) -> bool {
true
}
fn register_info(&self) -> RegInfo {
registers::INFO.clone()
}
#[cfg(feature = "unwind")]
fn map_dwarf_register(&self, reg: RegUnit) -> Result<u16, RegisterMappingError> {
unwind::systemv::map_reg(self, reg).map(|r| r.0)
}
fn encoding_info(&self) -> EncInfo {
enc_tables::INFO.clone()
}
fn legal_encodings<'a>(
&'a self,
func: &'a ir::Function,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type,
) -> Encodings<'a> {
lookup_enclist(
ctrl_typevar,
inst,
func,
self.cpumode,
&enc_tables::LEVEL2[..],
&enc_tables::ENCLISTS[..],
&enc_tables::LEGALIZE_ACTIONS[..],
&enc_tables::RECIPE_PREDICATES[..],
&enc_tables::INST_PREDICATES[..],
self.isa_flags.predicate_view(),
)
}
fn legalize_signature(&self, sig: &mut Cow<ir::Signature>, current: bool) {
abi::legalize_signature(
sig,
&self.triple,
current,
&self.shared_flags,
&self.isa_flags,
)
}
fn regclass_for_abi_type(&self, ty: ir::Type) -> RegClass {
abi::regclass_for_abi_type(ty)
}
fn allocatable_registers(&self, _func: &ir::Function) -> regalloc::RegisterSet {
abi::allocatable_registers(&self.triple, &self.shared_flags)
}
#[cfg(feature = "testing_hooks")]
fn emit_inst(
&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut dyn CodeSink,
) {
binemit::emit_inst(func, inst, divert, sink, self)
}
fn emit_function_to_memory(&self, func: &ir::Function, sink: &mut MemoryCodeSink) {
emit_function(func, binemit::emit_inst, sink, self)
}
fn prologue_epilogue(&self, func: &mut ir::Function) -> CodegenResult<()> {
let _tt = timing::prologue_epilogue();
abi::prologue_epilogue(func, self)
}
fn unsigned_add_overflow_condition(&self) -> ir::condcodes::IntCC {
ir::condcodes::IntCC::UnsignedLessThan
}
fn unsigned_sub_overflow_condition(&self) -> ir::condcodes::IntCC {
ir::condcodes::IntCC::UnsignedLessThan
}
#[cfg(feature = "unwind")]
fn create_unwind_info(
&self,
func: &ir::Function,
) -> CodegenResult<Option<super::super::unwind::UnwindInfo>> {
abi::create_unwind_info(func, self)
}
#[cfg(feature = "unwind")]
fn create_systemv_cie(&self) -> Option<gimli::write::CommonInformationEntry> {
Some(unwind::systemv::create_cie())
}
fn as_any(&self) -> &dyn Any {
self as &dyn Any
}
}
impl fmt::Display for Isa {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}\n{}", self.shared_flags, self.isa_flags)
}
}


@@ -1,86 +0,0 @@
//! x86 register descriptions.
use crate::isa::registers::{RegBank, RegClass, RegClassData, RegInfo, RegUnit};
include!(concat!(env!("OUT_DIR"), "/registers-x86.rs"));
#[cfg(test)]
mod tests {
use super::*;
use crate::isa::RegUnit;
use alloc::string::{String, ToString};
#[test]
fn unit_encodings() {
fn gpr(unit: usize) -> Option<u16> {
Some(GPR.unit(unit))
}
// The encoding of integer registers is not alphabetical.
assert_eq!(INFO.parse_regunit("rax"), gpr(0));
assert_eq!(INFO.parse_regunit("rbx"), gpr(3));
assert_eq!(INFO.parse_regunit("rcx"), gpr(1));
assert_eq!(INFO.parse_regunit("rdx"), gpr(2));
assert_eq!(INFO.parse_regunit("rsi"), gpr(6));
assert_eq!(INFO.parse_regunit("rdi"), gpr(7));
assert_eq!(INFO.parse_regunit("rbp"), gpr(5));
assert_eq!(INFO.parse_regunit("rsp"), gpr(4));
assert_eq!(INFO.parse_regunit("r8"), gpr(8));
assert_eq!(INFO.parse_regunit("r15"), gpr(15));
fn fpr(unit: usize) -> Option<u16> {
Some(FPR.unit(unit))
}
assert_eq!(INFO.parse_regunit("xmm0"), fpr(0));
assert_eq!(INFO.parse_regunit("xmm15"), fpr(15));
// FIXME(#1306) Add these tests back in when FPR32 is re-added.
// fn fpr32(unit: usize) -> Option<u16> {
// Some(FPR32.unit(unit))
// }
// assert_eq!(INFO.parse_regunit("xmm0"), fpr32(0));
// assert_eq!(INFO.parse_regunit("xmm31"), fpr32(31));
}
#[test]
fn unit_names() {
fn gpr(ru: RegUnit) -> String {
INFO.display_regunit(GPR.first + ru).to_string()
}
assert_eq!(gpr(0), "%rax");
assert_eq!(gpr(3), "%rbx");
assert_eq!(gpr(1), "%rcx");
assert_eq!(gpr(2), "%rdx");
assert_eq!(gpr(6), "%rsi");
assert_eq!(gpr(7), "%rdi");
assert_eq!(gpr(5), "%rbp");
assert_eq!(gpr(4), "%rsp");
assert_eq!(gpr(8), "%r8");
assert_eq!(gpr(15), "%r15");
fn fpr(ru: RegUnit) -> String {
INFO.display_regunit(FPR.first + ru).to_string()
}
assert_eq!(fpr(0), "%xmm0");
assert_eq!(fpr(15), "%xmm15");
// FIXME(#1306) Add these tests back in when FPR32 is re-added.
// fn fpr32(ru: RegUnit) -> String {
// INFO.display_regunit(FPR32.first + ru).to_string()
// }
// assert_eq!(fpr32(0), "%xmm0");
// assert_eq!(fpr32(31), "%xmm31");
}
#[test]
fn regclasses() {
assert_eq!(GPR.intersect_index(GPR), Some(GPR.into()));
assert_eq!(GPR.intersect_index(ABCD), Some(ABCD.into()));
assert_eq!(GPR.intersect_index(FPR), None);
assert_eq!(ABCD.intersect_index(GPR), Some(ABCD.into()));
assert_eq!(ABCD.intersect_index(ABCD), Some(ABCD.into()));
assert_eq!(ABCD.intersect_index(FPR), None);
assert_eq!(FPR.intersect_index(FPR), Some(FPR.into()));
assert_eq!(FPR.intersect_index(GPR), None);
assert_eq!(FPR.intersect_index(ABCD), None);
}
}


@@ -1,52 +0,0 @@
//! x86 Settings.
use crate::settings::{self, detail, Builder, Value};
use core::fmt;
// Include code generated by `cranelift-codegen/meta/src/gen_settings.rs`. This file contains a
// public `Flags` struct with an impl for all of the settings defined in
// `cranelift-codegen/meta/src/isa/x86/settings.rs`.
include!(concat!(env!("OUT_DIR"), "/settings-x86.rs"));
#[cfg(test)]
mod tests {
use super::{builder, Flags};
use crate::settings::{self, Configurable};
#[test]
fn presets() {
let shared = settings::Flags::new(settings::builder());
// Nehalem has SSE4.1 but not BMI1.
let mut b0 = builder();
b0.enable("nehalem").unwrap();
let f0 = Flags::new(&shared, b0);
assert_eq!(f0.has_sse41(), true);
assert_eq!(f0.has_bmi1(), false);
let mut b1 = builder();
b1.enable("haswell").unwrap();
let f1 = Flags::new(&shared, b1);
assert_eq!(f1.has_sse41(), true);
assert_eq!(f1.has_bmi1(), true);
}
#[test]
fn display_presets() {
// Spot check that the flags Display impl does not cause a panic
let shared = settings::Flags::new(settings::builder());
let b0 = builder();
let f0 = Flags::new(&shared, b0);
let _ = format!("{}", f0);
let mut b1 = builder();
b1.enable("nehalem").unwrap();
let f1 = Flags::new(&shared, b1);
let _ = format!("{}", f1);
let mut b2 = builder();
b2.enable("haswell").unwrap();
let f2 = Flags::new(&shared, b2);
let _ = format!("{}", f2);
}
}


@@ -1,531 +0,0 @@
//! Module for x86 unwind generation for supported ABIs.
pub mod systemv;
pub mod winx64;
use crate::ir::{Function, InstructionData, Opcode, ValueLoc};
use crate::isa::x86::registers::{FPR, RU};
use crate::isa::{RegUnit, TargetIsa};
use crate::result::CodegenResult;
use alloc::vec::Vec;
use std::collections::HashMap;
use crate::isa::unwind::input::{UnwindCode, UnwindInfo};
pub(crate) fn create_unwind_info(
func: &Function,
isa: &dyn TargetIsa,
) -> CodegenResult<Option<UnwindInfo<RegUnit>>> {
// Find last block based on max offset.
let last_block = func
.layout
.blocks()
.max_by_key(|b| func.offsets[*b])
.expect("at least a block");
// Find the last instruction's offset + size and use that as the function size.
let function_size = func
.inst_offsets(last_block, &isa.encoding_info())
.fold(0, |_, (offset, _, size)| offset + size);
let entry_block = func.layout.entry_block().expect("missing entry block");
let prologue_end = func.prologue_end.unwrap();
let epilogues_start = func
.epilogues_start
.iter()
.map(|(i, b)| (*b, *i))
.collect::<HashMap<_, _>>();
let word_size = isa.pointer_bytes();
let mut stack_size = None;
let mut prologue_size = 0;
let mut prologue_unwind_codes = Vec::new();
let mut epilogues_unwind_codes = Vec::new();
let mut frame_register: Option<RegUnit> = None;
// Process only entry block and blocks with epilogues.
let mut blocks = func
.epilogues_start
.iter()
.map(|(_, b)| *b)
.collect::<Vec<_>>();
if !blocks.contains(&entry_block) {
blocks.push(entry_block);
}
blocks.sort_by_key(|b| func.offsets[*b]);
for block in blocks.iter() {
let mut in_prologue = block == &entry_block;
let mut in_epilogue = false;
let mut epilogue_pop_offsets = Vec::new();
let epilogue_start = epilogues_start.get(block);
let is_last_block = block == &last_block;
for (offset, inst, size) in func.inst_offsets(*block, &isa.encoding_info()) {
let offset = offset + size;
let unwind_codes;
if in_prologue {
// Check for prologue end (inclusive)
if prologue_end == inst {
in_prologue = false;
}
prologue_size += size;
unwind_codes = &mut prologue_unwind_codes;
} else if !in_epilogue && epilogue_start == Some(&inst) {
// Now in an epilogue; emit a remember-state instruction unless this is the last block.
in_epilogue = true;
epilogues_unwind_codes.push(Vec::new());
unwind_codes = epilogues_unwind_codes.last_mut().unwrap();
if !is_last_block {
unwind_codes.push((offset, UnwindCode::RememberState));
}
} else if in_epilogue {
unwind_codes = epilogues_unwind_codes.last_mut().unwrap();
} else {
// Ignore normal instructions
continue;
}
match func.dfg[inst] {
InstructionData::Unary { opcode, arg } => {
match opcode {
Opcode::X86Push => {
let reg = func.locations[arg].unwrap_reg();
unwind_codes.push((
offset,
UnwindCode::StackAlloc {
size: word_size.into(),
},
));
unwind_codes.push((
offset,
UnwindCode::SaveRegister {
reg,
stack_offset: 0,
},
));
}
Opcode::AdjustSpDown => {
let stack_size =
stack_size.expect("expected a previous stack size instruction");
// This is used when calling a stack check function.
// We need to track the assignment to RAX, which holds the size of the stack.
unwind_codes
.push((offset, UnwindCode::StackAlloc { size: stack_size }));
}
_ => {}
}
}
InstructionData::UnaryImm { opcode, imm } => {
match opcode {
Opcode::Iconst => {
let imm: i64 = imm.into();
assert!(imm <= core::u32::MAX as i64);
assert!(stack_size.is_none());
// This instruction should only appear in a prologue, passing the stack size
// as an argument to a stack check function.
// Record the stack size so we know what it is when we encounter the adjustment
// instruction (which will adjust via the register assigned to this instruction).
stack_size = Some(imm as u32);
}
Opcode::AdjustSpDownImm => {
let imm: i64 = imm.into();
assert!(imm <= core::u32::MAX as i64);
stack_size = Some(imm as u32);
unwind_codes
.push((offset, UnwindCode::StackAlloc { size: imm as u32 }));
}
Opcode::AdjustSpUpImm => {
let imm: i64 = imm.into();
assert!(imm <= core::u32::MAX as i64);
stack_size = Some(imm as u32);
unwind_codes
.push((offset, UnwindCode::StackDealloc { size: imm as u32 }));
}
_ => {}
}
}
InstructionData::Store {
opcode: Opcode::Store,
args: [arg1, arg2],
offset: stack_offset,
..
} => {
if let (ValueLoc::Reg(src), ValueLoc::Reg(dst)) =
(func.locations[arg1], func.locations[arg2])
{
// If this is a save of an FPR, record an unwind operation
// Note: the stack_offset here is relative to an adjusted SP
if dst == (RU::rsp as RegUnit) && FPR.contains(src) {
let stack_offset: i32 = stack_offset.into();
unwind_codes.push((
offset,
UnwindCode::SaveRegister {
reg: src,
stack_offset: stack_offset as u32,
},
));
}
}
}
InstructionData::CopySpecial { src, dst, .. } if frame_register.is_none() => {
// Check for change in CFA register (RSP is always the starting CFA)
if src == (RU::rsp as RegUnit) {
unwind_codes.push((offset, UnwindCode::SetFramePointer { reg: dst }));
frame_register = Some(dst);
}
}
InstructionData::NullAry { opcode } => match opcode {
Opcode::X86Pop => {
epilogue_pop_offsets.push(offset);
}
_ => {}
},
InstructionData::MultiAry { opcode, .. } if in_epilogue => match opcode {
Opcode::Return => {
let args = func.dfg.inst_args(inst);
for (i, arg) in args.iter().rev().enumerate() {
// Only walk back the args for the pop instructions encountered
if i >= epilogue_pop_offsets.len() {
break;
}
let offset = epilogue_pop_offsets[i];
let reg = func.locations[*arg].unwrap_reg();
unwind_codes.push((offset, UnwindCode::RestoreRegister { reg }));
unwind_codes.push((
offset,
UnwindCode::StackDealloc {
size: word_size.into(),
},
));
if Some(reg) == frame_register {
unwind_codes.push((offset, UnwindCode::RestoreFramePointer));
// Keep frame_register assigned for next epilogue.
}
}
epilogue_pop_offsets.clear();
// TODO: ensure unwind codes are sorted by offset?
if !is_last_block {
unwind_codes.push((offset, UnwindCode::RestoreState));
}
in_epilogue = false;
}
_ => {}
},
_ => {}
};
}
}
Ok(Some(UnwindInfo {
prologue_size,
prologue_unwind_codes,
epilogues_unwind_codes,
function_size,
word_size,
initial_sp_offset: word_size,
}))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cursor::{Cursor, FuncCursor};
use crate::ir::{
types, AbiParam, ExternalName, InstBuilder, Signature, StackSlotData, StackSlotKind,
};
use crate::isa::{lookup_variant, BackendVariant, CallConv};
use crate::settings::{builder, Flags};
use crate::Context;
use std::str::FromStr;
use target_lexicon::triple;
#[test]
fn test_small_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 64)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
prologue_size: 9,
prologue_unwind_codes: vec![
(2, UnwindCode::StackAlloc { size: 8 }),
(
2,
UnwindCode::SaveRegister {
reg: RU::rbp.into(),
stack_offset: 0,
}
),
(
5,
UnwindCode::SetFramePointer {
reg: RU::rbp.into(),
}
),
(9, UnwindCode::StackAlloc { size: 64 })
],
epilogues_unwind_codes: vec![vec![
(13, UnwindCode::StackDealloc { size: 64 }),
(
15,
UnwindCode::RestoreRegister {
reg: RU::rbp.into()
}
),
(15, UnwindCode::StackDealloc { size: 8 }),
(15, UnwindCode::RestoreFramePointer)
]],
function_size: 16,
word_size: 8,
initial_sp_offset: 8,
}
);
}
#[test]
fn test_medium_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 10000)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
prologue_size: 27,
prologue_unwind_codes: vec![
(2, UnwindCode::StackAlloc { size: 8 }),
(
2,
UnwindCode::SaveRegister {
reg: RU::rbp.into(),
stack_offset: 0,
}
),
(
5,
UnwindCode::SetFramePointer {
reg: RU::rbp.into(),
}
),
(27, UnwindCode::StackAlloc { size: 10000 })
],
epilogues_unwind_codes: vec![vec![
(34, UnwindCode::StackDealloc { size: 10000 }),
(
36,
UnwindCode::RestoreRegister {
reg: RU::rbp.into()
}
),
(36, UnwindCode::StackDealloc { size: 8 }),
(36, UnwindCode::RestoreFramePointer)
]],
function_size: 37,
word_size: 8,
initial_sp_offset: 8,
}
);
}
#[test]
fn test_large_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 1000000)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
prologue_size: 27,
prologue_unwind_codes: vec![
(2, UnwindCode::StackAlloc { size: 8 }),
(
2,
UnwindCode::SaveRegister {
reg: RU::rbp.into(),
stack_offset: 0,
}
),
(
5,
UnwindCode::SetFramePointer {
reg: RU::rbp.into(),
}
),
(27, UnwindCode::StackAlloc { size: 1000000 })
],
epilogues_unwind_codes: vec![vec![
(34, UnwindCode::StackDealloc { size: 1000000 }),
(
36,
UnwindCode::RestoreRegister {
reg: RU::rbp.into()
}
),
(36, UnwindCode::StackDealloc { size: 8 }),
(36, UnwindCode::RestoreFramePointer)
]],
function_size: 37,
word_size: 8,
initial_sp_offset: 8,
}
);
}
fn create_function(call_conv: CallConv, stack_slot: Option<StackSlotData>) -> Function {
let mut func =
Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));
let block0 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_block(block0);
pos.ins().return_(&[]);
if let Some(stack_slot) = stack_slot {
func.stack_slots.push(stack_slot);
}
func
}
#[test]
fn test_multi_return_func() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_multi_return_function(CallConv::SystemV));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
prologue_size: 5,
prologue_unwind_codes: vec![
(2, UnwindCode::StackAlloc { size: 8 }),
(
2,
UnwindCode::SaveRegister {
reg: RU::rbp.into(),
stack_offset: 0,
}
),
(
5,
UnwindCode::SetFramePointer {
reg: RU::rbp.into()
}
)
],
epilogues_unwind_codes: vec![
vec![
(12, UnwindCode::RememberState),
(
12,
UnwindCode::RestoreRegister {
reg: RU::rbp.into()
}
),
(12, UnwindCode::StackDealloc { size: 8 }),
(12, UnwindCode::RestoreFramePointer),
(13, UnwindCode::RestoreState)
],
vec![
(
15,
UnwindCode::RestoreRegister {
reg: RU::rbp.into()
}
),
(15, UnwindCode::StackDealloc { size: 8 }),
(15, UnwindCode::RestoreFramePointer)
]
],
function_size: 16,
word_size: 8,
initial_sp_offset: 8,
}
);
}
fn create_multi_return_function(call_conv: CallConv) -> Function {
let mut sig = Signature::new(call_conv);
sig.params.push(AbiParam::new(types::I32));
let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);
let block0 = func.dfg.make_block();
let v0 = func.dfg.append_block_param(block0, types::I32);
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_block(block0);
pos.ins().brnz(v0, block2, &[]);
pos.ins().jump(block1, &[]);
pos.insert_block(block1);
pos.ins().return_(&[]);
pos.insert_block(block2);
pos.ins().return_(&[]);
func
}
}


@@ -1,235 +0,0 @@
//! Unwind information for System V ABI (x86-64).
use crate::ir::Function;
use crate::isa::{
unwind::systemv::{RegisterMappingError, UnwindInfo},
RegUnit, TargetIsa,
};
use crate::result::CodegenResult;
use gimli::{write::CommonInformationEntry, Encoding, Format, Register, X86_64};
/// Creates a new x86-64 common information entry (CIE).
pub fn create_cie() -> CommonInformationEntry {
use gimli::write::CallFrameInstruction;
let mut entry = CommonInformationEntry::new(
Encoding {
address_size: 8,
format: Format::Dwarf32,
version: 1,
},
1, // Code alignment factor
-8, // Data alignment factor
X86_64::RA,
);
// Every frame will start with the call frame address (CFA) at RSP+8
// It is +8 to account for the push of the return address by the call instruction
entry.add_instruction(CallFrameInstruction::Cfa(X86_64::RSP, 8));
// Every frame will start with the return address at RSP (CFA-8 = RSP+8-8 = RSP)
entry.add_instruction(CallFrameInstruction::Offset(X86_64::RA, -8));
entry
}
/// Map Cranelift registers to their corresponding Gimli registers.
pub fn map_reg(isa: &dyn TargetIsa, reg: RegUnit) -> Result<Register, RegisterMappingError> {
if isa.name() != "x86" || isa.pointer_bits() != 64 {
return Err(RegisterMappingError::UnsupportedArchitecture);
}
// Mapping from https://github.com/bytecodealliance/cranelift/pull/902 by @iximeow
const X86_GP_REG_MAP: [gimli::Register; 16] = [
X86_64::RAX,
X86_64::RCX,
X86_64::RDX,
X86_64::RBX,
X86_64::RSP,
X86_64::RBP,
X86_64::RSI,
X86_64::RDI,
X86_64::R8,
X86_64::R9,
X86_64::R10,
X86_64::R11,
X86_64::R12,
X86_64::R13,
X86_64::R14,
X86_64::R15,
];
const X86_XMM_REG_MAP: [gimli::Register; 16] = [
X86_64::XMM0,
X86_64::XMM1,
X86_64::XMM2,
X86_64::XMM3,
X86_64::XMM4,
X86_64::XMM5,
X86_64::XMM6,
X86_64::XMM7,
X86_64::XMM8,
X86_64::XMM9,
X86_64::XMM10,
X86_64::XMM11,
X86_64::XMM12,
X86_64::XMM13,
X86_64::XMM14,
X86_64::XMM15,
];
let reg_info = isa.register_info();
let bank = reg_info
.bank_containing_regunit(reg)
.ok_or(RegisterMappingError::MissingBank)?;
match bank.name {
"IntRegs" => {
// x86 GP registers have a weird mapping to DWARF registers, so we use a
// lookup table.
Ok(X86_GP_REG_MAP[(reg - bank.first_unit) as usize])
}
"FloatRegs" => Ok(X86_XMM_REG_MAP[(reg - bank.first_unit) as usize]),
_ => Err(RegisterMappingError::UnsupportedRegisterBank(bank.name)),
}
}
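// Illustrative check (added for this walkthrough, not part of the original source): the tables
// above are indexed by machine encoding, which does not match DWARF numbering — e.g. rdx is
// unit 2 but DWARF register 1, and rbp is unit 5 but DWARF register 6:
#[test]
fn dwarf_numbering_differs_from_machine_encoding() {
    assert_eq!(X86_64::RDX.0, 1);
    assert_eq!(X86_64::RBP.0, 6);
}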
pub(crate) fn create_unwind_info(
func: &Function,
isa: &dyn TargetIsa,
) -> CodegenResult<Option<UnwindInfo>> {
// Only System V-like calling conventions are supported
match isa.unwind_info_kind() {
crate::machinst::UnwindInfoKind::SystemV => {}
_ => return Ok(None),
}
if func.prologue_end.is_none() || isa.name() != "x86" || isa.pointer_bits() != 64 {
return Ok(None);
}
let unwind = match super::create_unwind_info(func, isa)? {
Some(u) => u,
None => {
return Ok(None);
}
};
struct RegisterMapper<'a, 'b>(&'a (dyn TargetIsa + 'b));
impl<'a, 'b> crate::isa::unwind::systemv::RegisterMapper<RegUnit> for RegisterMapper<'a, 'b> {
fn map(&self, reg: RegUnit) -> Result<u16, RegisterMappingError> {
Ok(map_reg(self.0, reg)?.0)
}
fn sp(&self) -> u16 {
X86_64::RSP.0
}
fn fp(&self) -> Option<u16> {
Some(X86_64::RBP.0)
}
}
let map = RegisterMapper(isa);
Ok(Some(UnwindInfo::build(unwind, &map)?))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cursor::{Cursor, FuncCursor};
use crate::ir::{
types, AbiParam, ExternalName, InstBuilder, Signature, StackSlotData, StackSlotKind,
};
use crate::isa::{lookup_variant, BackendVariant, CallConv};
use crate::settings::{builder, Flags};
use crate::Context;
use gimli::write::Address;
use std::str::FromStr;
use target_lexicon::triple;
#[test]
fn test_simple_func() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::SystemV,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 64)),
));
context.compile(&*isa).expect("expected compilation");
let fde = match isa
.create_unwind_info(&context.func)
.expect("can create unwind info")
{
Some(crate::isa::unwind::UnwindInfo::SystemV(info)) => {
info.to_fde(Address::Constant(1234))
}
_ => panic!("expected unwind information"),
};
assert_eq!(format!("{:?}", fde), "FrameDescriptionEntry { address: Constant(1234), length: 16, lsda: None, instructions: [(2, CfaOffset(16)), (2, Offset(Register(6), -16)), (5, CfaRegister(Register(6))), (15, SameValue(Register(6))), (15, Cfa(Register(7), 8))] }");
}
fn create_function(call_conv: CallConv, stack_slot: Option<StackSlotData>) -> Function {
let mut func =
Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));
let block0 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_block(block0);
pos.ins().return_(&[]);
if let Some(stack_slot) = stack_slot {
func.stack_slots.push(stack_slot);
}
func
}
#[test]
fn test_multi_return_func() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_multi_return_function(CallConv::SystemV));
context.compile(&*isa).expect("expected compilation");
let fde = match isa
.create_unwind_info(&context.func)
.expect("can create unwind info")
{
Some(crate::isa::unwind::UnwindInfo::SystemV(info)) => {
info.to_fde(Address::Constant(4321))
}
_ => panic!("expected unwind information"),
};
assert_eq!(format!("{:?}", fde), "FrameDescriptionEntry { address: Constant(4321), length: 16, lsda: None, instructions: [(2, CfaOffset(16)), (2, Offset(Register(6), -16)), (5, CfaRegister(Register(6))), (12, RememberState), (12, SameValue(Register(6))), (12, Cfa(Register(7), 8)), (13, RestoreState), (15, SameValue(Register(6))), (15, Cfa(Register(7), 8))] }");
}
fn create_multi_return_function(call_conv: CallConv) -> Function {
let mut sig = Signature::new(call_conv);
sig.params.push(AbiParam::new(types::I32));
let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);
let block0 = func.dfg.make_block();
let v0 = func.dfg.append_block_param(block0, types::I32);
let block1 = func.dfg.make_block();
let block2 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_block(block0);
pos.ins().brnz(v0, block2, &[]);
pos.ins().jump(block1, &[]);
pos.insert_block(block1);
pos.ins().return_(&[]);
pos.insert_block(block2);
pos.ins().return_(&[]);
func
}
}


@@ -1,265 +0,0 @@
//! Unwind information for Windows x64 ABI.
use crate::ir::Function;
use crate::isa::x86::registers::{FPR, GPR};
use crate::isa::{unwind::winx64::UnwindInfo, RegUnit, TargetIsa};
use crate::result::CodegenResult;
pub(crate) fn create_unwind_info(
func: &Function,
isa: &dyn TargetIsa,
) -> CodegenResult<Option<UnwindInfo>> {
// Only Windows fastcall is supported for unwind information
if !func.signature.call_conv.extends_windows_fastcall() || func.prologue_end.is_none() {
return Ok(None);
}
let unwind = match super::create_unwind_info(func, isa)? {
Some(u) => u,
None => {
return Ok(None);
}
};
Ok(Some(UnwindInfo::build::<RegUnit, RegisterMapper>(unwind)?))
}
struct RegisterMapper;
impl crate::isa::unwind::winx64::RegisterMapper<RegUnit> for RegisterMapper {
fn map(reg: RegUnit) -> crate::isa::unwind::winx64::MappedRegister {
use crate::isa::unwind::winx64::MappedRegister;
if GPR.contains(reg) {
MappedRegister::Int(GPR.index_of(reg) as u8)
} else if FPR.contains(reg) {
MappedRegister::Xmm(reg as u8)
} else {
panic!()
}
}
}
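// Illustrative check (added for this walkthrough, not in the original file): each Windows x64
// UNWIND_CODE slot packs the operation in the low nibble and the operation info in the high
// nibble, so UWOP_PUSH_NONVOL (op 0) of RBP (register 5) is 0x50 — the byte the tests below
// expect.
#[test]
fn unwind_code_nibble_packing() {
    let (op, info) = (0u8, 5u8); // UWOP_PUSH_NONVOL, RBP
    assert_eq!((info << 4) | op, 0x50);
}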
#[cfg(test)]
mod tests {
use super::*;
use crate::cursor::{Cursor, FuncCursor};
use crate::ir::{ExternalName, InstBuilder, Signature, StackSlotData, StackSlotKind};
use crate::isa::unwind::winx64::UnwindCode;
use crate::isa::x86::registers::RU;
use crate::isa::{lookup_variant, BackendVariant, CallConv};
use crate::settings::{builder, Flags};
use crate::Context;
use std::str::FromStr;
use target_lexicon::triple;
#[test]
fn test_wrong_calling_convention() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(CallConv::SystemV, None));
context.compile(&*isa).expect("expected compilation");
assert_eq!(
create_unwind_info(&context.func, &*isa).expect("can create unwind info"),
None
);
}
#[test]
fn test_small_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 64)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
flags: 0,
prologue_size: 9,
frame_register: None,
frame_register_offset: 0,
unwind_codes: vec![
UnwindCode::PushRegister {
instruction_offset: 2,
reg: GPR.index_of(RU::rbp.into()) as u8
},
UnwindCode::StackAlloc {
instruction_offset: 9,
size: 64
}
]
}
);
assert_eq!(unwind.emit_size(), 8);
let mut buf = [0u8; 8];
unwind.emit(&mut buf);
assert_eq!(
buf,
[
0x01, // Version and flags (version 1, no flags)
0x09, // Prologue size
0x02, // Unwind code count (1 for stack alloc, 1 for push reg)
0x00, // Frame register + offset (no frame register)
0x09, // Prolog offset
0x72, // Operation 2 (small stack alloc), info = 0x7 (size = (0x7 * 8) + 8 = 64 bytes)
0x02, // Prolog offset
0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP)
]
);
}
#[test]
fn test_medium_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 10000)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
flags: 0,
prologue_size: 27,
frame_register: None,
frame_register_offset: 0,
unwind_codes: vec![
UnwindCode::PushRegister {
instruction_offset: 2,
reg: GPR.index_of(RU::rbp.into()) as u8
},
UnwindCode::StackAlloc {
instruction_offset: 27,
size: 10000
}
]
}
);
assert_eq!(unwind.emit_size(), 12);
let mut buf = [0u8; 12];
unwind.emit(&mut buf);
assert_eq!(
buf,
[
0x01, // Version and flags (version 1, no flags)
0x1B, // Prologue size
0x03, // Unwind code count (2 for stack alloc, 1 for push reg)
0x00, // Frame register + offset (no frame register)
0x1B, // Prolog offset
0x01, // Operation 1 (large stack alloc), size is a scaled 16-bit value (info = 0)
0xE2, // Low size byte
0x04, // High size byte (e.g. 0x04E2 * 8 = 10000 bytes)
0x02, // Prolog offset
0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP)
0x00, // Padding
0x00, // Padding
]
);
}
#[test]
fn test_large_alloc() {
let isa = lookup_variant(triple!("x86_64"), BackendVariant::Legacy)
.expect("expect x86 ISA")
.finish(Flags::new(builder()));
let mut context = Context::for_function(create_function(
CallConv::WindowsFastcall,
Some(StackSlotData::new(StackSlotKind::ExplicitSlot, 1000000)),
));
context.compile(&*isa).expect("expected compilation");
let unwind = create_unwind_info(&context.func, &*isa)
.expect("can create unwind info")
.expect("expected unwind info");
assert_eq!(
unwind,
UnwindInfo {
flags: 0,
prologue_size: 27,
frame_register: None,
frame_register_offset: 0,
unwind_codes: vec![
UnwindCode::PushRegister {
instruction_offset: 2,
reg: GPR.index_of(RU::rbp.into()) as u8
},
UnwindCode::StackAlloc {
instruction_offset: 27,
size: 1000000
}
]
}
);
assert_eq!(unwind.emit_size(), 12);
let mut buf = [0u8; 12];
unwind.emit(&mut buf);
assert_eq!(
buf,
[
0x01, // Version and flags (version 1, no flags)
0x1B, // Prologue size
0x04, // Unwind code count (3 for stack alloc, 1 for push reg)
0x00, // Frame register + offset (no frame register)
0x1B, // Prolog offset
0x11, // Operation 1 (large stack alloc), size is an unscaled 32-bit value (info = 1)
0x40, // Byte 1 of size
0x42, // Byte 2 of size
0x0F, // Byte 3 of size
0x00, // Byte 4 of size (size is 0xF4240 = 1000000 bytes)
0x02, // Prolog offset
0x50, // Operation 0 (push nonvolatile register), reg = 5 (RBP)
]
);
}
fn create_function(call_conv: CallConv, stack_slot: Option<StackSlotData>) -> Function {
let mut func =
Function::with_name_signature(ExternalName::user(0, 0), Signature::new(call_conv));
let block0 = func.dfg.make_block();
let mut pos = FuncCursor::new(&mut func);
pos.insert_block(block0);
pos.ins().return_(&[]);
if let Some(stack_slot) = stack_slot {
func.stack_slots.push(stack_slot);
}
func
}
}


@@ -84,12 +84,9 @@ pub(crate) mod aarch64;
#[cfg(feature = "s390x")]
mod s390x;
#[cfg(any(feature = "x86", feature = "riscv"))]
#[cfg(feature = "riscv")]
mod legacy;
#[cfg(feature = "x86")]
use legacy::x86;
#[cfg(feature = "riscv")]
use legacy::riscv;
@@ -120,49 +117,19 @@ macro_rules! isa_builder {
}};
}
/// The "variant" for a given target. On one platform (x86-64), we have two
/// backends, the "old" and "new" one; the new one is the default if included
/// in the build configuration and not otherwise specified.
#[derive(Clone, Copy, Debug)]
pub enum BackendVariant {
/// Any backend available.
Any,
/// A "legacy" backend: one that operates using legalizations and encodings.
Legacy,
/// A backend built on `MachInst`s and the `VCode` framework.
MachInst,
}
impl Default for BackendVariant {
fn default() -> Self {
BackendVariant::Any
}
}
/// Look for an ISA for the given `triple`, selecting the backend variant given
/// by `variant` if available.
pub fn lookup_variant(triple: Triple, variant: BackendVariant) -> Result<Builder, LookupError> {
match (triple.architecture, variant) {
(Architecture::Riscv32 { .. }, _) | (Architecture::Riscv64 { .. }, _) => {
pub fn lookup_variant(triple: Triple) -> Result<Builder, LookupError> {
match triple.architecture {
Architecture::Riscv32 { .. } | Architecture::Riscv64 { .. } => {
isa_builder!(riscv, (feature = "riscv"), triple)
}
(Architecture::X86_64, BackendVariant::Legacy) => {
isa_builder!(x86, (feature = "x86"), triple)
}
(Architecture::X86_64, BackendVariant::MachInst) => {
Architecture::X86_64 => {
isa_builder!(x64, (feature = "x86"), triple)
}
#[cfg(not(feature = "old-x86-backend"))]
(Architecture::X86_64, BackendVariant::Any) => {
isa_builder!(x64, (feature = "x86"), triple)
}
#[cfg(feature = "old-x86-backend")]
(Architecture::X86_64, BackendVariant::Any) => {
isa_builder!(x86, (feature = "x86"), triple)
}
(Architecture::Arm { .. }, _) => isa_builder!(arm32, (feature = "arm32"), triple),
(Architecture::Aarch64 { .. }, _) => isa_builder!(aarch64, (feature = "arm64"), triple),
(Architecture::S390x { .. }, _) => isa_builder!(s390x, (feature = "s390x"), triple),
Architecture::Arm { .. } => isa_builder!(arm32, (feature = "arm32"), triple),
Architecture::Aarch64 { .. } => isa_builder!(aarch64, (feature = "arm64"), triple),
Architecture::S390x { .. } => isa_builder!(s390x, (feature = "s390x"), triple),
_ => Err(LookupError::Unsupported),
}
}
@@ -170,7 +137,7 @@ pub fn lookup_variant(triple: Triple, variant: BackendVariant) -> Result<Builder
/// Look for an ISA for the given `triple`.
/// Return a builder that can create a corresponding `TargetIsa`.
pub fn lookup(triple: Triple) -> Result<Builder, LookupError> {
lookup_variant(triple, BackendVariant::Any)
lookup_variant(triple)
}
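// Usage sketch (illustrative; mirrors the test setup used elsewhere in this crate):
//
//     let isa = lookup(triple!("x86_64"))
//         .expect("expect x86 ISA")
//         .finish(settings::Flags::new(settings::builder()));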
/// Look for a supported ISA with the given `name`.
@@ -292,11 +259,6 @@ pub trait TargetIsa: fmt::Display + Send + Sync {
/// Get the ISA-dependent flag values that were used to make this trait object.
fn isa_flags(&self) -> Vec<settings::Value>;
/// Get the variant of this ISA (Legacy or MachInst).
fn variant(&self) -> BackendVariant {
BackendVariant::Legacy
}
/// Hashes all flags, both ISA-independent and ISA-specific, into the
/// specified hasher.
fn hash_all_flags(&self, hasher: &mut dyn Hasher);


@@ -2962,45 +2962,6 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
| Opcode::IfcmpImm => {
panic!("ALU+imm and ALU+carry ops should not appear here!");
}
#[cfg(feature = "x86")]
Opcode::X86Udivmodx
| Opcode::X86Sdivmodx
| Opcode::X86Umulx
| Opcode::X86Smulx
| Opcode::X86Cvtt2si
| Opcode::X86Fmin
| Opcode::X86Fmax
| Opcode::X86Push
| Opcode::X86Pop
| Opcode::X86Bsr
| Opcode::X86Bsf
| Opcode::X86Pblendw
| Opcode::X86Pshufd
| Opcode::X86Pshufb
| Opcode::X86Pextr
| Opcode::X86Pinsr
| Opcode::X86Insertps
| Opcode::X86Movsd
| Opcode::X86Movlhps
| Opcode::X86Psll
| Opcode::X86Psrl
| Opcode::X86Psra
| Opcode::X86Ptest
| Opcode::X86Pmaxs
| Opcode::X86Pmaxu
| Opcode::X86Pmins
| Opcode::X86Pminu
| Opcode::X86Pmullq
| Opcode::X86Pmuludq
| Opcode::X86Punpckh
| Opcode::X86Punpckl
| Opcode::X86Vcvtudq2ps
| Opcode::X86Palignr
| Opcode::X86ElfTlsGetAddr
| Opcode::X86MachoTlsGetAddr => {
panic!("x86-specific opcode in supposedly arch-neutral IR!");
}
}
Ok(())


@@ -109,7 +109,6 @@ mod tests {
use target_lexicon::triple;
#[test]
#[cfg_attr(feature = "old-x86-backend", ignore)]
fn test_simple_func() {
let isa = lookup(triple!("x86_64"))
.expect("expect x86 ISA")
@@ -152,7 +151,6 @@ mod tests {
}
#[test]
#[cfg_attr(feature = "old-x86-backend", ignore)]
fn test_multi_return_func() {
let isa = lookup(triple!("x86_64"))
.expect("expect x86 ISA")


@@ -6900,44 +6900,6 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
panic!("Branch opcode reached non-branch lowering logic!");
}
Opcode::X86Udivmodx
| Opcode::X86Sdivmodx
| Opcode::X86Umulx
| Opcode::X86Smulx
| Opcode::X86Cvtt2si
| Opcode::X86Fmin
| Opcode::X86Fmax
| Opcode::X86Push
| Opcode::X86Pop
| Opcode::X86Bsr
| Opcode::X86Bsf
| Opcode::X86Pblendw
| Opcode::X86Pshufd
| Opcode::X86Pshufb
| Opcode::X86Pextr
| Opcode::X86Pinsr
| Opcode::X86Insertps
| Opcode::X86Movsd
| Opcode::X86Movlhps
| Opcode::X86Palignr
| Opcode::X86Psll
| Opcode::X86Psrl
| Opcode::X86Psra
| Opcode::X86Ptest
| Opcode::X86Pmaxs
| Opcode::X86Pmaxu
| Opcode::X86Pmins
| Opcode::X86Pminu
| Opcode::X86Pmullq
| Opcode::X86Pmuludq
| Opcode::X86Punpckh
| Opcode::X86Punpckl
| Opcode::X86Vcvtudq2ps
| Opcode::X86ElfTlsGetAddr
| Opcode::X86MachoTlsGetAddr => {
panic!("x86-specific opcode in supposedly arch-neutral IR!");
}
Opcode::Nop => {
// Nothing.
}


@@ -21,9 +21,9 @@ use crate::ir::types::{I32, I64};
use crate::ir::{self, InstBuilder, MemFlags};
use crate::isa::TargetIsa;
#[cfg(any(feature = "x86", feature = "riscv"))]
#[cfg(feature = "riscv")]
use crate::predicates;
#[cfg(any(feature = "x86", feature = "riscv"))]
#[cfg(feature = "riscv")]
use alloc::vec::Vec;
use crate::timing;


@@ -3,7 +3,7 @@
use crate::binemit;
use crate::ir;
use crate::isa::{
BackendVariant, EncInfo, Encoding, Encodings, Legalize, RegClass, RegInfo, TargetIsa,
EncInfo, Encoding, Encodings, Legalize, RegClass, RegInfo, TargetIsa,
};
use crate::machinst::*;
use crate::regalloc::RegisterSet;
@@ -64,10 +64,6 @@ impl TargetIsa for TargetIsaAdapter {
self.backend.isa_flags()
}
fn variant(&self) -> BackendVariant {
BackendVariant::MachInst
}
fn hash_all_flags(&self, hasher: &mut dyn Hasher) {
self.backend.hash_all_flags(hasher);
}