cranelift: Add newtype wrappers for x64 register classes
The primary motivation of this large commit (apologies for its size!) is to
introduce `Gpr` and `Xmm` newtypes over `Reg`. This should help catch
difficult-to-diagnose register class mixup bugs in x64 lowerings.
But having a newtype for `Gpr` and `Xmm` themselves isn't enough to catch all of
our operand-with-wrong-register-class bugs, because about 50% of operands on x64
aren't just a register, but a register or memory address or even an
immediate! So we have `{Gpr,Xmm}Mem[Imm]` newtypes as well.
Unfortunately, `GprMem` et al can't be `enum`s and are therefore a little bit
noisier to work with from ISLE. They need to maintain the invariant that their
registers really are of the claimed register class, so they need to encapsulate
the inner data. If they exposed the underlying `enum` variants, then anyone
could just change register classes or construct a `GprMem` that holds an XMM
register, defeating the whole point of these newtypes. So when working with
these newtypes from ISLE, we rely on external constructors like `(gpr_to_gpr_mem
my_gpr)` instead of `(GprMem.Gpr my_gpr)`.
A few extra lines of code are included to add support for register mapping
for all of these newtypes as well. Ultimately this is all a bit wordier than I'd
hoped it would be when I first started authoring this commit, but I think it is
all worth it nonetheless!
In the process of adding these newtypes, I didn't want to have to update both
the ISLE `extern` type definition of `MInst` and the Rust definition, so I move
the definition fully into ISLE, similar to what aarch64 does.
Finally, this process isn't complete. I've introduced the newtypes here, and
I've made most XMM-using instructions switch from `Reg` to `Xmm`, as well as
register class-converting instructions, but I haven't moved all of the GPR-using
instructions over to the newtypes yet. I figured this commit was big enough as
it was, and I can continue the adoption of these newtypes in follow-up commits.
Part of #3685.
This commit is contained in:
@@ -1,26 +1,28 @@
|
||||
//! ISLE integration glue code for x64 lowering.
|
||||
|
||||
// Pull in the ISLE generated code.
|
||||
mod generated_code;
|
||||
pub(crate) mod generated_code;
|
||||
use generated_code::MInst;
|
||||
use regalloc::Writable;
|
||||
|
||||
// Types that the generated ISLE code uses via `use super::*`.
|
||||
use super::{
|
||||
is_mergeable_load, lower_to_amode, AluRmiROpcode, Inst as MInst, OperandSize, Reg, RegMemImm,
|
||||
};
|
||||
use super::{is_mergeable_load, lower_to_amode, Reg};
|
||||
use crate::{
|
||||
ir::{immediates::*, types::*, Inst, InstructionData, Opcode, TrapCode, Value, ValueList},
|
||||
isa::x64::{
|
||||
inst::{
|
||||
args::{
|
||||
Amode, Avx512Opcode, CmpOpcode, ExtKind, ExtMode, FcmpImm, Imm8Reg, RegMem,
|
||||
ShiftKind, SseOpcode, SyntheticAmode, CC,
|
||||
},
|
||||
regs, x64_map_regs,
|
||||
},
|
||||
settings::Flags as IsaFlags,
|
||||
ir::{
|
||||
immediates::*, types::*, Inst, InstructionData, Opcode, TrapCode, Value, ValueLabel,
|
||||
ValueList,
|
||||
},
|
||||
isa::{
|
||||
settings::Flags,
|
||||
unwind::UnwindInst,
|
||||
x64::{
|
||||
inst::{args::*, regs, x64_map_regs},
|
||||
settings::Flags as IsaFlags,
|
||||
},
|
||||
},
|
||||
machinst::{
|
||||
isle::*, AtomicRmwOp, InsnInput, InsnOutput, LowerCtx, VCodeConstant, VCodeConstantData,
|
||||
},
|
||||
machinst::{isle::*, InsnInput, InsnOutput, LowerCtx, VCodeConstantData},
|
||||
settings::Flags,
|
||||
};
|
||||
use std::convert::TryFrom;
|
||||
|
||||
@@ -252,8 +254,8 @@ where
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn xmm0(&mut self) -> WritableReg {
|
||||
WritableReg::from_reg(regs::xmm0())
|
||||
fn xmm0(&mut self) -> WritableXmm {
|
||||
WritableXmm::from_reg(Xmm::new(regs::xmm0()).unwrap())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -262,7 +264,7 @@ where
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn amode_imm_reg_reg_shift(&mut self, simm32: u32, base: Reg, index: Reg, shift: u8) -> Amode {
|
||||
fn amode_imm_reg_reg_shift(&mut self, simm32: u32, base: Gpr, index: Gpr, shift: u8) -> Amode {
|
||||
Amode::imm_reg_reg_shift(simm32, base, index, shift)
|
||||
}
|
||||
|
||||
@@ -271,6 +273,16 @@ where
|
||||
amode.clone().into()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn writable_gpr_to_reg(&mut self, r: WritableGpr) -> WritableReg {
|
||||
r.to_writable_reg()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn writable_xmm_to_reg(&mut self, r: WritableXmm) -> WritableReg {
|
||||
r.to_writable_reg()
|
||||
}
|
||||
|
||||
fn ishl_i8x16_mask_for_const(&mut self, amt: u32) -> SyntheticAmode {
|
||||
// When the shift amount is known, we can statically (i.e. at compile
|
||||
// time) determine the mask to use and only emit that.
|
||||
@@ -306,6 +318,96 @@ where
|
||||
.use_constant(VCodeConstantData::WellKnown(&I8X16_USHR_MASKS));
|
||||
SyntheticAmode::ConstantOffset(mask_table)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn writable_reg_to_xmm(&mut self, r: WritableReg) -> WritableXmm {
|
||||
Writable::from_reg(Xmm::new(r.to_reg()).unwrap())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn writable_xmm_to_xmm(&mut self, r: WritableXmm) -> Xmm {
|
||||
r.to_reg()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn writable_gpr_to_gpr(&mut self, r: WritableGpr) -> Gpr {
|
||||
r.to_reg()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn gpr_to_reg(&mut self, r: Gpr) -> Reg {
|
||||
r.into()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn xmm_to_reg(&mut self, r: Xmm) -> Reg {
|
||||
r.into()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn xmm_to_xmm_mem_imm(&mut self, r: Xmm) -> XmmMemImm {
|
||||
r.into()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn temp_writable_gpr(&mut self) -> WritableGpr {
|
||||
Writable::from_reg(Gpr::new(self.temp_writable_reg(I64).to_reg()).unwrap())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn temp_writable_xmm(&mut self) -> WritableXmm {
|
||||
Writable::from_reg(Xmm::new(self.temp_writable_reg(I8X16).to_reg()).unwrap())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn xmm_mem_new(&mut self, rm: &RegMem) -> XmmMem {
|
||||
XmmMem::new(rm.clone()).unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn gpr_mem_imm_new(&mut self, rmi: &RegMemImm) -> GprMemImm {
|
||||
GprMemImm::new(rmi.clone()).unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn xmm_mem_imm_new(&mut self, rmi: &RegMemImm) -> XmmMemImm {
|
||||
XmmMemImm::new(rmi.clone()).unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn xmm_to_xmm_mem(&mut self, r: Xmm) -> XmmMem {
|
||||
r.into()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn xmm_mem_to_reg_mem(&mut self, xm: &XmmMem) -> RegMem {
|
||||
xm.clone().into()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn gpr_mem_to_reg_mem(&mut self, gm: &GprMem) -> RegMem {
|
||||
gm.clone().into()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn xmm_new(&mut self, r: Reg) -> Xmm {
|
||||
Xmm::new(r).unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn gpr_new(&mut self, r: Reg) -> Gpr {
|
||||
Gpr::new(r).unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn gpr_mem_new(&mut self, rm: &RegMem) -> GprMem {
|
||||
GprMem::new(rm.clone()).unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn reg_to_gpr_mem(&mut self, r: Reg) -> GprMem {
|
||||
GprMem::new(RegMem::reg(r)).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
// Since x64 doesn't have 8x16 shifts and we must use a 16x8 shift instead, we
|
||||
|
||||
Reference in New Issue
Block a user