x64: port atomic_rmw to ISLE (#4389)

* x64: port `atomic_rmw` to ISLE

This change ports `atomic_rmw` to ISLE for the x64 backend. It does not
change the lowering in any way, though it seems possible that the fixed
regs need not be as fixed and that there are opportunities for single
instruction lowerings. It does rename `inst_common::AtomicRmwOp` to
`MachAtomicRmwOp` to disambiguate it from the IR enum of the same name.

* x64: remove remaining hardcoded register constraints for `atomic_rmw`

* x64: use `SyntheticAmode` in `AtomicRmwSeq`

* review: add missing reg collector for amode

* review: collect memory registers in the 'late' phase
This commit is contained in:
Andrew Brown
2022-07-06 16:58:59 -07:00
committed by GitHub
parent f98076ae88
commit 8629cbc6a4
10 changed files with 196 additions and 172 deletions

View File

@@ -45,11 +45,10 @@ pub(crate) fn insn_outputs<I: VCodeInst, C: LowerCtx<I = I>>(
//============================================================================
// Atomic instructions.
/// Atomic memory update operations. As of 21 Aug 2020 these are used for the aarch64 and x64
/// targets.
/// Atomic memory update operations.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum AtomicRmwOp {
pub enum MachAtomicRmwOp {
/// Add
Add,
/// Sub
@@ -74,21 +73,22 @@ pub enum AtomicRmwOp {
Smax,
}
impl AtomicRmwOp {
/// Converts an `ir::AtomicRmwOp` to the corresponding `inst_common::AtomicRmwOp`.
impl MachAtomicRmwOp {
/// Converts an `ir::AtomicRmwOp` to the corresponding
/// `inst_common::AtomicRmwOp`.
pub fn from(ir_op: ir::AtomicRmwOp) -> Self {
match ir_op {
ir::AtomicRmwOp::Add => AtomicRmwOp::Add,
ir::AtomicRmwOp::Sub => AtomicRmwOp::Sub,
ir::AtomicRmwOp::And => AtomicRmwOp::And,
ir::AtomicRmwOp::Nand => AtomicRmwOp::Nand,
ir::AtomicRmwOp::Or => AtomicRmwOp::Or,
ir::AtomicRmwOp::Xor => AtomicRmwOp::Xor,
ir::AtomicRmwOp::Xchg => AtomicRmwOp::Xchg,
ir::AtomicRmwOp::Umin => AtomicRmwOp::Umin,
ir::AtomicRmwOp::Umax => AtomicRmwOp::Umax,
ir::AtomicRmwOp::Smin => AtomicRmwOp::Smin,
ir::AtomicRmwOp::Smax => AtomicRmwOp::Smax,
ir::AtomicRmwOp::Add => MachAtomicRmwOp::Add,
ir::AtomicRmwOp::Sub => MachAtomicRmwOp::Sub,
ir::AtomicRmwOp::And => MachAtomicRmwOp::And,
ir::AtomicRmwOp::Nand => MachAtomicRmwOp::Nand,
ir::AtomicRmwOp::Or => MachAtomicRmwOp::Or,
ir::AtomicRmwOp::Xor => MachAtomicRmwOp::Xor,
ir::AtomicRmwOp::Xchg => MachAtomicRmwOp::Xchg,
ir::AtomicRmwOp::Umin => MachAtomicRmwOp::Umin,
ir::AtomicRmwOp::Umax => MachAtomicRmwOp::Umax,
ir::AtomicRmwOp::Smin => MachAtomicRmwOp::Smin,
ir::AtomicRmwOp::Smax => MachAtomicRmwOp::Smax,
}
}
}

View File

@@ -328,6 +328,11 @@ impl<'a, F: Fn(VReg) -> VReg> OperandCollector<'a, F> {
self.add_operand(Operand::reg_use(reg.into()));
}
/// Add a register use, at the end of the instruction (`After` position).
pub fn reg_late_use(&mut self, reg: Reg) {
self.add_operand(Operand::reg_use_at_end(reg.into()));
}
/// Add multiple register uses.
pub fn reg_uses(&mut self, regs: &[Reg]) {
for &reg in regs {