[AArch64] Refactor Mov instructions (#4033)

Merge Mov32 and Mov64 into a single Mov instruction parameterized by a new
OperandSize field. Also combine MovZ, MovN, and MovK into a single MovWide
instruction with a new MoveWideOp opcode field to select between the operations.

Copyright (c) 2022, Arm Limited.
Author: Sam Parker
Date: 2022-04-14 22:51:12 +01:00 (committed via GitHub)
Parent: dd442a4d2f
Commit: 682ef7b470
7 changed files with 349 additions and 319 deletions
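The merge is natural at the encoding level: MOVZ, MOVN and MOVK share one base layout and differ only in a two-bit opcode field, while the 32- and 64-bit forms differ only in the sf bit. A standalone sketch of that shared layout (illustrative only, not code from this patch; the function and field names here are assumptions):

/// MOVZ/MOVN/MOVK share base 0x12800000; the variants differ only in bits
/// 30:29 (opc), and the 32-/64-bit forms only in bit 31 (sf).
fn encode_move_wide(opc: u32, sf: u32, hw: u32, imm16: u32, rd: u32) -> u32 {
    assert!(matches!(opc, 0b00 | 0b10 | 0b11)); // MOVN, MOVZ, MOVK
    assert!(sf <= 1 && hw <= 0b11 && imm16 <= 0xffff && rd < 32);
    0x1280_0000 | (sf << 31) | (opc << 29) | (hw << 21) | (imm16 << 5) | rd
}

fn main() {
    // `movz x8, #65535`: 64-bit MOVZ of x8 with no shift.
    assert_eq!(encode_move_wide(0b10, 1, 0, 0xffff, 8), 0xd29f_ffe8);
}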

View File

@@ -156,33 +156,18 @@
   (mem PairAMode)
   (flags MemFlags))

-  ;; A MOV instruction. These are encoded as ORR's (AluRRR form) but we
-  ;; keep them separate at the `Inst` level for better pretty-printing
-  ;; and faster `is_move()` logic.
-  (Mov64
+  ;; A MOV instruction. These are encoded as ORR's (AluRRR form).
+  ;; The 32-bit version zeroes the top 32 bits of the
+  ;; destination, which is effectively an alias for an unsigned
+  ;; 32-to-64-bit extension.
+  (Mov
+    (size OperandSize)
     (rd WritableReg)
     (rm Reg))

-  ;; A 32-bit MOV. Zeroes the top 32 bits of the destination. This is
-  ;; effectively an alias for an unsigned 32-to-64-bit extension.
-  (Mov32
-    (rd WritableReg)
-    (rm Reg))
-
-  ;; A MOVZ with a 16-bit immediate.
-  (MovZ
-    (rd WritableReg)
-    (imm MoveWideConst)
-    (size OperandSize))
-
-  ;; A MOVN with a 16-bit immediate.
-  (MovN
-    (rd WritableReg)
-    (imm MoveWideConst)
-    (size OperandSize))
-
-  ;; A MOVK with a 16-bit immediate.
-  (MovK
+  ;; A MOV[Z,N,K] with a 16-bit immediate.
+  (MovWide
+    (op MoveWideOp)
     (rd WritableReg)
     (imm MoveWideConst)
     (size OperandSize))

@@ -841,6 +826,13 @@
     (MSub)
 ))

+(type MoveWideOp
+  (enum
+    (MovZ)
+    (MovN)
+    (MovK)
+))
+
 (type UImm5 (primitive UImm5))
 (type Imm12 (primitive Imm12))
 (type ImmLogic (primitive ImmLogic))

@@ -1361,14 +1353,14 @@
 (decl movz (MoveWideConst OperandSize) Reg)
 (rule (movz imm size)
       (let ((dst WritableReg (temp_writable_reg $I64))
-            (_ Unit (emit (MInst.MovZ dst imm size))))
+            (_ Unit (emit (MInst.MovWide (MoveWideOp.MovZ) dst imm size))))
         dst))

 ;; Helper for emitting `MInst.MovN` instructions.
 (decl movn (MoveWideConst OperandSize) Reg)
 (rule (movn imm size)
       (let ((dst WritableReg (temp_writable_reg $I64))
-            (_ Unit (emit (MInst.MovN dst imm size))))
+            (_ Unit (emit (MInst.MovWide (MoveWideOp.MovN) dst imm size))))
         dst))

 ;; Helper for emitting `MInst.AluRRImmLogic` instructions.

View File

@@ -185,25 +185,16 @@ fn enc_conditional_br(
     }
 }

-const MOVE_WIDE_FIXED: u32 = 0x12800000;
-
-#[repr(u32)]
-enum MoveWideOpcode {
-    MOVN = 0b00,
-    MOVZ = 0b10,
-    MOVK = 0b11,
-}
-
-fn enc_move_wide(
-    op: MoveWideOpcode,
-    rd: Writable<Reg>,
-    imm: MoveWideConst,
-    size: OperandSize,
-) -> u32 {
+fn enc_move_wide(op: MoveWideOp, rd: Writable<Reg>, imm: MoveWideConst, size: OperandSize) -> u32 {
     assert!(imm.shift <= 0b11);
-    MOVE_WIDE_FIXED
+    let op = match op {
+        MoveWideOp::MovN => 0b00,
+        MoveWideOp::MovZ => 0b10,
+        MoveWideOp::MovK => 0b11,
+    };
+    0x12800000
         | size.sf_bit() << 31
-        | (op as u32) << 29
+        | op << 29
         | u32::from(imm.shift) << 21
         | u32::from(imm.bits) << 5
         | machreg_to_gpr(rd.to_reg())

@@ -1315,12 +1306,14 @@ impl MachInstEmit for Inst {
                     }
                 }
             }
-            &Inst::Mov64 { rd, rm } => {
+            &Inst::Mov { size, rd, rm } => {
                 let rd = allocs.next_writable(rd);
                 let rm = allocs.next(rm);
                 assert!(rd.to_reg().class() == rm.class());
                 assert!(rm.class() == RegClass::Int);
+                match size {
+                    OperandSize::Size64 => {
                 // MOV to SP is interpreted as MOV to XZR instead. And our codegen
                 // should never MOV to XZR.
                 assert!(rd.to_reg() != stack_reg());

@@ -1340,26 +1333,18 @@ impl MachInstEmit for Inst {
                 sink.put4(enc_arith_rrr(0b10101010_000, 0b000_000, rd, zero_reg(), rm));
             }
             }
-            &Inst::Mov32 { rd, rm } => {
-                let rd = allocs.next_writable(rd);
-                let rm = allocs.next(rm);
+                    OperandSize::Size32 => {
                 // MOV to SP is interpreted as MOV to XZR instead. And our codegen
                 // should never MOV to XZR.
                 assert!(machreg_to_gpr(rd.to_reg()) != 31);
                 // Encoded as ORR rd, rm, zero.
                 sink.put4(enc_arith_rrr(0b00101010_000, 0b000_000, rd, zero_reg(), rm));
             }
-            &Inst::MovZ { rd, imm, size } => {
-                let rd = allocs.next_writable(rd);
-                sink.put4(enc_move_wide(MoveWideOpcode::MOVZ, rd, imm, size))
             }
-            &Inst::MovN { rd, imm, size } => {
-                let rd = allocs.next_writable(rd);
-                sink.put4(enc_move_wide(MoveWideOpcode::MOVN, rd, imm, size))
             }
-            &Inst::MovK { rd, imm, size } => {
+            &Inst::MovWide { op, rd, imm, size } => {
                 let rd = allocs.next_writable(rd);
-                sink.put4(enc_move_wide(MoveWideOpcode::MOVK, rd, imm, size))
+                sink.put4(enc_move_wide(op, rd, imm, size));
             }
             &Inst::CSel { rd, rn, rm, cond } => {
                 let rd = allocs.next_writable(rd);

@@ -2700,7 +2685,11 @@ impl MachInstEmit for Inst {
             } => {
                 let rd = allocs.next_writable(rd);
                 let rn = allocs.next(rn);
-                let mov = Inst::Mov32 { rd, rm: rn };
+                let mov = Inst::Mov {
+                    size: OperandSize::Size32,
+                    rd,
+                    rm: rn,
+                };
                 mov.emit(&[], sink, emit_info, state);
             }
             &Inst::Extend {

@@ -2980,7 +2969,11 @@ impl MachInstEmit for Inst {
                     add.emit(&[], sink, emit_info, state);
                 } else if offset == 0 {
                     if reg != rd.to_reg() {
-                        let mov = Inst::Mov64 { rd, rm: reg };
+                        let mov = Inst::Mov {
+                            size: OperandSize::Size64,
+                            rd,
+                            rm: reg,
+                        };
                         mov.emit(&[], sink, emit_info, state);
                     }

View File

@@ -1920,7 +1920,8 @@ fn test_aarch64_binemit() {
     ));
     insns.push((
-        Inst::Mov64 {
+        Inst::Mov {
+            size: OperandSize::Size64,
             rd: writable_xreg(8),
             rm: xreg(9),
         },

@@ -1928,7 +1929,8 @@ fn test_aarch64_binemit() {
         "mov x8, x9",
     ));
     insns.push((
-        Inst::Mov32 {
+        Inst::Mov {
+            size: OperandSize::Size32,
             rd: writable_xreg(8),
             rm: xreg(9),
         },

@@ -1937,7 +1939,8 @@ fn test_aarch64_binemit() {
     ));
     insns.push((
-        Inst::MovZ {
+        Inst::MovWide {
+            op: MoveWideOp::MovZ,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0x0000_0000_0000_ffff).unwrap(),
             size: OperandSize::Size64,

@@ -1946,7 +1949,8 @@ fn test_aarch64_binemit() {
         "movz x8, #65535",
     ));
     insns.push((
-        Inst::MovZ {
+        Inst::MovWide {
+            op: MoveWideOp::MovZ,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0x0000_0000_ffff_0000).unwrap(),
             size: OperandSize::Size64,

@@ -1955,7 +1959,8 @@ fn test_aarch64_binemit() {
         "movz x8, #65535, LSL #16",
     ));
     insns.push((
-        Inst::MovZ {
+        Inst::MovWide {
+            op: MoveWideOp::MovZ,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0x0000_ffff_0000_0000).unwrap(),
             size: OperandSize::Size64,

@@ -1964,7 +1969,8 @@ fn test_aarch64_binemit() {
         "movz x8, #65535, LSL #32",
     ));
     insns.push((
-        Inst::MovZ {
+        Inst::MovWide {
+            op: MoveWideOp::MovZ,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0xffff_0000_0000_0000).unwrap(),
             size: OperandSize::Size64,

@@ -1973,7 +1979,8 @@ fn test_aarch64_binemit() {
         "movz x8, #65535, LSL #48",
     ));
     insns.push((
-        Inst::MovZ {
+        Inst::MovWide {
+            op: MoveWideOp::MovZ,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0x0000_0000_ffff_0000).unwrap(),
             size: OperandSize::Size32,

@@ -1983,7 +1990,8 @@ fn test_aarch64_binemit() {
     ));
     insns.push((
-        Inst::MovN {
+        Inst::MovWide {
+            op: MoveWideOp::MovN,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0x0000_0000_0000_ffff).unwrap(),
             size: OperandSize::Size64,

@@ -1992,7 +2000,8 @@ fn test_aarch64_binemit() {
         "movn x8, #65535",
     ));
     insns.push((
-        Inst::MovN {
+        Inst::MovWide {
+            op: MoveWideOp::MovN,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0x0000_0000_ffff_0000).unwrap(),
             size: OperandSize::Size64,

@@ -2001,7 +2010,8 @@ fn test_aarch64_binemit() {
         "movn x8, #65535, LSL #16",
    ));
     insns.push((
-        Inst::MovN {
+        Inst::MovWide {
+            op: MoveWideOp::MovN,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0x0000_ffff_0000_0000).unwrap(),
             size: OperandSize::Size64,

@@ -2010,7 +2020,8 @@ fn test_aarch64_binemit() {
         "movn x8, #65535, LSL #32",
     ));
     insns.push((
-        Inst::MovN {
+        Inst::MovWide {
+            op: MoveWideOp::MovN,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0xffff_0000_0000_0000).unwrap(),
             size: OperandSize::Size64,

@@ -2019,7 +2030,8 @@ fn test_aarch64_binemit() {
         "movn x8, #65535, LSL #48",
     ));
     insns.push((
-        Inst::MovN {
+        Inst::MovWide {
+            op: MoveWideOp::MovN,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0x0000_0000_0000_ffff).unwrap(),
             size: OperandSize::Size32,

@@ -2029,7 +2041,8 @@ fn test_aarch64_binemit() {
     ));
     insns.push((
-        Inst::MovK {
+        Inst::MovWide {
+            op: MoveWideOp::MovK,
             rd: writable_xreg(12),
             imm: MoveWideConst::maybe_from_u64(0x0000_0000_0000_0000).unwrap(),
             size: OperandSize::Size64,

@@ -2038,7 +2051,8 @@ fn test_aarch64_binemit() {
         "movk x12, #0",
     ));
     insns.push((
-        Inst::MovK {
+        Inst::MovWide {
+            op: MoveWideOp::MovK,
             rd: writable_xreg(19),
             imm: MoveWideConst::maybe_with_shift(0x0000, 16).unwrap(),
             size: OperandSize::Size64,

@@ -2047,7 +2061,8 @@ fn test_aarch64_binemit() {
         "movk x19, #0, LSL #16",
     ));
     insns.push((
-        Inst::MovK {
+        Inst::MovWide {
+            op: MoveWideOp::MovK,
             rd: writable_xreg(3),
             imm: MoveWideConst::maybe_from_u64(0x0000_0000_0000_ffff).unwrap(),
             size: OperandSize::Size64,

@@ -2056,7 +2071,8 @@ fn test_aarch64_binemit() {
         "movk x3, #65535",
     ));
     insns.push((
-        Inst::MovK {
+        Inst::MovWide {
+            op: MoveWideOp::MovK,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0x0000_0000_ffff_0000).unwrap(),
             size: OperandSize::Size64,

@@ -2065,7 +2081,8 @@ fn test_aarch64_binemit() {
         "movk x8, #65535, LSL #16",
     ));
     insns.push((
-        Inst::MovK {
+        Inst::MovWide {
+            op: MoveWideOp::MovK,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0x0000_ffff_0000_0000).unwrap(),
             size: OperandSize::Size64,

@@ -2074,7 +2091,8 @@ fn test_aarch64_binemit() {
         "movk x8, #65535, LSL #32",
     ));
     insns.push((
-        Inst::MovK {
+        Inst::MovWide {
+            op: MoveWideOp::MovK,
             rd: writable_xreg(8),
             imm: MoveWideConst::maybe_from_u64(0xffff_0000_0000_0000).unwrap(),
             size: OperandSize::Size64,

View File

@@ -40,8 +40,8 @@ mod emit_tests;
 pub use crate::isa::aarch64::lower::isle::generated_code::{
     ALUOp, ALUOp3, AtomicRMWOp, BitOp, FPUOp1, FPUOp2, FPUOp3, FpuRoundMode, FpuToIntOp,
-    IntToFpuOp, MInst as Inst, VecALUOp, VecExtendOp, VecLanesOp, VecMisc2, VecPairOp, VecRRLongOp,
-    VecRRNarrowOp, VecRRPairLongOp, VecRRRLongOp, VecShiftImmOp,
+    IntToFpuOp, MInst as Inst, MoveWideOp, VecALUOp, VecExtendOp, VecLanesOp, VecMisc2, VecPairOp,
+    VecRRLongOp, VecRRNarrowOp, VecRRPairLongOp, VecRRRLongOp, VecShiftImmOp,
 };

 /// A floating-point unit (FPU) operation with two args, a register and an immediate.

@@ -130,14 +130,16 @@ impl Inst {
         if let Some(imm) = MoveWideConst::maybe_from_u64(value) {
             // 16-bit immediate (shifted by 0, 16, 32 or 48 bits) in MOVZ
-            smallvec![Inst::MovZ {
+            smallvec![Inst::MovWide {
+                op: MoveWideOp::MovZ,
                 rd,
                 imm,
                 size: OperandSize::Size64
             }]
         } else if let Some(imm) = MoveWideConst::maybe_from_u64(!value) {
             // 16-bit immediate (shifted by 0, 16, 32 or 48 bits) in MOVN
-            smallvec![Inst::MovN {
+            smallvec![Inst::MovWide {
+                op: MoveWideOp::MovN,
                 rd,
                 imm,
                 size: OperandSize::Size64

@@ -178,15 +180,30 @@ impl Inst {
                     let imm =
                         MoveWideConst::maybe_with_shift(((!imm16) & 0xffff) as u16, i * 16)
                             .unwrap();
-                    insts.push(Inst::MovN { rd, imm, size });
+                    insts.push(Inst::MovWide {
+                        op: MoveWideOp::MovN,
+                        rd,
+                        imm,
+                        size,
+                    });
                 } else {
                     let imm =
                         MoveWideConst::maybe_with_shift(imm16 as u16, i * 16).unwrap();
-                    insts.push(Inst::MovZ { rd, imm, size });
+                    insts.push(Inst::MovWide {
+                        op: MoveWideOp::MovZ,
+                        rd,
+                        imm,
+                        size,
+                    });
                 }
             } else {
                 let imm = MoveWideConst::maybe_with_shift(imm16 as u16, i * 16).unwrap();
-                insts.push(Inst::MovK { rd, imm, size });
+                insts.push(Inst::MovWide {
+                    op: MoveWideOp::MovK,
+                    rd,
+                    imm,
+                    size,
+                });
             }
         }
     }

@@ -641,20 +658,14 @@ fn aarch64_get_operands<F: Fn(VReg) -> VReg>(inst: &Inst, collector: &mut Operan
             collector.reg_def(rt2);
             pairmemarg_operands(mem, collector);
         }
-        &Inst::Mov64 { rd, rm } => {
+        &Inst::Mov { rd, rm, .. } => {
             collector.reg_def(rd);
             collector.reg_use(rm);
         }
-        &Inst::Mov32 { rd, rm } => {
-            collector.reg_def(rd);
-            collector.reg_use(rm);
-        }
-        &Inst::MovZ { rd, .. } | &Inst::MovN { rd, .. } => {
-            collector.reg_def(rd);
-        }
-        &Inst::MovK { rd, .. } => {
-            collector.reg_mod(rd);
-        }
+        &Inst::MovWide { op, rd, .. } => match op {
+            MoveWideOp::MovK => collector.reg_mod(rd),
+            _ => collector.reg_def(rd),
+        },
        &Inst::CSel { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
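One behavioural subtlety preserved in the operand-collector hunk above: MOVK only replaces the 16-bit field selected by its shift and keeps every other bit of the destination, so the register allocator must see its destination as modified (reg_mod) rather than freshly defined (reg_def), unlike MOVZ/MOVN, which overwrite the whole register. A minimal model of the three operations on a 64-bit value (hypothetical helpers for illustration, not part of this patch):

/// `shift` is the hw field scaled to a bit amount: 0, 16, 32 or 48.
fn movz(imm16: u64, shift: u32) -> u64 {
    imm16 << shift // every other bit becomes zero
}

fn movn(imm16: u64, shift: u32) -> u64 {
    !(imm16 << shift) // inverted, so every other bit becomes one
}

fn movk(old: u64, imm16: u64, shift: u32) -> u64 {
    // Only the selected 16-bit field changes; the rest of `old` survives,
    // which is why MOVK reads as well as writes its destination register.
    (old & !(0xffff_u64 << shift)) | (imm16 << shift)
}

fn main() {
    let x = movz(0xffff, 16);   // 0x0000_0000_ffff_0000
    let y = movk(x, 0x1234, 0); // low halfword patched, upper bits kept
    assert_eq!(y, 0x0000_0000_ffff_1234);
}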
@@ -1043,7 +1054,11 @@ impl MachInst for Inst {
    fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
        match self {
-            &Inst::Mov64 { rd, rm } => Some((rd, rm)),
+            &Inst::Mov {
+                size: OperandSize::Size64,
+                rd,
+                rm,
+            } => Some((rd, rm)),
            &Inst::FpuMove64 { rd, rn } => Some((rd, rn)),
            &Inst::FpuMove128 { rd, rn } => Some((rd, rn)),
            _ => None,

@@ -1097,7 +1112,8 @@ impl MachInst for Inst {
        assert!(bits <= 128);
        assert!(to_reg.to_reg().class() == from_reg.class());
        match from_reg.class() {
-            RegClass::Int => Inst::Mov64 {
+            RegClass::Int => Inst::Mov {
+                size: OperandSize::Size64,
                rd: to_reg,
                rm: from_reg,
            },

@@ -1467,30 +1483,25 @@ impl Inst {
                let mem = mem.pretty_print_default();
                format!("ldp {}, {}, {}", rt, rt2, mem)
            }
-            &Inst::Mov64 { rd, rm } => {
-                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64, allocs);
-                let rm = pretty_print_ireg(rm, OperandSize::Size64, allocs);
+            &Inst::Mov { size, rd, rm } => {
+                let rd = pretty_print_ireg(rd.to_reg(), size, allocs);
+                let rm = pretty_print_ireg(rm, size, allocs);
                format!("mov {}, {}", rd, rm)
            }
-            &Inst::Mov32 { rd, rm } => {
-                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size32, allocs);
-                let rm = pretty_print_ireg(rm, OperandSize::Size32, allocs);
-                format!("mov {}, {}", rd, rm)
-            }
-            &Inst::MovZ { rd, ref imm, size } => {
+            &Inst::MovWide {
+                op,
+                rd,
+                ref imm,
+                size,
+            } => {
+                let op_str = match op {
+                    MoveWideOp::MovZ => "movz",
+                    MoveWideOp::MovN => "movn",
+                    MoveWideOp::MovK => "movk",
+                };
                let rd = pretty_print_ireg(rd.to_reg(), size, allocs);
                let imm = imm.pretty_print(0, allocs);
-                format!("movz {}, {}", rd, imm)
-            }
-            &Inst::MovN { rd, ref imm, size } => {
-                let rd = pretty_print_ireg(rd.to_reg(), size, allocs);
-                let imm = imm.pretty_print(0, allocs);
-                format!("movn {}, {}", rd, imm)
-            }
-            &Inst::MovK { rd, ref imm, size } => {
-                let rd = pretty_print_ireg(rd.to_reg(), size, allocs);
-                let imm = imm.pretty_print(0, allocs);
-                format!("movk {}, {}", rd, imm)
+                format!("{} {}, {}", op_str, rd, imm)
            }
            &Inst::CSel { rd, rn, rm, cond } => {
                let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64, allocs);

View File

@@ -7,8 +7,9 @@ pub mod generated_code;
 use super::{
     writable_zero_reg, zero_reg, AMode, ASIMDFPModImm, ASIMDMovModImm, AtomicRmwOp, BranchTarget,
     CallIndInfo, CallInfo, Cond, CondBrKind, ExtendOp, FPUOpRI, FloatCC, Imm12, ImmLogic, ImmShift,
-    Inst as MInst, IntCC, JTSequenceInfo, MachLabel, MoveWideConst, NarrowValueMode, Opcode,
-    OperandSize, PairAMode, Reg, ScalarSize, ShiftOpAndAmt, UImm5, VecMisc2, VectorSize, NZCV,
+    Inst as MInst, IntCC, JTSequenceInfo, MachLabel, MoveWideConst, MoveWideOp, NarrowValueMode,
+    Opcode, OperandSize, PairAMode, Reg, ScalarSize, ShiftOpAndAmt, UImm5, VecMisc2, VectorSize,
+    NZCV,
 };
 use crate::isa::aarch64::settings::Flags as IsaFlags;
 use crate::machinst::isle::*;

@@ -145,14 +146,29 @@ where
                     let imm =
                         MoveWideConst::maybe_with_shift(((!imm16) & 0xffff) as u16, i * 16)
                             .unwrap();
-                    self.emit(&MInst::MovN { rd, imm, size });
+                    self.emit(&MInst::MovWide {
+                        op: MoveWideOp::MovN,
+                        rd,
+                        imm,
+                        size,
+                    });
                 } else {
                     let imm = MoveWideConst::maybe_with_shift(imm16 as u16, i * 16).unwrap();
-                    self.emit(&MInst::MovZ { rd, imm, size });
+                    self.emit(&MInst::MovWide {
+                        op: MoveWideOp::MovZ,
+                        rd,
+                        imm,
+                        size,
+                    });
                 }
             } else {
                 let imm = MoveWideConst::maybe_with_shift(imm16 as u16, i * 16).unwrap();
-                self.emit(&MInst::MovK { rd, imm, size });
+                self.emit(&MInst::MovWide {
+                    op: MoveWideOp::MovK,
+                    rd,
+                    imm,
+                    size,
+                });
             }
         }
     }

View File

@@ -1,4 +1,4 @@
 src/clif.isle 443b34b797fc8ace
 src/prelude.isle afd037c4d91c875c
-src/isa/aarch64/inst.isle 950bb0092242218e
+src/isa/aarch64/inst.isle f7f03d5ea5411344
 src/isa/aarch64/lower.isle 71c7e603b0e4bdef

File diff suppressed because it is too large.