Refactor x64::Inst to use OperandSize instead of u8s.

TODO: some types take an 'is_64_bit' bool. Those are left unchanged for now.
Kasey Carrothers
2021-01-30 18:39:10 -08:00
committed by Andrew Brown
parent b12d41bfe9
commit 3306408100
6 changed files with 780 additions and 458 deletions
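For orientation before the hunks below: a minimal, hypothetical sketch of the OperandSize helpers this refactor standardizes on, reconstructed from what is visible in this diff (from_bytes, the new from_ty, is_size, to_bytes, to_bits). It is not the actual cranelift-codegen definition (the real type lives in the x64 backend's inst/args.rs, and its from_ty takes an ir::Type); from_ty is only described in a comment here so the sketch stays self-contained and compilable.

// Hypothetical sketch; not the real cranelift-codegen definition.
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum OperandSize {
    Size8,
    Size16,
    Size32,
    Size64,
}

impl OperandSize {
    pub fn from_bytes(num_bytes: u32) -> Self {
        match num_bytes {
            1 => OperandSize::Size8,
            2 => OperandSize::Size16,
            4 => OperandSize::Size32,
            8 => OperandSize::Size64,
            _ => unreachable!("Invalid OperandSize: {}", num_bytes),
        }
    }

    // The commit also adds a `from_ty(ty: Type)` constructor that calls
    // `Self::from_bytes(ty.lane_type().bytes())`, so vector types map to the
    // OperandSize of their lanes (elided here to avoid pulling in ir::Type).

    // Check that self is one of the allowed sizes; the factory functions in
    // the diff use this in their debug assertions.
    pub fn is_size(&self, sizes: &[Self]) -> bool {
        sizes.iter().any(|s| s == self)
    }

    pub fn to_bytes(&self) -> u8 {
        match self {
            Self::Size8 => 1,
            Self::Size16 => 2,
            Self::Size32 => 4,
            Self::Size64 => 8,
        }
    }

    pub fn to_bits(&self) -> u8 {
        self.to_bytes() * 8
    }
}

fn main() {
    let size = OperandSize::from_bytes(8);
    assert!(size.is_size(&[OperandSize::Size32, OperandSize::Size64]));
    assert_eq!(size.to_bits(), 64);
}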

@@ -339,7 +339,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Self::I> {
smallvec![
Inst::cmp_rmi_r(/* bytes = */ 8, RegMemImm::reg(regs::rsp()), limit_reg),
Inst::cmp_rmi_r(OperandSize::Size64, RegMemImm::reg(regs::rsp()), limit_reg),
Inst::TrapIf {
// NBE == "> unsigned"; args above are reversed; this tests limit_reg > rsp.
cc: CC::NBE,
@@ -474,7 +474,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
match r_reg.get_class() {
RegClass::I64 => {
insts.push(Inst::mov_r_m(
/* bytes = */ 8,
OperandSize::Size64,
r_reg.to_reg(),
Amode::imm_reg(cur_offset, regs::rsp()),
));

@@ -3,7 +3,7 @@
use super::regs::{self, show_ireg_sized};
use super::EmitState;
use crate::ir::condcodes::{FloatCC, IntCC};
use crate::ir::MemFlags;
use crate::ir::{MemFlags, Type};
use crate::isa::x64::inst::Inst;
use crate::machinst::*;
use regalloc::{
@@ -1336,10 +1336,16 @@ impl OperandSize {
2 => OperandSize::Size16,
4 => OperandSize::Size32,
8 => OperandSize::Size64,
_ => unreachable!(),
_ => unreachable!("Invalid OperandSize: {}", num_bytes),
}
}
// Computes the OperandSize for a given type.
// For vectors, the OperandSize of the lanes is returned.
pub(crate) fn from_ty(ty: Type) -> Self {
Self::from_bytes(ty.lane_type().bytes())
}
// Check that the value of self is one of the allowed sizes.
pub(crate) fn is_size(&self, sizes: &[Self]) -> bool {
for val in sizes.iter() {

@@ -676,18 +676,18 @@ pub(crate) fn emit(
Inst::UnaryRmR { size, op, src, dst } => {
let rex_flags = match size {
2 | 4 => RexFlags::clear_w(),
8 => RexFlags::set_w(),
OperandSize::Size16 | OperandSize::Size32 => RexFlags::clear_w(),
OperandSize::Size64 => RexFlags::set_w(),
_ => unreachable!(),
};
use UnaryRmROpcode::*;
let prefix = match size {
2 => match op {
OperandSize::Size16 => match op {
Bsr | Bsf => LegacyPrefixes::_66,
Lzcnt | Tzcnt | Popcnt => LegacyPrefixes::_66F3,
},
4 | 8 => match op {
OperandSize::Size32 | OperandSize::Size64 => match op {
Bsr | Bsf => LegacyPrefixes::None,
Lzcnt | Tzcnt | Popcnt => LegacyPrefixes::_F3,
},
@@ -732,15 +732,14 @@ pub(crate) fn emit(
Inst::Not { size, src } => {
let src = int_reg_enc(src.to_reg());
let (opcode, prefix, rex_flags) = match size {
1 => (
OperandSize::Size8 => (
0xF6,
LegacyPrefixes::None,
*RexFlags::clear_w().always_emit_if_8bit_needed(src),
),
2 => (0xF7, LegacyPrefixes::_66, RexFlags::clear_w()),
4 => (0xF7, LegacyPrefixes::None, RexFlags::clear_w()),
8 => (0xF7, LegacyPrefixes::None, RexFlags::set_w()),
_ => unreachable!("{}", size),
OperandSize::Size16 => (0xF7, LegacyPrefixes::_66, RexFlags::clear_w()),
OperandSize::Size32 => (0xF7, LegacyPrefixes::None, RexFlags::clear_w()),
OperandSize::Size64 => (0xF7, LegacyPrefixes::None, RexFlags::set_w()),
};
let subopcode = 2;
@@ -750,15 +749,14 @@ pub(crate) fn emit(
Inst::Neg { size, src } => {
let src = int_reg_enc(src.to_reg());
let (opcode, prefix, rex_flags) = match size {
1 => (
OperandSize::Size8 => (
0xF6,
LegacyPrefixes::None,
*RexFlags::clear_w().always_emit_if_8bit_needed(src),
),
2 => (0xF7, LegacyPrefixes::_66, RexFlags::clear_w()),
4 => (0xF7, LegacyPrefixes::None, RexFlags::clear_w()),
8 => (0xF7, LegacyPrefixes::None, RexFlags::set_w()),
_ => unreachable!("{}", size),
OperandSize::Size16 => (0xF7, LegacyPrefixes::_66, RexFlags::clear_w()),
OperandSize::Size32 => (0xF7, LegacyPrefixes::None, RexFlags::clear_w()),
OperandSize::Size64 => (0xF7, LegacyPrefixes::None, RexFlags::set_w()),
};
let subopcode = 3;
@@ -771,11 +769,10 @@ pub(crate) fn emit(
divisor,
} => {
let (opcode, prefix, mut rex_flags) = match size {
1 => (0xF6, LegacyPrefixes::None, RexFlags::clear_w()),
2 => (0xF7, LegacyPrefixes::_66, RexFlags::clear_w()),
4 => (0xF7, LegacyPrefixes::None, RexFlags::clear_w()),
8 => (0xF7, LegacyPrefixes::None, RexFlags::set_w()),
_ => unreachable!("{}", size),
OperandSize::Size8 => (0xF6, LegacyPrefixes::None, RexFlags::clear_w()),
OperandSize::Size16 => (0xF7, LegacyPrefixes::_66, RexFlags::clear_w()),
OperandSize::Size32 => (0xF7, LegacyPrefixes::None, RexFlags::clear_w()),
OperandSize::Size64 => (0xF7, LegacyPrefixes::None, RexFlags::set_w()),
};
let loc = state.cur_srcloc();
@@ -785,7 +782,7 @@ pub(crate) fn emit(
match divisor {
RegMem::Reg { reg } => {
let src = int_reg_enc(*reg);
if *size == 1 {
if *size == OperandSize::Size8 {
rex_flags.always_emit_if_8bit_needed(src);
}
emit_std_enc_enc(sink, prefix, opcode, 1, subopcode, src, rex_flags)
@@ -801,9 +798,9 @@ pub(crate) fn emit(
Inst::MulHi { size, signed, rhs } => {
let (prefix, rex_flags) = match size {
2 => (LegacyPrefixes::_66, RexFlags::clear_w()),
4 => (LegacyPrefixes::None, RexFlags::clear_w()),
8 => (LegacyPrefixes::None, RexFlags::set_w()),
OperandSize::Size16 => (LegacyPrefixes::_66, RexFlags::clear_w()),
OperandSize::Size32 => (LegacyPrefixes::None, RexFlags::clear_w()),
OperandSize::Size64 => (LegacyPrefixes::None, RexFlags::set_w()),
_ => unreachable!(),
};
@@ -823,20 +820,19 @@ pub(crate) fn emit(
}
Inst::SignExtendData { size } => match size {
1 => {
OperandSize::Size8 => {
sink.put1(0x66);
sink.put1(0x98);
}
2 => {
OperandSize::Size16 => {
sink.put1(0x66);
sink.put1(0x99);
}
4 => sink.put1(0x99),
8 => {
OperandSize::Size32 => sink.put1(0x99),
OperandSize::Size64 => {
sink.put1(0x48);
sink.put1(0x99);
}
_ => unreachable!(),
},
Inst::CheckedDivOrRemSeq {
@@ -896,11 +892,7 @@ pub(crate) fn emit(
// x % -1 = 0; put the result into the destination, $rdx.
let done_label = sink.get_label();
let inst = Inst::imm(
OperandSize::from_bytes(*size as u32),
0,
Writable::from_reg(regs::rdx()),
);
let inst = Inst::imm(*size, 0, Writable::from_reg(regs::rdx()));
inst.emit(sink, info, state);
let inst = Inst::jmp_known(done_label);
@@ -909,13 +901,17 @@ pub(crate) fn emit(
(Some(do_op), Some(done_label))
} else {
// Check for integer overflow.
if *size == 8 {
if *size == OperandSize::Size64 {
let tmp = tmp.expect("temporary for i64 sdiv");
let inst = Inst::imm(OperandSize::Size64, 0x8000000000000000, tmp);
inst.emit(sink, info, state);
let inst = Inst::cmp_rmi_r(8, RegMemImm::reg(tmp.to_reg()), regs::rax());
let inst = Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::reg(tmp.to_reg()),
regs::rax(),
);
inst.emit(sink, info, state);
} else {
let inst = Inst::cmp_rmi_r(*size, RegMemImm::imm(0x80000000), regs::rax());
@@ -937,7 +933,7 @@ pub(crate) fn emit(
}
assert!(
*size > 1,
*size != OperandSize::Size8,
"CheckedDivOrRemSeq for i8 is not yet implemented"
);
@@ -1175,7 +1171,7 @@ pub(crate) fn emit(
let dst = &dst.finalize(state, sink);
match size {
1 => {
OperandSize::Size8 => {
// This is one of the few places where the presence of a
// redundant REX prefix changes the meaning of the
// instruction.
@@ -1198,7 +1194,7 @@ pub(crate) fn emit(
)
}
2 => {
OperandSize::Size16 => {
// MOV r16, r/m16 is 66 (REX.W==0) 89 /r
emit_std_reg_mem(
sink,
@@ -1213,7 +1209,7 @@ pub(crate) fn emit(
)
}
4 => {
OperandSize::Size32 => {
// MOV r32, r/m32 is (REX.W==0) 89 /r
emit_std_reg_mem(
sink,
@@ -1228,7 +1224,7 @@ pub(crate) fn emit(
)
}
8 => {
OperandSize::Size64 => {
// MOV r64, r/m64 is (REX.W==1) 89 /r
emit_std_reg_mem(
sink,
@@ -1242,8 +1238,6 @@ pub(crate) fn emit(
RexFlags::set_w(),
)
}
_ => panic!("x64::Inst::Mov_R_M::emit: unreachable"),
}
}
@@ -1265,15 +1259,14 @@ pub(crate) fn emit(
match num_bits {
None => {
let (opcode, prefix, rex_flags) = match size {
1 => (
OperandSize::Size8 => (
0xD2,
LegacyPrefixes::None,
*RexFlags::clear_w().always_emit_if_8bit_needed(enc_dst),
),
2 => (0xD3, LegacyPrefixes::_66, RexFlags::clear_w()),
4 => (0xD3, LegacyPrefixes::None, RexFlags::clear_w()),
8 => (0xD3, LegacyPrefixes::None, RexFlags::set_w()),
_ => unreachable!("{}", size),
OperandSize::Size16 => (0xD3, LegacyPrefixes::_66, RexFlags::clear_w()),
OperandSize::Size32 => (0xD3, LegacyPrefixes::None, RexFlags::clear_w()),
OperandSize::Size64 => (0xD3, LegacyPrefixes::None, RexFlags::set_w()),
};
// SHL/SHR/SAR %cl, reg8 is (REX.W==0) D2 /subopcode
@@ -1285,15 +1278,14 @@ pub(crate) fn emit(
Some(num_bits) => {
let (opcode, prefix, rex_flags) = match size {
1 => (
OperandSize::Size8 => (
0xC0,
LegacyPrefixes::None,
*RexFlags::clear_w().always_emit_if_8bit_needed(enc_dst),
),
2 => (0xC1, LegacyPrefixes::_66, RexFlags::clear_w()),
4 => (0xC1, LegacyPrefixes::None, RexFlags::clear_w()),
8 => (0xC1, LegacyPrefixes::None, RexFlags::set_w()),
_ => unreachable!("{}", size),
OperandSize::Size16 => (0xC1, LegacyPrefixes::_66, RexFlags::clear_w()),
OperandSize::Size32 => (0xC1, LegacyPrefixes::None, RexFlags::clear_w()),
OperandSize::Size64 => (0xC1, LegacyPrefixes::None, RexFlags::set_w()),
};
// SHL/SHR/SAR $ib, reg8 is (REX.W==0) C0 /subopcode
@@ -1377,26 +1369,25 @@ pub(crate) fn emit(
};
let mut prefix = LegacyPrefixes::None;
if *size == 2 {
if *size == OperandSize::Size16 {
prefix = LegacyPrefixes::_66;
}
let mut rex = match size {
8 => RexFlags::set_w(),
4 | 2 => RexFlags::clear_w(),
1 => {
OperandSize::Size64 => RexFlags::set_w(),
OperandSize::Size16 | OperandSize::Size32 => RexFlags::clear_w(),
OperandSize::Size8 => {
let mut rex = RexFlags::clear_w();
// Here, a redundant REX prefix changes the meaning of the instruction.
let enc_g = int_reg_enc(*reg_g);
rex.always_emit_if_8bit_needed(enc_g);
rex
}
_ => panic!("x64::Inst::Cmp_RMI_R::emit: unreachable"),
};
match src_e {
RegMemImm::Reg { reg: reg_e } => {
if *size == 1 {
if *size == OperandSize::Size8 {
// Check whether the E register forces the use of a redundant REX.
let enc_e = int_reg_enc(*reg_e);
rex.always_emit_if_8bit_needed(enc_e);
@@ -1405,9 +1396,9 @@ pub(crate) fn emit(
// Use the swapped operands encoding for CMP, to stay consistent with the output of
// gcc/llvm.
let opcode = match (*size, is_cmp) {
(1, true) => 0x38,
(OperandSize::Size8, true) => 0x38,
(_, true) => 0x39,
(1, false) => 0x84,
(OperandSize::Size8, false) => 0x84,
(_, false) => 0x85,
};
emit_std_reg_reg(sink, prefix, opcode, 1, *reg_e, *reg_g, rex);
@@ -1417,9 +1408,9 @@ pub(crate) fn emit(
let addr = &addr.finalize(state, sink);
// Whereas here we revert to the "normal" G-E ordering for CMP.
let opcode = match (*size, is_cmp) {
(1, true) => 0x3A,
(OperandSize::Size8, true) => 0x3A,
(_, true) => 0x3B,
(1, false) => 0x84,
(OperandSize::Size8, false) => 0x84,
(_, false) => 0x85,
};
emit_std_reg_mem(sink, state, info, prefix, opcode, 1, *reg_g, addr, rex);
@@ -1432,7 +1423,7 @@ pub(crate) fn emit(
// And also here we use the "normal" G-E ordering.
let opcode = if is_cmp {
if *size == 1 {
if *size == OperandSize::Size8 {
0x80
} else if use_imm8 {
0x83
@@ -1440,7 +1431,7 @@ pub(crate) fn emit(
0x81
}
} else {
if *size == 1 {
if *size == OperandSize::Size8 {
0xF6
} else {
0xF7
@@ -1450,7 +1441,7 @@ pub(crate) fn emit(
let enc_g = int_reg_enc(*reg_g);
emit_std_enc_enc(sink, prefix, opcode, 1, subopcode, enc_g, rex);
emit_simm(sink, if use_imm8 { 1 } else { *size }, *simm32);
emit_simm(sink, if use_imm8 { 1 } else { size.to_bytes() }, *simm32);
}
}
}
@@ -1477,9 +1468,9 @@ pub(crate) fn emit(
dst: reg_g,
} => {
let (prefix, rex_flags) = match size {
2 => (LegacyPrefixes::_66, RexFlags::clear_w()),
4 => (LegacyPrefixes::None, RexFlags::clear_w()),
8 => (LegacyPrefixes::None, RexFlags::set_w()),
OperandSize::Size16 => (LegacyPrefixes::_66, RexFlags::clear_w()),
OperandSize::Size32 => (LegacyPrefixes::None, RexFlags::clear_w()),
OperandSize::Size64 => (LegacyPrefixes::None, RexFlags::set_w()),
_ => unreachable!("invalid size spec for cmove"),
};
let opcode = 0x0F40 + cc.get_enc() as u32;
@@ -2338,7 +2329,7 @@ pub(crate) fn emit(
// If x seen as a signed int64 is not negative, a signed-conversion will do the right
// thing.
// TODO use tst src, src here.
let inst = Inst::cmp_rmi_r(8, RegMemImm::imm(0), src.to_reg());
let inst = Inst::cmp_rmi_r(OperandSize::Size64, RegMemImm::imm(0), src.to_reg());
inst.emit(sink, info, state);
one_way_jmp(sink, CC::L, handle_negative);
@@ -2358,7 +2349,12 @@ pub(crate) fn emit(
inst.emit(sink, info, state);
// tmp_gpr1 := src >> 1
let inst = Inst::shift_r(8, ShiftKind::ShiftRightLogical, Some(1), *tmp_gpr1);
let inst = Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(1),
*tmp_gpr1,
);
inst.emit(sink, info, state);
let inst = Inst::gen_move(*tmp_gpr2, src.to_reg(), types::I64);
@@ -2464,7 +2460,7 @@ pub(crate) fn emit(
inst.emit(sink, info, state);
// Compare against 1, in case of overflow the dst operand was INT_MIN.
let inst = Inst::cmp_rmi_r(dst_size.to_bytes(), RegMemImm::imm(1), dst.to_reg());
let inst = Inst::cmp_rmi_r(*dst_size, RegMemImm::imm(1), dst.to_reg());
inst.emit(sink, info, state);
one_way_jmp(sink, CC::NO, done); // no overflow => done
@@ -2693,7 +2689,7 @@ pub(crate) fn emit(
let inst = Inst::xmm_to_gpr(trunc_op, src.to_reg(), *dst, *dst_size);
inst.emit(sink, info, state);
let inst = Inst::cmp_rmi_r(dst_size.to_bytes(), RegMemImm::imm(0), dst.to_reg());
let inst = Inst::cmp_rmi_r(*dst_size, RegMemImm::imm(0), dst.to_reg());
inst.emit(sink, info, state);
one_way_jmp(sink, CC::NL, done); // if dst >= 0, jump to done
@@ -2727,7 +2723,7 @@ pub(crate) fn emit(
let inst = Inst::xmm_to_gpr(trunc_op, src.to_reg(), *dst, *dst_size);
inst.emit(sink, info, state);
let inst = Inst::cmp_rmi_r(dst_size.to_bytes(), RegMemImm::imm(0), dst.to_reg());
let inst = Inst::cmp_rmi_r(*dst_size, RegMemImm::imm(0), dst.to_reg());
inst.emit(sink, info, state);
let next_is_large = sink.get_label();
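A pattern worth noting from the emit hunks above: once `size` is an enum, prefix and REX.W selection becomes an exhaustive match on the variants, so the catch-all `_ => unreachable!("{}", size)` arms disappear wherever all four widths are covered (Not, Neg, Div, SignExtendData, MovRM, ShiftR, CmpRmiR); where only a subset is legal (UnaryRmR, MulHi, Cmove) the match keeps its catch-all and the factory functions assert with `is_size` instead. Below is a minimal, hypothetical sketch of that selection, modeled on the Inst::Not arm above (the 8-bit "always emit REX if needed" detail is elided); it is illustrative only, not the actual emitter code.

// Hypothetical helper: pick the opcode byte, the 0x66 operand-size override,
// and the REX.W bit purely from the OperandSize variant, mirroring the
// Inst::Not arm in the diff above. Reuses the OperandSize sketch shape.
#[derive(Clone, Copy)]
enum OperandSize { Size8, Size16, Size32, Size64 }

fn not_encoding(size: OperandSize) -> (u8, bool, bool) {
    // (opcode, use 0x66 prefix, set REX.W)
    match size {
        OperandSize::Size8 => (0xF6, false, false),
        OperandSize::Size16 => (0xF7, true, false),
        OperandSize::Size32 => (0xF7, false, false),
        OperandSize::Size64 => (0xF7, false, true),
    }
}

fn main() {
    for size in [OperandSize::Size8, OperandSize::Size16, OperandSize::Size32, OperandSize::Size64] {
        let (opcode, p66, rex_w) = not_encoding(size);
        println!("opcode={:#04x} prefix_66={} rex_w={}", opcode, p66, rex_w);
    }
}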

File diff suppressed because it is too large.

@@ -50,7 +50,7 @@ pub enum Inst {
/// Instructions on GPR that only read src and defines dst (dst is not modified): bsr, etc.
UnaryRmR {
size: u8, // 2, 4 or 8
size: OperandSize, // 2, 4 or 8
op: UnaryRmROpcode,
src: RegMem,
dst: Writable<Reg>,
@@ -58,25 +58,29 @@ pub enum Inst {
/// Bitwise not
Not {
size: u8, // 1, 2, 4 or 8
size: OperandSize, // 1, 2, 4 or 8
src: Writable<Reg>,
},
/// Integer negation
Neg {
size: u8, // 1, 2, 4 or 8
size: OperandSize, // 1, 2, 4 or 8
src: Writable<Reg>,
},
/// Integer quotient and remainder: (div idiv) $rax $rdx (reg addr)
Div {
size: u8, // 1, 2, 4 or 8
size: OperandSize, // 1, 2, 4 or 8
signed: bool,
divisor: RegMem,
},
/// The high bits (RDX) of a (un)signed multiply: RDX:RAX := RAX * rhs.
MulHi { size: u8, signed: bool, rhs: RegMem },
MulHi {
size: OperandSize, // 2, 4, or 8
signed: bool,
rhs: RegMem,
},
/// A synthetic sequence to implement the right inline checks for remainder and division,
/// assuming the dividend is in %rax.
@@ -91,7 +95,7 @@ pub enum Inst {
/// def!
CheckedDivOrRemSeq {
kind: DivOrRemKind,
size: u8,
size: OperandSize,
/// The divisor operand. Note it's marked as modified so that it gets assigned a register
/// different from the temporary.
divisor: Writable<Reg>,
@@ -101,7 +105,7 @@ pub enum Inst {
/// Do a sign-extend based on the sign of the value in rax into rdx: (cwd cdq cqo)
/// or al into ah: (cbw)
SignExtendData {
size: u8, // 1, 2, 4 or 8
size: OperandSize, // 1, 2, 4 or 8
},
/// Constant materialization: (imm32 imm64) reg.
@@ -149,14 +153,14 @@ pub enum Inst {
/// Integer stores: mov (b w l q) reg addr.
MovRM {
size: u8, // 1, 2, 4 or 8.
size: OperandSize, // 1, 2, 4 or 8.
src: Reg,
dst: SyntheticAmode,
},
/// Arithmetic shifts: (shl shr sar) (b w l q) imm reg.
ShiftR {
size: u8, // 1, 2, 4 or 8
size: OperandSize, // 1, 2, 4 or 8
kind: ShiftKind,
/// shift count: Some(0 .. #bits-in-type - 1), or None to mean "%cl".
num_bits: Option<u8>,
@@ -172,7 +176,7 @@ pub enum Inst {
/// Integer comparisons/tests: cmp or test (b w l q) (reg addr imm) reg.
CmpRmiR {
size: u8, // 1, 2, 4 or 8
size: OperandSize, // 1, 2, 4 or 8
opcode: CmpOpcode,
src: RegMemImm,
dst: Reg,
@@ -184,8 +188,7 @@ pub enum Inst {
/// Integer conditional move.
/// Overwrites the destination register.
Cmove {
/// Possible values are 2, 4 or 8. Checked in the related factory.
size: u8,
size: OperandSize, // 2, 4, or 8
cc: CC,
src: RegMem,
dst: Writable<Reg>,
@@ -588,32 +591,33 @@ impl Inst {
}
pub(crate) fn unary_rm_r(
size: u8,
size: OperandSize,
op: UnaryRmROpcode,
src: RegMem,
dst: Writable<Reg>,
) -> Self {
src.assert_regclass_is(RegClass::I64);
debug_assert!(dst.to_reg().get_class() == RegClass::I64);
debug_assert!(size == 8 || size == 4 || size == 2);
debug_assert!(size.is_size(&[
OperandSize::Size16,
OperandSize::Size32,
OperandSize::Size64
]));
Self::UnaryRmR { size, op, src, dst }
}
pub(crate) fn not(size: u8, src: Writable<Reg>) -> Inst {
pub(crate) fn not(size: OperandSize, src: Writable<Reg>) -> Inst {
debug_assert_eq!(src.to_reg().get_class(), RegClass::I64);
debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
Inst::Not { size, src }
}
pub(crate) fn neg(size: u8, src: Writable<Reg>) -> Inst {
pub(crate) fn neg(size: OperandSize, src: Writable<Reg>) -> Inst {
debug_assert_eq!(src.to_reg().get_class(), RegClass::I64);
debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
Inst::Neg { size, src }
}
pub(crate) fn div(size: u8, signed: bool, divisor: RegMem) -> Inst {
pub(crate) fn div(size: OperandSize, signed: bool, divisor: RegMem) -> Inst {
divisor.assert_regclass_is(RegClass::I64);
debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
Inst::Div {
size,
signed,
@@ -621,19 +625,22 @@ impl Inst {
}
}
pub(crate) fn mul_hi(size: u8, signed: bool, rhs: RegMem) -> Inst {
pub(crate) fn mul_hi(size: OperandSize, signed: bool, rhs: RegMem) -> Inst {
debug_assert!(size.is_size(&[
OperandSize::Size16,
OperandSize::Size32,
OperandSize::Size64
]));
rhs.assert_regclass_is(RegClass::I64);
debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
Inst::MulHi { size, signed, rhs }
}
pub(crate) fn checked_div_or_rem_seq(
kind: DivOrRemKind,
size: u8,
size: OperandSize,
divisor: Writable<Reg>,
tmp: Option<Writable<Reg>>,
) -> Inst {
debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
debug_assert!(divisor.to_reg().get_class() == RegClass::I64);
debug_assert!(tmp
.map(|tmp| tmp.to_reg().get_class() == RegClass::I64)
@@ -646,8 +653,7 @@ impl Inst {
}
}
pub(crate) fn sign_extend_data(size: u8) -> Inst {
debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
pub(crate) fn sign_extend_data(size: OperandSize) -> Inst {
Inst::SignExtendData { size }
}
@@ -889,12 +895,7 @@ impl Inst {
}
}
pub(crate) fn mov_r_m(
size: u8, // 1, 2, 4 or 8
src: Reg,
dst: impl Into<SyntheticAmode>,
) -> Inst {
debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
pub(crate) fn mov_r_m(size: OperandSize, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst {
debug_assert!(src.get_class() == RegClass::I64);
Inst::MovRM {
size,
@@ -912,14 +913,13 @@ impl Inst {
}
pub(crate) fn shift_r(
size: u8,
size: OperandSize,
kind: ShiftKind,
num_bits: Option<u8>,
dst: Writable<Reg>,
) -> Inst {
debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
debug_assert!(if let Some(num_bits) = num_bits {
num_bits < size * 8
num_bits < size.to_bits()
} else {
true
});
@@ -934,13 +934,8 @@ impl Inst {
/// Does a comparison of dst - src for operands of size `size`, as stated by the machine
/// instruction semantics. Be careful with the order of parameters!
pub(crate) fn cmp_rmi_r(
size: u8, // 1, 2, 4 or 8
src: RegMemImm,
dst: Reg,
) -> Inst {
pub(crate) fn cmp_rmi_r(size: OperandSize, src: RegMemImm, dst: Reg) -> Inst {
src.assert_regclass_is(RegClass::I64);
debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
debug_assert_eq!(dst.get_class(), RegClass::I64);
Inst::CmpRmiR {
size,
@@ -951,13 +946,8 @@ impl Inst {
}
/// Does a comparison of dst & src for operands of size `size`.
pub(crate) fn test_rmi_r(
size: u8, // 1, 2, 4 or 8
src: RegMemImm,
dst: Reg,
) -> Inst {
pub(crate) fn test_rmi_r(size: OperandSize, src: RegMemImm, dst: Reg) -> Inst {
src.assert_regclass_is(RegClass::I64);
debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
debug_assert_eq!(dst.get_class(), RegClass::I64);
Inst::CmpRmiR {
size,
@@ -978,8 +968,12 @@ impl Inst {
Inst::Setcc { cc, dst }
}
pub(crate) fn cmove(size: u8, cc: CC, src: RegMem, dst: Writable<Reg>) -> Inst {
debug_assert!(size == 8 || size == 4 || size == 2);
pub(crate) fn cmove(size: OperandSize, cc: CC, src: RegMem, dst: Writable<Reg>) -> Inst {
debug_assert!(size.is_size(&[
OperandSize::Size16,
OperandSize::Size32,
OperandSize::Size64
]));
debug_assert!(dst.to_reg().get_class() == RegClass::I64);
Inst::Cmove { size, cc, src, dst }
}
@@ -1127,7 +1121,7 @@ impl Inst {
RegClass::I64 => {
// Always store the full register, to ensure that the high bits are properly set
// when doing a full reload.
Inst::mov_r_m(8 /* bytes */, from_reg, to_addr)
Inst::mov_r_m(OperandSize::Size64, from_reg, to_addr)
}
RegClass::V128 => {
let opcode = match ty {
@@ -1293,13 +1287,12 @@ impl PrettyPrint for Inst {
}
}
fn suffix_bwlq(size: u8) -> String {
fn suffix_bwlq(size: OperandSize) -> String {
match size {
1 => "b".to_string(),
2 => "w".to_string(),
4 => "l".to_string(),
8 => "q".to_string(),
_ => panic!("Inst(x64).show.suffixBWLQ: size={}", size),
OperandSize::Size8 => "b".to_string(),
OperandSize::Size16 => "w".to_string(),
OperandSize::Size32 => "l".to_string(),
OperandSize::Size64 => "q".to_string(),
}
}
@@ -1321,20 +1314,20 @@ impl PrettyPrint for Inst {
Inst::UnaryRmR { src, dst, op, size } => format!(
"{} {}, {}",
ljustify2(op.to_string(), suffix_bwlq(*size)),
src.show_rru_sized(mb_rru, *size),
show_ireg_sized(dst.to_reg(), mb_rru, *size),
src.show_rru_sized(mb_rru, size.to_bytes()),
show_ireg_sized(dst.to_reg(), mb_rru, size.to_bytes()),
),
Inst::Not { size, src } => format!(
"{} {}",
ljustify2("not".to_string(), suffix_bwlq(*size)),
show_ireg_sized(src.to_reg(), mb_rru, *size)
show_ireg_sized(src.to_reg(), mb_rru, size.to_bytes())
),
Inst::Neg { size, src } => format!(
"{} {}",
ljustify2("neg".to_string(), suffix_bwlq(*size)),
show_ireg_sized(src.to_reg(), mb_rru, *size)
show_ireg_sized(src.to_reg(), mb_rru, size.to_bytes())
),
Inst::Div {
@@ -1349,7 +1342,7 @@ impl PrettyPrint for Inst {
} else {
"div".into()
}),
divisor.show_rru_sized(mb_rru, *size)
divisor.show_rru_sized(mb_rru, size.to_bytes())
),
Inst::MulHi {
@@ -1361,7 +1354,7 @@ impl PrettyPrint for Inst {
} else {
"mul".to_string()
}),
rhs.show_rru_sized(mb_rru, *size)
rhs.show_rru_sized(mb_rru, size.to_bytes())
),
Inst::CheckedDivOrRemSeq {
@@ -1377,15 +1370,14 @@ impl PrettyPrint for Inst {
DivOrRemKind::SignedRem => "srem",
DivOrRemKind::UnsignedRem => "urem",
},
show_ireg_sized(divisor.to_reg(), mb_rru, *size),
show_ireg_sized(divisor.to_reg(), mb_rru, size.to_bytes()),
),
Inst::SignExtendData { size } => match size {
1 => "cbw",
2 => "cwd",
4 => "cdq",
8 => "cqo",
_ => unreachable!(),
OperandSize::Size8 => "cbw",
OperandSize::Size16 => "cwd",
OperandSize::Size32 => "cdq",
OperandSize::Size64 => "cqo",
}
.into(),
@@ -1611,7 +1603,7 @@ impl PrettyPrint for Inst {
Inst::MovRM { size, src, dst, .. } => format!(
"{} {}, {}",
ljustify2("mov".to_string(), suffix_bwlq(*size)),
show_ireg_sized(*src, mb_rru, *size),
show_ireg_sized(*src, mb_rru, size.to_bytes()),
dst.show_rru(mb_rru)
),
@@ -1624,14 +1616,14 @@ impl PrettyPrint for Inst {
None => format!(
"{} %cl, {}",
ljustify2(kind.to_string(), suffix_bwlq(*size)),
show_ireg_sized(dst.to_reg(), mb_rru, *size)
show_ireg_sized(dst.to_reg(), mb_rru, size.to_bytes())
),
Some(num_bits) => format!(
"{} ${}, {}",
ljustify2(kind.to_string(), suffix_bwlq(*size)),
num_bits,
show_ireg_sized(dst.to_reg(), mb_rru, *size)
show_ireg_sized(dst.to_reg(), mb_rru, size.to_bytes())
),
},
@@ -1655,8 +1647,8 @@ impl PrettyPrint for Inst {
format!(
"{} {}, {}",
ljustify2(op.to_string(), suffix_bwlq(*size)),
src.show_rru_sized(mb_rru, *size),
show_ireg_sized(*dst, mb_rru, *size)
src.show_rru_sized(mb_rru, size.to_bytes()),
show_ireg_sized(*dst, mb_rru, size.to_bytes())
)
}
@@ -1669,8 +1661,8 @@ impl PrettyPrint for Inst {
Inst::Cmove { size, cc, src, dst } => format!(
"{} {}, {}",
ljustify(format!("cmov{}{}", cc.to_string(), suffix_bwlq(*size))),
src.show_rru_sized(mb_rru, *size),
show_ireg_sized(dst.to_reg(), mb_rru, *size)
src.show_rru_sized(mb_rru, size.to_bytes()),
show_ireg_sized(dst.to_reg(), mb_rru, size.to_bytes())
),
Inst::XmmCmove {
@@ -1758,7 +1750,7 @@ impl PrettyPrint for Inst {
let size = ty.bytes() as u8;
format!(
"lock cmpxchg{} {}, {}",
suffix_bwlq(size),
suffix_bwlq(OperandSize::from_bytes(size as u32)),
show_ireg_sized(*src, mb_rru, size),
dst.show_rru(mb_rru)
)
@@ -1828,7 +1820,7 @@ fn x64_get_regs(inst: &Inst, collector: &mut RegUsageCollector) {
}
Inst::Div { size, divisor, .. } => {
collector.add_mod(Writable::from_reg(regs::rax()));
if *size == 1 {
if *size == OperandSize::Size8 {
collector.add_def(Writable::from_reg(regs::rdx()));
} else {
collector.add_mod(Writable::from_reg(regs::rdx()));
@@ -1852,12 +1844,11 @@ fn x64_get_regs(inst: &Inst, collector: &mut RegUsageCollector) {
}
}
Inst::SignExtendData { size } => match size {
1 => collector.add_mod(Writable::from_reg(regs::rax())),
2 | 4 | 8 => {
OperandSize::Size8 => collector.add_mod(Writable::from_reg(regs::rax())),
_ => {
collector.add_use(regs::rax());
collector.add_def(Writable::from_reg(regs::rdx()));
}
_ => unreachable!(),
},
Inst::UnaryRmR { src, dst, .. } | Inst::XmmUnaryRmR { src, dst, .. } => {
src.get_regs_as_uses(collector);
@@ -2547,7 +2538,7 @@ impl MachInst for Inst {
match self {
Self::VirtualSPOffsetAdj { offset } => Some(MachInstStackOpInfo::NomSPAdj(*offset)),
Self::MovRM {
size: 8,
size: OperandSize::Size8,
src,
dst: SyntheticAmode::NominalSPOffset { simm32 },
} => Some(MachInstStackOpInfo::StoreNomSPOff(*src, *simm32 as i64)),

@@ -399,9 +399,9 @@ fn emit_cmp<C: LowerCtx<I = Inst>>(ctx: &mut C, insn: IRInst, cc: IntCC) -> IntC
let rhs_hi = RegMemImm::reg(rhs.regs()[1]);
match cc {
IntCC::Equal => {
ctx.emit(Inst::cmp_rmi_r(8, rhs_hi, lhs_hi));
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_hi, lhs_hi));
ctx.emit(Inst::setcc(CC::Z, cmp1));
ctx.emit(Inst::cmp_rmi_r(8, rhs_lo, lhs_lo));
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_lo, lhs_lo));
ctx.emit(Inst::setcc(CC::Z, cmp2));
ctx.emit(Inst::alu_rmi_r(
true,
@@ -418,9 +418,9 @@ fn emit_cmp<C: LowerCtx<I = Inst>>(ctx: &mut C, insn: IRInst, cc: IntCC) -> IntC
IntCC::NotEqual
}
IntCC::NotEqual => {
ctx.emit(Inst::cmp_rmi_r(8, rhs_hi, lhs_hi));
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_hi, lhs_hi));
ctx.emit(Inst::setcc(CC::NZ, cmp1));
ctx.emit(Inst::cmp_rmi_r(8, rhs_lo, lhs_lo));
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_lo, lhs_lo));
ctx.emit(Inst::setcc(CC::NZ, cmp2));
ctx.emit(Inst::alu_rmi_r(
true,
@@ -447,10 +447,10 @@ fn emit_cmp<C: LowerCtx<I = Inst>>(ctx: &mut C, insn: IRInst, cc: IntCC) -> IntC
// Result = (lhs_hi <> rhs_hi) ||
// (lhs_hi == rhs_hi && lhs_lo <> rhs_lo)
let cmp3 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::cmp_rmi_r(8, rhs_hi, lhs_hi));
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_hi, lhs_hi));
ctx.emit(Inst::setcc(CC::from_intcc(cc.without_equal()), cmp1));
ctx.emit(Inst::setcc(CC::Z, cmp2));
ctx.emit(Inst::cmp_rmi_r(8, rhs_lo, lhs_lo));
ctx.emit(Inst::cmp_rmi_r(OperandSize::Size64, rhs_lo, lhs_lo));
ctx.emit(Inst::setcc(CC::from_intcc(cc.unsigned()), cmp3));
ctx.emit(Inst::alu_rmi_r(
true,
@@ -488,7 +488,11 @@ fn emit_cmp<C: LowerCtx<I = Inst>>(ctx: &mut C, insn: IRInst, cc: IntCC) -> IntC
// Cranelift's icmp semantics want to compare lhs - rhs, while Intel gives
// us dst - src at the machine instruction level, so invert operands.
ctx.emit(Inst::cmp_rmi_r(ty.bytes() as u8, RegMemImm::reg(rhs), lhs));
ctx.emit(Inst::cmp_rmi_r(
OperandSize::from_ty(ty),
RegMemImm::reg(rhs),
lhs,
));
cc
}
}
@@ -612,7 +616,7 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
));
// tmp1 = src >> 1
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
@@ -632,7 +636,12 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
tmp2,
));
// tmp2 = (src & 0b0101..) << 1
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftLeft, Some(1), tmp2));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(1),
tmp2,
));
// tmp0 = (src >> 1) & 0b0101.. | (src & 0b0101..) << 1
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
@@ -650,7 +659,7 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
tmp2,
));
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(2),
tmp1,
@@ -667,7 +676,12 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
RegMemImm::reg(tmp0.to_reg()),
tmp2,
));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftLeft, Some(2), tmp2));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(2),
tmp2,
));
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
true,
@@ -684,7 +698,7 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
tmp2,
));
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(4),
tmp1,
@@ -701,7 +715,12 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
RegMemImm::reg(tmp0.to_reg()),
tmp2,
));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftLeft, Some(4), tmp2));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(4),
tmp2,
));
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
true,
@@ -719,7 +738,7 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
tmp2,
));
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(8),
tmp1,
@@ -736,7 +755,12 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
RegMemImm::reg(tmp0.to_reg()),
tmp2,
));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftLeft, Some(8), tmp2));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(8),
tmp2,
));
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
true,
@@ -755,7 +779,7 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
tmp2,
));
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(16),
tmp1,
@@ -772,7 +796,12 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
RegMemImm::reg(tmp0.to_reg()),
tmp2,
));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftLeft, Some(16), tmp2));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(16),
tmp2,
));
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
true,
@@ -791,7 +820,7 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
tmp2,
));
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(32),
tmp1,
@@ -808,7 +837,12 @@ fn emit_bitrev<C: LowerCtx<I = Inst>>(ctx: &mut C, src: Reg, dst: Writable<Reg>,
RegMemImm::reg(tmp0.to_reg()),
tmp2,
));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftLeft, Some(32), tmp2));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(32),
tmp2,
));
ctx.emit(Inst::gen_move(tmp0, tmp2.to_reg(), types::I64));
ctx.emit(Inst::alu_rmi_r(
true,
@@ -859,7 +893,12 @@ fn emit_shl_i128<C: LowerCtx<I = Inst>>(
amt_src,
types::I64,
));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftLeft, None, tmp1));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
None,
tmp1,
));
ctx.emit(Inst::gen_move(tmp2, src_hi, types::I64));
ctx.emit(Inst::gen_move(
@@ -867,7 +906,12 @@ fn emit_shl_i128<C: LowerCtx<I = Inst>>(
amt_src,
types::I64,
));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftLeft, None, tmp2));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
None,
tmp2,
));
ctx.emit(Inst::imm(OperandSize::Size64, 64, amt));
ctx.emit(Inst::alu_rmi_r(
@@ -883,7 +927,12 @@ fn emit_shl_i128<C: LowerCtx<I = Inst>>(
amt.to_reg(),
types::I64,
));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftRightLogical, None, tmp3));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
None,
tmp3,
));
ctx.emit(Inst::alu_rmi_r(
true,
@@ -915,9 +964,24 @@ fn emit_shl_i128<C: LowerCtx<I = Inst>>(
RegMemImm::imm(64),
amt,
));
ctx.emit(Inst::cmove(8, CC::Z, RegMem::reg(tmp3.to_reg()), dst_hi));
ctx.emit(Inst::cmove(8, CC::Z, RegMem::reg(tmp1.to_reg()), dst_lo));
ctx.emit(Inst::cmove(8, CC::NZ, RegMem::reg(tmp1.to_reg()), dst_hi));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(tmp3.to_reg()),
dst_hi,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(tmp1.to_reg()),
dst_lo,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::NZ,
RegMem::reg(tmp1.to_reg()),
dst_hi,
));
}
fn emit_shr_i128<C: LowerCtx<I = Inst>>(
@@ -969,7 +1033,7 @@ fn emit_shr_i128<C: LowerCtx<I = Inst>>(
amt_src,
types::I64,
));
ctx.emit(Inst::shift_r(8, shift_kind, None, tmp1));
ctx.emit(Inst::shift_r(OperandSize::Size64, shift_kind, None, tmp1));
ctx.emit(Inst::gen_move(tmp2, src_lo, types::I64));
ctx.emit(Inst::gen_move(
@@ -977,7 +1041,7 @@ fn emit_shr_i128<C: LowerCtx<I = Inst>>(
amt_src,
types::I64,
));
ctx.emit(Inst::shift_r(8, shift_kind, None, tmp2));
ctx.emit(Inst::shift_r(OperandSize::Size64, shift_kind, None, tmp2));
ctx.emit(Inst::imm(OperandSize::Size64, 64, amt));
ctx.emit(Inst::alu_rmi_r(
@@ -993,7 +1057,12 @@ fn emit_shr_i128<C: LowerCtx<I = Inst>>(
amt.to_reg(),
types::I64,
));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftLeft, None, tmp3));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
None,
tmp3,
));
ctx.emit(Inst::alu_rmi_r(
true,
@@ -1005,7 +1074,7 @@ fn emit_shr_i128<C: LowerCtx<I = Inst>>(
if is_signed {
ctx.emit(Inst::gen_move(dst_hi, src_hi, types::I64));
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightArithmetic,
Some(63),
dst_hi,
@@ -1035,9 +1104,24 @@ fn emit_shr_i128<C: LowerCtx<I = Inst>>(
RegMemImm::imm(64),
amt,
));
ctx.emit(Inst::cmove(8, CC::Z, RegMem::reg(tmp1.to_reg()), dst_hi));
ctx.emit(Inst::cmove(8, CC::Z, RegMem::reg(tmp3.to_reg()), dst_lo));
ctx.emit(Inst::cmove(8, CC::NZ, RegMem::reg(tmp1.to_reg()), dst_lo));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(tmp1.to_reg()),
dst_hi,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(tmp3.to_reg()),
dst_lo,
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::NZ,
RegMem::reg(tmp1.to_reg()),
dst_lo,
));
}
fn make_libcall_sig<C: LowerCtx<I = Inst>>(
@@ -1249,7 +1333,12 @@ fn emit_cmoves<C: LowerCtx<I = Inst>>(
let size = size / src.len() as u8;
let size = u8::max(size, 4); // at least 32 bits
for (dst, src) in dst.regs().iter().zip(src.regs().iter()) {
ctx.emit(Inst::cmove(size, cc, RegMem::reg(*src), *dst));
ctx.emit(Inst::cmove(
OperandSize::from_bytes(size.into()),
cc,
RegMem::reg(*src),
*dst,
));
}
}
@@ -1262,28 +1351,24 @@ fn emit_clz<C: LowerCtx<I = Inst>>(
) {
let src = RegMem::reg(src);
let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
ctx.emit(Inst::imm(
OperandSize::from_bytes(ty.bytes()),
u64::max_value(),
dst,
));
ctx.emit(Inst::imm(OperandSize::from_ty(ty), u64::max_value(), dst));
ctx.emit(Inst::unary_rm_r(
ty.bytes() as u8,
OperandSize::from_ty(ty),
UnaryRmROpcode::Bsr,
src,
tmp,
));
ctx.emit(Inst::cmove(
ty.bytes() as u8,
OperandSize::from_ty(ty),
CC::Z,
RegMem::reg(dst.to_reg()),
tmp,
));
ctx.emit(Inst::imm(
OperandSize::from_bytes(ty.bytes()),
OperandSize::from_ty(ty),
orig_ty.bits() as u64 - 1,
dst,
));
@@ -1308,14 +1393,14 @@ fn emit_ctz<C: LowerCtx<I = Inst>>(
ctx.emit(Inst::imm(OperandSize::Size32, orig_ty.bits() as u64, tmp));
ctx.emit(Inst::unary_rm_r(
ty.bytes() as u8,
OperandSize::from_ty(ty),
UnaryRmROpcode::Bsf,
src,
dst,
));
ctx.emit(Inst::cmove(
ty.bytes() as u8,
OperandSize::from_ty(ty),
CC::Z,
RegMem::reg(tmp.to_reg()),
dst,
@@ -1630,7 +1715,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
types::I64,
));
ctx.emit(Inst::mul_hi(
/* size = */ 8,
OperandSize::Size64,
/* signed = */ false,
RegMem::reg(rhs.regs()[0]),
));
@@ -1764,7 +1849,6 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::Bnot => {
let ty = ty.unwrap();
let size = ty.bytes() as u8;
if ty.is_vector() {
let src = put_input_in_reg(ctx, inputs[0]);
@@ -1777,16 +1861,16 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let src = put_input_in_regs(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(dst.regs()[0], src.regs()[0], types::I64));
ctx.emit(Inst::not(8, dst.regs()[0]));
ctx.emit(Inst::not(OperandSize::Size64, dst.regs()[0]));
ctx.emit(Inst::gen_move(dst.regs()[1], src.regs()[1], types::I64));
ctx.emit(Inst::not(8, dst.regs()[1]));
ctx.emit(Inst::not(OperandSize::Size64, dst.regs()[1]));
} else if ty.is_bool() {
unimplemented!("bool bnot")
} else {
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::gen_move(dst, src, ty));
ctx.emit(Inst::not(size, dst));
ctx.emit(Inst::not(OperandSize::from_ty(ty), dst));
}
}
@@ -1825,23 +1909,25 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// This implementation uses the last two encoding methods.
let (size, lhs) = match dst_ty {
types::I8 | types::I16 => match op {
Opcode::Ishl => (4, put_input_in_reg(ctx, inputs[0])),
Opcode::Ishl => (OperandSize::Size32, put_input_in_reg(ctx, inputs[0])),
Opcode::Ushr => (
4,
OperandSize::Size32,
extend_input_to_reg(ctx, inputs[0], ExtSpec::ZeroExtendTo32),
),
Opcode::Sshr => (
4,
OperandSize::Size32,
extend_input_to_reg(ctx, inputs[0], ExtSpec::SignExtendTo32),
),
Opcode::Rotl | Opcode::Rotr => {
(dst_ty.bytes() as u8, put_input_in_reg(ctx, inputs[0]))
}
Opcode::Rotl | Opcode::Rotr => (
OperandSize::from_ty(dst_ty),
put_input_in_reg(ctx, inputs[0]),
),
_ => unreachable!(),
},
types::I32 | types::I64 => {
(dst_ty.bytes() as u8, put_input_in_reg(ctx, inputs[0]))
}
types::I32 | types::I64 => (
OperandSize::from_ty(dst_ty),
put_input_in_reg(ctx, inputs[0]),
),
_ => unreachable!("unhandled output type for shift/rotates: {}", dst_ty),
};
@@ -2048,7 +2134,12 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
base_mask_address,
));
ctx.emit(Inst::gen_move(mask_offset, reg, types::I64));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftLeft, Some(4), mask_offset));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftLeft,
Some(4),
mask_offset,
));
Amode::imm_reg_reg_shift(
0,
base_mask_address.to_reg(),
@@ -2170,12 +2261,17 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
if let Some(shift_by) = ctx.get_input_as_source_or_const(insn, 1).constant {
// Mask the shift amount according to Cranelift's semantics.
let shift_by = (shift_by as u8) & (types::I64.bits() as u8 - 1);
ctx.emit(Inst::shift_r(8, kind, Some(shift_by), reg));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
kind,
Some(shift_by),
reg,
));
} else {
let dynamic_shift_by = put_input_in_reg(ctx, inputs[1]);
let w_rcx = Writable::from_reg(regs::rcx());
ctx.emit(Inst::mov_r_r(true, dynamic_shift_by, w_rcx));
ctx.emit(Inst::shift_r(8, kind, None, reg));
ctx.emit(Inst::shift_r(OperandSize::Size64, kind, None, reg));
};
};
shift(lower_lane);
@@ -2268,10 +2364,9 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
dst,
));
} else {
let size = ty.bytes() as u8;
let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::gen_move(dst, src, ty));
ctx.emit(Inst::neg(size, dst));
ctx.emit(Inst::neg(OperandSize::from_ty(ty), dst));
}
}
@@ -2285,7 +2380,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::unary_rm_r(
orig_ty.bytes() as u8,
OperandSize::from_ty(orig_ty),
UnaryRmROpcode::Lzcnt,
src,
dst,
@@ -2320,8 +2415,17 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
RegMemImm::imm(64),
dst,
));
ctx.emit(Inst::cmp_rmi_r(8, RegMemImm::imm(64), tmp1.to_reg()));
ctx.emit(Inst::cmove(8, CC::NZ, RegMem::reg(tmp1.to_reg()), dst));
ctx.emit(Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::imm(64),
tmp1.to_reg(),
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::NZ,
RegMem::reg(tmp1.to_reg()),
dst,
));
ctx.emit(Inst::alu_rmi_r(
true,
AluRmiROpcode::Xor,
@@ -2355,7 +2459,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::unary_rm_r(
orig_ty.bytes() as u8,
OperandSize::from_ty(orig_ty),
UnaryRmROpcode::Tzcnt,
src,
dst,
@@ -2387,8 +2491,17 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
RegMemImm::imm(64),
tmp1,
));
ctx.emit(Inst::cmp_rmi_r(8, RegMemImm::imm(64), dst.to_reg()));
ctx.emit(Inst::cmove(8, CC::Z, RegMem::reg(tmp1.to_reg()), dst));
ctx.emit(Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::imm(64),
dst.to_reg(),
));
ctx.emit(Inst::cmove(
OperandSize::Size64,
CC::Z,
RegMem::reg(tmp1.to_reg()),
dst,
));
ctx.emit(Inst::alu_rmi_r(
true,
AluRmiROpcode::Xor,
@@ -2422,7 +2535,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
ctx.emit(Inst::unary_rm_r(
ty.bytes() as u8,
OperandSize::from_ty(ty),
UnaryRmROpcode::Popcnt,
src,
dst,
@@ -2441,13 +2554,13 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let src_hi = srcs.regs()[1];
ctx.emit(Inst::unary_rm_r(
8,
OperandSize::Size64,
UnaryRmROpcode::Popcnt,
RegMem::reg(src_lo),
dst,
));
ctx.emit(Inst::unary_rm_r(
8,
OperandSize::Size64,
UnaryRmROpcode::Popcnt,
RegMem::reg(src_hi),
tmp,
@@ -2507,7 +2620,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// shr $1, tmp1
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
@@ -2537,7 +2650,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// shr $1, tmp1
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
@@ -2561,7 +2674,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// shr $1, tmp1
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
@@ -2587,7 +2700,12 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
ctx.emit(Inst::mov64_rm_r(RegMem::reg(tmp2.to_reg()), dst));
// shr $4, dst
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftRightLogical, Some(4), dst));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(4),
dst,
));
// add tmp2, dst
ctx.emit(Inst::alu_rmi_r(
@@ -2621,7 +2739,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// shr $56, dst
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(56),
dst,
@@ -2638,7 +2756,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// shr $1, tmp1
ctx.emit(Inst::shift_r(
4,
OperandSize::Size32,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
@@ -2665,7 +2783,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// shr $1, tmp1
ctx.emit(Inst::shift_r(
4,
OperandSize::Size32,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
@@ -2689,7 +2807,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// shr $1, tmp1
ctx.emit(Inst::shift_r(
4,
OperandSize::Size32,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
@@ -2715,7 +2833,12 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
ctx.emit(Inst::mov64_rm_r(RegMem::reg(tmp2.to_reg()), dst));
// shr $4, dst
ctx.emit(Inst::shift_r(4, ShiftKind::ShiftRightLogical, Some(4), dst));
ctx.emit(Inst::shift_r(
OperandSize::Size32,
ShiftKind::ShiftRightLogical,
Some(4),
dst,
));
// add tmp2, dst
ctx.emit(Inst::alu_rmi_r(
@@ -2743,7 +2866,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// shr $24, dst
ctx.emit(Inst::shift_r(
4,
OperandSize::Size32,
ShiftKind::ShiftRightLogical,
Some(24),
dst,
@@ -2814,7 +2937,11 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}
_ => unreachable!(),
};
ctx.emit(Inst::cmp_rmi_r(ty.bytes() as u8, RegMemImm::imm(imm), src));
ctx.emit(Inst::cmp_rmi_r(
OperandSize::from_ty(ty),
RegMemImm::imm(imm),
src,
));
ctx.emit(Inst::setcc(CC::Z, dst));
}
@@ -2861,7 +2988,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
types::I64,
));
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightArithmetic,
Some(63),
dst.regs()[1],
@@ -4555,8 +4682,16 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
if elem_ty == types::I128 {
let srcs = put_input_in_regs(ctx, inputs[0]);
ctx.emit(Inst::mov_r_m(8, srcs.regs()[0], addr.clone()));
ctx.emit(Inst::mov_r_m(8, srcs.regs()[1], addr.offset(8)));
ctx.emit(Inst::mov_r_m(
OperandSize::Size64,
srcs.regs()[0],
addr.clone(),
));
ctx.emit(Inst::mov_r_m(
OperandSize::Size64,
srcs.regs()[1],
addr.offset(8),
));
} else {
let src = put_input_in_reg(ctx, inputs[0]);
@@ -4567,7 +4702,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// TODO Specialize for different types: MOVUPD, MOVDQU, etc.
Inst::xmm_mov_r_m(SseOpcode::Movups, src, addr)
}
_ => Inst::mov_r_m(elem_ty.bytes() as u8, src, addr),
_ => Inst::mov_r_m(OperandSize::from_ty(elem_ty), src, addr),
});
}
}
@@ -4672,7 +4807,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let ty_access = ctx.input_ty(insn, 0);
assert!(is_valid_atomic_transaction_ty(ty_access));
ctx.emit(Inst::mov_r_m(ty_access.bytes() as u8, data, addr));
ctx.emit(Inst::mov_r_m(OperandSize::from_ty(ty_access), data, addr));
ctx.emit(Inst::Fence {
kind: FenceKind::MFence,
});
@@ -4808,7 +4943,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
CC::from_intcc(cond_code)
} else {
let sel_ty = ctx.input_ty(insn, 0);
let size = ctx.input_ty(insn, 0).bytes() as u8;
let size = OperandSize::from_ty(ctx.input_ty(insn, 0));
let test = put_input_in_reg(ctx, flag_input);
let test_input = if sel_ty == types::B1 {
// The input is a boolean value; test the LSB for nonzero with:
@@ -4891,7 +5026,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let is_div = kind.is_div();
let input_ty = ctx.input_ty(insn, 0);
let size = input_ty.bytes() as u8;
let size = OperandSize::from_ty(input_ty);
let dividend = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
@@ -4915,7 +5050,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let divisor_copy = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::gen_move(divisor_copy, divisor, types::I64));
let tmp = if op == Opcode::Sdiv && size == 8 {
let tmp = if op == Opcode::Sdiv && size == OperandSize::Size64 {
Some(ctx.alloc_tmp(types::I64).only_reg().unwrap())
} else {
None
@@ -4959,10 +5094,10 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// The quotient is in rax.
ctx.emit(Inst::gen_move(dst, regs::rax(), input_ty));
} else {
if size == 1 {
if size == OperandSize::Size8 {
// The remainder is in AH. Right-shift by 8 bits then move from rax.
ctx.emit(Inst::shift_r(
8,
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(8),
Writable::from_reg(regs::rax()),
@@ -4977,7 +5112,6 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::Umulhi | Opcode::Smulhi => {
let input_ty = ctx.input_ty(insn, 0);
let size = input_ty.bytes() as u8;
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]);
@@ -4992,7 +5126,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// Emit the actual mul or imul.
let signed = op == Opcode::Smulhi;
ctx.emit(Inst::mul_hi(size, signed, rhs));
ctx.emit(Inst::mul_hi(OperandSize::from_ty(input_ty), signed, rhs));
// Read the result from the high part (stored in %rdx).
ctx.emit(Inst::gen_move(dst, regs::rdx(), input_ty));
@@ -5367,7 +5501,12 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
dst,
size,
));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftRightLogical, Some(8), dst));
ctx.emit(Inst::shift_r(
OperandSize::Size64,
ShiftKind::ShiftRightLogical,
Some(8),
dst,
));
}
_ => unimplemented!("unknown input type {} for {}", src_ty, op),
}
@@ -5562,9 +5701,17 @@ impl LowerBackend for X64Backend {
};
let tmp1 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
let tmp2 = ctx.alloc_tmp(types::I64).only_reg().unwrap();
ctx.emit(Inst::cmp_rmi_r(8, RegMemImm::imm(0), src.regs()[0]));
ctx.emit(Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::imm(0),
src.regs()[0],
));
ctx.emit(Inst::setcc(half_cc, tmp1));
ctx.emit(Inst::cmp_rmi_r(8, RegMemImm::imm(0), src.regs()[1]));
ctx.emit(Inst::cmp_rmi_r(
OperandSize::Size64,
RegMemImm::imm(0),
src.regs()[1],
));
ctx.emit(Inst::setcc(half_cc, tmp2));
ctx.emit(Inst::alu_rmi_r(
false,
@@ -5586,7 +5733,6 @@ impl LowerBackend for X64Backend {
Opcode::Brnz => CC::NZ,
_ => unreachable!(),
};
let size_bytes = src_ty.bytes() as u8;
// See case for `Opcode::Select` above re: testing the
// boolean input.
let test_input = if src_ty == types::B1 {
@@ -5598,7 +5744,11 @@ impl LowerBackend for X64Backend {
RegMemImm::reg(src)
};
ctx.emit(Inst::test_rmi_r(size_bytes, test_input, src));
ctx.emit(Inst::test_rmi_r(
OperandSize::from_ty(src_ty),
test_input,
src,
));
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else {
unimplemented!("brz/brnz with non-int type {:?}", src_ty);
@@ -5623,10 +5773,9 @@ impl LowerBackend for X64Backend {
},
);
let cc = CC::from_intcc(ctx.data(branches[0]).cond_code().unwrap());
let byte_size = src_ty.bytes() as u8;
// Cranelift's icmp semantics want to compare lhs - rhs, while Intel gives
// us dst - src at the machine instruction level, so invert operands.
ctx.emit(Inst::cmp_rmi_r(byte_size, rhs, lhs));
ctx.emit(Inst::cmp_rmi_r(OperandSize::from_ty(src_ty), rhs, lhs));
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else {
unimplemented!("bricmp with non-int type {:?}", src_ty);
@@ -5654,7 +5803,7 @@ impl LowerBackend for X64Backend {
);
let ty = ctx.input_ty(ifcmp_sp, 0);
ctx.emit(Inst::cmp_rmi_r(
ty.bytes() as u8,
OperandSize::from_ty(ty),
RegMemImm::reg(regs::rsp()),
operand,
));
@@ -5721,7 +5870,11 @@ impl LowerBackend for X64Backend {
);
// Bounds-check (compute flags from idx - jt_size) and branch to default.
ctx.emit(Inst::cmp_rmi_r(4, RegMemImm::imm(jt_size), idx));
ctx.emit(Inst::cmp_rmi_r(
OperandSize::Size32,
RegMemImm::imm(jt_size),
idx,
));
// Emit the compound instruction that does:
//