Multi-register value support: framework for Values wider than machine regs.
This allows for `I128` values everywhere, and for `I64` values on 32-bit targets (e.g., ARM32 and x86-32). It does not alter the machine backends to build such support; it just adds the framework for the MachInst backends to *reason* about a `Value` residing in more than one register.
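For orientation before the diff: the core of the framework is a `ValueRegs` container recording the one-or-more registers that hold a single CLIF `Value`, plus a `rc_for_type` that now returns one register class and one type per constituent register. The sketch below is illustrative only — `one()`, `only_reg()`, and the `rc_for_type` shape are taken from the diff itself, while the `two()` constructor and the exact struct layout are assumptions:

    /// Where a single SSA value lives: one or more machine registers.
    /// `R` is `Reg`, `RealReg`, or `Writable<Reg>` depending on context.
    #[derive(Clone, Copy, Debug)]
    pub struct ValueRegs<R: Copy> {
        // Up to two parts for now (e.g. lo/hi halves of an `I128`).
        parts: [Option<R>; 2],
    }

    impl<R: Copy> ValueRegs<R> {
        /// The common case: a value that fits in one register.
        pub fn one(reg: R) -> Self {
            ValueRegs { parts: [Some(reg), None] }
        }

        /// A value split across two registers (assumed constructor; the
        /// diff below only exercises `one` and `only_reg`).
        pub fn two(lo: R, hi: R) -> Self {
            ValueRegs { parts: [Some(lo), Some(hi)] }
        }

        /// The single register, if the value occupies exactly one.
        /// Lowering code that cannot yet handle multi-reg values calls
        /// `.only_reg().unwrap()` so unsupported cases fail loudly.
        pub fn only_reg(self) -> Option<R> {
            match self.parts {
                [Some(r), None] => Some(r),
                _ => None,
            }
        }
    }

Correspondingly, `Inst::rc_for_type` changes from returning a single `RegClass` to returning `(&'static [RegClass], &'static [Type])`, one entry per register: on AArch64, `I64` maps to `(&[RegClass::I64], &[I64])`, while `I128` maps to `(&[RegClass::I64, RegClass::I64], &[I64, I64])`.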
@@ -77,7 +77,7 @@ fn try_fill_baldrdash_reg(call_conv: isa::CallConv, param: &ir::AbiParam) -> Opt
 &ir::ArgumentPurpose::VMContext => {
 // This is SpiderMonkey's `WasmTlsReg`.
 Some(ABIArg::Reg(
-xreg(BALDRDASH_TLS_REG).to_real_reg(),
+ValueRegs::one(xreg(BALDRDASH_TLS_REG).to_real_reg()),
 ir::types::I64,
 param.extension,
 param.purpose,
@@ -86,7 +86,7 @@ fn try_fill_baldrdash_reg(call_conv: isa::CallConv, param: &ir::AbiParam) -> Opt
 &ir::ArgumentPurpose::SignatureId => {
 // This is SpiderMonkey's `WasmTableCallSigReg`.
 Some(ABIArg::Reg(
-xreg(BALDRDASH_SIG_REG).to_real_reg(),
+ValueRegs::one(xreg(BALDRDASH_SIG_REG).to_real_reg()),
 ir::types::I64,
 param.extension,
 param.purpose,
@@ -220,7 +220,9 @@ impl ABIMachineSpec for AArch64MachineDeps {
 "Invalid type for AArch64: {:?}",
 param.value_type
 );
-let rc = Inst::rc_for_type(param.value_type).unwrap();
+let (rcs, _) = Inst::rc_for_type(param.value_type).unwrap();
+assert!(rcs.len() == 1, "Multi-reg values not supported yet");
+let rc = rcs[0];

 let next_reg = match rc {
 RegClass::I64 => &mut next_xreg,
@@ -238,7 +240,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 _ => unreachable!(),
 };
 ret.push(ABIArg::Reg(
-reg.to_real_reg(),
+ValueRegs::one(reg.to_real_reg()),
 param.value_type,
 param.extension,
 param.purpose,
@@ -271,7 +273,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 debug_assert!(args_or_rets == ArgsOrRets::Args);
 if next_xreg < max_per_class_reg_vals && remaining_reg_vals > 0 {
 ret.push(ABIArg::Reg(
-xreg(next_xreg).to_real_reg(),
+ValueRegs::one(xreg(next_xreg).to_real_reg()),
 I64,
 ir::ArgumentExtension::None,
 ir::ArgumentPurpose::Normal,
@@ -345,7 +347,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 Inst::Ret
 }

-fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallVec<[Inst; 4]> {
+fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallInstVec<Inst> {
 let imm = imm as u64;
 let mut insts = SmallVec::new();
 if let Some(imm12) = Imm12::maybe_from_u64(imm) {
@@ -370,7 +372,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 insts
 }

-fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallVec<[Inst; 2]> {
+fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Inst> {
 let mut insts = SmallVec::new();
 insts.push(Inst::AluRRRExtend {
 alu_op: ALUOp::SubS64,
@@ -411,7 +413,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 Inst::gen_store(mem, from_reg, ty, MemFlags::trusted())
 }

-fn gen_sp_reg_adjust(amount: i32) -> SmallVec<[Inst; 2]> {
+fn gen_sp_reg_adjust(amount: i32) -> SmallInstVec<Inst> {
 if amount == 0 {
 return SmallVec::new();
 }
@@ -455,7 +457,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 }
 }

-fn gen_prologue_frame_setup() -> SmallVec<[Inst; 2]> {
+fn gen_prologue_frame_setup() -> SmallInstVec<Inst> {
 let mut insts = SmallVec::new();
 // stp fp (x29), lr (x30), [sp, #-16]!
 insts.push(Inst::StoreP64 {
@@ -481,7 +483,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 insts
 }

-fn gen_epilogue_frame_restore() -> SmallVec<[Inst; 2]> {
+fn gen_epilogue_frame_restore() -> SmallInstVec<Inst> {
 let mut insts = SmallVec::new();

 // MOV (alias of ORR) interprets x31 as XZR, so use an ADD here.
@@ -508,7 +510,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
 insts
 }

-fn gen_probestack(_: u32) -> SmallVec<[Self::I; 2]> {
+fn gen_probestack(_: u32) -> SmallInstVec<Self::I> {
 // TODO: implement if we ever require stack probes on an AArch64 host
 // (unlikely unless Lucet is ported)
 smallvec![]
@@ -3,7 +3,7 @@
 // Some variants are never constructed, but we still want them as options in the future.
 #![allow(dead_code)]

-use crate::ir::types::{F32X2, F32X4, F64X2, I16X4, I16X8, I32X2, I32X4, I64X2, I8X16, I8X8};
+use crate::ir::types::*;
 use crate::ir::Type;
 use crate::isa::aarch64::inst::*;
 use crate::machinst::{ty_bits, MachLabel};
@@ -5,9 +5,7 @@

 use crate::binemit::CodeOffset;
 use crate::ir::types::{
-B1, B16, B16X4, B16X8, B32, B32X2, B32X4, B64, B64X2, B8, B8X16, B8X8, F32, F32X2, F32X4, F64,
-F64X2, FFLAGS, I16, I16X4, I16X8, I32, I32X2, I32X4, I64, I64X2, I8, I8X16, I8X8, IFLAGS, R32,
-R64,
+B1, B128, B16, B32, B64, B8, F32, F64, FFLAGS, I128, I16, I32, I64, I8, I8X16, IFLAGS, R32, R64,
 };
 use crate::ir::{ExternalName, MemFlags, Opcode, SourceLoc, TrapCode, Type};
 use crate::isa::CallConv;
@@ -1304,7 +1302,7 @@ impl Inst {
 }

 /// Create instructions that load a 32-bit floating-point constant.
-pub fn load_fp_constant32<F: FnMut(RegClass, Type) -> Writable<Reg>>(
+pub fn load_fp_constant32<F: FnMut(Type) -> Writable<Reg>>(
 rd: Writable<Reg>,
 value: u32,
 mut alloc_tmp: F,
@@ -1322,7 +1320,7 @@ impl Inst {
 } else {
 // TODO: use FMOV immediate form when `value` has sufficiently few mantissa/exponent
 // bits.
-let tmp = alloc_tmp(RegClass::I64, I32);
+let tmp = alloc_tmp(I32);
 let mut insts = Inst::load_constant(tmp, value as u64);

 insts.push(Inst::MovToFpu {
@@ -1336,7 +1334,7 @@ impl Inst {
 }

 /// Create instructions that load a 64-bit floating-point constant.
-pub fn load_fp_constant64<F: FnMut(RegClass, Type) -> Writable<Reg>>(
+pub fn load_fp_constant64<F: FnMut(Type) -> Writable<Reg>>(
 rd: Writable<Reg>,
 const_data: u64,
 mut alloc_tmp: F,
@@ -1350,7 +1348,7 @@ impl Inst {
 // bits. Also, treat it as half of a 128-bit vector and consider replicated
 // patterns. Scalar MOVI might also be an option.
 } else if const_data & (u32::MAX as u64) == 0 {
-let tmp = alloc_tmp(RegClass::I64, I64);
+let tmp = alloc_tmp(I64);
 let mut insts = Inst::load_constant(tmp, const_data);

 insts.push(Inst::MovToFpu {
@@ -1366,7 +1364,7 @@ impl Inst {
 }

 /// Create instructions that load a 128-bit vector constant.
-pub fn load_fp_constant128<F: FnMut(RegClass, Type) -> Writable<Reg>>(
+pub fn load_fp_constant128<F: FnMut(Type) -> Writable<Reg>>(
 rd: Writable<Reg>,
 const_data: u128,
 alloc_tmp: F,
@@ -1416,7 +1414,7 @@ impl Inst {

 /// Create instructions that load a vector constant consisting of elements with
 /// the same value.
-pub fn load_replicated_vector_pattern<F: FnMut(RegClass, Type) -> Writable<Reg>>(
+pub fn load_replicated_vector_pattern<F: FnMut(Type) -> Writable<Reg>>(
 rd: Writable<Reg>,
 pattern: u64,
 size: VectorSize,
@@ -1472,7 +1470,7 @@ impl Inst {
 } else if let Some(imm) = ASIMDFPModImm::maybe_from_u64(pattern, lane_size) {
 smallvec![Inst::VecDupFPImm { rd, imm, size }]
 } else {
-let tmp = alloc_tmp(RegClass::I64, I64);
+let tmp = alloc_tmp(I64);
 let mut insts = SmallVec::from(&Inst::load_constant(tmp, pattern)[..]);

 insts.push(Inst::VecDup {
@@ -2862,12 +2860,16 @@ impl MachInst for Inst {
 }
 }

-fn gen_constant<F: FnMut(RegClass, Type) -> Writable<Reg>>(
-to_reg: Writable<Reg>,
-value: u64,
+fn gen_constant<F: FnMut(Type) -> Writable<Reg>>(
+to_regs: ValueRegs<Writable<Reg>>,
+value: u128,
 ty: Type,
 alloc_tmp: F,
 ) -> SmallVec<[Inst; 4]> {
+let to_reg = to_regs
+.only_reg()
+.expect("multi-reg values not supported yet");
+let value = value as u64;
 if ty == F64 {
 Inst::load_fp_constant64(to_reg, value, alloc_tmp)
 } else if ty == F32 {
@@ -2905,14 +2907,28 @@ impl MachInst for Inst {
 None
 }

-fn rc_for_type(ty: Type) -> CodegenResult<RegClass> {
+fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
 match ty {
-I8 | I16 | I32 | I64 | B1 | B8 | B16 | B32 | B64 | R32 | R64 => Ok(RegClass::I64),
-F32 | F64 => Ok(RegClass::V128),
-IFLAGS | FFLAGS => Ok(RegClass::I64),
-B8X8 | B8X16 | B16X4 | B16X8 | B32X2 | B32X4 | B64X2 => Ok(RegClass::V128),
-F32X2 | I8X8 | I16X4 | I32X2 => Ok(RegClass::V128),
-F32X4 | F64X2 | I8X16 | I16X8 | I32X4 | I64X2 => Ok(RegClass::V128),
+I8 => Ok((&[RegClass::I64], &[I8])),
+I16 => Ok((&[RegClass::I64], &[I16])),
+I32 => Ok((&[RegClass::I64], &[I32])),
+I64 => Ok((&[RegClass::I64], &[I64])),
+B1 => Ok((&[RegClass::I64], &[B1])),
+B8 => Ok((&[RegClass::I64], &[B8])),
+B16 => Ok((&[RegClass::I64], &[B16])),
+B32 => Ok((&[RegClass::I64], &[B32])),
+B64 => Ok((&[RegClass::I64], &[B64])),
+R32 => panic!("32-bit reftype pointer should never be seen on AArch64"),
+R64 => Ok((&[RegClass::I64], &[R64])),
+F32 => Ok((&[RegClass::V128], &[F32])),
+F64 => Ok((&[RegClass::V128], &[F64])),
+I128 => Ok((&[RegClass::I64, RegClass::I64], &[I64, I64])),
+B128 => Ok((&[RegClass::I64, RegClass::I64], &[B64, B64])),
+_ if ty.is_vector() => {
+assert!(ty.bits() <= 128);
+Ok((&[RegClass::V128], &[I8X16]))
+}
+IFLAGS | FFLAGS => Ok((&[RegClass::I64], &[I64])),
 _ => Err(CodegenError::Unsupported(format!(
 "Unexpected SSA-value type: {}",
 ty
@@ -22,7 +22,7 @@ use super::lower_inst

 use crate::data_value::DataValue;
 use log::{debug, trace};
-use regalloc::{Reg, RegClass, Writable};
+use regalloc::{Reg, Writable};
 use smallvec::SmallVec;

 //============================================================================
@@ -179,9 +179,9 @@ pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
 } else {
 c
 };
-let to_reg = ctx.alloc_tmp(Inst::rc_for_type(ty).unwrap(), ty);
-for inst in Inst::gen_constant(to_reg, masked, ty, |reg_class, ty| {
-ctx.alloc_tmp(reg_class, ty)
+let to_reg = ctx.alloc_tmp(ty).only_reg().unwrap();
+for inst in Inst::gen_constant(ValueRegs::one(to_reg), masked as u128, ty, |ty| {
+ctx.alloc_tmp(ty).only_reg().unwrap()
 })
 .into_iter()
 {
@@ -189,13 +189,15 @@ pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
 }
 to_reg.to_reg()
 } else {
-ctx.put_input_in_reg(input.insn, input.input)
+ctx.put_input_in_regs(input.insn, input.input)
+.only_reg()
+.unwrap()
 };

 match (narrow_mode, from_bits) {
 (NarrowValueMode::None, _) => in_reg,
 (NarrowValueMode::ZeroExtend32, n) if n < 32 => {
-let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
 ctx.emit(Inst::Extend {
 rd: tmp,
 rn: in_reg,
@@ -206,7 +208,7 @@ pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
 tmp.to_reg()
 }
 (NarrowValueMode::SignExtend32, n) if n < 32 => {
-let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
 ctx.emit(Inst::Extend {
 rd: tmp,
 rn: in_reg,
@@ -223,7 +225,7 @@ pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
 // Constants are zero-extended to full 64-bit width on load already.
 in_reg
 } else {
-let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
 ctx.emit(Inst::Extend {
 rd: tmp,
 rn: in_reg,
@@ -235,7 +237,7 @@ pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
 }
 }
 (NarrowValueMode::SignExtend64, n) if n < 64 => {
-let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
 ctx.emit(Inst::Extend {
 rd: tmp,
 rn: in_reg,
@@ -696,7 +698,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
 /* addends64.len() == 0 */
 {
 if addends32.len() > 0 {
-let tmp = ctx.alloc_tmp(RegClass::I64, I64);
+let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
 let (reg1, extendop) = addends32.pop().unwrap();
 let signed = match extendop {
 ExtendOp::SXTW => true,
@@ -718,7 +720,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
 } else
 /* addends32.len() == 0 */
 {
-let off_reg = ctx.alloc_tmp(RegClass::I64, I64);
+let off_reg = ctx.alloc_tmp(I64).only_reg().unwrap();
 lower_constant_u64(ctx, off_reg, offset as u64);
 offset = 0;
 AMode::reg(off_reg.to_reg())
@@ -734,7 +736,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
 }

 // Allocate the temp and shoehorn it into the AMode.
-let addr = ctx.alloc_tmp(RegClass::I64, I64);
+let addr = ctx.alloc_tmp(I64).only_reg().unwrap();
 let (reg, memarg) = match memarg {
 AMode::RegExtended(r1, r2, extendop) => {
 (r1, AMode::RegExtended(addr.to_reg(), r2, extendop))
@@ -782,7 +784,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
 // If the register is the stack reg, we must move it to another reg
 // before adding it.
 let reg = if reg == stack_reg() {
-let tmp = ctx.alloc_tmp(RegClass::I64, I64);
+let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
 ctx.emit(Inst::gen_move(tmp, stack_reg(), I64));
 tmp.to_reg()
 } else {
@@ -824,7 +826,7 @@ pub(crate) fn lower_constant_f32<C: LowerCtx<I = Inst>>(
 rd: Writable<Reg>,
 value: f32,
 ) {
-let alloc_tmp = |class, ty| ctx.alloc_tmp(class, ty);
+let alloc_tmp = |ty| ctx.alloc_tmp(ty).only_reg().unwrap();

 for inst in Inst::load_fp_constant32(rd, value.to_bits(), alloc_tmp) {
 ctx.emit(inst);
@@ -836,7 +838,7 @@ pub(crate) fn lower_constant_f64<C: LowerCtx<I = Inst>>(
 rd: Writable<Reg>,
 value: f64,
 ) {
-let alloc_tmp = |class, ty| ctx.alloc_tmp(class, ty);
+let alloc_tmp = |ty| ctx.alloc_tmp(ty).only_reg().unwrap();

 for inst in Inst::load_fp_constant64(rd, value.to_bits(), alloc_tmp) {
 ctx.emit(inst);
@@ -858,7 +860,7 @@ pub(crate) fn lower_constant_f128<C: LowerCtx<I = Inst>>(
 size: VectorSize::Size8x16,
 });
 } else {
-let alloc_tmp = |class, ty| ctx.alloc_tmp(class, ty);
+let alloc_tmp = |ty| ctx.alloc_tmp(ty).only_reg().unwrap();
 for inst in Inst::load_fp_constant128(rd, value, alloc_tmp) {
 ctx.emit(inst);
 }
@@ -885,7 +887,7 @@ pub(crate) fn lower_splat_const<C: LowerCtx<I = Inst>>(
 ),
 None => (value, size),
 };
-let alloc_tmp = |class, ty| ctx.alloc_tmp(class, ty);
+let alloc_tmp = |ty| ctx.alloc_tmp(ty).only_reg().unwrap();

 for inst in Inst::load_replicated_vector_pattern(rd, value, size, alloc_tmp) {
 ctx.emit(inst);
@@ -1217,7 +1219,7 @@ pub(crate) fn lower_load<C: LowerCtx<I = Inst>, F: FnMut(&mut C, Writable<Reg>,

 let off = ctx.data(ir_inst).load_store_offset().unwrap();
 let mem = lower_address(ctx, elem_ty, &inputs[..], off);
-let rd = get_output_reg(ctx, output);
+let rd = get_output_reg(ctx, output).only_reg().unwrap();

 f(ctx, rd, elem_ty, mem);
 }
@@ -12,7 +12,7 @@ use crate::{CodegenError, CodegenResult};
 use crate::isa::aarch64::abi::*;
 use crate::isa::aarch64::inst::*;

-use regalloc::{RegClass, Writable};
+use regalloc::Writable;

 use alloc::boxed::Box;
 use alloc::vec::Vec;
@@ -46,21 +46,21 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 ty if ty.is_bool() => value,
 ty => unreachable!("Unknown type for const: {}", ty),
 };
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 lower_constant_u64(ctx, rd, value);
 }
 Opcode::F32const => {
 let value = f32::from_bits(ctx.get_constant(insn).unwrap() as u32);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 lower_constant_f32(ctx, rd, value);
 }
 Opcode::F64const => {
 let value = f64::from_bits(ctx.get_constant(insn).unwrap());
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 lower_constant_f64(ctx, rd, value);
 }
 Opcode::Iadd => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let ty = ty.unwrap();
 if !ty.is_vector() {
 let mul_insn =
@@ -116,7 +116,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }
 }
 Opcode::Isub => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let ty = ty.unwrap();
 if !ty.is_vector() {
@@ -148,7 +148,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // (SQADD / UQADD / SQSUB / UQSUB), which require scalar FP registers.
 let is_signed = op == Opcode::SaddSat || op == Opcode::SsubSat;
 let ty = ty.unwrap();
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 if !ty.is_vector() {
 let narrow_mode = if is_signed {
 NarrowValueMode::SignExtend64
@@ -162,8 +162,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 Opcode::SsubSat => FPUOp2::Sqsub64,
 _ => unreachable!(),
 };
-let va = ctx.alloc_tmp(RegClass::V128, I128);
-let vb = ctx.alloc_tmp(RegClass::V128, I128);
+let va = ctx.alloc_tmp(I8X16).only_reg().unwrap();
+let vb = ctx.alloc_tmp(I8X16).only_reg().unwrap();
 let ra = put_input_in_reg(ctx, inputs[0], narrow_mode);
 let rb = put_input_in_reg(ctx, inputs[1], narrow_mode);
 ctx.emit(Inst::MovToFpu {
@@ -211,7 +211,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::Ineg => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let ty = ty.unwrap();
 if !ty.is_vector() {
 let rn = zero_reg();
@@ -230,7 +230,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::Imul => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let ty = ty.unwrap();
@@ -245,8 +245,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 });
 } else {
 if ty == I64X2 {
-let tmp1 = ctx.alloc_tmp(RegClass::V128, I64X2);
-let tmp2 = ctx.alloc_tmp(RegClass::V128, I64X2);
+let tmp1 = ctx.alloc_tmp(I64X2).only_reg().unwrap();
+let tmp2 = ctx.alloc_tmp(I64X2).only_reg().unwrap();

 // This I64X2 multiplication is performed with several 32-bit
 // operations.
@@ -362,7 +362,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::Umulhi | Opcode::Smulhi => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let is_signed = op == Opcode::Smulhi;
 let input_ty = ctx.input_ty(insn, 0);
 assert!(ctx.input_ty(insn, 1) == input_ty);
@@ -443,7 +443,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 ALUOp::UDiv64
 };

-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
 let rm = put_input_in_reg(ctx, inputs[1], narrow_mode);
 // The div instruction does not trap on divide by zero or signed overflow
@@ -550,7 +550,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 assert!(from_bits <= to_bits);
 if from_bits < to_bits {
 let signed = op == Opcode::Sextend;
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();

 if let Some(extract_insn) = maybe_input_insn(ctx, inputs[0], Opcode::Extractlane) {
 let idx =
@@ -596,7 +596,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::Bnot => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let ty = ty.unwrap();
 if !ty.is_vector() {
 let rm = put_input_in_rs_immlogic(ctx, inputs[0], NarrowValueMode::None);
@@ -620,7 +620,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 | Opcode::BandNot
 | Opcode::BorNot
 | Opcode::BxorNot => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let ty = ty.unwrap();
 if !ty.is_vector() {
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
@@ -646,7 +646,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();

 ctx.emit(Inst::VecRRR {
 alu_op,
@@ -660,7 +660,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

 Opcode::Ishl | Opcode::Ushr | Opcode::Sshr => {
 let ty = ty.unwrap();
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 if !ty.is_vector() {
 let size = OperandSize::from_bits(ty_bits(ty));
 let narrow_mode = match (op, size) {
@@ -692,7 +692,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

 let rm = if is_right_shift {
 // Right shifts are implemented with a negative left shift.
-let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let rn = zero_reg();
 ctx.emit(Inst::AluRRR {
@@ -751,7 +751,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 let ty = ty.unwrap();
 let ty_bits_size = ty_bits(ty) as u8;

-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(
 ctx,
 inputs[0],
@@ -785,7 +785,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // ignored (because of the implicit masking done by the instruction),
 // so this is equivalent to negating the input.
 let alu_op = choose_32_64(ty, ALUOp::Sub32, ALUOp::Sub64);
-let tmp = ctx.alloc_tmp(RegClass::I64, ty);
+let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
 ctx.emit(Inst::AluRRR {
 alu_op,
 rd: tmp,
@@ -808,7 +808,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // Really ty_bits_size - rn, but the upper bits of the result are
 // ignored (because of the implicit masking done by the instruction),
 // so this is equivalent to negating the input.
-let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
 ctx.emit(Inst::AluRRR {
 alu_op: ALUOp::Sub32,
 rd: tmp,
@@ -821,7 +821,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 };

 // Explicitly mask the rotation count.
-let tmp_masked_rm = ctx.alloc_tmp(RegClass::I64, I32);
+let tmp_masked_rm = ctx.alloc_tmp(I32).only_reg().unwrap();
 ctx.emit(Inst::AluRRImmLogic {
 alu_op: ALUOp::And32,
 rd: tmp_masked_rm,
@@ -830,8 +830,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 });
 let tmp_masked_rm = tmp_masked_rm.to_reg();

-let tmp1 = ctx.alloc_tmp(RegClass::I64, I32);
-let tmp2 = ctx.alloc_tmp(RegClass::I64, I32);
+let tmp1 = ctx.alloc_tmp(I32).only_reg().unwrap();
+let tmp2 = ctx.alloc_tmp(I32).only_reg().unwrap();
 ctx.emit(Inst::AluRRImm12 {
 alu_op: ALUOp::Sub32,
 rd: tmp1,
@@ -870,7 +870,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }
 immshift.imm &= ty_bits_size - 1;

-let tmp1 = ctx.alloc_tmp(RegClass::I64, I32);
+let tmp1 = ctx.alloc_tmp(I32).only_reg().unwrap();
 ctx.emit(Inst::AluRRImmShift {
 alu_op: ALUOp::Lsr32,
 rd: tmp1,
@@ -900,7 +900,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::Bitrev | Opcode::Clz | Opcode::Cls | Opcode::Ctz => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let needs_zext = match op {
 Opcode::Bitrev | Opcode::Ctz => false,
 Opcode::Clz | Opcode::Cls => true,
@@ -970,12 +970,12 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // x += x << 32
 // x >> 56
 let ty = ty.unwrap();
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 // FIXME(#1537): zero-extend 8/16/32-bit operands only to 32 bits,
 // and fix the sequence below to work properly for this.
 let narrow_mode = NarrowValueMode::ZeroExtend64;
 let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
-let tmp = ctx.alloc_tmp(RegClass::I64, I64);
+let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();

 // If this is a 32-bit Popcnt, use Lsr32 to clear the top 32 bits of the register, then
 // the rest of the code is identical to the 64-bit version.
@@ -1236,7 +1236,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 } => (stack_slot, offset),
 _ => unreachable!(),
 };
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let offset: i32 = offset.into();
 let inst = ctx
 .abi()
@@ -1245,7 +1245,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::AtomicRmw => {
-let r_dst = get_output_reg(ctx, outputs[0]);
+let r_dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let mut r_addr = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let mut r_arg2 = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let ty_access = ty.unwrap();
@@ -1270,7 +1270,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // This is very similar to, but not identical to, the AtomicRmw case. Note
 // that the AtomicCAS sequence does its own masking, so we don't need to worry
 // about zero-extending narrow (I8/I16/I32) values here.
-let r_dst = get_output_reg(ctx, outputs[0]);
+let r_dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let mut r_addr = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let mut r_expected = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let mut r_replacement = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
@@ -1301,7 +1301,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::AtomicLoad => {
-let r_data = get_output_reg(ctx, outputs[0]);
+let r_data = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let r_addr = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let ty_access = ty.unwrap();
 assert!(is_valid_atomic_transaction_ty(ty_access));
@@ -1382,7 +1382,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 };

 // csel.cond rd, rn, rm
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
 let ty = ctx.output_ty(insn, 0);
@@ -1409,7 +1409,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 lower_icmp_or_ifcmp_to_flags(ctx, ifcmp_insn, is_signed);

 // csel.COND rd, rn, rm
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
 let ty = ctx.output_ty(insn, 0);
@@ -1428,8 +1428,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 let ty = ty.unwrap();
 if !ty.is_vector() {
 debug_assert_ne!(Opcode::Vselect, op);
-let tmp = ctx.alloc_tmp(RegClass::I64, I64);
-let rd = get_output_reg(ctx, outputs[0]);
+let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rcond = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
@@ -1458,7 +1458,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 let rcond = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 ctx.emit(Inst::gen_move(rd, rcond, ty));

 ctx.emit(Inst::VecRRR {
@@ -1479,7 +1479,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // single-def ifcmp.
 let ifcmp_insn = maybe_input_insn(ctx, inputs[0], Opcode::Ifcmp).unwrap();
 lower_icmp_or_ifcmp_to_flags(ctx, ifcmp_insn, is_signed);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 materialize_bool_result(ctx, insn, rd, cond);
 }

@@ -1488,7 +1488,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 let cond = lower_fp_condcode(condcode);
 let ffcmp_insn = maybe_input_insn(ctx, inputs[0], Opcode::Ffcmp).unwrap();
 lower_fcmp_or_ffcmp_to_flags(ctx, ffcmp_insn);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 materialize_bool_result(ctx, insn, rd, cond);
 }

@@ -1496,7 +1496,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // Null references are represented by the constant value 0; invalid references are
 // represented by the constant value -1. See `define_reftypes()` in
 // `meta/src/isa/x86/encodings.rs` to confirm.
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let ty = ctx.input_ty(insn, 0);
 let (alu_op, const_value) = match op {
@@ -1516,7 +1516,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::Copy => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let ty = ctx.input_ty(insn, 0);
 ctx.emit(Inst::gen_move(rd, rn, ty));
@@ -1526,7 +1526,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // Smaller integers/booleans are stored with high-order bits
 // undefined, so we can simply do a copy.
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let ty = ctx.input_ty(insn, 0);
 ctx.emit(Inst::gen_move(rd, rn, ty));
 }
@@ -1553,7 +1553,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // Nothing.
 } else {
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let to_bits = if to_bits == 64 {
 64
 } else {
@@ -1575,7 +1575,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // Booleans are stored as all-zeroes (0) or all-ones (-1). We AND
 // out the LSB to give a 0 / 1-valued integer result.
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let output_bits = ty_bits(ctx.output_ty(insn, 0));

 let (imm_ty, alu_op) = if output_bits > 32 {
@@ -1592,7 +1592,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::Bitcast => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let ity = ctx.input_ty(insn, 0);
 let oty = ctx.output_ty(insn, 0);
 let ity_bits = ty_bits(ity);
@@ -1644,7 +1644,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // (above the bits for the value's type) are undefined, so we
 // need not extend the return values.
 let reg = put_input_in_reg(ctx, *input, NarrowValueMode::None);
-let retval_reg = ctx.retval(i);
+let retval_reg = ctx.retval(i).only_reg().unwrap();
 let ty = ctx.input_ty(insn, i);
 ctx.emit(Inst::gen_move(retval_reg, reg, ty));
 }
@@ -1663,7 +1663,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 let condcode = ctx.data(insn).cond_code().unwrap();
 let cond = lower_condcode(condcode);
 let is_signed = condcode_is_signed(condcode);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let ty = ctx.input_ty(insn, 0);
 let bits = ty_bits(ty);
 let narrow_mode = match (bits <= 32, is_signed) {
@@ -1691,7 +1691,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 let ty = ctx.input_ty(insn, 0);
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();

 if !ty.is_vector() {
 match ty_bits(ty) {
@@ -1768,7 +1768,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::FuncAddr => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let (extname, _) = ctx.call_target(insn).unwrap();
 let extname = extname.clone();
 ctx.emit(Inst::LoadExtName {
@@ -1783,7 +1783,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::SymbolValue => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let (extname, _, offset) = ctx.symbol_value(insn).unwrap();
 let extname = extname.clone();
 ctx.emit(Inst::LoadExtName {
@@ -1824,18 +1824,18 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 assert!(inputs.len() == abi.num_args());
 for (i, input) in inputs.iter().enumerate() {
 let arg_reg = put_input_in_reg(ctx, *input, NarrowValueMode::None);
-abi.emit_copy_reg_to_arg(ctx, i, arg_reg);
+abi.emit_copy_regs_to_arg(ctx, i, ValueRegs::one(arg_reg));
 }
 abi.emit_call(ctx);
 for (i, output) in outputs.iter().enumerate() {
-let retval_reg = get_output_reg(ctx, *output);
-abi.emit_copy_retval_to_reg(ctx, i, retval_reg);
+let retval_reg = get_output_reg(ctx, *output).only_reg().unwrap();
+abi.emit_copy_retval_to_regs(ctx, i, ValueRegs::one(retval_reg));
 }
 abi.emit_stack_post_adjust(ctx);
 }

 Opcode::GetPinnedReg => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 ctx.emit(Inst::gen_move(rd, xreg(PINNED_REG), I64));
 }

@@ -1874,13 +1874,13 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

 Opcode::Vconst => {
 let value = const_param_to_u128(ctx, insn).expect("Invalid immediate bytes");
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 lower_constant_f128(ctx, rd, value);
 }

 Opcode::RawBitcast => {
 let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let ty = ctx.input_ty(insn, 0);
 ctx.emit(Inst::gen_move(rd, rm, ty));
 }
@@ -1888,7 +1888,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 Opcode::Extractlane => {
 if let InstructionData::BinaryImm8 { imm, .. } = ctx.data(insn) {
 let idx = *imm;
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let size = VectorSize::from_ty(ctx.input_ty(insn, 0));
 let ty = ty.unwrap();
@@ -1913,7 +1913,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 unreachable!();
 };
 let input_ty = ctx.input_ty(insn, 1);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let ty = ty.unwrap();
@@ -1935,7 +1935,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::Splat => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let size = VectorSize::from_ty(ty.unwrap());

 if let Some((_, insn)) = maybe_input_insn_multi(
@@ -1979,7 +1979,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 &load_inputs[..],
 load_outputs[0],
 |ctx, _rd, _elem_ty, mem| {
-let tmp = ctx.alloc_tmp(RegClass::I64, I64);
+let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
 let (addr, addr_inst) = Inst::gen_load_addr(tmp, mem);
 if let Some(addr_inst) = addr_inst {
 ctx.emit(addr_inst);
@@ -2002,7 +2002,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

 Opcode::ScalarToVector => {
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let input_ty = ctx.input_ty(insn, 0);
 if (input_ty == I32 && ty.unwrap() == I32X4)
 || (input_ty == I64 && ty.unwrap() == I64X2)
@@ -2021,9 +2021,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::VanyTrue | Opcode::VallTrue => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-let tmp = ctx.alloc_tmp(RegClass::V128, ty.unwrap());
+let src_ty = ctx.input_ty(insn, 0);
+let tmp = ctx.alloc_tmp(src_ty).only_reg().unwrap();

 // This operation is implemented by using umaxp or uminv to
 // create a scalar value, which is then compared against zero.
@@ -2070,7 +2071,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::VhighBits => {
-let dst_r = get_output_reg(ctx, outputs[0]);
+let dst_r = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let src_v = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let ty = ctx.input_ty(insn, 0);
 // All three sequences use one integer temporary and two vector temporaries. The
@@ -2080,9 +2081,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 // derivation of these sequences. Alternative sequences are discussed in
 // https://github.com/bytecodealliance/wasmtime/issues/2296, although they are not
 // used here.
-let tmp_r0 = ctx.alloc_tmp(RegClass::I64, I64);
-let tmp_v0 = ctx.alloc_tmp(RegClass::V128, I8X16);
-let tmp_v1 = ctx.alloc_tmp(RegClass::V128, I8X16);
+let tmp_r0 = ctx.alloc_tmp(I64).only_reg().unwrap();
+let tmp_v0 = ctx.alloc_tmp(I8X16).only_reg().unwrap();
+let tmp_v1 = ctx.alloc_tmp(I8X16).only_reg().unwrap();
 match ty {
 I8X16 => {
 // sshr tmp_v1.16b, src_v.16b, #7
@@ -2255,7 +2256,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

 Opcode::Shuffle => {
 let mask = const_param_to_u128(ctx, insn).expect("Invalid immediate mask bytes");
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rn2 = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 // 2 register table vector lookups require consecutive table registers;
@@ -2283,7 +2284,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::Swizzle => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);

@@ -2310,7 +2311,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 Opcode::Imax => VecALUOp::Smax,
 _ => unreachable!(),
 };
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let ty = ty.unwrap();
@@ -2324,12 +2325,12 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 }

 Opcode::WideningPairwiseDotProductS => {
-let r_y = get_output_reg(ctx, outputs[0]);
+let r_y = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let r_a = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let r_b = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let ty = ty.unwrap();
 if ty == I32X4 {
-let tmp = ctx.alloc_tmp(RegClass::V128, I8X16);
+let tmp = ctx.alloc_tmp(I8X16).only_reg().unwrap();
 // The args have type I16X8.
 // "y = i32x4.dot_i16x8_s(a, b)"
 // => smull tmp, a, b
@@ -2369,7 +2370,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 let bits = ty_bits(ty);
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 if !ty.is_vector() {
 let fpu_op = match (op, bits) {
 (Opcode::Fadd, 32) => FPUOp2::Add32,
@@ -2413,7 +2414,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 if ty == F32X4 || ty == F64X2 {
 // pmin(a,b) => bitsel(b, a, cmpgt(a, b))
 // pmax(a,b) => bitsel(b, a, cmpgt(b, a))
-let r_dst = get_output_reg(ctx, outputs[0]);
+let r_dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let r_a = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let r_b = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 // Since we're going to write the output register `r_dst` anyway, we might as
@@ -2449,7 +2450,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 let ty = ty.unwrap();
 let bits = ty_bits(ty);
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 if !ty.is_vector() {
 let fpu_op = match (op, bits) {
 (Opcode::Sqrt, 32) => FPUOp1::Sqrt32,
@@ -2498,7 +2499,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 _ => panic!("Unknown op/bits combination (scalar)"),
 };
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 ctx.emit(Inst::FpuRound { op, rd, rn });
 } else {
 let (op, size) = match (op, ty) {
@@ -2513,7 +2514,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 _ => panic!("Unknown op/ty combination (vector){:?}", ty),
 };
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 ctx.emit(Inst::VecMisc { op, rd, rn, size });
 }
 }
@@ -2528,7 +2529,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let ra = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 ctx.emit(Inst::FpuRRRR {
 fpu_op,
 rn,
@@ -2554,8 +2555,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 assert!(bits == 32 || bits == 64);
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
-let tmp = ctx.alloc_tmp(RegClass::V128, F64);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
+let tmp = ctx.alloc_tmp(F64).only_reg().unwrap();

 // Copy LHS to rd.
 ctx.emit(Inst::gen_move(rd, rn, ty));
@@ -2594,7 +2595,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 };

 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();

 // First, check the output: it's important to carry the NaN conversion before the
 // in-bounds conversion, per wasm semantics.
@@ -2611,7 +2612,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 kind: CondBrKind::Cond(lower_fp_condcode(FloatCC::Unordered)),
 });

-let tmp = ctx.alloc_tmp(RegClass::V128, I128);
+let tmp = ctx.alloc_tmp(I8X16).only_reg().unwrap();

 // Check that the input is in range, with "truncate towards zero" semantics. This means
 // we allow values that are slightly out of range:
@@ -2736,7 +2737,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 Opcode::FcvtFromUint | Opcode::FcvtFromSint => {
 let ty = ty.unwrap();
 let signed = op == Opcode::FcvtFromSint;
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();

 if ty.is_vector() {
 let op = if signed {
@@ -2782,7 +2783,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 let ty = ty.unwrap();
 let out_signed = op == Opcode::FcvtToSintSat;
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();

 if ty.is_vector() {
 let op = if out_signed {
@@ -2829,8 +2830,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 _ => unreachable!(),
 };

-let rtmp1 = ctx.alloc_tmp(RegClass::V128, in_ty);
-let rtmp2 = ctx.alloc_tmp(RegClass::V128, in_ty);
+let rtmp1 = ctx.alloc_tmp(in_ty).only_reg().unwrap();
+let rtmp2 = ctx.alloc_tmp(in_ty).only_reg().unwrap();

 if in_bits == 32 {
 lower_constant_f32(ctx, rtmp1, max as f32);
@@ -2920,7 +2921,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

 // Now handle the iadd as above, except use an AddS opcode that sets
 // flags.
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rm = put_input_in_rse_imm12(ctx, inputs[1], NarrowValueMode::None);
 let ty = ty.unwrap();
@@ -3001,7 +3002,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 Opcode::DummySargT => unreachable!(),

 Opcode::Iabs => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let ty = ty.unwrap();
 ctx.emit(Inst::VecMisc {
@@ -3012,7 +3013,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 });
 }
 Opcode::AvgRound => {
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let ty = ty.unwrap();
@@ -3031,7 +3032,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 } else {
 VecMiscNarrowOp::Sqxtun
 };
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let rn2 = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
 let ty = ty.unwrap();
@@ -3054,7 +3055,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

 Opcode::SwidenLow | Opcode::SwidenHigh | Opcode::UwidenLow | Opcode::UwidenHigh => {
 let lane_type = ty.unwrap().lane_type();
-let rd = get_output_reg(ctx, outputs[0]);
+let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
 let (t, high_half) = match (lane_type, op) {
 (I16, Opcode::SwidenLow) => (VecExtendOp::Sxtl8, false),
@@ -3313,8 +3314,8 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
 NarrowValueMode::ZeroExtend32,
 );

-let rtmp1 = ctx.alloc_tmp(RegClass::I64, I32);
-let rtmp2 = ctx.alloc_tmp(RegClass::I64, I32);
+let rtmp1 = ctx.alloc_tmp(I32).only_reg().unwrap();
+let rtmp2 = ctx.alloc_tmp(I32).only_reg().unwrap();

 // Bounds-check, leaving condition codes for JTSequence's
 // branch to default target below.
@@ -82,7 +82,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
 let reg = rreg(next_rreg);

 ret.push(ABIArg::Reg(
-reg.to_real_reg(),
+ValueRegs::one(reg.to_real_reg()),
 param.value_type,
 param.extension,
 param.purpose,
@@ -102,7 +102,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
 debug_assert!(args_or_rets == ArgsOrRets::Args);
 if next_rreg < max_reg_val {
 ret.push(ABIArg::Reg(
-rreg(next_rreg).to_real_reg(),
+ValueRegs::one(rreg(next_rreg).to_real_reg()),
 I32,
 ir::ArgumentExtension::None,
 ir::ArgumentPurpose::Normal,
@@ -185,7 +185,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
 Inst::EpiloguePlaceholder
 }

-fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallVec<[Inst; 4]> {
+fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallInstVec<Inst> {
 let mut insts = SmallVec::new();

 if let Some(imm12) = UImm12::maybe_from_i64(imm as i64) {
@@ -209,7 +209,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
 insts
 }

-fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallVec<[Inst; 2]> {
+fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Inst> {
 let mut insts = SmallVec::new();
 insts.push(Inst::Cmp {
 rn: sp_reg(),
@@ -243,7 +243,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
 Inst::gen_store(from_reg, mem, ty)
 }

-fn gen_sp_reg_adjust(amount: i32) -> SmallVec<[Inst; 2]> {
+fn gen_sp_reg_adjust(amount: i32) -> SmallInstVec<Inst> {
 let mut ret = SmallVec::new();

 if amount == 0 {
@@ -283,7 +283,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
 Inst::VirtualSPOffsetAdj { offset }
 }

-fn gen_prologue_frame_setup() -> SmallVec<[Inst; 2]> {
+fn gen_prologue_frame_setup() -> SmallInstVec<Inst> {
 let mut ret = SmallVec::new();
 let reg_list = vec![fp_reg(), lr_reg()];
 ret.push(Inst::Push { reg_list });
@@ -294,7 +294,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
 ret
 }

-fn gen_epilogue_frame_restore() -> SmallVec<[Inst; 2]> {
+fn gen_epilogue_frame_restore() -> SmallInstVec<Inst> {
 let mut ret = SmallVec::new();
 ret.push(Inst::Mov {
 rd: writable_sp_reg(),
@@ -305,7 +305,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
 ret
 }

-fn gen_probestack(_: u32) -> SmallVec<[Self::I; 2]> {
+fn gen_probestack(_: u32) -> SmallInstVec<Self::I> {
 // TODO: implement if we ever require stack probes on ARM32 (unlikely
 // unless Lucet is ported)
 smallvec![]
@@ -807,12 +807,17 @@ impl MachInst for Inst {
 Inst::mov(to_reg, from_reg)
 }

-fn gen_constant<F: FnMut(RegClass, Type) -> Writable<Reg>>(
-to_reg: Writable<Reg>,
-value: u64,
+fn gen_constant<F: FnMut(Type) -> Writable<Reg>>(
+to_regs: ValueRegs<Writable<Reg>>,
+value: u128,
 ty: Type,
 _alloc_tmp: F,
 ) -> SmallVec<[Inst; 4]> {
+let to_reg = to_regs
+.only_reg()
+.expect("multi-reg values not supported yet");
+let value = value as u64;
+
 match ty {
 B1 | I8 | B8 | I16 | B16 | I32 | B32 => {
 let v: i64 = value as i64;
@@ -839,10 +844,10 @@ impl MachInst for Inst {
 None
 }

-fn rc_for_type(ty: Type) -> CodegenResult<RegClass> {
+fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
 match ty {
-I8 | I16 | I32 | B1 | B8 | B16 | B32 => Ok(RegClass::I32),
-IFLAGS => Ok(RegClass::I32),
+I8 | I16 | I32 | B1 | B8 | B16 | B32 => Ok((&[RegClass::I32], &[I32])),
+IFLAGS => Ok((&[RegClass::I32], &[I32])),
 _ => Err(CodegenError::Unsupported(format!(
 "Unexpected SSA-value type: {}",
 ty
@@ -13,7 +13,7 @@ use crate::isa::arm32::Arm32Backend;
|
||||
|
||||
use super::lower_inst;
|
||||
|
||||
use regalloc::{Reg, RegClass, Writable};
|
||||
use regalloc::{Reg, Writable};
|
||||
|
||||
//============================================================================
|
||||
// Lowering: convert instruction outputs to result types.
|
||||
@@ -55,7 +55,7 @@ pub(crate) enum NarrowValueMode {
|
||||
|
||||
/// Lower an instruction output to a reg.
|
||||
pub(crate) fn output_to_reg<C: LowerCtx<I = Inst>>(ctx: &mut C, out: InsnOutput) -> Writable<Reg> {
|
||||
ctx.get_output(out.insn, out.output)
|
||||
ctx.get_output(out.insn, out.output).only_reg().unwrap()
|
||||
}
|
||||
|
||||
/// Lower an instruction input to a reg.
|
||||
@@ -70,21 +70,25 @@ pub(crate) fn input_to_reg<C: LowerCtx<I = Inst>>(
|
||||
let from_bits = ty.bits() as u8;
|
||||
let inputs = ctx.get_input_as_source_or_const(input.insn, input.input);
|
||||
let in_reg = if let Some(c) = inputs.constant {
|
||||
let to_reg = ctx.alloc_tmp(Inst::rc_for_type(ty).unwrap(), ty);
|
||||
for inst in Inst::gen_constant(to_reg, c, ty, |reg_class, ty| ctx.alloc_tmp(reg_class, ty))
|
||||
.into_iter()
|
||||
let to_reg = ctx.alloc_tmp(ty).only_reg().unwrap();
|
||||
for inst in Inst::gen_constant(ValueRegs::one(to_reg), c as u128, ty, |ty| {
|
||||
ctx.alloc_tmp(ty).only_reg().unwrap()
|
||||
})
|
||||
.into_iter()
|
||||
{
|
||||
ctx.emit(inst);
|
||||
}
|
||||
to_reg.to_reg()
|
||||
} else {
|
||||
ctx.put_input_in_reg(input.insn, input.input)
|
||||
ctx.put_input_in_regs(input.insn, input.input)
|
||||
.only_reg()
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
match (narrow_mode, from_bits) {
|
||||
(NarrowValueMode::None, _) => in_reg,
|
||||
(NarrowValueMode::ZeroExtend, 1) => {
|
||||
let tmp = ctx.alloc_tmp(RegClass::I32, I32);
|
||||
let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
|
||||
ctx.emit(Inst::AluRRImm8 {
|
||||
alu_op: ALUOp::And,
|
||||
rd: tmp,
|
||||
@@ -94,7 +98,7 @@ pub(crate) fn input_to_reg<C: LowerCtx<I = Inst>>(
|
||||
tmp.to_reg()
|
||||
}
|
||||
(NarrowValueMode::ZeroExtend, n) if n < 32 => {
|
||||
let tmp = ctx.alloc_tmp(RegClass::I32, I32);
|
||||
let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
|
||||
ctx.emit(Inst::Extend {
|
||||
rd: tmp,
|
||||
rm: in_reg,
|
||||
@@ -104,7 +108,7 @@ pub(crate) fn input_to_reg<C: LowerCtx<I = Inst>>(
|
||||
tmp.to_reg()
|
||||
}
|
||||
(NarrowValueMode::SignExtend, n) if n < 32 => {
|
||||
let tmp = ctx.alloc_tmp(RegClass::I32, I32);
|
||||
let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
|
||||
ctx.emit(Inst::Extend {
|
||||
rd: tmp,
|
||||
rm: in_reg,
|
||||
|
||||
@@ -10,7 +10,6 @@ use crate::CodegenResult;
|
||||
use crate::isa::arm32::abi::*;
|
||||
use crate::isa::arm32::inst::*;
|
||||
|
||||
use regalloc::RegClass;
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use super::lower::*;
|
||||
@@ -143,7 +142,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
||||
let rd = output_to_reg(ctx, outputs[0]);
|
||||
let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||
let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||
let tmp = ctx.alloc_tmp(RegClass::I32, I32);
|
||||
let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
|
||||
|
||||
// ror rd, rn, 32 - (rm & 31)
|
||||
ctx.emit(Inst::AluRRImm8 {
|
||||
@@ -171,7 +170,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
||||
match ty {
|
||||
I32 => {
|
||||
let rd_hi = output_to_reg(ctx, outputs[0]);
|
||||
let rd_lo = ctx.alloc_tmp(RegClass::I32, ty);
|
||||
let rd_lo = ctx.alloc_tmp(ty).only_reg().unwrap();
|
||||
let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||
let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||
|
||||
@@ -487,7 +486,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
||||
Opcode::FallthroughReturn | Opcode::Return => {
|
||||
for (i, input) in inputs.iter().enumerate() {
|
||||
let reg = input_to_reg(ctx, *input, NarrowValueMode::None);
|
||||
let retval_reg = ctx.retval(i);
|
||||
let retval_reg = ctx.retval(i).only_reg().unwrap();
|
||||
let ty = ctx.input_ty(insn, i);
|
||||
|
||||
ctx.emit(Inst::gen_move(retval_reg, reg, ty));
|
||||
@@ -522,12 +521,12 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
||||
assert_eq!(inputs.len(), abi.num_args());
|
||||
for (i, input) in inputs.iter().enumerate().filter(|(i, _)| *i <= 3) {
|
||||
let arg_reg = input_to_reg(ctx, *input, NarrowValueMode::None);
|
||||
abi.emit_copy_reg_to_arg(ctx, i, arg_reg);
|
||||
abi.emit_copy_regs_to_arg(ctx, i, ValueRegs::one(arg_reg));
|
||||
}
|
||||
abi.emit_call(ctx);
|
||||
for (i, output) in outputs.iter().enumerate() {
|
||||
let retval_reg = output_to_reg(ctx, *output);
|
||||
abi.emit_copy_retval_to_reg(ctx, i, retval_reg);
|
||||
abi.emit_copy_retval_to_regs(ctx, i, ValueRegs::one(retval_reg));
|
||||
}
|
||||
}
|
||||
_ => panic!("lowering {} unimplemented!", op),
|
||||
|
||||
@@ -32,7 +32,7 @@ fn try_fill_baldrdash_reg(call_conv: CallConv, param: &ir::AbiParam) -> Option<A
|
||||
&ir::ArgumentPurpose::VMContext => {
|
||||
// This is SpiderMonkey's `WasmTlsReg`.
|
||||
Some(ABIArg::Reg(
|
||||
regs::r14().to_real_reg(),
|
||||
ValueRegs::one(regs::r14().to_real_reg()),
|
||||
types::I64,
|
||||
param.extension,
|
||||
param.purpose,
|
||||
@@ -41,7 +41,7 @@ fn try_fill_baldrdash_reg(call_conv: CallConv, param: &ir::AbiParam) -> Option<A
|
||||
&ir::ArgumentPurpose::SignatureId => {
|
||||
// This is SpiderMonkey's `WasmTableCallSigReg`.
|
||||
Some(ABIArg::Reg(
|
||||
regs::r10().to_real_reg(),
|
||||
ValueRegs::one(regs::r10().to_real_reg()),
|
||||
types::I64,
|
||||
param.extension,
|
||||
param.purpose,
|
||||
@@ -168,7 +168,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
|
||||
ret.push(param);
|
||||
} else if let Some(reg) = candidate {
|
||||
ret.push(ABIArg::Reg(
|
||||
reg.to_real_reg(),
|
||||
ValueRegs::one(reg.to_real_reg()),
|
||||
param.value_type,
|
||||
param.extension,
|
||||
param.purpose,
|
||||
@@ -200,7 +200,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
|
||||
debug_assert!(args_or_rets == ArgsOrRets::Args);
|
||||
if let Some(reg) = get_intreg_for_arg_systemv(&call_conv, next_gpr) {
|
||||
ret.push(ABIArg::Reg(
|
||||
reg.to_real_reg(),
|
||||
ValueRegs::one(reg.to_real_reg()),
|
||||
types::I64,
|
||||
ir::ArgumentExtension::None,
|
||||
ir::ArgumentPurpose::Normal,
|
||||
@@ -288,7 +288,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
|
||||
Inst::epilogue_placeholder()
|
||||
}
|
||||
|
||||
fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallVec<[Self::I; 4]> {
|
||||
fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallInstVec<Self::I> {
|
||||
let mut ret = SmallVec::new();
|
||||
if from_reg != into_reg.to_reg() {
|
||||
ret.push(Inst::gen_move(into_reg, from_reg, I64));
|
||||
@@ -302,7 +302,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
|
||||
ret
|
||||
}
|
||||
|
||||
fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallVec<[Self::I; 2]> {
|
||||
fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Self::I> {
|
||||
smallvec![
|
||||
Inst::cmp_rmi_r(/* bytes = */ 8, RegMemImm::reg(regs::rsp()), limit_reg),
|
||||
Inst::TrapIf {
|
||||
@@ -343,7 +343,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
|
||||
Inst::store(ty, from_reg, mem)
|
||||
}
|
||||
|
||||
fn gen_sp_reg_adjust(amount: i32) -> SmallVec<[Self::I; 2]> {
|
||||
fn gen_sp_reg_adjust(amount: i32) -> SmallInstVec<Self::I> {
|
||||
let (alu_op, amount) = if amount >= 0 {
|
||||
(AluRmiROpcode::Add, amount)
|
||||
} else {
|
||||
@@ -366,7 +366,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
|
||||
}
|
||||
}
|
||||
|
||||
fn gen_prologue_frame_setup() -> SmallVec<[Self::I; 2]> {
|
||||
fn gen_prologue_frame_setup() -> SmallInstVec<Self::I> {
|
||||
let r_rsp = regs::rsp();
|
||||
let r_rbp = regs::rbp();
|
||||
let w_rbp = Writable::from_reg(r_rbp);
|
||||
@@ -378,7 +378,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
|
||||
insts
|
||||
}
|
||||
|
||||
fn gen_epilogue_frame_restore() -> SmallVec<[Self::I; 2]> {
|
||||
fn gen_epilogue_frame_restore() -> SmallInstVec<Self::I> {
|
||||
let mut insts = SmallVec::new();
|
||||
insts.push(Inst::mov_r_r(
|
||||
true,
|
||||
@@ -389,7 +389,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
|
||||
insts
|
||||
}
|
||||
|
||||
fn gen_probestack(frame_size: u32) -> SmallVec<[Self::I; 2]> {
|
||||
fn gen_probestack(frame_size: u32) -> SmallInstVec<Self::I> {
|
||||
let mut insts = SmallVec::new();
|
||||
insts.push(Inst::imm(
|
||||
OperandSize::Size32,
|
||||
|
||||
@@ -2506,22 +2506,28 @@ impl MachInst for Inst {
None
}

fn rc_for_type(ty: Type) -> CodegenResult<RegClass> {
fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
match ty {
types::I8
| types::I16
| types::I32
| types::I64
| types::B1
| types::B8
| types::B16
| types::B32
| types::B64
| types::R32
| types::R64 => Ok(RegClass::I64),
types::F32 | types::F64 => Ok(RegClass::V128),
_ if ty.bits() == 128 => Ok(RegClass::V128),
types::IFLAGS | types::FFLAGS => Ok(RegClass::I64),
types::I8 => Ok((&[RegClass::I64], &[types::I8])),
types::I16 => Ok((&[RegClass::I64], &[types::I16])),
types::I32 => Ok((&[RegClass::I64], &[types::I32])),
types::I64 => Ok((&[RegClass::I64], &[types::I64])),
types::B1 => Ok((&[RegClass::I64], &[types::B1])),
types::B8 => Ok((&[RegClass::I64], &[types::B8])),
types::B16 => Ok((&[RegClass::I64], &[types::B16])),
types::B32 => Ok((&[RegClass::I64], &[types::B32])),
types::B64 => Ok((&[RegClass::I64], &[types::B64])),
types::R32 => panic!("32-bit reftype pointer should never be seen on x86-64"),
types::R64 => Ok((&[RegClass::I64], &[types::R64])),
types::F32 => Ok((&[RegClass::V128], &[types::F32])),
types::F64 => Ok((&[RegClass::V128], &[types::F64])),
types::I128 => Ok((&[RegClass::I64, RegClass::I64], &[types::I64, types::I64])),
types::B128 => Ok((&[RegClass::I64, RegClass::I64], &[types::B64, types::B64])),
_ if ty.is_vector() => {
assert!(ty.bits() <= 128);
Ok((&[RegClass::V128], &[types::I8X16]))
}
types::IFLAGS | types::FFLAGS => Ok((&[RegClass::I64], &[types::I64])),
_ => Err(CodegenError::Unsupported(format!(
"Unexpected SSA-value type: {}",
ty
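
Aside: the new rc_for_type() contract returns parallel slices with one entry per machine register backing the SSA value, which is how I128 lands in two I64-class registers above. A standalone sketch of that pairing, using stand-in enums rather than Cranelift's own RegClass and Type:

// Stand-ins for RegClass/Type; the slice pairing mirrors the match arms
// above: one (class, type) entry per register the value occupies.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Rc { I64, V128 }
#[derive(Clone, Copy, Debug, PartialEq)]
enum Ty { I64, F64, I128 }

fn rc_for_type(ty: Ty) -> (&'static [Rc], &'static [Ty]) {
    match ty {
        Ty::I64 => (&[Rc::I64], &[Ty::I64]),
        Ty::F64 => (&[Rc::V128], &[Ty::F64]),
        // A 128-bit integer occupies two 64-bit GPRs.
        Ty::I128 => (&[Rc::I64, Rc::I64], &[Ty::I64, Ty::I64]),
    }
}

fn main() {
    let (rcs, tys) = rc_for_type(Ty::I128);
    assert_eq!(rcs.len(), 2);
    assert_eq!(tys, &[Ty::I64, Ty::I64]);
}
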
@@ -2533,13 +2539,18 @@ impl MachInst for Inst {
Inst::jmp_known(label)
}

fn gen_constant<F: FnMut(RegClass, Type) -> Writable<Reg>>(
to_reg: Writable<Reg>,
value: u64,
fn gen_constant<F: FnMut(Type) -> Writable<Reg>>(
to_regs: ValueRegs<Writable<Reg>>,
value: u128,
ty: Type,
mut alloc_tmp: F,
) -> SmallVec<[Self; 4]> {
// We don't support 128-bit constants.
assert!(value <= u64::MAX as u128);
let mut ret = SmallVec::new();
let to_reg = to_regs
.only_reg()
.expect("multi-reg values not supported on x64");
if ty == types::F32 {
if value == 0 {
ret.push(Inst::xmm_rm_r(
@@ -2548,8 +2559,8 @@ impl MachInst for Inst {
to_reg,
));
} else {
let tmp = alloc_tmp(RegClass::I64, types::I32);
ret.push(Inst::imm(OperandSize::Size32, value, tmp));
let tmp = alloc_tmp(types::I32);
ret.push(Inst::imm(OperandSize::Size32, value as u64, tmp));

ret.push(Inst::gpr_to_xmm(
SseOpcode::Movd,
@@ -2566,8 +2577,8 @@ impl MachInst for Inst {
to_reg,
));
} else {
let tmp = alloc_tmp(RegClass::I64, types::I64);
ret.push(Inst::imm(OperandSize::Size64, value, tmp));
let tmp = alloc_tmp(types::I64);
ret.push(Inst::imm(OperandSize::Size64, value as u64, tmp));

ret.push(Inst::gpr_to_xmm(
SseOpcode::Movq,
@@ -2599,6 +2610,7 @@ impl MachInst for Inst {
to_reg,
));
} else {
let value = value as u64;
ret.push(Inst::imm(
OperandSize::from_bytes(ty.bytes()),
value.into(),
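
Aside: gen_constant now accepts the full 128-bit value even though both ports still assert a single register. When a backend does claim multi-register support, the natural move is to split the constant into register-sized halves, one per entry of the ValueRegs bundle. A standalone sketch; the little-endian half ordering is an assumption, not something this commit fixes:

/// Split a 128-bit constant into the two 64-bit halves that would fill
/// the two RegClass::I64 registers rc_for_type() lists for I128.
fn split_u128(v: u128) -> (u64, u64) {
    (v as u64, (v >> 64) as u64)
}

fn main() {
    let (lo, hi) = split_u128(0x0123_4567_89ab_cdef_fedc_ba98_7654_3210);
    assert_eq!(lo, 0xfedc_ba98_7654_3210);
    assert_eq!(hi, 0x0123_4567_89ab_cdef);
}
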
File diff suppressed because it is too large