Multi-register value support: framework for Values wider than machine regs.
This will allow support for `I128` values everywhere, and for `I64` values on 32-bit targets (e.g., ARM32 and x86-32). It does not alter the machine backends to build such support; it only adds the framework for the MachInst backends to *reason* about a `Value` residing in more than one register.
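The central new type is `ValueRegs`, visible throughout the diff below via `ValueRegs::one(...)` and `.only_reg()`. A minimal sketch of the shape the diff implies -- a fixed-capacity container of one or two registers -- not the exact Cranelift definition:

    // Illustrative sketch only (assumed shape, not Cranelift's exact code):
    // a Value may occupy one or two machine registers.
    pub struct ValueRegs<R> {
        regs: [Option<R>; 2],
    }

    impl<R: Copy> ValueRegs<R> {
        /// Wrap a value that lives in a single register.
        pub fn one(reg: R) -> Self {
            ValueRegs { regs: [Some(reg), None] }
        }

        /// Wrap a value that is split across two registers.
        pub fn two(lo: R, hi: R) -> Self {
            ValueRegs { regs: [Some(lo), Some(hi)] }
        }

        /// Number of registers the value occupies.
        pub fn len(&self) -> usize {
            self.regs.iter().filter(|r| r.is_some()).count()
        }

        /// The sole register, or None if the value is multi-register.
        pub fn only_reg(&self) -> Option<R> {
            if self.len() == 1 {
                self.regs[0]
            } else {
                None
            }
        }
    }

With this shape, every existing single-register call site can wrap its register with `one()` and recover it with `only_reg()`; that mechanical rewrite (plus `alloc_tmp` taking only a `Type` and returning a `ValueRegs`) is most of what the diff below performs.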
@@ -77,7 +77,7 @@ fn try_fill_baldrdash_reg(call_conv: isa::CallConv, param: &ir::AbiParam) -> Opt
         &ir::ArgumentPurpose::VMContext => {
             // This is SpiderMonkey's `WasmTlsReg`.
             Some(ABIArg::Reg(
-                xreg(BALDRDASH_TLS_REG).to_real_reg(),
+                ValueRegs::one(xreg(BALDRDASH_TLS_REG).to_real_reg()),
                 ir::types::I64,
                 param.extension,
                 param.purpose,
@@ -86,7 +86,7 @@ fn try_fill_baldrdash_reg(call_conv: isa::CallConv, param: &ir::AbiParam) -> Opt
         &ir::ArgumentPurpose::SignatureId => {
             // This is SpiderMonkey's `WasmTableCallSigReg`.
             Some(ABIArg::Reg(
-                xreg(BALDRDASH_SIG_REG).to_real_reg(),
+                ValueRegs::one(xreg(BALDRDASH_SIG_REG).to_real_reg()),
                 ir::types::I64,
                 param.extension,
                 param.purpose,
@@ -220,7 +220,9 @@ impl ABIMachineSpec for AArch64MachineDeps {
                 "Invalid type for AArch64: {:?}",
                 param.value_type
             );
-            let rc = Inst::rc_for_type(param.value_type).unwrap();
+            let (rcs, _) = Inst::rc_for_type(param.value_type).unwrap();
+            assert!(rcs.len() == 1, "Multi-reg values not supported yet");
+            let rc = rcs[0];

             let next_reg = match rc {
                 RegClass::I64 => &mut next_xreg,
@@ -238,7 +240,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
                 _ => unreachable!(),
             };
             ret.push(ABIArg::Reg(
-                reg.to_real_reg(),
+                ValueRegs::one(reg.to_real_reg()),
                 param.value_type,
                 param.extension,
                 param.purpose,
@@ -271,7 +273,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
             debug_assert!(args_or_rets == ArgsOrRets::Args);
             if next_xreg < max_per_class_reg_vals && remaining_reg_vals > 0 {
                 ret.push(ABIArg::Reg(
-                    xreg(next_xreg).to_real_reg(),
+                    ValueRegs::one(xreg(next_xreg).to_real_reg()),
                     I64,
                     ir::ArgumentExtension::None,
                     ir::ArgumentPurpose::Normal,
@@ -345,7 +347,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
         Inst::Ret
     }

-    fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallVec<[Inst; 4]> {
+    fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallInstVec<Inst> {
         let imm = imm as u64;
         let mut insts = SmallVec::new();
         if let Some(imm12) = Imm12::maybe_from_u64(imm) {
@@ -370,7 +372,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
         insts
     }

-    fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallVec<[Inst; 2]> {
+    fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Inst> {
         let mut insts = SmallVec::new();
         insts.push(Inst::AluRRRExtend {
             alu_op: ALUOp::SubS64,
@@ -411,7 +413,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
         Inst::gen_store(mem, from_reg, ty, MemFlags::trusted())
     }

-    fn gen_sp_reg_adjust(amount: i32) -> SmallVec<[Inst; 2]> {
+    fn gen_sp_reg_adjust(amount: i32) -> SmallInstVec<Inst> {
         if amount == 0 {
             return SmallVec::new();
         }
@@ -455,7 +457,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
         }
     }

-    fn gen_prologue_frame_setup() -> SmallVec<[Inst; 2]> {
+    fn gen_prologue_frame_setup() -> SmallInstVec<Inst> {
         let mut insts = SmallVec::new();
         // stp fp (x29), lr (x30), [sp, #-16]!
         insts.push(Inst::StoreP64 {
@@ -481,7 +483,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
         insts
     }

-    fn gen_epilogue_frame_restore() -> SmallVec<[Inst; 2]> {
+    fn gen_epilogue_frame_restore() -> SmallInstVec<Inst> {
         let mut insts = SmallVec::new();

         // MOV (alias of ORR) interprets x31 as XZR, so use an ADD here.
@@ -508,7 +510,7 @@ impl ABIMachineSpec for AArch64MachineDeps {
         insts
     }

-    fn gen_probestack(_: u32) -> SmallVec<[Self::I; 2]> {
+    fn gen_probestack(_: u32) -> SmallInstVec<Self::I> {
         // TODO: implement if we ever require stack probes on an AArch64 host
         // (unlikely unless Lucet is ported)
         smallvec![]
@@ -3,7 +3,7 @@
 // Some variants are never constructed, but we still want them as options in the future.
 #![allow(dead_code)]

-use crate::ir::types::{F32X2, F32X4, F64X2, I16X4, I16X8, I32X2, I32X4, I64X2, I8X16, I8X8};
+use crate::ir::types::*;
 use crate::ir::Type;
 use crate::isa::aarch64::inst::*;
 use crate::machinst::{ty_bits, MachLabel};
@@ -5,9 +5,7 @@

 use crate::binemit::CodeOffset;
 use crate::ir::types::{
-    B1, B16, B16X4, B16X8, B32, B32X2, B32X4, B64, B64X2, B8, B8X16, B8X8, F32, F32X2, F32X4, F64,
-    F64X2, FFLAGS, I16, I16X4, I16X8, I32, I32X2, I32X4, I64, I64X2, I8, I8X16, I8X8, IFLAGS, R32,
-    R64,
+    B1, B128, B16, B32, B64, B8, F32, F64, FFLAGS, I128, I16, I32, I64, I8, I8X16, IFLAGS, R32, R64,
 };
 use crate::ir::{ExternalName, MemFlags, Opcode, SourceLoc, TrapCode, Type};
 use crate::isa::CallConv;
@@ -1304,7 +1302,7 @@ impl Inst {
     }

     /// Create instructions that load a 32-bit floating-point constant.
-    pub fn load_fp_constant32<F: FnMut(RegClass, Type) -> Writable<Reg>>(
+    pub fn load_fp_constant32<F: FnMut(Type) -> Writable<Reg>>(
         rd: Writable<Reg>,
         value: u32,
         mut alloc_tmp: F,
@@ -1322,7 +1320,7 @@ impl Inst {
         } else {
             // TODO: use FMOV immediate form when `value` has sufficiently few mantissa/exponent
             // bits.
-            let tmp = alloc_tmp(RegClass::I64, I32);
+            let tmp = alloc_tmp(I32);
             let mut insts = Inst::load_constant(tmp, value as u64);

             insts.push(Inst::MovToFpu {
@@ -1336,7 +1334,7 @@ impl Inst {
     }

     /// Create instructions that load a 64-bit floating-point constant.
-    pub fn load_fp_constant64<F: FnMut(RegClass, Type) -> Writable<Reg>>(
+    pub fn load_fp_constant64<F: FnMut(Type) -> Writable<Reg>>(
         rd: Writable<Reg>,
         const_data: u64,
         mut alloc_tmp: F,
@@ -1350,7 +1348,7 @@ impl Inst {
         // bits. Also, treat it as half of a 128-bit vector and consider replicated
         // patterns. Scalar MOVI might also be an option.
         } else if const_data & (u32::MAX as u64) == 0 {
-            let tmp = alloc_tmp(RegClass::I64, I64);
+            let tmp = alloc_tmp(I64);
             let mut insts = Inst::load_constant(tmp, const_data);

             insts.push(Inst::MovToFpu {
@@ -1366,7 +1364,7 @@ impl Inst {
     }

     /// Create instructions that load a 128-bit vector constant.
-    pub fn load_fp_constant128<F: FnMut(RegClass, Type) -> Writable<Reg>>(
+    pub fn load_fp_constant128<F: FnMut(Type) -> Writable<Reg>>(
         rd: Writable<Reg>,
         const_data: u128,
         alloc_tmp: F,
@@ -1416,7 +1414,7 @@ impl Inst {

     /// Create instructions that load a vector constant consisting of elements with
     /// the same value.
-    pub fn load_replicated_vector_pattern<F: FnMut(RegClass, Type) -> Writable<Reg>>(
+    pub fn load_replicated_vector_pattern<F: FnMut(Type) -> Writable<Reg>>(
         rd: Writable<Reg>,
         pattern: u64,
         size: VectorSize,
@@ -1472,7 +1470,7 @@ impl Inst {
         } else if let Some(imm) = ASIMDFPModImm::maybe_from_u64(pattern, lane_size) {
             smallvec![Inst::VecDupFPImm { rd, imm, size }]
         } else {
-            let tmp = alloc_tmp(RegClass::I64, I64);
+            let tmp = alloc_tmp(I64);
             let mut insts = SmallVec::from(&Inst::load_constant(tmp, pattern)[..]);

             insts.push(Inst::VecDup {
@@ -2862,12 +2860,16 @@ impl MachInst for Inst {
         }
     }

-    fn gen_constant<F: FnMut(RegClass, Type) -> Writable<Reg>>(
-        to_reg: Writable<Reg>,
-        value: u64,
+    fn gen_constant<F: FnMut(Type) -> Writable<Reg>>(
+        to_regs: ValueRegs<Writable<Reg>>,
+        value: u128,
         ty: Type,
         alloc_tmp: F,
     ) -> SmallVec<[Inst; 4]> {
+        let to_reg = to_regs
+            .only_reg()
+            .expect("multi-reg values not supported yet");
+        let value = value as u64;
         if ty == F64 {
             Inst::load_fp_constant64(to_reg, value, alloc_tmp)
         } else if ty == F32 {
@@ -2905,14 +2907,28 @@ impl MachInst for Inst {
         None
     }

-    fn rc_for_type(ty: Type) -> CodegenResult<RegClass> {
+    fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
         match ty {
-            I8 | I16 | I32 | I64 | B1 | B8 | B16 | B32 | B64 | R32 | R64 => Ok(RegClass::I64),
-            F32 | F64 => Ok(RegClass::V128),
-            IFLAGS | FFLAGS => Ok(RegClass::I64),
-            B8X8 | B8X16 | B16X4 | B16X8 | B32X2 | B32X4 | B64X2 => Ok(RegClass::V128),
-            F32X2 | I8X8 | I16X4 | I32X2 => Ok(RegClass::V128),
-            F32X4 | F64X2 | I8X16 | I16X8 | I32X4 | I64X2 => Ok(RegClass::V128),
+            I8 => Ok((&[RegClass::I64], &[I8])),
+            I16 => Ok((&[RegClass::I64], &[I16])),
+            I32 => Ok((&[RegClass::I64], &[I32])),
+            I64 => Ok((&[RegClass::I64], &[I64])),
+            B1 => Ok((&[RegClass::I64], &[B1])),
+            B8 => Ok((&[RegClass::I64], &[B8])),
+            B16 => Ok((&[RegClass::I64], &[B16])),
+            B32 => Ok((&[RegClass::I64], &[B32])),
+            B64 => Ok((&[RegClass::I64], &[B64])),
+            R32 => panic!("32-bit reftype pointer should never be seen on AArch64"),
+            R64 => Ok((&[RegClass::I64], &[R64])),
+            F32 => Ok((&[RegClass::V128], &[F32])),
+            F64 => Ok((&[RegClass::V128], &[F64])),
+            I128 => Ok((&[RegClass::I64, RegClass::I64], &[I64, I64])),
+            B128 => Ok((&[RegClass::I64, RegClass::I64], &[B64, B64])),
+            _ if ty.is_vector() => {
+                assert!(ty.bits() <= 128);
+                Ok((&[RegClass::V128], &[I8X16]))
+            }
+            IFLAGS | FFLAGS => Ok((&[RegClass::I64], &[I64])),
             _ => Err(CodegenError::Unsupported(format!(
                 "Unexpected SSA-value type: {}",
                 ty
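The `rc_for_type` change above is the heart of the framework: instead of a single register class, it returns parallel slices of register classes and per-register types, one entry per machine register the value occupies. A self-contained sketch of that mapping, using hypothetical stand-in enums rather than the real `regalloc`/IR types:

    // Hypothetical stand-ins for RegClass/Type, for illustration only.
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Rc { I64, V128 }
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Ty { I64, F64, I8X16, I128 }

    // Mirrors the new contract: one (class, type) pair per machine register.
    fn rc_for_type(ty: Ty) -> (&'static [Rc], &'static [Ty]) {
        match ty {
            Ty::I64 => (&[Rc::I64], &[Ty::I64]),
            Ty::F64 => (&[Rc::V128], &[Ty::F64]),
            Ty::I8X16 => (&[Rc::V128], &[Ty::I8X16]),
            // The multi-register case: an I128 is two I64 halves.
            Ty::I128 => (&[Rc::I64, Rc::I64], &[Ty::I64, Ty::I64]),
        }
    }

    fn main() {
        let (rcs, tys) = rc_for_type(Ty::I128);
        assert_eq!(rcs.len(), tys.len());
        assert_eq!(rcs, &[Rc::I64, Rc::I64][..]);
        assert_eq!(tys, &[Ty::I64, Ty::I64][..]);
    }

Callers then assert `rcs.len() == 1` wherever multi-register values are not yet handled, which is the `only_reg().unwrap()` pattern that dominates the lowering changes below.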
@@ -22,7 +22,7 @@ use super::lower_inst;

 use crate::data_value::DataValue;
 use log::{debug, trace};
-use regalloc::{Reg, RegClass, Writable};
+use regalloc::{Reg, Writable};
 use smallvec::SmallVec;

 //============================================================================
@@ -179,9 +179,9 @@ pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
         } else {
             c
         };
-        let to_reg = ctx.alloc_tmp(Inst::rc_for_type(ty).unwrap(), ty);
-        for inst in Inst::gen_constant(to_reg, masked, ty, |reg_class, ty| {
-            ctx.alloc_tmp(reg_class, ty)
+        let to_reg = ctx.alloc_tmp(ty).only_reg().unwrap();
+        for inst in Inst::gen_constant(ValueRegs::one(to_reg), masked as u128, ty, |ty| {
+            ctx.alloc_tmp(ty).only_reg().unwrap()
         })
         .into_iter()
         {
@@ -189,13 +189,15 @@ pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
         }
         to_reg.to_reg()
     } else {
-        ctx.put_input_in_reg(input.insn, input.input)
+        ctx.put_input_in_regs(input.insn, input.input)
+            .only_reg()
+            .unwrap()
     };

     match (narrow_mode, from_bits) {
         (NarrowValueMode::None, _) => in_reg,
         (NarrowValueMode::ZeroExtend32, n) if n < 32 => {
-            let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+            let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
             ctx.emit(Inst::Extend {
                 rd: tmp,
                 rn: in_reg,
@@ -206,7 +208,7 @@ pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
             tmp.to_reg()
         }
         (NarrowValueMode::SignExtend32, n) if n < 32 => {
-            let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+            let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
             ctx.emit(Inst::Extend {
                 rd: tmp,
                 rn: in_reg,
@@ -223,7 +225,7 @@ pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
                 // Constants are zero-extended to full 64-bit width on load already.
                 in_reg
             } else {
-                let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+                let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
                 ctx.emit(Inst::Extend {
                     rd: tmp,
                     rn: in_reg,
@@ -235,7 +237,7 @@ pub(crate) fn put_input_in_reg<C: LowerCtx<I = Inst>>(
             }
         }
         (NarrowValueMode::SignExtend64, n) if n < 64 => {
-            let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+            let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
             ctx.emit(Inst::Extend {
                 rd: tmp,
                 rn: in_reg,
@@ -696,7 +698,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
     /* addends64.len() == 0 */
     {
         if addends32.len() > 0 {
-            let tmp = ctx.alloc_tmp(RegClass::I64, I64);
+            let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
             let (reg1, extendop) = addends32.pop().unwrap();
             let signed = match extendop {
                 ExtendOp::SXTW => true,
@@ -718,7 +720,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
     } else
     /* addends32.len() == 0 */
     {
-        let off_reg = ctx.alloc_tmp(RegClass::I64, I64);
+        let off_reg = ctx.alloc_tmp(I64).only_reg().unwrap();
         lower_constant_u64(ctx, off_reg, offset as u64);
         offset = 0;
         AMode::reg(off_reg.to_reg())
@@ -734,7 +736,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
     }

     // Allocate the temp and shoehorn it into the AMode.
-    let addr = ctx.alloc_tmp(RegClass::I64, I64);
+    let addr = ctx.alloc_tmp(I64).only_reg().unwrap();
     let (reg, memarg) = match memarg {
         AMode::RegExtended(r1, r2, extendop) => {
             (r1, AMode::RegExtended(addr.to_reg(), r2, extendop))
@@ -782,7 +784,7 @@ pub(crate) fn lower_address<C: LowerCtx<I = Inst>>(
     // If the register is the stack reg, we must move it to another reg
     // before adding it.
     let reg = if reg == stack_reg() {
-        let tmp = ctx.alloc_tmp(RegClass::I64, I64);
+        let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
         ctx.emit(Inst::gen_move(tmp, stack_reg(), I64));
         tmp.to_reg()
     } else {
@@ -824,7 +826,7 @@ pub(crate) fn lower_constant_f32<C: LowerCtx<I = Inst>>(
     rd: Writable<Reg>,
     value: f32,
 ) {
-    let alloc_tmp = |class, ty| ctx.alloc_tmp(class, ty);
+    let alloc_tmp = |ty| ctx.alloc_tmp(ty).only_reg().unwrap();

     for inst in Inst::load_fp_constant32(rd, value.to_bits(), alloc_tmp) {
         ctx.emit(inst);
@@ -836,7 +838,7 @@ pub(crate) fn lower_constant_f64<C: LowerCtx<I = Inst>>(
     rd: Writable<Reg>,
     value: f64,
 ) {
-    let alloc_tmp = |class, ty| ctx.alloc_tmp(class, ty);
+    let alloc_tmp = |ty| ctx.alloc_tmp(ty).only_reg().unwrap();

     for inst in Inst::load_fp_constant64(rd, value.to_bits(), alloc_tmp) {
         ctx.emit(inst);
@@ -858,7 +860,7 @@ pub(crate) fn lower_constant_f128<C: LowerCtx<I = Inst>>(
             size: VectorSize::Size8x16,
         });
     } else {
-        let alloc_tmp = |class, ty| ctx.alloc_tmp(class, ty);
+        let alloc_tmp = |ty| ctx.alloc_tmp(ty).only_reg().unwrap();
         for inst in Inst::load_fp_constant128(rd, value, alloc_tmp) {
             ctx.emit(inst);
         }
@@ -885,7 +887,7 @@ pub(crate) fn lower_splat_const<C: LowerCtx<I = Inst>>(
         ),
         None => (value, size),
     };
-    let alloc_tmp = |class, ty| ctx.alloc_tmp(class, ty);
+    let alloc_tmp = |ty| ctx.alloc_tmp(ty).only_reg().unwrap();

     for inst in Inst::load_replicated_vector_pattern(rd, value, size, alloc_tmp) {
         ctx.emit(inst);
@@ -1217,7 +1219,7 @@ pub(crate) fn lower_load<C: LowerCtx<I = Inst>, F: FnMut(&mut C, Writable<Reg>,

     let off = ctx.data(ir_inst).load_store_offset().unwrap();
     let mem = lower_address(ctx, elem_ty, &inputs[..], off);
-    let rd = get_output_reg(ctx, output);
+    let rd = get_output_reg(ctx, output).only_reg().unwrap();

     f(ctx, rd, elem_ty, mem);
 }
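One consequence visible in the `put_input_in_reg` hunk above is that `gen_constant` now accepts the constant as a `u128` (the widest value a two-register pair can hold), even though it currently narrows back to `u64` after the `only_reg()` assertion. When multi-register lowering actually lands, splitting such a constant into per-register halves is plain arithmetic. A hedged sketch of that assumed future direction, not code from this commit:

    // Assumed future direction: split a 128-bit constant into the two
    // 64-bit halves that would fill an I128's register pair (low half
    // first in this sketch).
    fn split_u128(value: u128) -> (u64, u64) {
        let lo = value as u64;          // bits 0..64  -> first register
        let hi = (value >> 64) as u64;  // bits 64..128 -> second register
        (lo, hi)
    }

    fn main() {
        let (lo, hi) = split_u128(0x0123_4567_89ab_cdef_fedc_ba98_7654_3210);
        assert_eq!(lo, 0xfedc_ba98_7654_3210);
        assert_eq!(hi, 0x0123_4567_89ab_cdef);
    }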
@@ -12,7 +12,7 @@ use crate::{CodegenError, CodegenResult};
 use crate::isa::aarch64::abi::*;
 use crate::isa::aarch64::inst::*;

-use regalloc::{RegClass, Writable};
+use regalloc::Writable;

 use alloc::boxed::Box;
 use alloc::vec::Vec;
@@ -46,21 +46,21 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 ty if ty.is_bool() => value,
                 ty => unreachable!("Unknown type for const: {}", ty),
             };
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             lower_constant_u64(ctx, rd, value);
         }
         Opcode::F32const => {
             let value = f32::from_bits(ctx.get_constant(insn).unwrap() as u32);
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             lower_constant_f32(ctx, rd, value);
         }
         Opcode::F64const => {
             let value = f64::from_bits(ctx.get_constant(insn).unwrap());
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             lower_constant_f64(ctx, rd, value);
         }
         Opcode::Iadd => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let ty = ty.unwrap();
             if !ty.is_vector() {
                 let mul_insn =
@@ -116,7 +116,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             }
         }
         Opcode::Isub => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
             let ty = ty.unwrap();
             if !ty.is_vector() {
@@ -148,7 +148,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             // (SQADD / UQADD / SQSUB / UQSUB), which require scalar FP registers.
             let is_signed = op == Opcode::SaddSat || op == Opcode::SsubSat;
             let ty = ty.unwrap();
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             if !ty.is_vector() {
                 let narrow_mode = if is_signed {
                     NarrowValueMode::SignExtend64
@@ -162,8 +162,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                     Opcode::SsubSat => FPUOp2::Sqsub64,
                     _ => unreachable!(),
                 };
-                let va = ctx.alloc_tmp(RegClass::V128, I128);
-                let vb = ctx.alloc_tmp(RegClass::V128, I128);
+                let va = ctx.alloc_tmp(I8X16).only_reg().unwrap();
+                let vb = ctx.alloc_tmp(I8X16).only_reg().unwrap();
                 let ra = put_input_in_reg(ctx, inputs[0], narrow_mode);
                 let rb = put_input_in_reg(ctx, inputs[1], narrow_mode);
                 ctx.emit(Inst::MovToFpu {
@@ -211,7 +211,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::Ineg => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let ty = ty.unwrap();
             if !ty.is_vector() {
                 let rn = zero_reg();
@@ -230,7 +230,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::Imul => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
             let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
             let ty = ty.unwrap();
@@ -245,8 +245,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 });
             } else {
                 if ty == I64X2 {
-                    let tmp1 = ctx.alloc_tmp(RegClass::V128, I64X2);
-                    let tmp2 = ctx.alloc_tmp(RegClass::V128, I64X2);
+                    let tmp1 = ctx.alloc_tmp(I64X2).only_reg().unwrap();
+                    let tmp2 = ctx.alloc_tmp(I64X2).only_reg().unwrap();

                     // This I64X2 multiplication is performed with several 32-bit
                     // operations.
@@ -362,7 +362,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::Umulhi | Opcode::Smulhi => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let is_signed = op == Opcode::Smulhi;
             let input_ty = ctx.input_ty(insn, 0);
             assert!(ctx.input_ty(insn, 1) == input_ty);
@@ -443,7 +443,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 ALUOp::UDiv64
             };

-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
             let rm = put_input_in_reg(ctx, inputs[1], narrow_mode);
             // The div instruction does not trap on divide by zero or signed overflow
@@ -550,7 +550,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             assert!(from_bits <= to_bits);
             if from_bits < to_bits {
                 let signed = op == Opcode::Sextend;
-                let rd = get_output_reg(ctx, outputs[0]);
+                let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();

                 if let Some(extract_insn) = maybe_input_insn(ctx, inputs[0], Opcode::Extractlane) {
                     let idx =
@@ -596,7 +596,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::Bnot => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let ty = ty.unwrap();
             if !ty.is_vector() {
                 let rm = put_input_in_rs_immlogic(ctx, inputs[0], NarrowValueMode::None);
@@ -620,7 +620,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         | Opcode::BandNot
         | Opcode::BorNot
        | Opcode::BxorNot => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let ty = ty.unwrap();
             if !ty.is_vector() {
                 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
@@ -646,7 +646,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

                 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
                 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
-                let rd = get_output_reg(ctx, outputs[0]);
+                let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();

                 ctx.emit(Inst::VecRRR {
                     alu_op,
@@ -660,7 +660,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

         Opcode::Ishl | Opcode::Ushr | Opcode::Sshr => {
             let ty = ty.unwrap();
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             if !ty.is_vector() {
                 let size = OperandSize::from_bits(ty_bits(ty));
                 let narrow_mode = match (op, size) {
@@ -692,7 +692,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

                 let rm = if is_right_shift {
                     // Right shifts are implemented with a negative left shift.
-                    let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+                    let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
                     let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
                     let rn = zero_reg();
                     ctx.emit(Inst::AluRRR {
@@ -751,7 +751,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             let ty = ty.unwrap();
             let ty_bits_size = ty_bits(ty) as u8;

-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let rn = put_input_in_reg(
                 ctx,
                 inputs[0],
@@ -785,7 +785,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                     // ignored (because of the implicit masking done by the instruction),
                     // so this is equivalent to negating the input.
                     let alu_op = choose_32_64(ty, ALUOp::Sub32, ALUOp::Sub64);
-                    let tmp = ctx.alloc_tmp(RegClass::I64, ty);
+                    let tmp = ctx.alloc_tmp(ty).only_reg().unwrap();
                     ctx.emit(Inst::AluRRR {
                         alu_op,
                         rd: tmp,
@@ -808,7 +808,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                         // Really ty_bits_size - rn, but the upper bits of the result are
                         // ignored (because of the implicit masking done by the instruction),
                         // so this is equivalent to negating the input.
-                        let tmp = ctx.alloc_tmp(RegClass::I64, I32);
+                        let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
                         ctx.emit(Inst::AluRRR {
                             alu_op: ALUOp::Sub32,
                             rd: tmp,
@@ -821,7 +821,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 };

                 // Explicitly mask the rotation count.
-                let tmp_masked_rm = ctx.alloc_tmp(RegClass::I64, I32);
+                let tmp_masked_rm = ctx.alloc_tmp(I32).only_reg().unwrap();
                 ctx.emit(Inst::AluRRImmLogic {
                     alu_op: ALUOp::And32,
                     rd: tmp_masked_rm,
@@ -830,8 +830,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 });
                 let tmp_masked_rm = tmp_masked_rm.to_reg();

-                let tmp1 = ctx.alloc_tmp(RegClass::I64, I32);
-                let tmp2 = ctx.alloc_tmp(RegClass::I64, I32);
+                let tmp1 = ctx.alloc_tmp(I32).only_reg().unwrap();
+                let tmp2 = ctx.alloc_tmp(I32).only_reg().unwrap();
                 ctx.emit(Inst::AluRRImm12 {
                     alu_op: ALUOp::Sub32,
                     rd: tmp1,
@@ -870,7 +870,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                     }
                     immshift.imm &= ty_bits_size - 1;

-                    let tmp1 = ctx.alloc_tmp(RegClass::I64, I32);
+                    let tmp1 = ctx.alloc_tmp(I32).only_reg().unwrap();
                     ctx.emit(Inst::AluRRImmShift {
                         alu_op: ALUOp::Lsr32,
                         rd: tmp1,
@@ -900,7 +900,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::Bitrev | Opcode::Clz | Opcode::Cls | Opcode::Ctz => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let needs_zext = match op {
                 Opcode::Bitrev | Opcode::Ctz => false,
                 Opcode::Clz | Opcode::Cls => true,
@@ -970,12 +970,12 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             // x += x << 32
             // x >> 56
             let ty = ty.unwrap();
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             // FIXME(#1537): zero-extend 8/16/32-bit operands only to 32 bits,
             // and fix the sequence below to work properly for this.
             let narrow_mode = NarrowValueMode::ZeroExtend64;
             let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
-            let tmp = ctx.alloc_tmp(RegClass::I64, I64);
+            let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();

             // If this is a 32-bit Popcnt, use Lsr32 to clear the top 32 bits of the register, then
             // the rest of the code is identical to the 64-bit version.
@@ -1236,7 +1236,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 } => (stack_slot, offset),
                 _ => unreachable!(),
             };
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let offset: i32 = offset.into();
             let inst = ctx
                 .abi()
@@ -1245,7 +1245,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::AtomicRmw => {
-            let r_dst = get_output_reg(ctx, outputs[0]);
+            let r_dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let mut r_addr = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
             let mut r_arg2 = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
             let ty_access = ty.unwrap();
@@ -1270,7 +1270,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             // This is very similar to, but not identical to, the AtomicRmw case. Note
             // that the AtomicCAS sequence does its own masking, so we don't need to worry
             // about zero-extending narrow (I8/I16/I32) values here.
-            let r_dst = get_output_reg(ctx, outputs[0]);
+            let r_dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let mut r_addr = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
             let mut r_expected = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
             let mut r_replacement = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
@@ -1301,7 +1301,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::AtomicLoad => {
-            let r_data = get_output_reg(ctx, outputs[0]);
+            let r_data = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let r_addr = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
             let ty_access = ty.unwrap();
             assert!(is_valid_atomic_transaction_ty(ty_access));
@@ -1382,7 +1382,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             };

             // csel.cond rd, rn, rm
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
             let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
             let ty = ctx.output_ty(insn, 0);
@@ -1409,7 +1409,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             lower_icmp_or_ifcmp_to_flags(ctx, ifcmp_insn, is_signed);

             // csel.COND rd, rn, rm
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
             let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
             let ty = ctx.output_ty(insn, 0);
@@ -1428,8 +1428,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             let ty = ty.unwrap();
             if !ty.is_vector() {
                 debug_assert_ne!(Opcode::Vselect, op);
-                let tmp = ctx.alloc_tmp(RegClass::I64, I64);
-                let rd = get_output_reg(ctx, outputs[0]);
+                let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
+                let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
                 let rcond = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
                 let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
                 let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
@@ -1458,7 +1458,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 let rcond = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
                 let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
                 let rm = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
-                let rd = get_output_reg(ctx, outputs[0]);
+                let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
                 ctx.emit(Inst::gen_move(rd, rcond, ty));

                 ctx.emit(Inst::VecRRR {
@@ -1479,7 +1479,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             // single-def ifcmp.
             let ifcmp_insn = maybe_input_insn(ctx, inputs[0], Opcode::Ifcmp).unwrap();
             lower_icmp_or_ifcmp_to_flags(ctx, ifcmp_insn, is_signed);
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             materialize_bool_result(ctx, insn, rd, cond);
         }

@@ -1488,7 +1488,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             let cond = lower_fp_condcode(condcode);
             let ffcmp_insn = maybe_input_insn(ctx, inputs[0], Opcode::Ffcmp).unwrap();
             lower_fcmp_or_ffcmp_to_flags(ctx, ffcmp_insn);
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             materialize_bool_result(ctx, insn, rd, cond);
         }

@@ -1496,7 +1496,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             // Null references are represented by the constant value 0; invalid references are
             // represented by the constant value -1. See `define_reftypes()` in
             // `meta/src/isa/x86/encodings.rs` to confirm.
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
             let ty = ctx.input_ty(insn, 0);
             let (alu_op, const_value) = match op {
@@ -1516,7 +1516,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::Copy => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
             let ty = ctx.input_ty(insn, 0);
             ctx.emit(Inst::gen_move(rd, rn, ty));
@@ -1526,7 +1526,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             // Smaller integers/booleans are stored with high-order bits
             // undefined, so we can simply do a copy.
             let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let ty = ctx.input_ty(insn, 0);
             ctx.emit(Inst::gen_move(rd, rn, ty));
         }
@@ -1553,7 +1553,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 // Nothing.
             } else {
                 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-                let rd = get_output_reg(ctx, outputs[0]);
+                let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
                 let to_bits = if to_bits == 64 {
                     64
                 } else {
@@ -1575,7 +1575,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             // Booleans are stored as all-zeroes (0) or all-ones (-1). We AND
             // out the LSB to give a 0 / 1-valued integer result.
             let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let output_bits = ty_bits(ctx.output_ty(insn, 0));

             let (imm_ty, alu_op) = if output_bits > 32 {
@@ -1592,7 +1592,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::Bitcast => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let ity = ctx.input_ty(insn, 0);
             let oty = ctx.output_ty(insn, 0);
             let ity_bits = ty_bits(ity);
@@ -1644,7 +1644,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
            // (above the bits for the value's type) are undefined, so we
            // need not extend the return values.
            let reg = put_input_in_reg(ctx, *input, NarrowValueMode::None);
-            let retval_reg = ctx.retval(i);
+            let retval_reg = ctx.retval(i).only_reg().unwrap();
            let ty = ctx.input_ty(insn, i);
            ctx.emit(Inst::gen_move(retval_reg, reg, ty));
        }
@@ -1663,7 +1663,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             let condcode = ctx.data(insn).cond_code().unwrap();
             let cond = lower_condcode(condcode);
             let is_signed = condcode_is_signed(condcode);
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let ty = ctx.input_ty(insn, 0);
             let bits = ty_bits(ty);
             let narrow_mode = match (bits <= 32, is_signed) {
@@ -1691,7 +1691,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             let ty = ctx.input_ty(insn, 0);
             let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
             let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();

             if !ty.is_vector() {
                 match ty_bits(ty) {
@@ -1768,7 +1768,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::FuncAddr => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let (extname, _) = ctx.call_target(insn).unwrap();
             let extname = extname.clone();
             ctx.emit(Inst::LoadExtName {
@@ -1783,7 +1783,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::SymbolValue => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let (extname, _, offset) = ctx.symbol_value(insn).unwrap();
             let extname = extname.clone();
             ctx.emit(Inst::LoadExtName {
@@ -1824,18 +1824,18 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             assert!(inputs.len() == abi.num_args());
             for (i, input) in inputs.iter().enumerate() {
                 let arg_reg = put_input_in_reg(ctx, *input, NarrowValueMode::None);
-                abi.emit_copy_reg_to_arg(ctx, i, arg_reg);
+                abi.emit_copy_regs_to_arg(ctx, i, ValueRegs::one(arg_reg));
             }
             abi.emit_call(ctx);
             for (i, output) in outputs.iter().enumerate() {
-                let retval_reg = get_output_reg(ctx, *output);
-                abi.emit_copy_retval_to_reg(ctx, i, retval_reg);
+                let retval_reg = get_output_reg(ctx, *output).only_reg().unwrap();
+                abi.emit_copy_retval_to_regs(ctx, i, ValueRegs::one(retval_reg));
             }
             abi.emit_stack_post_adjust(ctx);
         }

         Opcode::GetPinnedReg => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             ctx.emit(Inst::gen_move(rd, xreg(PINNED_REG), I64));
         }

@@ -1874,13 +1874,13 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

         Opcode::Vconst => {
             let value = const_param_to_u128(ctx, insn).expect("Invalid immediate bytes");
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             lower_constant_f128(ctx, rd, value);
         }

         Opcode::RawBitcast => {
             let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let ty = ctx.input_ty(insn, 0);
             ctx.emit(Inst::gen_move(rd, rm, ty));
         }
@@ -1888,7 +1888,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         Opcode::Extractlane => {
             if let InstructionData::BinaryImm8 { imm, .. } = ctx.data(insn) {
                 let idx = *imm;
-                let rd = get_output_reg(ctx, outputs[0]);
+                let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
                 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
                 let size = VectorSize::from_ty(ctx.input_ty(insn, 0));
                 let ty = ty.unwrap();
@@ -1913,7 +1913,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 unreachable!();
             };
             let input_ty = ctx.input_ty(insn, 1);
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
             let rn = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
             let ty = ty.unwrap();
@@ -1935,7 +1935,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::Splat => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let size = VectorSize::from_ty(ty.unwrap());

             if let Some((_, insn)) = maybe_input_insn_multi(
@@ -1979,7 +1979,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                     &load_inputs[..],
                     load_outputs[0],
                     |ctx, _rd, _elem_ty, mem| {
-                        let tmp = ctx.alloc_tmp(RegClass::I64, I64);
+                        let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
                         let (addr, addr_inst) = Inst::gen_load_addr(tmp, mem);
                         if let Some(addr_inst) = addr_inst {
                             ctx.emit(addr_inst);
@@ -2002,7 +2002,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(

         Opcode::ScalarToVector => {
             let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let input_ty = ctx.input_ty(insn, 0);
             if (input_ty == I32 && ty.unwrap() == I32X4)
                 || (input_ty == I64 && ty.unwrap() == I64X2)
@@ -2021,9 +2021,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::VanyTrue | Opcode::VallTrue => {
-            let rd = get_output_reg(ctx, outputs[0]);
+            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
             let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
-            let tmp = ctx.alloc_tmp(RegClass::V128, ty.unwrap());
+            let src_ty = ctx.input_ty(insn, 0);
+            let tmp = ctx.alloc_tmp(src_ty).only_reg().unwrap();

             // This operation is implemented by using umaxp or uminv to
             // create a scalar value, which is then compared against zero.
@@ -2070,7 +2071,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }

         Opcode::VhighBits => {
-            let dst_r = get_output_reg(ctx, outputs[0]);
+            let dst_r = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let src_v = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let src_v = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let ty = ctx.input_ty(insn, 0);
|
let ty = ctx.input_ty(insn, 0);
|
||||||
// All three sequences use one integer temporary and two vector temporaries. The
|
// All three sequences use one integer temporary and two vector temporaries. The
|
||||||
@@ -2080,9 +2081,9 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
// derivation of these sequences. Alternative sequences are discussed in
|
// derivation of these sequences. Alternative sequences are discussed in
|
||||||
// https://github.com/bytecodealliance/wasmtime/issues/2296, although they are not
|
// https://github.com/bytecodealliance/wasmtime/issues/2296, although they are not
|
||||||
// used here.
|
// used here.
|
||||||
let tmp_r0 = ctx.alloc_tmp(RegClass::I64, I64);
|
let tmp_r0 = ctx.alloc_tmp(I64).only_reg().unwrap();
|
||||||
let tmp_v0 = ctx.alloc_tmp(RegClass::V128, I8X16);
|
let tmp_v0 = ctx.alloc_tmp(I8X16).only_reg().unwrap();
|
||||||
let tmp_v1 = ctx.alloc_tmp(RegClass::V128, I8X16);
|
let tmp_v1 = ctx.alloc_tmp(I8X16).only_reg().unwrap();
|
||||||
match ty {
|
match ty {
|
||||||
I8X16 => {
|
I8X16 => {
|
||||||
// sshr tmp_v1.16b, src_v.16b, #7
|
// sshr tmp_v1.16b, src_v.16b, #7
|
||||||
@@ -2255,7 +2256,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
|
|
||||||
Opcode::Shuffle => {
|
Opcode::Shuffle => {
|
||||||
let mask = const_param_to_u128(ctx, insn).expect("Invalid immediate mask bytes");
|
let mask = const_param_to_u128(ctx, insn).expect("Invalid immediate mask bytes");
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rn2 = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
let rn2 = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||||
// 2 register table vector lookups require consecutive table registers;
|
// 2 register table vector lookups require consecutive table registers;
|
||||||
@@ -2283,7 +2284,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
}
|
}
|
||||||
|
|
||||||
Opcode::Swizzle => {
|
Opcode::Swizzle => {
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
|
|
||||||
@@ -2310,7 +2311,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
Opcode::Imax => VecALUOp::Smax,
|
Opcode::Imax => VecALUOp::Smax,
|
||||||
_ => unreachable!(),
|
_ => unreachable!(),
|
||||||
};
|
};
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||||
let ty = ty.unwrap();
|
let ty = ty.unwrap();
|
||||||
@@ -2324,12 +2325,12 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
}
|
}
|
||||||
|
|
||||||
Opcode::WideningPairwiseDotProductS => {
|
Opcode::WideningPairwiseDotProductS => {
|
||||||
let r_y = get_output_reg(ctx, outputs[0]);
|
let r_y = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let r_a = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let r_a = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let r_b = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
let r_b = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||||
let ty = ty.unwrap();
|
let ty = ty.unwrap();
|
||||||
if ty == I32X4 {
|
if ty == I32X4 {
|
||||||
let tmp = ctx.alloc_tmp(RegClass::V128, I8X16);
|
let tmp = ctx.alloc_tmp(I8X16).only_reg().unwrap();
|
||||||
// The args have type I16X8.
|
// The args have type I16X8.
|
||||||
// "y = i32x4.dot_i16x8_s(a, b)"
|
// "y = i32x4.dot_i16x8_s(a, b)"
|
||||||
// => smull tmp, a, b
|
// => smull tmp, a, b
|
||||||
@@ -2369,7 +2370,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
let bits = ty_bits(ty);
|
let bits = ty_bits(ty);
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
if !ty.is_vector() {
|
if !ty.is_vector() {
|
||||||
let fpu_op = match (op, bits) {
|
let fpu_op = match (op, bits) {
|
||||||
(Opcode::Fadd, 32) => FPUOp2::Add32,
|
(Opcode::Fadd, 32) => FPUOp2::Add32,
|
||||||
@@ -2413,7 +2414,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
if ty == F32X4 || ty == F64X2 {
|
if ty == F32X4 || ty == F64X2 {
|
||||||
// pmin(a,b) => bitsel(b, a, cmpgt(a, b))
|
// pmin(a,b) => bitsel(b, a, cmpgt(a, b))
|
||||||
// pmax(a,b) => bitsel(b, a, cmpgt(b, a))
|
// pmax(a,b) => bitsel(b, a, cmpgt(b, a))
|
||||||
let r_dst = get_output_reg(ctx, outputs[0]);
|
let r_dst = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let r_a = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let r_a = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let r_b = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
let r_b = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||||
// Since we're going to write the output register `r_dst` anyway, we might as
|
// Since we're going to write the output register `r_dst` anyway, we might as
|
||||||
@@ -2449,7 +2450,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
let ty = ty.unwrap();
|
let ty = ty.unwrap();
|
||||||
let bits = ty_bits(ty);
|
let bits = ty_bits(ty);
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
if !ty.is_vector() {
|
if !ty.is_vector() {
|
||||||
let fpu_op = match (op, bits) {
|
let fpu_op = match (op, bits) {
|
||||||
(Opcode::Sqrt, 32) => FPUOp1::Sqrt32,
|
(Opcode::Sqrt, 32) => FPUOp1::Sqrt32,
|
||||||
@@ -2498,7 +2499,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
_ => panic!("Unknown op/bits combination (scalar)"),
|
_ => panic!("Unknown op/bits combination (scalar)"),
|
||||||
};
|
};
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
ctx.emit(Inst::FpuRound { op, rd, rn });
|
ctx.emit(Inst::FpuRound { op, rd, rn });
|
||||||
} else {
|
} else {
|
||||||
let (op, size) = match (op, ty) {
|
let (op, size) = match (op, ty) {
|
||||||
@@ -2513,7 +2514,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
_ => panic!("Unknown op/ty combination (vector){:?}", ty),
|
_ => panic!("Unknown op/ty combination (vector){:?}", ty),
|
||||||
};
|
};
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
ctx.emit(Inst::VecMisc { op, rd, rn, size });
|
ctx.emit(Inst::VecMisc { op, rd, rn, size });
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -2528,7 +2529,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||||
let ra = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
|
let ra = put_input_in_reg(ctx, inputs[2], NarrowValueMode::None);
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
ctx.emit(Inst::FpuRRRR {
|
ctx.emit(Inst::FpuRRRR {
|
||||||
fpu_op,
|
fpu_op,
|
||||||
rn,
|
rn,
|
||||||
@@ -2554,8 +2555,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
assert!(bits == 32 || bits == 64);
|
assert!(bits == 32 || bits == 64);
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let tmp = ctx.alloc_tmp(RegClass::V128, F64);
|
let tmp = ctx.alloc_tmp(F64).only_reg().unwrap();
|
||||||
|
|
||||||
// Copy LHS to rd.
|
// Copy LHS to rd.
|
||||||
ctx.emit(Inst::gen_move(rd, rn, ty));
|
ctx.emit(Inst::gen_move(rd, rn, ty));
|
||||||
@@ -2594,7 +2595,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
};
|
};
|
||||||
|
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
|
|
||||||
// First, check the output: it's important to carry the NaN conversion before the
|
// First, check the output: it's important to carry the NaN conversion before the
|
||||||
// in-bounds conversion, per wasm semantics.
|
// in-bounds conversion, per wasm semantics.
|
||||||
@@ -2611,7 +2612,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
kind: CondBrKind::Cond(lower_fp_condcode(FloatCC::Unordered)),
|
kind: CondBrKind::Cond(lower_fp_condcode(FloatCC::Unordered)),
|
||||||
});
|
});
|
||||||
|
|
||||||
let tmp = ctx.alloc_tmp(RegClass::V128, I128);
|
let tmp = ctx.alloc_tmp(I8X16).only_reg().unwrap();
|
||||||
|
|
||||||
// Check that the input is in range, with "truncate towards zero" semantics. This means
|
// Check that the input is in range, with "truncate towards zero" semantics. This means
|
||||||
// we allow values that are slightly out of range:
|
// we allow values that are slightly out of range:
|
||||||
@@ -2736,7 +2737,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
Opcode::FcvtFromUint | Opcode::FcvtFromSint => {
|
Opcode::FcvtFromUint | Opcode::FcvtFromSint => {
|
||||||
let ty = ty.unwrap();
|
let ty = ty.unwrap();
|
||||||
let signed = op == Opcode::FcvtFromSint;
|
let signed = op == Opcode::FcvtFromSint;
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
|
|
||||||
if ty.is_vector() {
|
if ty.is_vector() {
|
||||||
let op = if signed {
|
let op = if signed {
|
||||||
@@ -2782,7 +2783,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
let ty = ty.unwrap();
|
let ty = ty.unwrap();
|
||||||
let out_signed = op == Opcode::FcvtToSintSat;
|
let out_signed = op == Opcode::FcvtToSintSat;
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
|
|
||||||
if ty.is_vector() {
|
if ty.is_vector() {
|
||||||
let op = if out_signed {
|
let op = if out_signed {
|
||||||
@@ -2829,8 +2830,8 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
_ => unreachable!(),
|
_ => unreachable!(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let rtmp1 = ctx.alloc_tmp(RegClass::V128, in_ty);
|
let rtmp1 = ctx.alloc_tmp(in_ty).only_reg().unwrap();
|
||||||
let rtmp2 = ctx.alloc_tmp(RegClass::V128, in_ty);
|
let rtmp2 = ctx.alloc_tmp(in_ty).only_reg().unwrap();
|
||||||
|
|
||||||
if in_bits == 32 {
|
if in_bits == 32 {
|
||||||
lower_constant_f32(ctx, rtmp1, max as f32);
|
lower_constant_f32(ctx, rtmp1, max as f32);
|
||||||
@@ -2920,7 +2921,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
|
|
||||||
// Now handle the iadd as above, except use an AddS opcode that sets
|
// Now handle the iadd as above, except use an AddS opcode that sets
|
||||||
// flags.
|
// flags.
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rm = put_input_in_rse_imm12(ctx, inputs[1], NarrowValueMode::None);
|
let rm = put_input_in_rse_imm12(ctx, inputs[1], NarrowValueMode::None);
|
||||||
let ty = ty.unwrap();
|
let ty = ty.unwrap();
|
||||||
@@ -3001,7 +3002,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
Opcode::DummySargT => unreachable!(),
|
Opcode::DummySargT => unreachable!(),
|
||||||
|
|
||||||
Opcode::Iabs => {
|
Opcode::Iabs => {
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let ty = ty.unwrap();
|
let ty = ty.unwrap();
|
||||||
ctx.emit(Inst::VecMisc {
|
ctx.emit(Inst::VecMisc {
|
||||||
@@ -3012,7 +3013,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
Opcode::AvgRound => {
|
Opcode::AvgRound => {
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||||
let ty = ty.unwrap();
|
let ty = ty.unwrap();
|
||||||
@@ -3031,7 +3032,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
} else {
|
} else {
|
||||||
VecMiscNarrowOp::Sqxtun
|
VecMiscNarrowOp::Sqxtun
|
||||||
};
|
};
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let rn2 = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
let rn2 = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
|
||||||
let ty = ty.unwrap();
|
let ty = ty.unwrap();
|
||||||
@@ -3054,7 +3055,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
|
|||||||
|
|
||||||
Opcode::SwidenLow | Opcode::SwidenHigh | Opcode::UwidenLow | Opcode::UwidenHigh => {
|
Opcode::SwidenLow | Opcode::SwidenHigh | Opcode::UwidenLow | Opcode::UwidenHigh => {
|
||||||
let lane_type = ty.unwrap().lane_type();
|
let lane_type = ty.unwrap().lane_type();
|
||||||
let rd = get_output_reg(ctx, outputs[0]);
|
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
|
||||||
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
|
||||||
let (t, high_half) = match (lane_type, op) {
|
let (t, high_half) = match (lane_type, op) {
|
||||||
(I16, Opcode::SwidenLow) => (VecExtendOp::Sxtl8, false),
|
(I16, Opcode::SwidenLow) => (VecExtendOp::Sxtl8, false),
|
||||||
@@ -3313,8 +3314,8 @@ pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
|
|||||||
NarrowValueMode::ZeroExtend32,
|
NarrowValueMode::ZeroExtend32,
|
||||||
);
|
);
|
||||||
|
|
||||||
let rtmp1 = ctx.alloc_tmp(RegClass::I64, I32);
|
let rtmp1 = ctx.alloc_tmp(I32).only_reg().unwrap();
|
||||||
let rtmp2 = ctx.alloc_tmp(RegClass::I64, I32);
|
let rtmp2 = ctx.alloc_tmp(I32).only_reg().unwrap();
|
||||||
|
|
||||||
// Bounds-check, leaving condition codes for JTSequence's
|
// Bounds-check, leaving condition codes for JTSequence's
|
||||||
// branch to default target below.
|
// branch to default target below.
|
||||||
|
|||||||
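The pattern above repeats throughout the AArch64 lowering: each case that still assumes a single machine register fetches the full `ValueRegs` and immediately narrows it with `.only_reg().unwrap()`. A minimal sketch of the contract those call sites rely on (the `lower_single_reg` helper is hypothetical, not part of this commit):

// Sketch: how a lowering can branch on the register count of a value.
// `only_reg()` is assumed to return Some(reg) iff exactly one register
// backs the value; multi-register (e.g. I128) values return None.
let vregs = ctx.put_input_in_regs(insn, 0);
if let Some(reg) = vregs.only_reg() {
    // Single-register fast path, as used throughout this file.
    lower_single_reg(ctx, reg); // hypothetical helper
} else {
    unimplemented!("multi-reg values not lowered on AArch64 yet");
}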
@@ -82,7 +82,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
let reg = rreg(next_rreg);

ret.push(ABIArg::Reg(
-reg.to_real_reg(),
+ValueRegs::one(reg.to_real_reg()),
param.value_type,
param.extension,
param.purpose,
@@ -102,7 +102,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
debug_assert!(args_or_rets == ArgsOrRets::Args);
if next_rreg < max_reg_val {
ret.push(ABIArg::Reg(
-rreg(next_rreg).to_real_reg(),
+ValueRegs::one(rreg(next_rreg).to_real_reg()),
I32,
ir::ArgumentExtension::None,
ir::ArgumentPurpose::Normal,
@@ -185,7 +185,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
Inst::EpiloguePlaceholder
}

-fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallVec<[Inst; 4]> {
+fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallInstVec<Inst> {
let mut insts = SmallVec::new();

if let Some(imm12) = UImm12::maybe_from_i64(imm as i64) {
@@ -209,7 +209,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
insts
}

-fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallVec<[Inst; 2]> {
+fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Inst> {
let mut insts = SmallVec::new();
insts.push(Inst::Cmp {
rn: sp_reg(),
@@ -243,7 +243,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
Inst::gen_store(from_reg, mem, ty)
}

-fn gen_sp_reg_adjust(amount: i32) -> SmallVec<[Inst; 2]> {
+fn gen_sp_reg_adjust(amount: i32) -> SmallInstVec<Inst> {
let mut ret = SmallVec::new();

if amount == 0 {
@@ -283,7 +283,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
Inst::VirtualSPOffsetAdj { offset }
}

-fn gen_prologue_frame_setup() -> SmallVec<[Inst; 2]> {
+fn gen_prologue_frame_setup() -> SmallInstVec<Inst> {
let mut ret = SmallVec::new();
let reg_list = vec![fp_reg(), lr_reg()];
ret.push(Inst::Push { reg_list });
@@ -294,7 +294,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
ret
}

-fn gen_epilogue_frame_restore() -> SmallVec<[Inst; 2]> {
+fn gen_epilogue_frame_restore() -> SmallInstVec<Inst> {
let mut ret = SmallVec::new();
ret.push(Inst::Mov {
rd: writable_sp_reg(),
@@ -305,7 +305,7 @@ impl ABIMachineSpec for Arm32MachineDeps {
ret
}

-fn gen_probestack(_: u32) -> SmallVec<[Self::I; 2]> {
+fn gen_probestack(_: u32) -> SmallInstVec<Self::I> {
// TODO: implement if we ever require stack probes on ARM32 (unlikely
// unless Lucet is ported)
smallvec![]
@@ -807,12 +807,17 @@ impl MachInst for Inst {
Inst::mov(to_reg, from_reg)
}

-fn gen_constant<F: FnMut(RegClass, Type) -> Writable<Reg>>(
+fn gen_constant<F: FnMut(Type) -> Writable<Reg>>(
-to_reg: Writable<Reg>,
+to_regs: ValueRegs<Writable<Reg>>,
-value: u64,
+value: u128,
ty: Type,
_alloc_tmp: F,
) -> SmallVec<[Inst; 4]> {
+let to_reg = to_regs
+.only_reg()
+.expect("multi-reg values not supported yet");
+let value = value as u64;
+
match ty {
B1 | I8 | B8 | I16 | B16 | I32 | B32 => {
let v: i64 = value as i64;
@@ -839,10 +844,10 @@ impl MachInst for Inst {
None
}

-fn rc_for_type(ty: Type) -> CodegenResult<RegClass> {
+fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
match ty {
-I8 | I16 | I32 | B1 | B8 | B16 | B32 => Ok(RegClass::I32),
+I8 | I16 | I32 | B1 | B8 | B16 | B32 => Ok((&[RegClass::I32], &[I32])),
-IFLAGS => Ok(RegClass::I32),
+IFLAGS => Ok((&[RegClass::I32], &[I32])),
_ => Err(CodegenError::Unsupported(format!(
"Unexpected SSA-value type: {}",
ty
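`rc_for_type` now returns parallel slices: one register class and one in-register type per machine register backing the value. On ARM32 every supported type is still a single `I32`-class register, so callers only ever see one slot; a hedged consumer sketch (`alloc_vreg` is a stand-in name, not a real API):

// Sketch: allocating one virtual register per slot described by rc_for_type.
let (rcs, reg_tys) = Inst::rc_for_type(ty)?;
for (rc, reg_ty) in rcs.iter().zip(reg_tys.iter()) {
    // One temp per machine register backing the value.
    let vreg = alloc_vreg(*rc, *reg_ty); // stand-in for the allocator hook
}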
@@ -13,7 +13,7 @@ use crate::isa::arm32::Arm32Backend;

use super::lower_inst;

-use regalloc::{Reg, RegClass, Writable};
+use regalloc::{Reg, Writable};

//============================================================================
// Lowering: convert instruction outputs to result types.
@@ -55,7 +55,7 @@ pub(crate) enum NarrowValueMode {

/// Lower an instruction output to a reg.
pub(crate) fn output_to_reg<C: LowerCtx<I = Inst>>(ctx: &mut C, out: InsnOutput) -> Writable<Reg> {
-ctx.get_output(out.insn, out.output)
+ctx.get_output(out.insn, out.output).only_reg().unwrap()
}

/// Lower an instruction input to a reg.
@@ -70,21 +70,25 @@ pub(crate) fn input_to_reg<C: LowerCtx<I = Inst>>(
let from_bits = ty.bits() as u8;
let inputs = ctx.get_input_as_source_or_const(input.insn, input.input);
let in_reg = if let Some(c) = inputs.constant {
-let to_reg = ctx.alloc_tmp(Inst::rc_for_type(ty).unwrap(), ty);
+let to_reg = ctx.alloc_tmp(ty).only_reg().unwrap();
-for inst in Inst::gen_constant(to_reg, c, ty, |reg_class, ty| ctx.alloc_tmp(reg_class, ty))
+for inst in Inst::gen_constant(ValueRegs::one(to_reg), c as u128, ty, |ty| {
+ctx.alloc_tmp(ty).only_reg().unwrap()
+})
.into_iter()
{
ctx.emit(inst);
}
to_reg.to_reg()
} else {
-ctx.put_input_in_reg(input.insn, input.input)
+ctx.put_input_in_regs(input.insn, input.input)
+.only_reg()
+.unwrap()
};

match (narrow_mode, from_bits) {
(NarrowValueMode::None, _) => in_reg,
(NarrowValueMode::ZeroExtend, 1) => {
-let tmp = ctx.alloc_tmp(RegClass::I32, I32);
+let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
ctx.emit(Inst::AluRRImm8 {
alu_op: ALUOp::And,
rd: tmp,
@@ -94,7 +98,7 @@ pub(crate) fn input_to_reg<C: LowerCtx<I = Inst>>(
tmp.to_reg()
}
(NarrowValueMode::ZeroExtend, n) if n < 32 => {
-let tmp = ctx.alloc_tmp(RegClass::I32, I32);
+let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
ctx.emit(Inst::Extend {
rd: tmp,
rm: in_reg,
@@ -104,7 +108,7 @@ pub(crate) fn input_to_reg<C: LowerCtx<I = Inst>>(
tmp.to_reg()
}
(NarrowValueMode::SignExtend, n) if n < 32 => {
-let tmp = ctx.alloc_tmp(RegClass::I32, I32);
+let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
ctx.emit(Inst::Extend {
rd: tmp,
rm: in_reg,
@@ -10,7 +10,6 @@ use crate::CodegenResult;
use crate::isa::arm32::abi::*;
use crate::isa::arm32::inst::*;

-use regalloc::RegClass;
use smallvec::SmallVec;

use super::lower::*;
@@ -143,7 +142,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let rd = output_to_reg(ctx, outputs[0]);
let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
-let tmp = ctx.alloc_tmp(RegClass::I32, I32);
+let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();

// ror rd, rn, 32 - (rm & 31)
ctx.emit(Inst::AluRRImm8 {
@@ -171,7 +170,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
match ty {
I32 => {
let rd_hi = output_to_reg(ctx, outputs[0]);
-let rd_lo = ctx.alloc_tmp(RegClass::I32, ty);
+let rd_lo = ctx.alloc_tmp(ty).only_reg().unwrap();
let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);

@@ -487,7 +486,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::FallthroughReturn | Opcode::Return => {
for (i, input) in inputs.iter().enumerate() {
let reg = input_to_reg(ctx, *input, NarrowValueMode::None);
-let retval_reg = ctx.retval(i);
+let retval_reg = ctx.retval(i).only_reg().unwrap();
let ty = ctx.input_ty(insn, i);

ctx.emit(Inst::gen_move(retval_reg, reg, ty));
@@ -522,12 +521,12 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
assert_eq!(inputs.len(), abi.num_args());
for (i, input) in inputs.iter().enumerate().filter(|(i, _)| *i <= 3) {
let arg_reg = input_to_reg(ctx, *input, NarrowValueMode::None);
-abi.emit_copy_reg_to_arg(ctx, i, arg_reg);
+abi.emit_copy_regs_to_arg(ctx, i, ValueRegs::one(arg_reg));
}
abi.emit_call(ctx);
for (i, output) in outputs.iter().enumerate() {
let retval_reg = output_to_reg(ctx, *output);
-abi.emit_copy_retval_to_reg(ctx, i, retval_reg);
+abi.emit_copy_retval_to_regs(ctx, i, ValueRegs::one(retval_reg));
}
}
_ => panic!("lowering {} unimplemented!", op),
@@ -32,7 +32,7 @@ fn try_fill_baldrdash_reg(call_conv: CallConv, param: &ir::AbiParam) -> Option<A
&ir::ArgumentPurpose::VMContext => {
// This is SpiderMonkey's `WasmTlsReg`.
Some(ABIArg::Reg(
-regs::r14().to_real_reg(),
+ValueRegs::one(regs::r14().to_real_reg()),
types::I64,
param.extension,
param.purpose,
@@ -41,7 +41,7 @@ fn try_fill_baldrdash_reg(call_conv: CallConv, param: &ir::AbiParam) -> Option<A
&ir::ArgumentPurpose::SignatureId => {
// This is SpiderMonkey's `WasmTableCallSigReg`.
Some(ABIArg::Reg(
-regs::r10().to_real_reg(),
+ValueRegs::one(regs::r10().to_real_reg()),
types::I64,
param.extension,
param.purpose,
@@ -168,7 +168,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
ret.push(param);
} else if let Some(reg) = candidate {
ret.push(ABIArg::Reg(
-reg.to_real_reg(),
+ValueRegs::one(reg.to_real_reg()),
param.value_type,
param.extension,
param.purpose,
@@ -200,7 +200,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
debug_assert!(args_or_rets == ArgsOrRets::Args);
if let Some(reg) = get_intreg_for_arg_systemv(&call_conv, next_gpr) {
ret.push(ABIArg::Reg(
-reg.to_real_reg(),
+ValueRegs::one(reg.to_real_reg()),
types::I64,
ir::ArgumentExtension::None,
ir::ArgumentPurpose::Normal,
@@ -288,7 +288,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
Inst::epilogue_placeholder()
}

-fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallVec<[Self::I; 4]> {
+fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallInstVec<Self::I> {
let mut ret = SmallVec::new();
if from_reg != into_reg.to_reg() {
ret.push(Inst::gen_move(into_reg, from_reg, I64));
@@ -302,7 +302,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
ret
}

-fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallVec<[Self::I; 2]> {
+fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Self::I> {
smallvec![
Inst::cmp_rmi_r(/* bytes = */ 8, RegMemImm::reg(regs::rsp()), limit_reg),
Inst::TrapIf {
@@ -343,7 +343,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
Inst::store(ty, from_reg, mem)
}

-fn gen_sp_reg_adjust(amount: i32) -> SmallVec<[Self::I; 2]> {
+fn gen_sp_reg_adjust(amount: i32) -> SmallInstVec<Self::I> {
let (alu_op, amount) = if amount >= 0 {
(AluRmiROpcode::Add, amount)
} else {
@@ -366,7 +366,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
}
}

-fn gen_prologue_frame_setup() -> SmallVec<[Self::I; 2]> {
+fn gen_prologue_frame_setup() -> SmallInstVec<Self::I> {
let r_rsp = regs::rsp();
let r_rbp = regs::rbp();
let w_rbp = Writable::from_reg(r_rbp);
@@ -378,7 +378,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
insts
}

-fn gen_epilogue_frame_restore() -> SmallVec<[Self::I; 2]> {
+fn gen_epilogue_frame_restore() -> SmallInstVec<Self::I> {
let mut insts = SmallVec::new();
insts.push(Inst::mov_r_r(
true,
@@ -389,7 +389,7 @@ impl ABIMachineSpec for X64ABIMachineSpec {
insts
}

-fn gen_probestack(frame_size: u32) -> SmallVec<[Self::I; 2]> {
+fn gen_probestack(frame_size: u32) -> SmallInstVec<Self::I> {
let mut insts = SmallVec::new();
insts.push(Inst::imm(
OperandSize::Size32,
@@ -2506,22 +2506,28 @@ impl MachInst for Inst {
None
}

-fn rc_for_type(ty: Type) -> CodegenResult<RegClass> {
+fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
match ty {
-types::I8
-| types::I16
-| types::I32
-| types::I64
-| types::B1
-| types::B8
-| types::B16
-| types::B32
-| types::B64
-| types::R32
-| types::R64 => Ok(RegClass::I64),
-types::F32 | types::F64 => Ok(RegClass::V128),
-_ if ty.bits() == 128 => Ok(RegClass::V128),
-types::IFLAGS | types::FFLAGS => Ok(RegClass::I64),
+types::I8 => Ok((&[RegClass::I64], &[types::I8])),
+types::I16 => Ok((&[RegClass::I64], &[types::I16])),
+types::I32 => Ok((&[RegClass::I64], &[types::I32])),
+types::I64 => Ok((&[RegClass::I64], &[types::I64])),
+types::B1 => Ok((&[RegClass::I64], &[types::B1])),
+types::B8 => Ok((&[RegClass::I64], &[types::B8])),
+types::B16 => Ok((&[RegClass::I64], &[types::B16])),
+types::B32 => Ok((&[RegClass::I64], &[types::B32])),
+types::B64 => Ok((&[RegClass::I64], &[types::B64])),
+types::R32 => panic!("32-bit reftype pointer should never be seen on x86-64"),
+types::R64 => Ok((&[RegClass::I64], &[types::R64])),
+types::F32 => Ok((&[RegClass::V128], &[types::F32])),
+types::F64 => Ok((&[RegClass::V128], &[types::F64])),
+types::I128 => Ok((&[RegClass::I64, RegClass::I64], &[types::I64, types::I64])),
+types::B128 => Ok((&[RegClass::I64, RegClass::I64], &[types::B64, types::B64])),
+_ if ty.is_vector() => {
+assert!(ty.bits() <= 128);
+Ok((&[RegClass::V128], &[types::I8X16]))
+}
+types::IFLAGS | types::FFLAGS => Ok((&[RegClass::I64], &[types::I64])),
_ => Err(CodegenError::Unsupported(format!(
"Unexpected SSA-value type: {}",
ty
@@ -2533,13 +2539,18 @@ impl MachInst for Inst {
Inst::jmp_known(label)
}

-fn gen_constant<F: FnMut(RegClass, Type) -> Writable<Reg>>(
+fn gen_constant<F: FnMut(Type) -> Writable<Reg>>(
-to_reg: Writable<Reg>,
+to_regs: ValueRegs<Writable<Reg>>,
-value: u64,
+value: u128,
ty: Type,
mut alloc_tmp: F,
) -> SmallVec<[Self; 4]> {
+// We don't support 128-bit constants.
+assert!(value <= u64::MAX as u128);
let mut ret = SmallVec::new();
+let to_reg = to_regs
+.only_reg()
+.expect("multi-reg values not supported on x64");
if ty == types::F32 {
if value == 0 {
ret.push(Inst::xmm_rm_r(
@@ -2548,8 +2559,8 @@ impl MachInst for Inst {
to_reg,
));
} else {
-let tmp = alloc_tmp(RegClass::I64, types::I32);
+let tmp = alloc_tmp(types::I32);
-ret.push(Inst::imm(OperandSize::Size32, value, tmp));
+ret.push(Inst::imm(OperandSize::Size32, value as u64, tmp));

ret.push(Inst::gpr_to_xmm(
SseOpcode::Movd,
@@ -2566,8 +2577,8 @@ impl MachInst for Inst {
to_reg,
));
} else {
-let tmp = alloc_tmp(RegClass::I64, types::I64);
+let tmp = alloc_tmp(types::I64);
-ret.push(Inst::imm(OperandSize::Size64, value, tmp));
+ret.push(Inst::imm(OperandSize::Size64, value as u64, tmp));

ret.push(Inst::gpr_to_xmm(
SseOpcode::Movq,
@@ -2599,6 +2610,7 @@ impl MachInst for Inst {
to_reg,
));
} else {
+let value = value as u64;
ret.push(Inst::imm(
OperandSize::from_bytes(ty.bytes()),
value.into(),
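The x64 table above is the first to exercise the multi-register rows: `I128`/`B128` expand to two `I64`-class registers. What a caller observes, following directly from the match arms in this hunk:

// Follows from the I128 arm above: two I64-class registers,
// one per 64-bit half of the 128-bit value.
let (rcs, tys) = Inst::rc_for_type(types::I128).unwrap();
assert_eq!(rcs, &[RegClass::I64, RegClass::I64]); // lo and hi halves
assert_eq!(tys, &[types::I64, types::I64]);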
File diff suppressed because it is too large
@@ -5,8 +5,12 @@ use crate::ir::StackSlot;
use crate::isa::CallConv;
use crate::machinst::*;
use crate::settings;

use regalloc::{Reg, Set, SpillSlot, Writable};
+use smallvec::SmallVec;

+/// A small vector of instructions (with some reasonable size); appropriate for
+/// a small fixed sequence implementing one operation.
+pub type SmallInstVec<I> = SmallVec<[I; 4]>;
+
/// Trait implemented by an object that tracks ABI-related state (e.g., stack
/// layout) and can generate code while emitting the *body* of a function.
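`SmallInstVec` is the common currency for these short sequences: up to four instructions live inline, and longer sequences spill to the heap transparently (standard `SmallVec` behavior). A hedged usage sketch, mirroring the `smallvec!` calls later in this diff (`inst_a`/`inst_b` are placeholders):

// Sketch: building a fixed instruction sequence without heap
// allocation in the common (<= 4 instruction) case.
let mut seq: SmallInstVec<Inst> = smallvec![];
seq.push(inst_a); // placeholder instruction
seq.push(inst_b); // placeholder instruction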
@@ -14,9 +18,9 @@ pub trait ABICallee {
/// The instruction type for the ISA associated with this ABI.
type I: VCodeInst;

-/// Does the ABI-body code need a temp reg? One will be provided to `init()`
-/// as the `maybe_tmp` arg if so.
-fn temp_needed(&self) -> bool;
+/// Does the ABI-body code need a temp reg (and if so, of what type)? One
+/// will be provided to `init()` as the `maybe_tmp` arg if so.
+fn temp_needed(&self) -> Option<Type>;

/// Initialize. This is called after the ABICallee is constructed because it
/// may be provided with a temp vreg, which can only be allocated once the
@@ -52,7 +56,11 @@ pub trait ABICallee {

/// Generate an instruction which copies an argument to a destination
/// register.
-fn gen_copy_arg_to_reg(&self, idx: usize, into_reg: Writable<Reg>) -> Self::I;
+fn gen_copy_arg_to_regs(
+&self,
+idx: usize,
+into_reg: ValueRegs<Writable<Reg>>,
+) -> SmallInstVec<Self::I>;

/// Is the given argument needed in the body (as opposed to, e.g., serving
/// only as a special ABI-specific placeholder)? This controls whether
@@ -67,7 +75,11 @@ pub trait ABICallee {
fn gen_retval_area_setup(&self) -> Option<Self::I>;

/// Generate an instruction which copies a source register to a return value slot.
-fn gen_copy_reg_to_retval(&self, idx: usize, from_reg: Writable<Reg>) -> Vec<Self::I>;
+fn gen_copy_regs_to_retval(
+&self,
+idx: usize,
+from_reg: ValueRegs<Writable<Reg>>,
+) -> SmallInstVec<Self::I>;

/// Generate a return instruction.
fn gen_ret(&self) -> Self::I;
@@ -99,17 +111,33 @@ pub trait ABICallee {
slot: StackSlot,
offset: u32,
ty: Type,
-into_reg: Writable<Reg>,
+into_reg: ValueRegs<Writable<Reg>>,
-) -> Self::I;
+) -> SmallInstVec<Self::I>;

/// Store to a stackslot.
-fn store_stackslot(&self, slot: StackSlot, offset: u32, ty: Type, from_reg: Reg) -> Self::I;
+fn store_stackslot(
+&self,
+slot: StackSlot,
+offset: u32,
+ty: Type,
+from_reg: ValueRegs<Reg>,
+) -> SmallInstVec<Self::I>;

/// Load from a spillslot.
-fn load_spillslot(&self, slot: SpillSlot, ty: Type, into_reg: Writable<Reg>) -> Self::I;
+fn load_spillslot(
+&self,
+slot: SpillSlot,
+ty: Type,
+into_reg: ValueRegs<Writable<Reg>>,
+) -> SmallInstVec<Self::I>;

/// Store to a spillslot.
-fn store_spillslot(&self, slot: SpillSlot, ty: Type, from_reg: Reg) -> Self::I;
+fn store_spillslot(
+&self,
+slot: SpillSlot,
+ty: Type,
+from_reg: ValueRegs<Reg>,
+) -> SmallInstVec<Self::I>;

/// Generate a stack map, given a list of spillslots and the emission state
/// at a given program point (prior to emission fo the safepointing
@@ -125,13 +153,13 @@ pub trait ABICallee {
/// `store_retval`, and spillslot accesses.) `self` is mutable so that we
/// can store information in it which will be useful when creating the
/// epilogue.
-fn gen_prologue(&mut self) -> Vec<Self::I>;
+fn gen_prologue(&mut self) -> SmallInstVec<Self::I>;

/// Generate an epilogue, post-regalloc. Note that this must generate the
/// actual return instruction (rather than emitting this in the lowering
/// logic), because the epilogue code comes before the return and the two are
/// likely closely related.
-fn gen_epilogue(&self) -> Vec<Self::I>;
+fn gen_epilogue(&self) -> SmallInstVec<Self::I>;

/// Returns the full frame size for the given function, after prologue
/// emission has run. This comprises the spill slots and stack-storage slots
@@ -188,19 +216,19 @@ pub trait ABICaller {
fn num_args(&self) -> usize;

/// Emit a copy of an argument value from a source register, prior to the call.
-fn emit_copy_reg_to_arg<C: LowerCtx<I = Self::I>>(
+fn emit_copy_regs_to_arg<C: LowerCtx<I = Self::I>>(
&self,
ctx: &mut C,
idx: usize,
-from_reg: Reg,
+from_reg: ValueRegs<Reg>,
);

/// Emit a copy a return value into a destination register, after the call returns.
-fn emit_copy_retval_to_reg<C: LowerCtx<I = Self::I>>(
+fn emit_copy_retval_to_regs<C: LowerCtx<I = Self::I>>(
&self,
ctx: &mut C,
idx: usize,
-into_reg: Writable<Reg>,
+into_reg: ValueRegs<Writable<Reg>>,
);

/// Emit code to pre-adjust the stack, prior to argument copies and call.
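These renamed hooks give call lowering one uniform, multi-register shape; backends that handle only single-register values wrap them at the call site, exactly as in the AArch64 and ARM32 lowering changes earlier in this commit:

// The call-site pattern used by the lowering changes above:
// single registers are wrapped in ValueRegs::one(..) for now.
let arg_reg = put_input_in_reg(ctx, *input, NarrowValueMode::None);
abi.emit_copy_regs_to_arg(ctx, i, ValueRegs::one(arg_reg));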
@@ -119,6 +119,7 @@ use crate::{ir, isa};
use alloc::vec::Vec;
use log::{debug, trace};
use regalloc::{RealReg, Reg, RegClass, Set, SpillSlot, Writable};
+use smallvec::{smallvec, SmallVec};
use std::convert::TryFrom;
use std::marker::PhantomData;
use std::mem;
@@ -126,9 +127,9 @@ use std::mem;
/// A location for an argument or return value.
#[derive(Clone, Copy, Debug)]
pub enum ABIArg {
-/// In a real register.
+/// In a real register (or set of registers).
Reg(
-RealReg,
+ValueRegs<RealReg>,
ir::Type,
ir::ArgumentExtension,
ir::ArgumentPurpose,
@@ -183,6 +184,17 @@ pub enum StackAMode {
SPOffset(i64, ir::Type),
}

+impl StackAMode {
+/// Offset by an addend.
+pub fn offset(self, addend: i64) -> Self {
+match self {
+StackAMode::FPOffset(off, ty) => StackAMode::FPOffset(off + addend, ty),
+StackAMode::NominalSPOffset(off, ty) => StackAMode::NominalSPOffset(off + addend, ty),
+StackAMode::SPOffset(off, ty) => StackAMode::SPOffset(off + addend, ty),
+}
+}
+}
+
/// Trait implemented by machine-specific backend to provide information about
/// register assignments and to allow generating the specific instructions for
/// stack loads/saves, prologues/epilogues, etc.
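The new `StackAMode::offset` helper re-anchors a stack address by a constant addend without changing its base; the intended use (an assumption here, not stated in this hunk) is stepping between the word-sized parts of a multi-register value:

// Example, following the match arms above: move from the low half of a
// stored pair to the high half, 8 bytes further from the frame pointer.
let lo = StackAMode::FPOffset(16, ir::types::I64);
let hi = lo.offset(8); // == StackAMode::FPOffset(24, ir::types::I64)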
@@ -270,12 +282,12 @@ pub trait ABIMachineSpec {
///
/// - The add-imm sequence must work correctly when `from_reg` and/or
/// `into_reg` are the register returned by `get_stacklimit_reg()`.
-fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallVec<[Self::I; 4]>;
+fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallInstVec<Self::I>;

/// Generate a sequence that traps with a `TrapCode::StackOverflow` code if
/// the stack pointer is less than the given limit register (assuming the
/// stack grows downward).
-fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallVec<[Self::I; 2]>;
+fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Self::I>;

/// Generate an instruction to compute an address of a stack slot (FP- or
/// SP-based offset).
@@ -301,7 +313,7 @@ pub trait ABIMachineSpec {
fn gen_store_base_offset(base: Reg, offset: i32, from_reg: Reg, ty: Type) -> Self::I;

/// Adjust the stack pointer up or down.
-fn gen_sp_reg_adjust(amount: i32) -> SmallVec<[Self::I; 2]>;
+fn gen_sp_reg_adjust(amount: i32) -> SmallInstVec<Self::I>;

/// Generate a meta-instruction that adjusts the nominal SP offset.
fn gen_nominal_sp_adj(amount: i32) -> Self::I;
@@ -309,13 +321,13 @@ pub trait ABIMachineSpec {
/// Generate the usual frame-setup sequence for this architecture: e.g.,
/// `push rbp / mov rbp, rsp` on x86-64, or `stp fp, lr, [sp, #-16]!` on
/// AArch64.
-fn gen_prologue_frame_setup() -> SmallVec<[Self::I; 2]>;
+fn gen_prologue_frame_setup() -> SmallInstVec<Self::I>;

/// Generate the usual frame-restore sequence for this architecture.
-fn gen_epilogue_frame_restore() -> SmallVec<[Self::I; 2]>;
+fn gen_epilogue_frame_restore() -> SmallInstVec<Self::I>;

/// Generate a probestack call.
-fn gen_probestack(_frame_size: u32) -> SmallVec<[Self::I; 2]>;
+fn gen_probestack(_frame_size: u32) -> SmallInstVec<Self::I>;

/// Generate a clobber-save sequence. This takes the list of *all* registers
/// written/modified by the function body. The implementation here is
@@ -483,7 +495,7 @@ pub struct ABICalleeImpl<M: ABIMachineSpec> {
/// need to be extremely careful with each instruction. The instructions are
/// manually register-allocated and carefully only use caller-saved
/// registers and keep nothing live after this sequence of instructions.
-stack_limit: Option<(Reg, Vec<M::I>)>,
+stack_limit: Option<(Reg, SmallInstVec<M::I>)>,
/// Are we to invoke the probestack function in the prologue? If so,
/// what is the minimum size at which we must invoke it?
probestack_min_frame: Option<u32>,
@@ -498,7 +510,7 @@ fn get_special_purpose_param_register(
) -> Option<Reg> {
let idx = f.signature.special_param_index(purpose)?;
match abi.args[idx] {
-ABIArg::Reg(reg, ..) => Some(reg.to_reg()),
+ABIArg::Reg(regs, ..) => Some(regs.only_reg().unwrap().to_reg()),
ABIArg::Stack(..) => None,
}
}
@@ -539,7 +551,7 @@ impl<M: ABIMachineSpec> ABICalleeImpl<M> {
// from the arguments.
let stack_limit =
get_special_purpose_param_register(f, &sig, ir::ArgumentPurpose::StackLimit)
-.map(|reg| (reg, Vec::new()))
+.map(|reg| (reg, smallvec![]))
.or_else(|| f.stack_limit.map(|gv| gen_stack_limit::<M>(f, &sig, gv)));

// Determine whether a probestack call is required for large enough
@@ -596,7 +608,12 @@ impl<M: ABIMachineSpec> ABICalleeImpl<M> {
/// No values can be live after the prologue, but in this case that's ok
/// because we just need to perform a stack check before progressing with
/// the rest of the function.
-fn insert_stack_check(&self, stack_limit: Reg, stack_size: u32, insts: &mut Vec<M::I>) {
+fn insert_stack_check(
+&self,
+stack_limit: Reg,
+stack_size: u32,
+insts: &mut SmallInstVec<M::I>,
+) {
// With no explicit stack allocated we can just emit the simple check of
// the stack registers against the stack limit register, and trap if
// it's out of bounds.
@@ -649,8 +666,8 @@ fn gen_stack_limit<M: ABIMachineSpec>(
f: &ir::Function,
abi: &ABISig,
gv: ir::GlobalValue,
-) -> (Reg, Vec<M::I>) {
+) -> (Reg, SmallInstVec<M::I>) {
-let mut insts = Vec::new();
+let mut insts = smallvec![];
let reg = generate_gv::<M>(f, abi, gv, &mut insts);
return (reg, insts);
}
@@ -659,7 +676,7 @@ fn generate_gv<M: ABIMachineSpec>(
f: &ir::Function,
abi: &ABISig,
gv: ir::GlobalValue,
-insts: &mut Vec<M::I>,
+insts: &mut SmallInstVec<M::I>,
) -> Reg {
match f.global_values[gv] {
|
match f.global_values[gv] {
|
||||||
// Return the direct register the vmcontext is in
|
// Return the direct register the vmcontext is in
|
||||||
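Throughout these hunks, `Vec<M::I>` and ad-hoc `SmallVec<[Self::I; N]>` return types are unified behind a single alias. For reference, a minimal sketch of the alias these signatures assume; the exact inline capacity is an assumption, not quoted from the commit:

    // Sketch only: short instruction sequences stay inline on the stack,
    // and longer ones spill to the heap transparently.
    pub type SmallInstVec<I> = smallvec::SmallVec<[I; 4]>;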
@@ -709,11 +726,76 @@ fn ty_from_ty_hint_or_reg_class<M: ABIMachineSpec>(r: Reg, ty: Option<Type>) ->
     }
 }

+fn gen_move_multi<M: ABIMachineSpec>(
+    dst: ValueRegs<Writable<Reg>>,
+    src: ValueRegs<Reg>,
+    ty: Type,
+) -> SmallInstVec<M::I> {
+    let mut ret = smallvec![];
+    let (_, tys) = M::I::rc_for_type(ty).unwrap();
+    for ((&dst, &src), &ty) in dst.regs().iter().zip(src.regs().iter()).zip(tys.iter()) {
+        ret.push(M::gen_move(dst, src, ty));
+    }
+    ret
+}
+
+fn gen_load_stack_multi<M: ABIMachineSpec>(
+    from: StackAMode,
+    dst: ValueRegs<Writable<Reg>>,
+    ty: Type,
+) -> SmallInstVec<M::I> {
+    let mut ret = smallvec![];
+    let (_, tys) = M::I::rc_for_type(ty).unwrap();
+    let mut offset = 0;
+    // N.B.: registers are given in the `ValueRegs` in target endian order.
+    for (&dst, &ty) in dst.regs().iter().zip(tys.iter()) {
+        ret.push(M::gen_load_stack(from.offset(offset), dst, ty));
+        offset += ty.bytes() as i64;
+    }
+    ret
+}
+
+fn gen_store_stack_multi<M: ABIMachineSpec>(
+    from: StackAMode,
+    src: ValueRegs<Reg>,
+    ty: Type,
+) -> SmallInstVec<M::I> {
+    let mut ret = smallvec![];
+    let (_, tys) = M::I::rc_for_type(ty).unwrap();
+    let mut offset = 0;
+    // N.B.: registers are given in the `ValueRegs` in target endian order.
+    for (&src, &ty) in src.regs().iter().zip(tys.iter()) {
+        ret.push(M::gen_store_stack(from.offset(offset), src, ty));
+        offset += ty.bytes() as i64;
+    }
+    ret
+}
+
+fn gen_store_base_offset_multi<M: ABIMachineSpec>(
+    base: Reg,
+    mut offset: i32,
+    src: ValueRegs<Reg>,
+    ty: Type,
+) -> SmallInstVec<M::I> {
+    let mut ret = smallvec![];
+    let (_, tys) = M::I::rc_for_type(ty).unwrap();
+    // N.B.: registers are given in the `ValueRegs` in target endian order.
+    for (&src, &ty) in src.regs().iter().zip(tys.iter()) {
+        ret.push(M::gen_store_base_offset(base, offset, src, ty));
+        offset += ty.bytes() as i32;
+    }
+    ret
+}
+
 impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
     type I = M::I;

-    fn temp_needed(&self) -> bool {
-        self.sig.stack_ret_arg.is_some()
+    fn temp_needed(&self) -> Option<Type> {
+        if self.sig.stack_ret_arg.is_some() {
+            Some(M::word_type())
+        } else {
+            None
+        }
     }

     fn init(&mut self, maybe_tmp: Option<Writable<Reg>>) {
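The `gen_*_multi` helpers expand one logical move, load, or store into one machine instruction per register part, advancing a byte offset by each part's width. As a hedged usage sketch (not code from this commit; `lo`/`hi` and the two-by-64-bit split of `I128` are assumptions for a 64-bit target):

    // Storing an I128 held in two registers at `base + off`: on a target
    // where rc_for_type(I128) reports two I64 parts, this expands to word
    // stores at offsets `off` and `off + 8`.
    fn store_i128_parts<M: ABIMachineSpec>(
        base: Reg,
        off: i32,
        lo: Reg,
        hi: Reg,
    ) -> SmallInstVec<M::I> {
        gen_store_base_offset_multi::<M>(base, off, ValueRegs::two(lo, hi), ir::types::I128)
    }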
@@ -740,20 +822,24 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
     fn liveins(&self) -> Set<RealReg> {
         let mut set: Set<RealReg> = Set::empty();
         for &arg in &self.sig.args {
-            if let ABIArg::Reg(r, ..) = arg {
-                set.insert(r);
-            }
+            if let ABIArg::Reg(regs, ..) = arg {
+                for &r in regs.regs() {
+                    set.insert(r);
+                }
+            }
         }
         set
     }

     fn liveouts(&self) -> Set<RealReg> {
         let mut set: Set<RealReg> = Set::empty();
         for &ret in &self.sig.rets {
-            if let ABIArg::Reg(r, ..) = ret {
-                set.insert(r);
-            }
+            if let ABIArg::Reg(regs, ..) = ret {
+                for &r in regs.regs() {
+                    set.insert(r);
+                }
+            }
         }
         set
     }
@@ -769,14 +855,20 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
         self.stackslots.len()
     }

-    fn gen_copy_arg_to_reg(&self, idx: usize, into_reg: Writable<Reg>) -> Self::I {
+    fn gen_copy_arg_to_regs(
+        &self,
+        idx: usize,
+        into_regs: ValueRegs<Writable<Reg>>,
+    ) -> SmallInstVec<Self::I> {
         match &self.sig.args[idx] {
             // Extension mode doesn't matter (we're copying out, not in; we
             // ignore high bits by convention).
-            &ABIArg::Reg(r, ty, ..) => M::gen_move(into_reg, r.to_reg(), ty),
-            &ABIArg::Stack(off, ty, ..) => M::gen_load_stack(
+            &ABIArg::Reg(regs, ty, ..) => {
+                gen_move_multi::<M>(into_regs, regs.map(|r| r.to_reg()), ty)
+            }
+            &ABIArg::Stack(off, ty, ..) => gen_load_stack_multi::<M>(
                 StackAMode::FPOffset(M::fp_to_arg_offset(self.call_conv, &self.flags) + off, ty),
-                into_reg,
+                into_regs,
                 ty,
             ),
         }
@@ -792,19 +884,29 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
         }
     }

-    fn gen_copy_reg_to_retval(&self, idx: usize, from_reg: Writable<Reg>) -> Vec<Self::I> {
-        let mut ret = Vec::new();
+    fn gen_copy_regs_to_retval(
+        &self,
+        idx: usize,
+        from_regs: ValueRegs<Writable<Reg>>,
+    ) -> SmallInstVec<Self::I> {
+        let mut ret = smallvec![];
         let word_bits = M::word_bits() as u8;
         match &self.sig.rets[idx] {
-            &ABIArg::Reg(r, ty, ext, ..) => {
+            &ABIArg::Reg(regs, ty, ext, ..) => {
                 let from_bits = ty_bits(ty) as u8;
-                let dest_reg = Writable::from_reg(r.to_reg());
+                let dest_regs = writable_value_regs(regs.map(|r| r.to_reg()));
                 let ext = M::get_ext_mode(self.sig.call_conv, ext);
                 match (ext, from_bits) {
                     (ArgumentExtension::Uext, n) | (ArgumentExtension::Sext, n)
                         if n < word_bits =>
                     {
                         let signed = ext == ArgumentExtension::Sext;
+                        let dest_reg = dest_regs
+                            .only_reg()
+                            .expect("extension only possible from one-reg value");
+                        let from_reg = from_regs
+                            .only_reg()
+                            .expect("extension only possible from one-reg value");
                         ret.push(M::gen_extend(
                             dest_reg,
                             from_reg.to_reg(),
@@ -813,7 +915,10 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
                             /* to_bits = */ word_bits,
                         ));
                     }
-                    _ => ret.push(M::gen_move(dest_reg, from_reg.to_reg(), ty)),
+                    _ => ret.extend(
+                        gen_move_multi::<M>(dest_regs, non_writable_value_regs(from_regs), ty)
+                            .into_iter(),
+                    ),
                 };
             }
             &ABIArg::Stack(off, mut ty, ext, ..) => {
@@ -829,6 +934,9 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
                     (ArgumentExtension::Uext, n) | (ArgumentExtension::Sext, n)
                         if n < word_bits =>
                     {
+                        let from_reg = from_regs
+                            .only_reg()
+                            .expect("extension only possible from one-reg value");
                         assert_eq!(M::word_reg_class(), from_reg.to_reg().get_class());
                         let signed = ext == ArgumentExtension::Sext;
                         ret.push(M::gen_extend(
@@ -843,12 +951,15 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
                     }
                     _ => {}
                 };
-                ret.push(M::gen_store_base_offset(
-                    self.ret_area_ptr.unwrap().to_reg(),
-                    off,
-                    from_reg.to_reg(),
-                    ty,
-                ));
+                ret.extend(
+                    gen_store_base_offset_multi::<M>(
+                        self.ret_area_ptr.unwrap().to_reg(),
+                        off,
+                        non_writable_value_regs(from_regs),
+                        ty,
+                    )
+                    .into_iter(),
+                );
             }
         }
         ret
@@ -856,7 +967,8 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {

     fn gen_retval_area_setup(&self) -> Option<Self::I> {
         if let Some(i) = self.sig.stack_ret_arg {
-            let inst = self.gen_copy_arg_to_reg(i, self.ret_area_ptr.unwrap());
+            let insts = self.gen_copy_arg_to_regs(i, ValueRegs::one(self.ret_area_ptr.unwrap()));
+            let inst = insts.into_iter().next().unwrap();
             trace!(
                 "gen_retval_area_setup: inst {:?}; ptr reg is {:?}",
                 inst,
@@ -891,24 +1003,30 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
         slot: StackSlot,
         offset: u32,
         ty: Type,
-        into_reg: Writable<Reg>,
-    ) -> Self::I {
+        into_regs: ValueRegs<Writable<Reg>>,
+    ) -> SmallInstVec<Self::I> {
         // Offset from beginning of stackslot area, which is at nominal SP (see
         // [MemArg::NominalSPOffset] for more details on nominal SP tracking).
         let stack_off = self.stackslots[slot.as_u32() as usize] as i64;
         let sp_off: i64 = stack_off + (offset as i64);
         trace!("load_stackslot: slot {} -> sp_off {}", slot, sp_off);
-        M::gen_load_stack(StackAMode::NominalSPOffset(sp_off, ty), into_reg, ty)
+        gen_load_stack_multi::<M>(StackAMode::NominalSPOffset(sp_off, ty), into_regs, ty)
     }

     /// Store to a stackslot.
-    fn store_stackslot(&self, slot: StackSlot, offset: u32, ty: Type, from_reg: Reg) -> Self::I {
+    fn store_stackslot(
+        &self,
+        slot: StackSlot,
+        offset: u32,
+        ty: Type,
+        from_regs: ValueRegs<Reg>,
+    ) -> SmallInstVec<Self::I> {
         // Offset from beginning of stackslot area, which is at nominal SP (see
         // [MemArg::NominalSPOffset] for more details on nominal SP tracking).
         let stack_off = self.stackslots[slot.as_u32() as usize] as i64;
         let sp_off: i64 = stack_off + (offset as i64);
         trace!("store_stackslot: slot {} -> sp_off {}", slot, sp_off);
-        M::gen_store_stack(StackAMode::NominalSPOffset(sp_off, ty), from_reg, ty)
+        gen_store_stack_multi::<M>(StackAMode::NominalSPOffset(sp_off, ty), from_regs, ty)
     }

     /// Produce an instruction that computes a stackslot address.
@@ -921,23 +1039,33 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
     }

     /// Load from a spillslot.
-    fn load_spillslot(&self, slot: SpillSlot, ty: Type, into_reg: Writable<Reg>) -> Self::I {
+    fn load_spillslot(
+        &self,
+        slot: SpillSlot,
+        ty: Type,
+        into_regs: ValueRegs<Writable<Reg>>,
+    ) -> SmallInstVec<Self::I> {
         // Offset from beginning of spillslot area, which is at nominal SP + stackslots_size.
         let islot = slot.get() as i64;
         let spill_off = islot * M::word_bytes() as i64;
         let sp_off = self.stackslots_size as i64 + spill_off;
         trace!("load_spillslot: slot {:?} -> sp_off {}", slot, sp_off);
-        M::gen_load_stack(StackAMode::NominalSPOffset(sp_off, ty), into_reg, ty)
+        gen_load_stack_multi::<M>(StackAMode::NominalSPOffset(sp_off, ty), into_regs, ty)
     }

     /// Store to a spillslot.
-    fn store_spillslot(&self, slot: SpillSlot, ty: Type, from_reg: Reg) -> Self::I {
+    fn store_spillslot(
+        &self,
+        slot: SpillSlot,
+        ty: Type,
+        from_regs: ValueRegs<Reg>,
+    ) -> SmallInstVec<Self::I> {
         // Offset from beginning of spillslot area, which is at nominal SP + stackslots_size.
         let islot = slot.get() as i64;
         let spill_off = islot * M::word_bytes() as i64;
         let sp_off = self.stackslots_size as i64 + spill_off;
         trace!("store_spillslot: slot {:?} -> sp_off {}", slot, sp_off);
-        M::gen_store_stack(StackAMode::NominalSPOffset(sp_off, ty), from_reg, ty)
+        gen_store_stack_multi::<M>(StackAMode::NominalSPOffset(sp_off, ty), from_regs, ty)
     }

     fn spillslots_to_stack_map(
@@ -970,8 +1098,8 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
         StackMap::from_slice(&bits[..])
     }

-    fn gen_prologue(&mut self) -> Vec<Self::I> {
-        let mut insts = vec![];
+    fn gen_prologue(&mut self) -> SmallInstVec<Self::I> {
+        let mut insts = smallvec![];
         if !self.call_conv.extends_baldrdash() {
             // set up frame
             insts.extend(M::gen_prologue_frame_setup().into_iter());
@@ -994,7 +1122,7 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
         // specified, otherwise always insert the stack check.
         if total_stacksize > 0 || !self.is_leaf {
             if let Some((reg, stack_limit_load)) = &self.stack_limit {
-                insts.extend_from_slice(stack_limit_load);
+                insts.extend(stack_limit_load.clone());
                 self.insert_stack_check(*reg, total_stacksize, &mut insts);
             }
             if let Some(min_frame) = &self.probestack_min_frame {
@@ -1037,8 +1165,8 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
         insts
     }

-    fn gen_epilogue(&self) -> Vec<M::I> {
-        let mut insts = vec![];
+    fn gen_epilogue(&self) -> SmallInstVec<M::I> {
+        let mut insts = smallvec![];

         // Restore clobbered registers.
         insts.extend(M::gen_clobber_restore(
@@ -1079,7 +1207,10 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {

     fn gen_spill(&self, to_slot: SpillSlot, from_reg: RealReg, ty: Option<Type>) -> Self::I {
         let ty = ty_from_ty_hint_or_reg_class::<M>(from_reg.to_reg(), ty);
-        self.store_spillslot(to_slot, ty, from_reg.to_reg())
+        self.store_spillslot(to_slot, ty, ValueRegs::one(from_reg.to_reg()))
+            .into_iter()
+            .next()
+            .unwrap()
     }

     fn gen_reload(
@@ -1089,7 +1220,14 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
         ty: Option<Type>,
     ) -> Self::I {
         let ty = ty_from_ty_hint_or_reg_class::<M>(to_reg.to_reg().to_reg(), ty);
-        self.load_spillslot(from_slot, ty, to_reg.map(|r| r.to_reg()))
+        self.load_spillslot(
+            from_slot,
+            ty,
+            writable_value_regs(ValueRegs::one(to_reg.to_reg().to_reg())),
+        )
+        .into_iter()
+        .next()
+        .unwrap()
     }

     fn unwind_info_kind(&self) -> UnwindInfoKind {
@@ -1110,7 +1248,7 @@ fn abisig_to_uses_and_defs<M: ABIMachineSpec>(sig: &ABISig) -> (Vec<Reg>, Vec<Wr
     let mut uses = Vec::new();
     for arg in &sig.args {
         match arg {
-            &ABIArg::Reg(reg, ..) => uses.push(reg.to_reg()),
+            &ABIArg::Reg(regs, ..) => uses.extend(regs.regs().iter().map(|r| r.to_reg())),
             _ => {}
         }
     }
@@ -1119,7 +1257,9 @@ fn abisig_to_uses_and_defs<M: ABIMachineSpec>(sig: &ABISig) -> (Vec<Reg>, Vec<Wr
     let mut defs = M::get_regs_clobbered_by_call(sig.call_conv);
     for ret in &sig.rets {
         match ret {
-            &ABIArg::Reg(reg, ..) => defs.push(Writable::from_reg(reg.to_reg())),
+            &ABIArg::Reg(regs, ..) => {
+                defs.extend(regs.regs().iter().map(|r| Writable::from_reg(r.to_reg())))
+            }
             _ => {}
         }
     }
@@ -1238,18 +1378,19 @@ impl<M: ABIMachineSpec> ABICaller for ABICallerImpl<M> {
         adjust_stack_and_nominal_sp::<M, C>(ctx, off as i32, /* is_sub = */ false)
     }

-    fn emit_copy_reg_to_arg<C: LowerCtx<I = Self::I>>(
+    fn emit_copy_regs_to_arg<C: LowerCtx<I = Self::I>>(
         &self,
         ctx: &mut C,
         idx: usize,
-        from_reg: Reg,
+        from_regs: ValueRegs<Reg>,
     ) {
         let word_rc = M::word_reg_class();
         let word_bits = M::word_bits() as usize;
         match &self.sig.args[idx] {
-            &ABIArg::Reg(reg, ty, ext, _) => {
+            &ABIArg::Reg(regs, ty, ext, _) => {
                 let ext = M::get_ext_mode(self.sig.call_conv, ext);
                 if ext != ir::ArgumentExtension::None && ty_bits(ty) < word_bits {
+                    let reg = regs.only_reg().unwrap();
                     assert_eq!(word_rc, reg.get_class());
                     let signed = match ext {
                         ir::ArgumentExtension::Uext => false,
@@ -1258,18 +1399,27 @@ impl<M: ABIMachineSpec> ABICaller for ABICallerImpl<M> {
                     };
                     ctx.emit(M::gen_extend(
                         Writable::from_reg(reg.to_reg()),
-                        from_reg,
+                        from_regs.only_reg().unwrap(),
                         signed,
                         ty_bits(ty) as u8,
                         word_bits as u8,
                     ));
                 } else {
-                    ctx.emit(M::gen_move(Writable::from_reg(reg.to_reg()), from_reg, ty));
+                    for insn in gen_move_multi::<M>(
+                        writable_value_regs(regs.map(|r| r.to_reg())),
+                        from_regs,
+                        ty,
+                    ) {
+                        ctx.emit(insn);
+                    }
                 }
             }
             &ABIArg::Stack(off, mut ty, ext, _) => {
                 let ext = M::get_ext_mode(self.sig.call_conv, ext);
                 if ext != ir::ArgumentExtension::None && ty_bits(ty) < word_bits {
+                    let from_reg = from_regs
+                        .only_reg()
+                        .expect("only one reg for sub-word value width");
                     assert_eq!(word_rc, from_reg.get_class());
                     let signed = match ext {
                         ir::ArgumentExtension::Uext => false,
@@ -1289,32 +1439,37 @@ impl<M: ABIMachineSpec> ABICaller for ABICallerImpl<M> {
                     // Store the extended version.
                     ty = M::word_type();
                 }
-                ctx.emit(M::gen_store_stack(
-                    StackAMode::SPOffset(off, ty),
-                    from_reg,
-                    ty,
-                ));
+                for insn in gen_store_stack_multi::<M>(StackAMode::SPOffset(off, ty), from_regs, ty)
+                {
+                    ctx.emit(insn);
+                }
             }
         }
     }

-    fn emit_copy_retval_to_reg<C: LowerCtx<I = Self::I>>(
+    fn emit_copy_retval_to_regs<C: LowerCtx<I = Self::I>>(
         &self,
         ctx: &mut C,
         idx: usize,
-        into_reg: Writable<Reg>,
+        into_regs: ValueRegs<Writable<Reg>>,
     ) {
         match &self.sig.rets[idx] {
             // Extension mode doesn't matter because we're copying out, not in,
             // and we ignore high bits in our own registers by convention.
-            &ABIArg::Reg(reg, ty, _, _) => ctx.emit(M::gen_move(into_reg, reg.to_reg(), ty)),
+            &ABIArg::Reg(regs, ty, _, _) => {
+                for insn in gen_move_multi::<M>(into_regs, regs.map(|r| r.to_reg()), ty) {
+                    ctx.emit(insn);
+                }
+            }
             &ABIArg::Stack(off, ty, _, _) => {
                 let ret_area_base = self.sig.stack_arg_space;
-                ctx.emit(M::gen_load_stack(
+                for insn in gen_load_stack_multi::<M>(
                     StackAMode::SPOffset(off + ret_area_base, ty),
-                    into_reg,
+                    into_regs,
                     ty,
-                ));
+                ) {
+                    ctx.emit(insn);
+                }
             }
         }
     }
@@ -1324,19 +1479,18 @@ impl<M: ABIMachineSpec> ABICaller for ABICallerImpl<M> {
             mem::replace(&mut self.uses, Default::default()),
             mem::replace(&mut self.defs, Default::default()),
         );
-        let word_rc = M::word_reg_class();
         let word_type = M::word_type();
         if let Some(i) = self.sig.stack_ret_arg {
-            let rd = ctx.alloc_tmp(word_rc, word_type);
+            let rd = ctx.alloc_tmp(word_type).only_reg().unwrap();
             let ret_area_base = self.sig.stack_arg_space;
             ctx.emit(M::gen_get_stack_addr(
                 StackAMode::SPOffset(ret_area_base, I8),
                 rd,
                 I8,
             ));
-            self.emit_copy_reg_to_arg(ctx, i, rd.to_reg());
+            self.emit_copy_regs_to_arg(ctx, i, ValueRegs::one(rd.to_reg()));
         }
-        let tmp = ctx.alloc_tmp(word_rc, word_type);
+        let tmp = ctx.alloc_tmp(word_type).only_reg().unwrap();
         for (is_safepoint, inst) in M::gen_call(
             &self.dest,
             uses,
@@ -1,6 +1,6 @@
 //! Miscellaneous helpers for machine backends.

-use super::{InsnOutput, LowerCtx, VCodeInst};
+use super::{InsnOutput, LowerCtx, VCodeInst, ValueRegs};
 use crate::ir::Type;
 use regalloc::{Reg, Writable};
@@ -23,6 +23,6 @@ pub(crate) fn ty_has_float_or_vec_representation(ty: Type) -> bool {
 pub(crate) fn get_output_reg<I: VCodeInst, C: LowerCtx<I = I>>(
     ctx: &mut C,
     spec: InsnOutput,
-) -> Writable<Reg> {
+) -> ValueRegs<Writable<Reg>> {
     ctx.get_output(spec.insn, spec.output)
 }
@@ -5,29 +5,27 @@
 // TODO: separate the IR-query core of `LowerCtx` from the lowering logic built
 // on top of it, e.g. the side-effect/coloring analysis and the scan support.

+use crate::data_value::DataValue;
 use crate::entity::SecondaryMap;
 use crate::fx::{FxHashMap, FxHashSet};
 use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
 use crate::ir::instructions::BranchInfo;
-use crate::ir::types::I64;
 use crate::ir::{
     ArgumentPurpose, Block, Constant, ConstantData, ExternalName, Function, GlobalValueData, Inst,
     InstructionData, MemFlags, Opcode, Signature, SourceLoc, Type, Value, ValueDef,
 };
 use crate::machinst::{
-    ABICallee, BlockIndex, BlockLoweringOrder, LoweredBlock, MachLabel, VCode, VCodeBuilder,
-    VCodeConstant, VCodeConstantData, VCodeConstants, VCodeInst,
+    writable_value_regs, ABICallee, BlockIndex, BlockLoweringOrder, LoweredBlock, MachLabel, VCode,
+    VCodeBuilder, VCodeConstant, VCodeConstantData, VCodeConstants, VCodeInst, ValueRegs,
 };
 use crate::CodegenResult;

-use regalloc::{Reg, RegClass, StackmapRequestInfo, VirtualReg, Writable};
-
-use crate::data_value::DataValue;
 use alloc::boxed::Box;
 use alloc::vec::Vec;
 use core::convert::TryInto;
 use log::debug;
+use regalloc::{Reg, StackmapRequestInfo, Writable};
 use smallvec::SmallVec;
+use std::fmt::Debug;

 /// An "instruction color" partitions CLIF instructions by side-effecting ops.
 /// All instructions with the same "color" are guaranteed not to be separated by
@@ -71,7 +69,7 @@ pub trait LowerCtx {
     /// instruction should lower into a sequence that fills this register. (Why
     /// not allow the backend to specify its own result register for the return?
     /// Because there may be multiple return points.)
-    fn retval(&self, idx: usize) -> Writable<Reg>;
+    fn retval(&self, idx: usize) -> ValueRegs<Writable<Reg>>;
     /// Returns the vreg containing the VmContext parameter, if there's one.
     fn get_vm_context(&self) -> Option<Reg>;
@@ -118,7 +116,7 @@ pub trait LowerCtx {
     ///
     /// The instruction input may be available in either of these forms. It may
     /// be available in neither form, if the conditions are not met; if so, use
-    /// `put_input_in_reg()` instead to get it in a register.
+    /// `put_input_in_regs()` instead to get it in a register.
     ///
     /// If the backend merges the effect of a side-effecting instruction, it
     /// must call `sink_inst()`. When this is called, it indicates that the
@@ -126,29 +124,29 @@ pub trait LowerCtx {
     /// instruction's result(s) must have *no* uses remaining, because it will
     /// not be codegen'd (it has been integrated into the current instruction).
     fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput;
-    /// Put the `idx`th input into a register and return the assigned register.
-    fn put_input_in_reg(&mut self, ir_inst: Inst, idx: usize) -> Reg;
-    /// Get the `idx`th output register of the given IR instruction. When
+    /// Put the `idx`th input into register(s) and return the assigned register.
+    fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg>;
+    /// Get the `idx`th output register(s) of the given IR instruction. When
     /// `backend.lower_inst_to_regs(ctx, inst)` is called, it is expected that
     /// the backend will write results to these output register(s). This
     /// register will always be "fresh"; it is guaranteed not to overlap with
     /// any of the inputs, and can be freely used as a scratch register within
     /// the lowered instruction sequence, as long as its final value is the
     /// result of the computation.
-    fn get_output(&self, ir_inst: Inst, idx: usize) -> Writable<Reg>;
+    fn get_output(&self, ir_inst: Inst, idx: usize) -> ValueRegs<Writable<Reg>>;

     // Codegen primitives: allocate temps, emit instructions, set result registers,
     // ask for an input to be gen'd into a register.

     /// Get a new temp.
-    fn alloc_tmp(&mut self, rc: RegClass, ty: Type) -> Writable<Reg>;
+    fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>>;
     /// Emit a machine instruction.
     fn emit(&mut self, mach_inst: Self::I);
     /// Emit a machine instruction that is a safepoint.
     fn emit_safepoint(&mut self, mach_inst: Self::I);
     /// Indicate that the side-effect of an instruction has been sunk to the
     /// current scan location. This should only be done with the instruction's
-    /// original results are not used (i.e., `put_input_in_reg` is not invoked
+    /// original results are not used (i.e., `put_input_in_regs` is not invoked
     /// for the input produced by the sunk instruction), otherwise the
     /// side-effect will occur twice.
     fn sink_inst(&mut self, ir_inst: Inst);
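To see how the `ValueRegs`-returning accessors combine in a lowering rule, here is a hedged sketch (not part of this commit) that copies a possibly-multi-register input to the corresponding output. The per-part type `I64` is an assumption; a real backend would take the per-part types from `rc_for_type`:

    fn lower_copy<I: VCodeInst, C: LowerCtx<I = I>>(ctx: &mut C, insn: Inst) {
        let srcs = ctx.put_input_in_regs(insn, 0); // ValueRegs<Reg>
        let dsts = ctx.get_output(insn, 0); // ValueRegs<Writable<Reg>>
        // One gen_move per register part.
        for (dst, src) in dsts.regs().iter().zip(srcs.regs().iter()) {
            ctx.emit(I::gen_move(*dst, *src, ir::types::I64));
        }
    }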
@@ -234,10 +232,10 @@ pub struct Lower<'func, I: VCodeInst> {
     vcode: VCodeBuilder<I>,

     /// Mapping from `Value` (SSA value in IR) to virtual register.
-    value_regs: SecondaryMap<Value, Reg>,
+    value_regs: SecondaryMap<Value, ValueRegs<Reg>>,

     /// Return-value vregs.
-    retval_regs: Vec<Reg>,
+    retval_regs: Vec<ValueRegs<Reg>>,

     /// Instruction colors at block exits. From this map, we can recover all
     /// instruction colors by scanning backward from the block end and
@@ -306,20 +304,30 @@ pub enum RelocDistance {
     Far,
 }

-fn alloc_vreg(
-    value_regs: &mut SecondaryMap<Value, Reg>,
-    regclass: RegClass,
-    value: Value,
-    next_vreg: &mut u32,
-) -> VirtualReg {
-    if value_regs[value].is_invalid() {
-        // default value in map.
-        let v = *next_vreg;
-        *next_vreg += 1;
-        value_regs[value] = Reg::new_virtual(regclass, v);
-        debug!("value {} gets vreg {:?}", value, v);
-    }
-    value_regs[value].as_virtual_reg().unwrap()
-}
+fn alloc_vregs<I: VCodeInst>(
+    ty: Type,
+    next_vreg: &mut u32,
+    vcode: &mut VCodeBuilder<I>,
+) -> CodegenResult<ValueRegs<Reg>> {
+    let v = *next_vreg;
+    let (regclasses, tys) = I::rc_for_type(ty)?;
+    *next_vreg += regclasses.len() as u32;
+    let regs = match regclasses {
+        &[rc0] => ValueRegs::one(Reg::new_virtual(rc0, v)),
+        &[rc0, rc1] => ValueRegs::two(Reg::new_virtual(rc0, v), Reg::new_virtual(rc1, v + 1)),
+        #[cfg(feature = "arm32")]
+        &[rc0, rc1, rc2, rc3] => ValueRegs::four(
+            Reg::new_virtual(rc0, v),
+            Reg::new_virtual(rc1, v + 1),
+            Reg::new_virtual(rc2, v + 2),
+            Reg::new_virtual(rc3, v + 3),
+        ),
+        _ => panic!("Value must reside in 1, 2 or 4 registers"),
+    };
+    for (&reg_ty, &reg) in tys.iter().zip(regs.regs().iter()) {
+        vcode.set_vreg_type(reg.to_virtual_reg(), reg_ty);
+    }
+    Ok(regs)
+}

 enum GenerateReturn {
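The index arithmetic above is the heart of `alloc_vregs`: a value's parts occupy consecutive virtual-register numbers, and `next_vreg` advances by however many register classes `rc_for_type` reports. A self-contained toy model of that arithmetic (plain Rust, not Cranelift code):

    fn alloc_indices(next_vreg: &mut u32, parts: u32) -> Vec<u32> {
        let v = *next_vreg;
        *next_vreg += parts;
        (v..v + parts).collect()
    }

    fn main() {
        let mut next = 7;
        // A hypothetical I64 on a 32-bit target: two parts, indices 7 and 8.
        assert_eq!(alloc_indices(&mut next, 2), vec![7, 8]);
        assert_eq!(next, 9);
    }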
@@ -340,29 +348,32 @@ impl<'func, I: VCodeInst> Lower<'func, I> {

         let mut next_vreg: u32 = 0;

-        let mut value_regs = SecondaryMap::with_default(Reg::invalid());
+        let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());

         // Assign a vreg to each block param and each inst result.
         for bb in f.layout.blocks() {
             for &param in f.dfg.block_params(bb) {
                 let ty = f.dfg.value_type(param);
-                let vreg = alloc_vreg(&mut value_regs, I::rc_for_type(ty)?, param, &mut next_vreg);
-                vcode.set_vreg_type(vreg, ty);
-                debug!("bb {} param {}: vreg {:?}", bb, param, vreg);
+                if value_regs[param].is_invalid() {
+                    let regs = alloc_vregs(ty, &mut next_vreg, &mut vcode)?;
+                    value_regs[param] = regs;
+                    debug!("bb {} param {}: regs {:?}", bb, param, regs);
+                }
             }
             for inst in f.layout.block_insts(bb) {
                 for &result in f.dfg.inst_results(inst) {
                     let ty = f.dfg.value_type(result);
-                    let vreg =
-                        alloc_vreg(&mut value_regs, I::rc_for_type(ty)?, result, &mut next_vreg);
-                    vcode.set_vreg_type(vreg, ty);
-                    debug!(
-                        "bb {} inst {} ({:?}): result vreg {:?}",
-                        bb, inst, f.dfg[inst], vreg
-                    );
+                    if value_regs[result].is_invalid() {
+                        let regs = alloc_vregs(ty, &mut next_vreg, &mut vcode)?;
+                        value_regs[result] = regs;
+                        debug!(
+                            "bb {} inst {} ({:?}): result regs {:?}",
+                            bb, inst, f.dfg[inst], regs,
+                        );
+                    }
                 }
             }
         }

         let vm_context = f
             .signature
@@ -370,18 +381,15 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
             .map(|vm_context_index| {
                 let entry_block = f.layout.entry_block().unwrap();
                 let param = f.dfg.block_params(entry_block)[vm_context_index];
-                value_regs[param]
+                value_regs[param].only_reg().unwrap()
             });

-        // Assign a vreg to each return value.
+        // Assign vreg(s) to each return value.
         let mut retval_regs = vec![];
         for ret in &f.signature.returns {
-            let v = next_vreg;
-            next_vreg += 1;
-            let regclass = I::rc_for_type(ret.value_type)?;
-            let vreg = Reg::new_virtual(regclass, v);
-            retval_regs.push(vreg);
-            vcode.set_vreg_type(vreg.as_virtual_reg().unwrap(), ret.value_type);
+            let regs = alloc_vregs(ret.value_type, &mut next_vreg, &mut vcode)?;
+            retval_regs.push(regs);
+            debug!("retval gets regs {:?}", regs);
         }

         // Compute instruction colors, find constant instructions, and find instructions with
@@ -453,10 +461,11 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
             if !self.vcode.abi().arg_is_needed_in_body(i) {
                 continue;
             }
-            let reg = Writable::from_reg(self.value_regs[*param]);
-            let insn = self.vcode.abi().gen_copy_arg_to_reg(i, reg);
-            self.emit(insn);
+            let regs = writable_value_regs(self.value_regs[*param]);
+            for insn in self.vcode.abi().gen_copy_arg_to_regs(i, regs).into_iter() {
+                self.emit(insn);
+            }
         }
         if let Some(insn) = self.vcode.abi().gen_retval_area_setup() {
             self.emit(insn);
         }
@@ -465,10 +474,14 @@ impl<'func, I: VCodeInst> Lower<'func, I> {

     fn gen_retval_setup(&mut self, gen_ret_inst: GenerateReturn) {
         let retval_regs = self.retval_regs.clone();
-        for (i, reg) in retval_regs.into_iter().enumerate() {
-            let reg = Writable::from_reg(reg);
-            let insns = self.vcode.abi().gen_copy_reg_to_retval(i, reg);
-            for insn in insns {
+        for (i, regs) in retval_regs.into_iter().enumerate() {
+            let regs = writable_value_regs(regs);
+            for insn in self
+                .vcode
+                .abi()
+                .gen_copy_regs_to_retval(i, regs)
+                .into_iter()
+            {
                 self.emit(insn);
             }
         }
@@ -499,8 +512,8 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
         //
         // * one for dsts whose sources are non-constants.

-        let mut const_bundles = SmallVec::<[(Type, Writable<Reg>, u64); 16]>::new();
-        let mut var_bundles = SmallVec::<[(Type, Writable<Reg>, Reg); 16]>::new();
+        let mut const_bundles: SmallVec<[_; 16]> = SmallVec::new();
+        let mut var_bundles: SmallVec<[_; 16]> = SmallVec::new();

         let mut i = 0;
         for (dst_val, src_val) in self
@@ -514,7 +527,7 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
             let ty = self.f.dfg.value_type(src_val);

             debug_assert!(ty == self.f.dfg.value_type(*dst_val));
-            let dst_reg = self.value_regs[*dst_val];
+            let dst_regs = self.value_regs[*dst_val];

             let input = self.get_value_as_source_or_const(src_val);
             debug!("jump arg {} is {}", i, src_val);
@@ -522,15 +535,15 @@ impl<'func, I: VCodeInst> Lower<'func, I> {

             if let Some(c) = input.constant {
                 debug!(" -> constant {}", c);
-                const_bundles.push((ty, Writable::from_reg(dst_reg), c));
+                const_bundles.push((ty, writable_value_regs(dst_regs), c));
             } else {
-                let src_reg = self.put_value_in_reg(src_val);
-                debug!(" -> reg {:?}", src_reg);
+                let src_regs = self.put_value_in_regs(src_val);
+                debug!(" -> reg {:?}", src_regs);
                 // Skip self-assignments. Not only are they pointless, they falsely trigger the
                 // overlap-check below and hence can cause a lot of unnecessary copying through
                 // temporaries.
-                if dst_reg != src_reg {
-                    var_bundles.push((ty, Writable::from_reg(dst_reg), src_reg));
+                if dst_regs != src_regs {
+                    var_bundles.push((ty, writable_value_regs(dst_regs), src_regs));
                 }
             }
         }
@@ -541,41 +554,69 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
         // for cases of up to circa 16 args. Currently not possible because regalloc.rs
         // does not export it.
         let mut src_reg_set = FxHashSet::<Reg>::default();
-        for (_, _, src_reg) in &var_bundles {
-            src_reg_set.insert(*src_reg);
+        for (_, _, src_regs) in &var_bundles {
+            for &reg in src_regs.regs() {
+                src_reg_set.insert(reg);
+            }
         }
         let mut overlaps = false;
-        for (_, dst_reg, _) in &var_bundles {
-            if src_reg_set.contains(&dst_reg.to_reg()) {
-                overlaps = true;
-                break;
+        'outer: for (_, dst_regs, _) in &var_bundles {
+            for &reg in dst_regs.regs() {
+                if src_reg_set.contains(&reg.to_reg()) {
+                    overlaps = true;
+                    break 'outer;
+                }
             }
         }

         // If, as is mostly the case, the source and destination register sets are non
         // overlapping, then we can copy directly, so as to save the register allocator work.
         if !overlaps {
-            for (ty, dst_reg, src_reg) in &var_bundles {
-                self.emit(I::gen_move(*dst_reg, *src_reg, *ty));
+            for (ty, dst_regs, src_regs) in &var_bundles {
+                let (_, reg_tys) = I::rc_for_type(*ty)?;
+                for ((dst, src), reg_ty) in dst_regs
+                    .regs()
+                    .iter()
+                    .zip(src_regs.regs().iter())
+                    .zip(reg_tys.iter())
+                {
+                    self.emit(I::gen_move(*dst, *src, *reg_ty));
+                }
             }
         } else {
             // There's some overlap, so play safe and copy via temps.
-            let mut tmp_regs = SmallVec::<[Writable<Reg>; 16]>::new();
+            let mut tmp_regs = SmallVec::<[ValueRegs<Writable<Reg>>; 16]>::new();
             for (ty, _, _) in &var_bundles {
-                tmp_regs.push(self.alloc_tmp(I::rc_for_type(*ty)?, *ty));
+                tmp_regs.push(self.alloc_tmp(*ty));
             }
             for ((ty, _, src_reg), tmp_reg) in var_bundles.iter().zip(tmp_regs.iter()) {
-                self.emit(I::gen_move(*tmp_reg, *src_reg, *ty));
+                let (_, reg_tys) = I::rc_for_type(*ty)?;
+                for ((tmp, src), reg_ty) in tmp_reg
+                    .regs()
+                    .iter()
+                    .zip(src_reg.regs().iter())
+                    .zip(reg_tys.iter())
+                {
+                    self.emit(I::gen_move(*tmp, *src, *reg_ty));
+                }
             }
             for ((ty, dst_reg, _), tmp_reg) in var_bundles.iter().zip(tmp_regs.iter()) {
-                self.emit(I::gen_move(*dst_reg, (*tmp_reg).to_reg(), *ty));
+                let (_, reg_tys) = I::rc_for_type(*ty)?;
+                for ((dst, tmp), reg_ty) in dst_reg
+                    .regs()
+                    .iter()
+                    .zip(tmp_reg.regs().iter())
+                    .zip(reg_tys.iter())
+                {
+                    self.emit(I::gen_move(*dst, tmp.to_reg(), *reg_ty));
+                }
             }
         }

         // Now, finally, deal with the moves whose sources are constants.
-        for (ty, dst_reg, const_u64) in &const_bundles {
-            for inst in I::gen_constant(*dst_reg, *const_u64, *ty, |reg_class, ty| {
-                self.alloc_tmp(reg_class, ty)
+        for (ty, dst_reg, const_val) in &const_bundles {
+            for inst in I::gen_constant(*dst_reg, *const_val as u128, *ty, |ty| {
+                self.alloc_tmp(ty).only_reg().unwrap()
             })
             .into_iter()
             {
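The overlap check matters more now that each jump argument may occupy several registers: a single colliding part forces the copy-via-temps path. A self-contained toy (plain Rust, not Cranelift code) of the hazard being avoided:

    fn main() {
        // Parallel move {a <- b, b <- a} done naively: a source is clobbered.
        let (mut a, mut b) = (1, 2);
        a = b;
        b = a;
        assert_eq!((a, b), (2, 2)); // the value 1 was lost
        // Via temps, as the `overlaps` branch does: all sources to temps
        // first, then temps to destinations.
        let (a0, b0) = (1, 2);
        let (ta, tb) = (b0, a0);
        let (a1, b1) = (ta, tb);
        assert_eq!((a1, b1), (2, 1)); // correct swap
    }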
@@ -766,8 +807,8 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
         debug!("about to lower function: {:?}", self.f);

         // Initialize the ABI object, giving it a temp if requested.
-        let maybe_tmp = if self.vcode.abi().temp_needed() {
-            Some(self.alloc_tmp(RegClass::I64, I64))
+        let maybe_tmp = if let Some(temp_ty) = self.vcode.abi().temp_needed() {
+            Some(self.alloc_tmp(temp_ty).only_reg().unwrap())
         } else {
             None
         };
@@ -848,11 +889,11 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
         Ok((vcode, stack_map_info))
     }

-    fn put_value_in_reg(&mut self, val: Value) -> Reg {
-        debug!("put_value_in_reg: val {}", val,);
-        let mut reg = self.value_regs[val];
-        debug!(" -> reg {:?}", reg);
-        assert!(reg.is_valid());
+    fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
+        debug!("put_value_in_reg: val {}", val);
+        let mut regs = self.value_regs[val];
+        debug!(" -> regs {:?}", regs);
+        assert!(regs.is_valid());

         self.value_lowered_uses[val] += 1;

@@ -864,12 +905,12 @@ impl<'func, I: VCodeInst> Lower<'func, I> {
         if let ValueDef::Result(i, 0) = self.f.dfg.value_def(val) {
             if self.f.dfg[i].opcode() == Opcode::GetPinnedReg {
                 if let Some(pr) = self.pinned_reg {
-                    reg = pr;
+                    regs = ValueRegs::one(pr);
                 }
             }
         }

-        reg
+        regs
     }

     /// Get the actual inputs for a value. This is the implementation for
@@ -944,8 +985,8 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
         self.vcode.abi()
     }

-    fn retval(&self, idx: usize) -> Writable<Reg> {
-        Writable::from_reg(self.retval_regs[idx])
+    fn retval(&self, idx: usize) -> ValueRegs<Writable<Reg>> {
+        writable_value_regs(self.retval_regs[idx])
     }

     fn get_vm_context(&self) -> Option<Reg> {
@@ -1050,23 +1091,19 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
         self.get_value_as_source_or_const(val)
     }

-    fn put_input_in_reg(&mut self, ir_inst: Inst, idx: usize) -> Reg {
+    fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
         let val = self.f.dfg.inst_args(ir_inst)[idx];
         let val = self.f.dfg.resolve_aliases(val);
-        self.put_value_in_reg(val)
+        self.put_value_in_regs(val)
     }

-    fn get_output(&self, ir_inst: Inst, idx: usize) -> Writable<Reg> {
+    fn get_output(&self, ir_inst: Inst, idx: usize) -> ValueRegs<Writable<Reg>> {
         let val = self.f.dfg.inst_results(ir_inst)[idx];
-        Writable::from_reg(self.value_regs[val])
+        writable_value_regs(self.value_regs[val])
     }

-    fn alloc_tmp(&mut self, rc: RegClass, ty: Type) -> Writable<Reg> {
-        let v = self.next_vreg;
-        self.next_vreg += 1;
-        let vreg = Reg::new_virtual(rc, v);
-        self.vcode.set_vreg_type(vreg.as_virtual_reg().unwrap(), ty);
-        Writable::from_reg(vreg)
+    fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
+        writable_value_regs(alloc_vregs(ty, &mut self.next_vreg, &mut self.vcode).unwrap())
     }

     fn emit(&mut self, mach_inst: I) {
@@ -1131,8 +1168,7 @@ impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
         if reg.is_virtual() {
             reg
         } else {
-            let rc = reg.get_class();
-            let new_reg = self.alloc_tmp(rc, ty);
+            let new_reg = self.alloc_tmp(ty).only_reg().unwrap();
             self.emit(I::gen_move(new_reg, reg, ty));
             new_reg.to_reg()
         }
|||||||
@@ -135,6 +135,8 @@ pub mod helpers;
 pub use helpers::*;
 pub mod inst_common;
 pub use inst_common::*;
+pub mod valueregs;
+pub use valueregs::*;

 /// A machine instruction.
 pub trait MachInst: Clone + Debug {
@@ -165,9 +167,9 @@ pub trait MachInst: Clone + Debug {
     fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Self;

     /// Generate a constant into a reg.
-    fn gen_constant<F: FnMut(RegClass, Type) -> Writable<Reg>>(
-        to_reg: Writable<Reg>,
-        value: u64,
+    fn gen_constant<F: FnMut(Type) -> Writable<Reg>>(
+        to_regs: ValueRegs<Writable<Reg>>,
+        value: u128,
         ty: Type,
         alloc_tmp: F,
     ) -> SmallVec<[Self; 4]>;
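With `to_regs` and a `u128` value, a 32-bit backend can split a wide constant across the register parts. A hedged sketch of the pattern only; `load_imm32` is a hypothetical single-register constant-loading constructor, not an API from this commit:

    // Sketch: materialize an I64 constant into two 32-bit registers on a
    // hypothetical 32-bit backend; register parts are in little-endian order.
    fn gen_constant<F: FnMut(Type) -> Writable<Reg>>(
        to_regs: ValueRegs<Writable<Reg>>,
        value: u128,
        ty: Type,
        _alloc_tmp: F,
    ) -> SmallVec<[Inst; 4]> {
        let mut insts = SmallVec::new();
        if ty == I64 {
            let regs = to_regs.regs();
            insts.push(Inst::load_imm32(regs[0], value as u32)); // low half
            insts.push(Inst::load_imm32(regs[1], (value >> 32) as u32)); // high half
        } else {
            // Single-register and I128 cases elided in this sketch.
        }
        insts
    }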
@@ -180,9 +182,19 @@ pub trait MachInst: Clone + Debug {
     /// (e.g., add directly from or directly to memory), like x86.
     fn maybe_direct_reload(&self, reg: VirtualReg, slot: SpillSlot) -> Option<Self>;

-    /// Determine a register class to store the given Cranelift type.
-    /// May return an error if the type isn't supported by this backend.
-    fn rc_for_type(ty: Type) -> CodegenResult<RegClass>;
+    /// Determine register class(es) to store the given Cranelift type, and the
+    /// Cranelift type actually stored in the underlying register(s). May return
+    /// an error if the type isn't supported by this backend.
+    ///
+    /// If the type requires multiple registers, then the list of registers is
+    /// returned in little-endian order.
+    ///
+    /// Note that the type actually stored in the register(s) may differ in the
+    /// case that a value is split across registers: for example, on a 32-bit
+    /// target, an I64 may be stored in two registers, each of which holds an
+    /// I32. The actually-stored types are used only to inform the backend when
+    /// generating spills and reloads for individual registers.
+    fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])>;

     /// Generate a jump to another target. Used during lowering of
     /// control flow.
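A 64-bit backend satisfies the new signature with single-element static slices; multi-register cases return longer slices. A sketch of a plausible implementation (the exact type coverage is illustrative, not copied from any backend in this commit):

    // Sketch: each returned slice pair has matching length, with parts listed
    // in little-endian order for multi-register types.
    fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
        match ty {
            I8 | I16 | I32 | I64 | B1 | B8 | B16 | B32 | B64 => Ok((&[RegClass::I64], &[I64])),
            F32 => Ok((&[RegClass::V128], &[F32])),
            F64 => Ok((&[RegClass::V128], &[F64])),
            // An I128 split across two 64-bit registers, each holding an I64.
            I128 => Ok((&[RegClass::I64, RegClass::I64], &[I64, I64])),
            _ => Err(CodegenError::Unsupported(format!("unsupported type: {}", ty))),
        }
    }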
cranelift/codegen/src/machinst/valueregs.rs (new file, 185 lines)
@@ -0,0 +1,185 @@
+//! Data structure for tracking the (possibly multiple) registers that hold one
+//! SSA `Value`.
+
+use regalloc::{RealReg, Reg, VirtualReg, Writable};
+use std::fmt::Debug;
+
+#[cfg(feature = "arm32")]
+const VALUE_REGS_PARTS: usize = 4;
+
+#[cfg(not(feature = "arm32"))]
+const VALUE_REGS_PARTS: usize = 2;
+
+/// Location at which a `Value` is stored in register(s): the value is located
+/// in one or more registers, depending on its width. A value may be stored in
+/// more than one register if the machine has no registers wide enough
+/// otherwise: for example, on a 32-bit architecture, we may store `I64` values
+/// in two registers, and `I128` values in four.
+///
+/// By convention, the register parts are kept in machine-endian order here.
+///
+/// N.B.: we cap the capacity of this at four (when any 32-bit target is
+/// enabled) or two (otherwise), and we use special in-band sentinel `Reg`
+/// values (`Reg::invalid()`) to avoid the need to carry a separate length. This
+/// allows the struct to be `Copy` (no heap or drop overhead) and be only 16 or
+/// 8 bytes, which is important for compiler performance.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct ValueRegs<R: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel> {
+    parts: [R; VALUE_REGS_PARTS],
+}
+
+/// A type with an "invalid" sentinel value.
+pub trait InvalidSentinel: Copy + Eq {
+    /// The invalid sentinel value.
+    fn invalid_sentinel() -> Self;
+    /// Is this the invalid sentinel?
+    fn is_invalid_sentinel(self) -> bool {
+        self == Self::invalid_sentinel()
+    }
+}
+impl InvalidSentinel for Reg {
+    fn invalid_sentinel() -> Self {
+        Reg::invalid()
+    }
+}
+impl InvalidSentinel for VirtualReg {
+    fn invalid_sentinel() -> Self {
+        VirtualReg::invalid()
+    }
+}
+impl InvalidSentinel for RealReg {
+    fn invalid_sentinel() -> Self {
+        RealReg::invalid()
+    }
+}
+impl InvalidSentinel for Writable<Reg> {
+    fn invalid_sentinel() -> Self {
+        Writable::from_reg(Reg::invalid_sentinel())
+    }
+}
+
+impl<R: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel> ValueRegs<R> {
+    /// Create an invalid Value-in-Reg.
+    pub fn invalid() -> Self {
+        ValueRegs {
+            parts: [R::invalid_sentinel(); VALUE_REGS_PARTS],
+        }
+    }
+
+    /// Is this Value-to-Reg mapping valid?
+    pub fn is_valid(self) -> bool {
+        !self.parts[0].is_invalid_sentinel()
+    }
+    /// Is this Value-to-Reg mapping invalid?
+    pub fn is_invalid(self) -> bool {
+        self.parts[0].is_invalid_sentinel()
+    }
+
+    /// Return the single register used for this value, if any.
+    pub fn only_reg(self) -> Option<R> {
+        if self.len() == 1 {
+            Some(self.parts[0])
+        } else {
+            None
+        }
+    }
+
+    /// Return a slice of the registers storing this value.
+    pub fn regs(&self) -> &[R] {
+        &self.parts[0..self.len()]
+    }
+}
+
+#[cfg(feature = "arm32")]
+impl<R: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel> ValueRegs<R> {
+    /// Create a Value-in-R location for a value stored in one register.
+    pub fn one(reg: R) -> Self {
+        ValueRegs {
+            parts: [
+                reg,
+                R::invalid_sentinel(),
+                R::invalid_sentinel(),
+                R::invalid_sentinel(),
+            ],
+        }
+    }
+    /// Create a Value-in-R location for a value stored in two registers.
+    pub fn two(r1: R, r2: R) -> Self {
+        ValueRegs {
+            parts: [r1, r2, R::invalid_sentinel(), R::invalid_sentinel()],
+        }
+    }
+    /// Create a Value-in-R location for a value stored in four registers.
+    pub fn four(r1: R, r2: R, r3: R, r4: R) -> Self {
+        ValueRegs {
+            parts: [r1, r2, r3, r4],
+        }
+    }
+
+    /// Return the number of registers used.
+    pub fn len(self) -> usize {
+        // If rustc/LLVM is smart enough, this might even be vectorized...
+        (self.parts[0] != R::invalid_sentinel()) as usize
+            + (self.parts[1] != R::invalid_sentinel()) as usize
+            + (self.parts[2] != R::invalid_sentinel()) as usize
+            + (self.parts[3] != R::invalid_sentinel()) as usize
+    }
+
+    /// Map individual registers via a map function.
+    pub fn map<NewR, F>(self, f: F) -> ValueRegs<NewR>
+    where
+        NewR: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel,
+        F: Fn(R) -> NewR,
+    {
+        ValueRegs {
+            parts: [
+                f(self.parts[0]),
+                f(self.parts[1]),
+                f(self.parts[2]),
+                f(self.parts[3]),
+            ],
+        }
+    }
+}
+
+#[cfg(not(feature = "arm32"))]
+impl<R: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel> ValueRegs<R> {
+    /// Create a Value-in-R location for a value stored in one register.
+    pub fn one(reg: R) -> Self {
+        ValueRegs {
+            parts: [reg, R::invalid_sentinel()],
+        }
+    }
+    /// Create a Value-in-R location for a value stored in two registers.
+    pub fn two(r1: R, r2: R) -> Self {
+        ValueRegs { parts: [r1, r2] }
+    }
+
+    /// Return the number of registers used.
+    pub fn len(self) -> usize {
+        // If rustc/LLVM is smart enough, this might even be vectorized...
+        (self.parts[0] != R::invalid_sentinel()) as usize
+            + (self.parts[1] != R::invalid_sentinel()) as usize
+    }
+
+    /// Map individual registers via a map function.
+    pub fn map<NewR, F>(self, f: F) -> ValueRegs<NewR>
+    where
+        NewR: Clone + Copy + Debug + PartialEq + Eq + InvalidSentinel,
+        F: Fn(R) -> NewR,
+    {
+        ValueRegs {
+            parts: [f(self.parts[0]), f(self.parts[1])],
+        }
+    }
+}
+
+/// Create a writable ValueRegs.
+pub(crate) fn writable_value_regs(regs: ValueRegs<Reg>) -> ValueRegs<Writable<Reg>> {
+    regs.map(|r| Writable::from_reg(r))
+}
+
+/// Strip a writable ValueRegs down to a readonly ValueRegs.
+pub(crate) fn non_writable_value_regs(regs: ValueRegs<Writable<Reg>>) -> ValueRegs<Reg> {
+    regs.map(|r| r.to_reg())
+}
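End to end, the new API reads like the following usage sketch (assuming a 64-bit build, so `VALUE_REGS_PARTS == 2`; `lo` and `hi` are illustrative `Reg` values, not from this commit):

    // Single-register value: only_reg() succeeds.
    let single = ValueRegs::one(lo);
    assert_eq!(single.len(), 1);
    assert_eq!(single.only_reg(), Some(lo));

    // Two-register value, e.g. an I64 on a 32-bit target.
    let pair = ValueRegs::two(lo, hi);
    assert_eq!(pair.len(), 2);
    assert!(pair.only_reg().is_none());
    for &_r in pair.regs() {
        // Emit per-register moves, spills, or reloads here.
    }

    // map() converts between register flavors, e.g. Reg -> Writable<Reg>.
    let writable: ValueRegs<Writable<Reg>> = pair.map(Writable::from_reg);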