AArch64: Migrate calls and returns to ISLE. (#4788)

Chris Fallin
2022-08-26 16:26:39 -07:00
committed by GitHub
parent ca6d648e37
commit 8e8dfdf5f9
18 changed files with 282 additions and 329 deletions


@@ -225,12 +225,12 @@ impl ABIMachineSpec for AArch64MachineDeps {
                     slots: smallvec![
                         ABIArgSlot::Reg {
                             reg: lower_reg.to_real_reg().unwrap(),
-                            ty: param.value_type,
+                            ty: reg_types[0],
                             extension: param.extension,
                         },
                         ABIArgSlot::Reg {
                             reg: upper_reg.to_real_reg().unwrap(),
-                            ty: param.value_type,
+                            ty: reg_types[1],
                             extension: param.extension,
                         },
                     ],


@@ -2818,3 +2818,11 @@
   (let ((dst WritableReg (temp_writable_reg $I8X16))
         (_ Unit (emit (MInst.IntToFpu op dst src))))
     dst))
+
+;;;; Helpers for Emitting Calls ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(decl gen_call (SigRef ExternalName RelocDistance ValueSlice) InstOutput)
+(extern constructor gen_call gen_call)
+
+(decl gen_call_indirect (SigRef Value ValueSlice) InstOutput)
+(extern constructor gen_call_indirect gen_call_indirect)
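For orientation: each `(extern constructor ...)` line binds the ISLE declaration to a Rust method of the same name on the backend's ISLE context (the macro-provided implementations appear in the isle.rs hunk further down). Below is a minimal toy model of that correspondence, with placeholder Rust types standing in for Cranelift's SigRef, ExternalName, RelocDistance, ValueSlice, and InstOutput; it is a sketch of the pattern, not the generated trait itself.

// Toy sketch (placeholder types, not Cranelift's): the ISLE-generated code
// expects the lowering context to expose methods matching the `decl`s above.
trait ToyIsleContext {
    // (decl gen_call (SigRef ExternalName RelocDistance ValueSlice) InstOutput)
    fn gen_call(&mut self, sig: u32, callee: &str, distance: i8, args: &[u64]) -> Vec<u64>;
    // (decl gen_call_indirect (SigRef Value ValueSlice) InstOutput)
    fn gen_call_indirect(&mut self, sig: u32, callee_ptr: u64, args: &[u64]) -> Vec<u64>;
}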


@@ -2016,3 +2016,17 @@
 
 (rule (lower (get_return_address))
       (aarch64_link))
+
+;;;; Rules for calls ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(rule (lower (call (func_ref_data sig_ref extname dist) inputs))
+      (gen_call sig_ref extname dist inputs))
+
+(rule (lower (call_indirect sig_ref val inputs))
+      (gen_call_indirect sig_ref val inputs))
+
+;;;; Rules for `return` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; N.B.: the Ret itself is generated by the ABI.
+(rule (lower (return args))
+      (lower_return (range 0 (value_slice_len args)) args))


@@ -2,6 +2,7 @@
 
 // Pull in the ISLE generated code.
 pub mod generated_code;
+use generated_code::Context;
 
 // Types that the generated ISLE code uses via `use super::*`.
 use super::{
@@ -14,6 +15,7 @@ use super::{
 use crate::isa::aarch64::inst::{FPULeftShiftImm, FPURightShiftImm};
 use crate::isa::aarch64::lower::{lower_address, lower_splat_const};
 use crate::isa::aarch64::settings::Flags as IsaFlags;
+use crate::machinst::valueregs;
 use crate::machinst::{isle::*, InputSourceInst};
 use crate::settings::Flags;
 use crate::{
@@ -22,10 +24,11 @@ use crate::{
         immediates::*, types::*, AtomicRmwOp, ExternalName, Inst, InstructionData, MemFlags,
         TrapCode, Value, ValueList,
     },
+    isa::aarch64::abi::{AArch64Caller, AArch64MachineDeps},
     isa::aarch64::inst::args::{ShiftOp, ShiftOpShiftImm},
     isa::aarch64::lower::{writable_vreg, writable_xreg, xreg},
     isa::unwind::UnwindInst,
-    machinst::{ty_bits, InsnOutput, Lower, VCodeConstant, VCodeConstantData},
+    machinst::{ty_bits, InsnOutput, Lower, MachInst, VCodeConstant, VCodeConstantData},
 };
 use regalloc2::PReg;
 use std::boxed::Box;
@@ -69,8 +72,13 @@ pub struct SinkableAtomicLoad {
     atomic_addr: Value,
 }
 
-impl generated_code::Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> {
+impl IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> {
+    isle_prelude_method_helpers!(AArch64Caller);
+}
+
+impl Context for IsleContext<'_, '_, MInst, Flags, IsaFlags, 6> {
     isle_prelude_methods!();
+    isle_prelude_caller_methods!(AArch64MachineDeps, AArch64Caller);
 
     fn sign_return_address_disabled(&mut self) -> Option<()> {
         if self.isa_flags.sign_return_address() {


@@ -5,7 +5,6 @@ use crate::binemit::CodeOffset;
 use crate::ir::types::*;
 use crate::ir::Inst as IRInst;
 use crate::ir::{InstructionData, Opcode};
-use crate::isa::aarch64::abi::*;
 use crate::isa::aarch64::inst::*;
 use crate::isa::aarch64::settings as aarch64_settings;
 use crate::machinst::lower::*;
@@ -469,29 +468,7 @@ pub(crate) fn lower_insn_to_regs(
             }
         }
 
-        Opcode::Return => {
-            for (i, input) in inputs.iter().enumerate() {
-                // N.B.: according to the AArch64 ABI, the top bits of a register
-                // (above the bits for the value's type) are undefined, so we
-                // need not extend the return values.
-                let src_regs = put_input_in_regs(ctx, *input);
-                let retval_regs = ctx.retval(i);
-                assert_eq!(src_regs.len(), retval_regs.len());
-                let ty = ctx.input_ty(insn, i);
-                let (_, tys) = Inst::rc_for_type(ty)?;
-                src_regs
-                    .regs()
-                    .iter()
-                    .zip(retval_regs.regs().iter())
-                    .zip(tys.iter())
-                    .for_each(|((&src, &dst), &ty)| {
-                        ctx.emit(Inst::gen_move(dst, src, ty));
-                    });
-            }
-            // N.B.: the Ret itself is generated by the ABI.
-        }
+        Opcode::Return => implemented_in_isle(ctx),
 
         Opcode::Ifcmp | Opcode::Ffcmp => {
             // An Ifcmp/Ffcmp must always be seen as a use of a brif/brff or trueif/trueff
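The removed `Opcode::Return` arm above is what the new ISLE `lower_return` rule now covers: copy each CLIF return value into the registers the ABI assigned for that return slot, and leave the actual `ret` to the ABI code. A self-contained toy sketch of that loop follows; the types are placeholders, not Cranelift's real `Lower`/`ValueRegs` API.

// Toy sketch of the return-lowering loop replaced by ISLE's `lower_return`.
// `return_values[i]` holds the registers carrying return value i; the matching
// entry in `abi_retval_slots` holds the registers the ABI expects it in.
fn lower_return_toy(
    return_values: &[Vec<u32>],
    abi_retval_slots: &[Vec<u32>],
    emit_move: &mut impl FnMut(u32, u32),
) {
    for (src_regs, dst_regs) in return_values.iter().zip(abi_retval_slots.iter()) {
        assert_eq!(src_regs.len(), dst_regs.len());
        for (&src, &dst) in src_regs.iter().zip(dst_regs.iter()) {
            // No extension needed: per the AArch64 ABI, bits above the value's
            // type are undefined, so a plain move suffices.
            emit_move(dst, src);
        }
    }
    // The `ret` instruction itself is emitted later by the ABI epilogue code.
}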
@@ -577,52 +554,7 @@ pub(crate) fn lower_insn_to_regs(
         Opcode::SymbolValue => implemented_in_isle(ctx),
 
-        Opcode::Call | Opcode::CallIndirect => {
-            let caller_conv = ctx.abi().call_conv();
-            let (mut abi, inputs) = match op {
-                Opcode::Call => {
-                    let (extname, dist) = ctx.call_target(insn).unwrap();
-                    let extname = extname.clone();
-                    let sig = ctx.call_sig(insn).unwrap();
-                    assert!(inputs.len() == sig.params.len());
-                    assert!(outputs.len() == sig.returns.len());
-                    (
-                        AArch64Caller::from_func(sig, &extname, dist, caller_conv, flags)?,
-                        &inputs[..],
-                    )
-                }
-                Opcode::CallIndirect => {
-                    let ptr = put_input_in_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend64);
-                    let sig = ctx.call_sig(insn).unwrap();
-                    assert!(inputs.len() - 1 == sig.params.len());
-                    assert!(outputs.len() == sig.returns.len());
-                    (
-                        AArch64Caller::from_ptr(sig, ptr, op, caller_conv, flags)?,
-                        &inputs[1..],
-                    )
-                }
-                _ => unreachable!(),
-            };
-            abi.emit_stack_pre_adjust(ctx);
-            assert!(inputs.len() == abi.num_args());
-            let mut arg_regs = vec![];
-            for input in inputs {
-                arg_regs.push(put_input_in_regs(ctx, *input))
-            }
-            for (i, arg_regs) in arg_regs.iter().enumerate() {
-                abi.emit_copy_regs_to_buffer(ctx, i, *arg_regs);
-            }
-            for (i, arg_regs) in arg_regs.iter().enumerate() {
-                abi.emit_copy_regs_to_arg(ctx, i, *arg_regs);
-            }
-            abi.emit_call(ctx);
-            for (i, output) in outputs.iter().enumerate() {
-                let retval_regs = get_output_reg(ctx, *output);
-                abi.emit_copy_retval_to_regs(ctx, i, retval_regs);
-            }
-            abi.emit_stack_post_adjust(ctx);
-        }
+        Opcode::Call | Opcode::CallIndirect => implemented_in_isle(ctx),
 
         Opcode::GetPinnedReg => {
             let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
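The removed `Opcode::Call`/`Opcode::CallIndirect` arm is likewise what the shared `gen_call`/`gen_call_indirect` helpers now perform behind the ISLE extern constructors. Here is a toy, self-contained sketch of that sequence, mirroring the deleted code in outline only; `ToyCaller` and its methods are illustrative stand-ins, not Cranelift's `AArch64Caller` API.

// Toy sketch of the call-emission sequence the ISLE call helpers encapsulate.
struct ToyCaller {
    num_args: usize,
    num_rets: usize,
}

impl ToyCaller {
    fn emit_stack_pre_adjust(&mut self) { /* grow SP for outgoing stack args */ }
    fn emit_copy_regs_to_arg(&mut self, _i: usize, _src: u64) { /* move arg i into its ABI slot */ }
    fn emit_call(&mut self) { /* the call instruction itself */ }
    fn emit_copy_retval_to_regs(&mut self, _i: usize) -> u64 { 0 /* move retval i out of its ABI slot */ }
    fn emit_stack_post_adjust(&mut self) { /* shrink SP back */ }
}

fn gen_call_toy(mut caller: ToyCaller, args: &[u64]) -> Vec<u64> {
    assert_eq!(args.len(), caller.num_args);
    caller.emit_stack_pre_adjust();
    for (i, &arg) in args.iter().enumerate() {
        caller.emit_copy_regs_to_arg(i, arg);
    }
    caller.emit_call();
    let rets = (0..caller.num_rets)
        .map(|i| caller.emit_copy_retval_to_regs(i))
        .collect();
    caller.emit_stack_post_adjust();
    rets
}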