x64 new backend: port ABI implementation to shared infrastructure with AArch64.

Previously, in #2128, we factored out a common "vanilla 64-bit ABI"
implementation from the AArch64 ABI code, with the idea that this should
be largely compatible with x64. This PR alters the new x64 backend to
make use of the shared infrastructure, removing the duplication that
existed previously. The generated code is nearly (not exactly) the same;
the only difference relates to how the clobber-save region is padded in
the prologue.

This also changes some register allocations in the AArch64 code, because
call support in the shared ABI infrastructure now passes in a temp vreg
rather than requiring a fixed, non-allocable temp. Tests have been
updated; runtime behavior is unchanged.
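
A rough, self-contained sketch of the shape of this change (illustrative only:
the trait and method names below appear in the diff, but the signatures are
heavily abridged and the placeholder types are invented here, so this is not
the real Cranelift API):

    // Placeholder types standing in for the real regalloc/Cranelift types.
    struct Reg(u8);
    struct Writable<T>(T);
    struct Inst; // a target machine instruction
    enum InstIsSafepoint {
        Yes,
        No,
    }

    // The shared ABI layer owns prologue/epilogue and call emission; a target
    // plugs in by implementing a trait of small code-generation hooks.
    trait ABIMachineSpec {
        type I;

        // Offsets and immediates are 32-bit in the shared trait; targets widen
        // them internally (e.g. AArch64 casts to i64/u64).
        fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> Vec<Self::I>;
        fn gen_sp_reg_adjust(amount: i32) -> Vec<Self::I>;

        // The shared code now passes a temp vreg into call emission, so the
        // target no longer needs a fixed, non-allocable scratch register to
        // materialize a far call target.
        fn gen_call(tmp: Writable<Reg>) -> Vec<(InstIsSafepoint, Self::I)>;
    }
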
Author: Chris Fallin
Date:   2020-08-18 15:54:21 -07:00
Parent: 3d6c4d312f
Commit: e8f772c1ac
17 changed files with 747 additions and 1192 deletions

@@ -13,16 +13,15 @@ use alloc::boxed::Box;
 use alloc::vec::Vec;
 use regalloc::{RealReg, Reg, RegClass, Set, Writable};
 use smallvec::SmallVec;
-use std::convert::TryFrom;
+// We use a generic implementation that factors out AArch64 and x64 ABI commonalities, because
+// these ABIs are very similar.
 /// Support for the AArch64 ABI from the callee side (within a function body).
-pub type AArch64ABIBody = ABIBodyImpl<AArch64MachineImpl>;
+pub(crate) type AArch64ABICallee = ABICalleeImpl<AArch64MachineDeps>;
 /// Support for the AArch64 ABI from the caller side (at a callsite).
-pub type AArch64ABICall = ABICallImpl<AArch64MachineImpl>;
+pub(crate) type AArch64ABICaller = ABICallerImpl<AArch64MachineDeps>;
 // Spidermonkey specific ABI convention.
@@ -105,9 +104,9 @@ impl Into<AMode> for StackAMode {
 /// AArch64-specific ABI behavior. This struct just serves as an implementation
 /// point for the trait; it is never actually instantiated.
-pub struct AArch64MachineImpl;
+pub(crate) struct AArch64MachineDeps;
-impl ABIMachineImpl for AArch64MachineImpl {
+impl ABIMachineSpec for AArch64MachineDeps {
     type I = Inst;
     fn compute_arg_locs(
@@ -285,7 +284,8 @@ impl ABIMachineImpl for AArch64MachineImpl {
         Inst::Ret
     }
-    fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u64) -> SmallVec<[Inst; 4]> {
+    fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallVec<[Inst; 4]> {
+        let imm = imm as u64;
         let mut insts = SmallVec::new();
         if let Some(imm12) = Imm12::maybe_from_u64(imm) {
             insts.push(Inst::AluRRImm12 {
@@ -296,6 +296,7 @@ impl ABIMachineImpl for AArch64MachineImpl {
             });
         } else {
             let scratch2 = writable_tmp2_reg();
+            assert_ne!(scratch2.to_reg(), from_reg);
             insts.extend(Inst::load_constant(scratch2, imm.into()));
             insts.push(Inst::AluRRRExtend {
                 alu_op: ALUOp::Add64,
@@ -334,29 +335,29 @@ impl ABIMachineImpl for AArch64MachineImpl {
         Inst::LoadAddr { rd: into_reg, mem }
     }
-    fn get_fixed_tmp_reg() -> Reg {
+    fn get_stacklimit_reg() -> Reg {
         spilltmp_reg()
     }
-    fn gen_load_base_offset(into_reg: Writable<Reg>, base: Reg, offset: i64, ty: Type) -> Inst {
-        let mem = AMode::RegOffset(base, offset, ty);
+    fn gen_load_base_offset(into_reg: Writable<Reg>, base: Reg, offset: i32, ty: Type) -> Inst {
+        let mem = AMode::RegOffset(base, offset as i64, ty);
         Inst::gen_load(into_reg, mem, ty)
     }
-    fn gen_store_base_offset(base: Reg, offset: i64, from_reg: Reg, ty: Type) -> Inst {
-        let mem = AMode::RegOffset(base, offset, ty);
+    fn gen_store_base_offset(base: Reg, offset: i32, from_reg: Reg, ty: Type) -> Inst {
+        let mem = AMode::RegOffset(base, offset as i64, ty);
         Inst::gen_store(mem, from_reg, ty)
     }
-    fn gen_sp_reg_adjust(amount: i64) -> SmallVec<[Inst; 2]> {
+    fn gen_sp_reg_adjust(amount: i32) -> SmallVec<[Inst; 2]> {
         if amount == 0 {
             return SmallVec::new();
         }
         let (amount, is_sub) = if amount > 0 {
-            (u64::try_from(amount).unwrap(), false)
+            (amount as u64, false)
         } else {
-            (u64::try_from(-amount).unwrap(), true)
+            (-amount as u64, true)
         };
         let alu_op = if is_sub { ALUOp::Sub64 } else { ALUOp::Add64 };
@@ -389,8 +390,10 @@ impl ABIMachineImpl for AArch64MachineImpl {
         ret
     }
-    fn gen_nominal_sp_adj(offset: i64) -> Inst {
-        Inst::VirtualSPOffsetAdj { offset }
+    fn gen_nominal_sp_adj(offset: i32) -> Inst {
+        Inst::VirtualSPOffsetAdj {
+            offset: offset as i64,
+        }
     }
     fn gen_prologue_frame_setup() -> SmallVec<[Inst; 2]> {
@@ -553,11 +556,12 @@ impl ABIMachineImpl for AArch64MachineImpl {
         defs: Vec<Writable<Reg>>,
         loc: SourceLoc,
         opcode: ir::Opcode,
-    ) -> SmallVec<[(/* is_safepoint = */ bool, Inst); 2]> {
+        tmp: Writable<Reg>,
+    ) -> SmallVec<[(InstIsSafepoint, Inst); 2]> {
         let mut insts = SmallVec::new();
         match &dest {
             &CallDest::ExtName(ref name, RelocDistance::Near) => insts.push((
-                true,
+                InstIsSafepoint::Yes,
                 Inst::Call {
                     info: Box::new(CallInfo {
                         dest: name.clone(),
@@ -570,19 +574,19 @@ impl ABIMachineImpl for AArch64MachineImpl {
             )),
             &CallDest::ExtName(ref name, RelocDistance::Far) => {
                 insts.push((
-                    false,
+                    InstIsSafepoint::No,
                     Inst::LoadExtName {
-                        rd: writable_spilltmp_reg(),
+                        rd: tmp,
                         name: Box::new(name.clone()),
                         offset: 0,
                         srcloc: loc,
                     },
                 ));
                 insts.push((
-                    true,
+                    InstIsSafepoint::Yes,
                     Inst::CallInd {
                         info: Box::new(CallIndInfo {
-                            rn: spilltmp_reg(),
+                            rn: tmp.to_reg(),
                             uses,
                             defs,
                             loc,
@@ -592,7 +596,7 @@ impl ABIMachineImpl for AArch64MachineImpl {
                 ));
             }
             &CallDest::Reg(reg) => insts.push((
-                true,
+                InstIsSafepoint::Yes,
                 Inst::CallInd {
                     info: Box::new(CallIndInfo {
                         rn: *reg,
@@ -608,7 +612,7 @@ impl ABIMachineImpl for AArch64MachineImpl {
         insts
     }
-    fn get_spillslot_size(rc: RegClass, ty: Type) -> u32 {
+    fn get_number_of_spillslots_for_value(rc: RegClass, ty: Type) -> u32 {
         // We allocate in terms of 8-byte slots.
         match (rc, ty) {
             (RegClass::I64, _) => 1,
@@ -698,9 +702,10 @@ fn get_callee_saves(
             }
         }
     }
-    // Sort registers for deterministic code output.
-    int_saves.sort_by_key(|r| r.to_reg().get_index());
-    vec_saves.sort_by_key(|r| r.to_reg().get_index());
+    // Sort registers for deterministic code output. We can do an unstable sort because the
+    // registers will be unique (there are no dups).
+    int_saves.sort_unstable_by_key(|r| r.to_reg().get_index());
+    vec_saves.sort_unstable_by_key(|r| r.to_reg().get_index());
     (int_saves, vec_saves)
 }