diff --git a/cranelift/codegen/src/isa/aarch64/abi.rs b/cranelift/codegen/src/isa/aarch64/abi.rs
index 0fdf455022..fdb9f29f49 100644
--- a/cranelift/codegen/src/isa/aarch64/abi.rs
+++ b/cranelift/codegen/src/isa/aarch64/abi.rs
@@ -105,6 +105,7 @@ fn compute_arg_locs(call_conv: isa::CallConv, params: &[ir::AbiParam]) -> (Vec<ABIArg>, i64) {
_ => panic!("Unsupported vector-reg argument type"),
};
// Align.
- assert!(size.is_power_of_two());
+ debug_assert!(size.is_power_of_two());
next_stack = (next_stack + size - 1) & !(size - 1);
ret.push(ABIArg::Stack(next_stack as i64, param.value_type));
next_stack += size;
@@ -159,7 +160,7 @@ impl ABISig {
let (rets, _) = compute_arg_locs(sig.call_conv, &sig.returns);
// Verify that there are no return values on the stack.
- assert!(rets.iter().all(|a| match a {
+ debug_assert!(rets.iter().all(|a| match a {
&ABIArg::Stack(..) => false,
_ => true,
}));
@@ -175,20 +176,22 @@ impl ABISig {
/// AArch64 ABI object for a function body.
pub struct AArch64ABIBody {
- /// signature: arg and retval regs
+ /// Signature: arg and retval regs.
sig: ABISig,
- /// offsets to each stackslot
+ /// Offsets to each stackslot.
stackslots: Vec<u32>,
- /// total stack size of all stackslots
+ /// Total stack size of all stackslots.
stackslots_size: u32,
- /// clobbered registers, from regalloc.
+ /// Clobbered registers, from regalloc.
clobbered: Set<Writable<RealReg>>,
- /// total number of spillslots, from regalloc.
+ /// Total number of spillslots, from regalloc.
spillslots: Option<usize>,
/// Total frame size.
frame_size: Option<u32>,
/// Calling convention this function expects.
call_conv: isa::CallConv,
+ /// The settings controlling this function's compilation.
+ flags: settings::Flags,
}
fn in_int_reg(ty: ir::Type) -> bool {
@@ -208,14 +211,14 @@ fn in_vec_reg(ty: ir::Type) -> bool {
impl AArch64ABIBody {
/// Create a new body ABI instance.
- pub fn new(f: &ir::Function) -> Self {
+ pub fn new(f: &ir::Function, flags: settings::Flags) -> Self {
debug!("AArch64 ABI: func signature {:?}", f.signature);
let sig = ABISig::from_func_sig(&f.signature);
let call_conv = f.signature.call_conv;
// Only these calling conventions are supported.
- assert!(
+ debug_assert!(
call_conv == isa::CallConv::SystemV
|| call_conv == isa::CallConv::Fast
|| call_conv == isa::CallConv::Cold
@@ -231,7 +234,7 @@ impl AArch64ABIBody {
let off = stack_offset;
stack_offset += data.size;
stack_offset = (stack_offset + 7) & !7;
- assert_eq!(stackslot.as_u32() as usize, stackslots.len());
+ debug_assert_eq!(stackslot.as_u32() as usize, stackslots.len());
stackslots.push(off);
}
@@ -243,6 +246,20 @@ impl AArch64ABIBody {
spillslots: None,
frame_size: None,
call_conv,
+ flags,
+ }
+ }
+
+ /// Returns the size of a function call frame (including return address and FP) for this
+ /// function's body.
+ fn frame_size(&self) -> i64 {
+ if self.call_conv.extends_baldrdash() {
+ let num_words = self.flags.baldrdash_prologue_words() as i64;
+ debug_assert!(num_words > 0, "baldrdash must set baldrdash_prologue_words");
+ debug_assert_eq!(num_words % 2, 0, "stack must be 16-aligned");
+ num_words * 8
+ } else {
+ 16 // frame pointer + return address.
}
}
}
@@ -409,6 +426,10 @@ fn get_caller_saves_set(call_conv: isa::CallConv) -> Set<Writable<Reg>> {
impl ABIBody for AArch64ABIBody {
type I = Inst;
+ fn flags(&self) -> &settings::Flags {
+ &self.flags
+ }
+
fn liveins(&self) -> Set<RealReg> {
let mut set: Set<RealReg> = Set::empty();
for &arg in &self.sig.args {
@@ -444,14 +465,14 @@ impl ABIBody for AArch64ABIBody {
fn gen_copy_arg_to_reg(&self, idx: usize, into_reg: Writable<Reg>) -> Inst {
match &self.sig.args[idx] {
&ABIArg::Reg(r, ty) => Inst::gen_move(into_reg, r.to_reg(), ty),
- &ABIArg::Stack(off, ty) => load_stack(off + 16, into_reg, ty),
+ &ABIArg::Stack(off, ty) => load_stack(off + self.frame_size(), into_reg, ty),
}
}
fn gen_copy_reg_to_retval(&self, idx: usize, from_reg: Reg) -> Inst {
match &self.sig.rets[idx] {
&ABIArg::Reg(r, ty) => Inst::gen_move(Writable::from_reg(r.to_reg()), from_reg, ty),
- &ABIArg::Stack(off, ty) => store_stack(off + 16, from_reg, ty),
+ &ABIArg::Stack(off, ty) => store_stack(off + self.frame_size(), from_reg, ty),
}
}
@@ -521,7 +542,7 @@ impl ABIBody for AArch64ABIBody {
store_stack(fp_off, from_reg, ty)
}
- fn gen_prologue(&mut self, flags: &settings::Flags) -> Vec<Inst> {
+ fn gen_prologue(&mut self) -> Vec<Inst> {
let mut insts = vec![];
if !self.call_conv.extends_baldrdash() {
// stp fp (x29), lr (x30), [sp, #-16]!
@@ -549,10 +570,10 @@ impl ABIBody for AArch64ABIBody {
let mut total_stacksize = self.stackslots_size + 8 * self.spillslots.unwrap() as u32;
if self.call_conv.extends_baldrdash() {
debug_assert!(
- !flags.enable_probestack(),
+ !self.flags.enable_probestack(),
"baldrdash does not expect cranelift to emit stack probes"
);
- total_stacksize += flags.baldrdash_prologue_words() as u32 * 8;
+ total_stacksize += self.flags.baldrdash_prologue_words() as u32 * 8;
}
let total_stacksize = (total_stacksize + 15) & !15; // 16-align the stack.
@@ -629,7 +650,7 @@ impl ABIBody for AArch64ABIBody {
insts
}
- fn gen_epilogue(&self, _flags: &settings::Flags) -> Vec<Inst> {
+ fn gen_epilogue(&self) -> Vec<Inst> {
let mut insts = vec![];
// Restore clobbered registers.
diff --git a/cranelift/codegen/src/isa/aarch64/mod.rs b/cranelift/codegen/src/isa/aarch64/mod.rs
index 2a71085929..2f52e618c1 100644
--- a/cranelift/codegen/src/isa/aarch64/mod.rs
+++ b/cranelift/codegen/src/isa/aarch64/mod.rs
@@ -32,11 +32,11 @@ impl AArch64Backend {
AArch64Backend { triple, flags }
}
- fn compile_vcode(&self, func: &Function, flags: &settings::Flags) -> VCode<inst::Inst> {
- // This performs lowering to VCode, register-allocates the code, computes
- // block layout and finalizes branches. The result is ready for binary emission.
- let abi = Box::new(abi::AArch64ABIBody::new(func));
- compile::compile::<AArch64Backend>(func, self, abi, flags)
+ /// This performs lowering to VCode, register-allocates the code, computes block layout and
+ /// finalizes branches. The result is ready for binary emission.
+ fn compile_vcode(&self, func: &Function, flags: settings::Flags) -> VCode<inst::Inst> {
+ let abi = Box::new(abi::AArch64ABIBody::new(func, flags));
+ compile::compile::<AArch64Backend>(func, self, abi)
}
}
@@ -47,7 +47,7 @@ impl MachBackend for AArch64Backend {
want_disasm: bool,
) -> CodegenResult<MachCompileResult> {
let flags = self.flags();
- let vcode = self.compile_vcode(func, flags);
+ let vcode = self.compile_vcode(func, flags.clone());
let sections = vcode.emit();
let frame_size = vcode.frame_size();
diff --git a/cranelift/codegen/src/machinst/abi.rs b/cranelift/codegen/src/machinst/abi.rs
index 1410f4265b..070af59d77 100644
--- a/cranelift/codegen/src/machinst/abi.rs
+++ b/cranelift/codegen/src/machinst/abi.rs
@@ -12,6 +12,9 @@ pub trait ABIBody {
/// The instruction type for the ISA associated with this ABI.
type I: VCodeInst;
+ /// Get the settings controlling this function's compilation.
+ fn flags(&self) -> &settings::Flags;
+
/// Get the liveins of the function.
fn liveins(&self) -> Set<RealReg>;
@@ -82,13 +85,13 @@ pub trait ABIBody {
/// `store_retval`, and spillslot accesses.) `self` is mutable so that we
/// can store information in it which will be useful when creating the
/// epilogue.
- fn gen_prologue(&mut self, flags: &settings::Flags) -> Vec<Self::I>;
+ fn gen_prologue(&mut self) -> Vec<Self::I>;
/// Generate an epilogue, post-regalloc. Note that this must generate the
/// actual return instruction (rather than emitting this in the lowering
/// logic), because the epilogue code comes before the return and the two are
/// likely closely related.
- fn gen_epilogue(&self, flags: &settings::Flags) -> Vec<Self::I>;
+ fn gen_epilogue(&self) -> Vec<Self::I>;
/// Returns the full frame size for the given function, after prologue emission has run. This
/// comprises the spill space, incoming argument space, alignment padding, etc.
diff --git a/cranelift/codegen/src/machinst/compile.rs b/cranelift/codegen/src/machinst/compile.rs
index eda3955f88..8b09568644 100644
--- a/cranelift/codegen/src/machinst/compile.rs
+++ b/cranelift/codegen/src/machinst/compile.rs
@@ -2,7 +2,6 @@
use crate::ir::Function;
use crate::machinst::*;
-use crate::settings;
use crate::timing;
use log::debug;
@@ -14,7 +13,6 @@ pub fn compile<B: LowerBackend>(
f: &Function,
b: &B,
abi: Box<dyn ABIBody<I = B::MInst>>,
- flags: &settings::Flags,
) -> VCode<B::MInst>
where
B::MInst: ShowWithRRU,
@@ -47,7 +45,7 @@ where
// Reorder vcode into final order and copy out final instruction sequence
// all at once. This also inserts prologues/epilogues.
- vcode.replace_insns_from_regalloc(result, flags);
+ vcode.replace_insns_from_regalloc(result);
vcode.remove_redundant_branches();
diff --git a/cranelift/codegen/src/machinst/lower.rs b/cranelift/codegen/src/machinst/lower.rs
index 393c1bdd43..2fa456627a 100644
--- a/cranelift/codegen/src/machinst/lower.rs
+++ b/cranelift/codegen/src/machinst/lower.rs
@@ -102,9 +102,9 @@ pub trait LowerBackend {
/// Machine-independent lowering driver / machine-instruction container. Maintains a correspondence
/// from original Inst to MachInsts.
-pub struct Lower<'a, I: VCodeInst> {
+pub struct Lower<'func, I: VCodeInst> {
/// The function to lower.
- f: &'a Function,
+ f: &'func Function,
/// Lowered machine instructions.
vcode: VCodeBuilder<I>,
@@ -142,9 +142,9 @@ enum GenerateReturn {
No,
}
-impl<'a, I: VCodeInst> Lower<'a, I> {
+impl<'func, I: VCodeInst> Lower<'func, I> {
/// Prepare a new lowering context for the given IR function.
- pub fn new(f: &'a Function, abi: Box<dyn ABIBody<I = I>>) -> Lower<'a, I> {
+ pub fn new(f: &'func Function, abi: Box<dyn ABIBody<I = I>>) -> Lower<'func, I> {
let mut vcode = VCodeBuilder::new(abi);
let num_uses = NumUses::compute(f).take_uses();
@@ -516,7 +516,7 @@ impl<'a, I: VCodeInst> Lower<'a, I> {
}
}
-impl<'a, I: VCodeInst> LowerCtx for Lower<'a, I> {
+impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
type I = I;
/// Get the instdata for a given IR instruction.
diff --git a/cranelift/codegen/src/machinst/vcode.rs b/cranelift/codegen/src/machinst/vcode.rs
index 6e3adea53a..5ee0a9798a 100644
--- a/cranelift/codegen/src/machinst/vcode.rs
+++ b/cranelift/codegen/src/machinst/vcode.rs
@@ -299,6 +299,11 @@ impl<I: VCodeInst> VCode<I> {
}
}
+ /// Returns the flags controlling this function's compilation.
+ pub fn flags(&self) -> &settings::Flags {
+ self.abi.flags()
+ }
+
/// Get the IR-level type of a VReg.
pub fn vreg_type(&self, vreg: VirtualReg) -> Type {
self.vreg_types[vreg.get_index()]
@@ -329,11 +334,7 @@ impl<I: VCodeInst> VCode<I> {
/// Take the results of register allocation, with a sequence of
/// instructions including spliced fill/reload/move instructions, and replace
/// the VCode with them.
- pub fn replace_insns_from_regalloc(
- &mut self,
- result: RegAllocResult<Self>,
- flags: &settings::Flags,
- ) {
+ pub fn replace_insns_from_regalloc(&mut self, result: RegAllocResult<Self>) {
self.final_block_order = compute_final_block_order(self);
// Record the spillslot count and clobbered registers for the ABI/stack
@@ -355,7 +356,7 @@ impl<I: VCodeInst> VCode<I> {
if *block == self.entry {
// Start with the prologue.
- final_insns.extend(self.abi.gen_prologue(flags).into_iter());
+ final_insns.extend(self.abi.gen_prologue().into_iter());
}
for i in start..end {
@@ -371,7 +372,7 @@ impl<I: VCodeInst> VCode<I> {
// with the epilogue.
let is_ret = insn.is_term() == MachTerminator::Ret;
if is_ret {
- final_insns.extend(self.abi.gen_epilogue(flags).into_iter());
+ final_insns.extend(self.abi.gen_epilogue().into_iter());
} else {
final_insns.push(insn.clone());
}