Add fixed_nonallocatable constraints when appropriate (#5253)

Plumb the set of allocatable registers through the OperandCollector and use it to validate uses of fixed-nonallocatable registers, like %rsp on x86_64.
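In rough terms: registers such as %rsp and %rbp are deliberately absent from the MachineEnv's allocatable register sets, so once the OperandCollector knows that set it can validate any fixed-nonallocatable operand centrally, instead of each instruction carrying its own hard-coded debug asserts. A minimal sketch of the check, using simplified stand-in types (the PReg shape and the allocatable field here are illustrative, not Cranelift's actual internals):

    use std::collections::HashSet;

    /// Stand-in for a physical register (regalloc2's PReg is richer).
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct PReg(u8);

    /// Simplified operand collector that carries the allocatable set.
    struct OperandCollector {
        allocatable: HashSet<PReg>,
    }

    impl OperandCollector {
        /// Record a use of a fixed, non-allocatable register (e.g. %rsp).
        /// No operand is pushed -- regalloc never sees this register --
        /// but the constraint can now be checked uniformly.
        fn reg_fixed_nonallocatable(&mut self, preg: PReg) {
            debug_assert!(
                !self.allocatable.contains(&preg),
                "fixed_nonallocatable constraint on an allocatable register"
            );
        }
    }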
Trevor Elliott
2022-11-15 12:49:17 -08:00
committed by GitHub
parent f6ae67f3f0
commit a007e02bd2
13 changed files with 133 additions and 66 deletions


@@ -699,6 +699,7 @@ pub(crate) fn emit(
         }
         Inst::MovFromPReg { src, dst } => {
+            allocs.next_fixed_nonallocatable(*src);
             let src: Reg = (*src).into();
             debug_assert!([regs::rsp(), regs::rbp(), regs::pinned_reg()].contains(&src));
             let src = Gpr::new(src).unwrap();
@@ -711,6 +712,7 @@ pub(crate) fn emit(
         Inst::MovToPReg { src, dst } => {
             let src = allocs.next(src.to_reg());
             let src = Gpr::new(src).unwrap();
+            allocs.next_fixed_nonallocatable(*dst);
             let dst: Reg = (*dst).into();
             debug_assert!([regs::rsp(), regs::rbp(), regs::pinned_reg()].contains(&dst));
             let dst = WritableGpr::from_writable_reg(Writable::from_reg(dst)).unwrap();
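Why the emitter needs a matching call: `allocs` replays regalloc's decisions in exactly the order the collector recorded operands, so every reg_fixed_nonallocatable at collection time must be mirrored by a next_fixed_nonallocatable at emission time, or the cursor drifts. A sketch of that pairing, with an assumed, pared-down consumer type rather than Cranelift's real AllocationConsumer:

    /// Assumed, simplified allocation consumer for illustration.
    struct Allocs {
        pos: usize,
        allocs: Vec<u8>, // register choices, in collection order
    }

    impl Allocs {
        /// Normal operand: consume the next allocation.
        fn next(&mut self, _vreg: u8) -> u8 {
            let choice = self.allocs[self.pos];
            self.pos += 1;
            choice
        }

        /// Fixed non-allocatable operand: the collector recorded no
        /// operand for it, so nothing is consumed here either; the call
        /// exists only to keep emission in lockstep with collection.
        fn next_fixed_nonallocatable(&mut self, _preg: u8) {}
    }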


@@ -1263,6 +1263,7 @@ impl PrettyPrint for Inst {
         }
         Inst::MovFromPReg { src, dst } => {
+            allocs.next_fixed_nonallocatable(*src);
             let src: Reg = (*src).into();
             let src = regs::show_ireg_sized(src, 8);
             let dst = pretty_print_reg(dst.to_reg().to_reg(), 8, allocs);
@@ -1271,6 +1272,7 @@ impl PrettyPrint for Inst {
         Inst::MovToPReg { src, dst } => {
             let src = pretty_print_reg(src.to_reg(), 8, allocs);
+            allocs.next_fixed_nonallocatable(*dst);
             let dst: Reg = (*dst).into();
             let dst = regs::show_ireg_sized(dst, 8);
             format!("{} {}, {}", ljustify("movq".to_string()), src, dst)
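The pretty-printer walks the same allocation stream as the emitter, so it gets the identical pair of calls; the rendered output itself is unchanged. As a hedged illustration (the virtual-register name is made up), a MovFromPReg would print along the lines of:

    movq    %rsp, %v192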
@@ -1919,14 +1921,14 @@ fn x64_get_operands<F: Fn(VReg) -> VReg>(inst: &Inst, collector: &mut OperandCollector<'_, F>) {
             collector.reg_def(dst.to_writable_reg());
         }
         Inst::MovFromPReg { dst, src } => {
-            debug_assert!([regs::rsp(), regs::rbp(), regs::pinned_reg()].contains(&(*src).into()));
             debug_assert!(dst.to_reg().to_reg().is_virtual());
+            collector.reg_fixed_nonallocatable(*src);
             collector.reg_def(dst.to_writable_reg());
         }
         Inst::MovToPReg { dst, src } => {
             debug_assert!(src.to_reg().is_virtual());
-            debug_assert!([regs::rsp(), regs::rbp(), regs::pinned_reg()].contains(&(*dst).into()));
             collector.reg_use(src.to_reg());
+            collector.reg_fixed_nonallocatable(*dst);
         }
         Inst::XmmToGpr { src, dst, .. } => {
             collector.reg_use(src.to_reg());
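The two contains(...) debug asserts can be dropped because the collector now owns the equivalent check and applies it to every instruction, driven by the real register environment rather than a hard-coded list. As a sketch, the allocatable set might be derived from regalloc2's MachineEnv along these lines (allocatable_set is an assumed helper, not Cranelift's API; preferred_regs_by_class and non_preferred_regs_by_class are real MachineEnv fields):

    use regalloc2::{MachineEnv, PReg};
    use std::collections::HashSet;

    /// Assumed helper: every register the allocator may hand out. Any
    /// PReg absent from this set is fair game for fixed_nonallocatable.
    fn allocatable_set(env: &MachineEnv) -> HashSet<PReg> {
        env.preferred_regs_by_class
            .iter()
            .chain(env.non_preferred_regs_by_class.iter())
            .flatten()
            .copied()
            .collect()
    }

With such a set threaded into the OperandCollector, reg_fixed_nonallocatable can assert that the register lies outside it, which is exactly the validation the commit message describes.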


@@ -48,14 +48,13 @@ impl X64Backend {
     fn compile_vcode(
         &self,
         func: &Function,
-        flags: Flags,
     ) -> CodegenResult<(VCode<inst::Inst>, regalloc2::Output)> {
         // This performs lowering to VCode, register-allocates the code, computes
         // block layout and finalizes branches. The result is ready for binary emission.
-        let emit_info = EmitInfo::new(flags.clone(), self.x64_flags.clone());
+        let emit_info = EmitInfo::new(self.flags.clone(), self.x64_flags.clone());
         let sigs = SigSet::new::<abi::X64ABIMachineSpec>(func, &self.flags)?;
         let abi = abi::X64Callee::new(&func, self, &self.x64_flags, &sigs)?;
-        compile::compile::<Self>(&func, flags, self, abi, &self.reg_env, emit_info, sigs)
+        compile::compile::<Self>(&func, self, abi, emit_info, sigs)
     }
 }
@@ -65,10 +64,13 @@ impl TargetIsa for X64Backend {
         func: &Function,
         want_disasm: bool,
     ) -> CodegenResult<CompiledCodeStencil> {
-        let flags = self.flags();
-        let (vcode, regalloc_result) = self.compile_vcode(func, flags.clone())?;
+        let (vcode, regalloc_result) = self.compile_vcode(func)?;
-        let emit_result = vcode.emit(&regalloc_result, want_disasm, flags.machine_code_cfg_info());
+        let emit_result = vcode.emit(
+            &regalloc_result,
+            want_disasm,
+            self.flags.machine_code_cfg_info(),
+        );
         let frame_size = emit_result.frame_size;
         let value_labels_ranges = emit_result.value_labels_ranges;
         let buffer = emit_result.buffer.finish();
@@ -96,6 +98,10 @@
         &self.flags
     }
 
+    fn machine_env(&self) -> &MachineEnv {
+        &self.reg_env
+    }
+
     fn isa_flags(&self) -> Vec<shared_settings::Value> {
         self.x64_flags.iter().collect()
     }
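The new machine_env() accessor is the other half of the plumbing: shared compilation code can now pull the register environment from the ISA itself, which is why compile_vcode and compile::compile shed their extra parameters above. A hedged sketch of the shape of such a caller (trait pared down for illustration; not the real TargetIsa):

    use regalloc2::MachineEnv;

    /// Assumed, pared-down view of the TargetIsa trait.
    trait IsaView {
        fn machine_env(&self) -> &MachineEnv;
    }

    /// Shared compile path: the environment travels with the ISA, so
    /// callers no longer thread `&self.reg_env` through by hand.
    fn allocate_registers<I: IsaView>(isa: &I) {
        let env: &MachineEnv = isa.machine_env();
        // ... hand `env` to the register allocator here ...
        let _ = env;
    }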