From c16f2956dbf7983eda85bc9f1900ef1afe717f62 Mon Sep 17 00:00:00 2001
From: Trevor Elliott
Date: Wed, 30 Nov 2022 17:01:14 -0800
Subject: [PATCH] Allocate a temporary for 64-bit constant loads in the s390x
 backend (#5357)

Avoid reusing a destination virtual register for 64-bit constants in the
s390x backend. This change addresses a case identified by the regalloc2
SSA validator, where the destination register was written twice when
constants were generated via the MachInst::gen_constant function.
---
 cranelift/codegen/src/isa/s390x/inst/emit.rs |  2 +-
 cranelift/codegen/src/isa/s390x/inst/mod.rs  | 20 +++++++++++++-------
 2 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/cranelift/codegen/src/isa/s390x/inst/emit.rs b/cranelift/codegen/src/isa/s390x/inst/emit.rs
index f0cd7b879d..eb7c48bc59 100644
--- a/cranelift/codegen/src/isa/s390x/inst/emit.rs
+++ b/cranelift/codegen/src/isa/s390x/inst/emit.rs
@@ -104,7 +104,7 @@ pub fn mem_finalize(
             } else {
                 let tmp = writable_spilltmp_reg();
                 assert!(base != tmp.to_reg());
-                insts.extend(Inst::load_constant64(tmp, off as u64));
+                insts.extend(Inst::load_constant64(tmp, off as u64, |_| tmp));
                 MemArg::reg_plus_reg(base, tmp.to_reg(), mem.get_flags())
             }
         }
diff --git a/cranelift/codegen/src/isa/s390x/inst/mod.rs b/cranelift/codegen/src/isa/s390x/inst/mod.rs
index 5caf862c17..ad7c761902 100644
--- a/cranelift/codegen/src/isa/s390x/inst/mod.rs
+++ b/cranelift/codegen/src/isa/s390x/inst/mod.rs
@@ -335,7 +335,12 @@ impl Inst {
     }
 
     /// Create an instruction that loads a 64-bit integer constant.
-    pub fn load_constant64(rd: Writable<Reg>, value: u64) -> SmallVec<[Inst; 4]> {
+    pub fn load_constant64<F: FnMut(Type) -> Writable<Reg>>(
+        rd: Writable<Reg>,
+        value: u64,
+        mut alloc_tmp: F,
+    ) -> SmallVec<[Inst; 4]> {
+        crate::trace!("loading a constant! {}", value);
         if let Ok(imm) = i16::try_from(value as i64) {
             // 16-bit signed immediate
             smallvec![Inst::Mov64SImm16 { rd, imm }]
@@ -353,12 +358,13 @@
             let hi = value & 0xffff_ffff_0000_0000u64;
             let lo = value & 0x0000_0000_ffff_ffffu64;
 
+            let hi_rd = alloc_tmp(types::I64);
             if let Some(imm) = UImm16Shifted::maybe_from_u64(hi) {
                 // 16-bit shifted immediate
-                insts.push(Inst::Mov64UImm16Shifted { rd, imm });
+                insts.push(Inst::Mov64UImm16Shifted { rd: hi_rd, imm });
             } else if let Some(imm) = UImm32Shifted::maybe_from_u64(hi) {
                 // 32-bit shifted immediate
-                insts.push(Inst::Mov64UImm32Shifted { rd, imm });
+                insts.push(Inst::Mov64UImm32Shifted { rd: hi_rd, imm });
             } else {
                 unreachable!();
             }
@@ -367,14 +373,14 @@
                 // 16-bit shifted immediate
                 insts.push(Inst::Insert64UImm16Shifted {
                     rd,
-                    ri: rd.to_reg(),
+                    ri: hi_rd.to_reg(),
                     imm,
                 });
             } else if let Some(imm) = UImm32Shifted::maybe_from_u64(lo) {
                 // 32-bit shifted immediate
                 insts.push(Inst::Insert64UImm32Shifted {
                     rd,
-                    ri: rd.to_reg(),
+                    ri: hi_rd.to_reg(),
                     imm,
                 });
             } else {
@@ -1172,7 +1178,7 @@ impl MachInst for Inst {
         to_regs: ValueRegs<Writable<Reg>>,
         value: u128,
         ty: Type,
-        _alloc_tmp: F,
+        alloc_tmp: F,
     ) -> SmallVec<[Inst; 4]> {
         let to_reg = to_regs
             .only_reg()
@@ -1204,7 +1210,7 @@
                 ));
                 ret
             }
-            types::I64 | types::R64 => Inst::load_constant64(to_reg, value as u64),
+            types::I64 | types::R64 => Inst::load_constant64(to_reg, value as u64, alloc_tmp),
             types::I8 | types::I16 | types::I32 => Inst::load_constant32(to_reg, value as u32),
             _ => unreachable!(),
         }
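
Note (reviewer sketch, not part of the patch): regalloc2's SSA validator requires
every virtual register to be defined exactly once, so the high half of a wide
constant is now materialized into a temporary obtained from alloc_tmp instead of
redefining the destination register. The standalone Rust sketch below models that
constraint with hypothetical VReg and PseudoInst types; it is only an illustration,
not Cranelift's actual Inst or Writable<Reg> API, and it glosses over the
16-bit/32-bit immediate selection that load_constant64 performs.

    // Standalone illustration; VReg and PseudoInst are hypothetical stand-ins
    // for Cranelift's virtual registers and s390x instructions.
    #[derive(Clone, Copy, Debug)]
    struct VReg(u32);

    #[derive(Debug)]
    enum PseudoInst {
        // rd = imm << 32 (defines rd)
        MovHi { rd: VReg, imm: u32 },
        // rd = ri | imm (defines rd, reads ri)
        InsertLo { rd: VReg, ri: VReg, imm: u32 },
    }

    // Old shape: both instructions define `rd`, i.e. two definitions of the
    // same virtual register, which the SSA validator rejects.
    fn load_const64_old(rd: VReg, value: u64) -> Vec<PseudoInst> {
        let hi = (value >> 32) as u32;
        let lo = value as u32;
        vec![
            PseudoInst::MovHi { rd, imm: hi },
            PseudoInst::InsertLo { rd, ri: rd, imm: lo }, // second def of `rd`
        ]
    }

    // New shape: the high half goes into a fresh temporary from `alloc_tmp`,
    // so each virtual register is defined exactly once.
    fn load_const64_new(
        rd: VReg,
        value: u64,
        mut alloc_tmp: impl FnMut() -> VReg,
    ) -> Vec<PseudoInst> {
        let hi = (value >> 32) as u32;
        let lo = value as u32;
        let hi_rd = alloc_tmp();
        vec![
            PseudoInst::MovHi { rd: hi_rd, imm: hi },
            PseudoInst::InsertLo { rd, ri: hi_rd, imm: lo },
        ]
    }

    fn main() {
        let mut next = 1u32;
        let alloc_tmp = || {
            let v = VReg(next);
            next += 1;
            v
        };
        println!("{:?}", load_const64_old(VReg(0), 0x1234_5678_9abc_def0));
        println!("{:?}", load_const64_new(VReg(0), 0x1234_5678_9abc_def0, alloc_tmp));
    }

Passing alloc_tmp as a callback mirrors MachInst::gen_constant, which already
receives a temporary allocator from the lowering context, so callers such as
mem_finalize can supply a closure like |_| tmp when a suitable scratch register
is already in hand.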