diff --git a/Cargo.toml b/Cargo.toml
index 6f7f309..5410cf1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,6 +11,7 @@ repository = "https://github.com/bytecodealliance/regalloc2"
 log = { version = "0.4.8", default-features = false }
 smallvec = "1.6.1"
 fxhash = "0.2.1"
+slice-group-by = "0.3.0"
 
 # The below are only needed for fuzzing.
 # Keep this in sync with libfuzzer_sys's crate version:
diff --git a/src/checker.rs b/src/checker.rs
index bf5c098..d66610a 100644
--- a/src/checker.rs
+++ b/src/checker.rs
@@ -78,7 +78,7 @@ use crate::{
     Allocation, AllocationKind, Block, Edit, Function, Inst, InstPosition, Operand,
-    OperandConstraint, OperandKind, OperandPos, Output, PReg, ProgPoint, VReg,
+    OperandConstraint, OperandKind, OperandPos, Output, PReg, ProgPoint, RegClass, VReg,
 };
 
 use std::collections::{HashMap, HashSet, VecDeque};
@@ -426,12 +426,22 @@ impl CheckerState {
         match op.constraint() {
             OperandConstraint::Any => {}
             OperandConstraint::Reg => {
-                if alloc.kind() != AllocationKind::Reg {
-                    return Err(CheckerError::AllocationIsNotReg { inst, op, alloc });
+                // Reject pregs that represent a fixed stack slot.
+                if let Some(preg) = alloc.as_reg() {
+                    if preg.class() == RegClass::Int && (0..32).contains(&preg.hw_enc()) {
+                        return Ok(());
+                    }
                 }
+                return Err(CheckerError::AllocationIsNotReg { inst, op, alloc });
             }
             OperandConstraint::Stack => {
                 if alloc.kind() != AllocationKind::Stack {
+                    // Accept pregs that represent a fixed stack slot.
+                    if let Some(preg) = alloc.as_reg() {
+                        if preg.class() == RegClass::Int && (32..63).contains(&preg.hw_enc()) {
+                            return Ok(());
+                        }
+                    }
                     return Err(CheckerError::AllocationIsNotStack { inst, op, alloc });
                 }
             }
diff --git a/src/fuzzing/func.rs b/src/fuzzing/func.rs
index e5174cb..cbc9717 100644
--- a/src/fuzzing/func.rs
+++ b/src/fuzzing/func.rs
@@ -465,7 +465,7 @@ impl Func {
         let mut fixed = vec![];
         for _ in 0..u.int_in_range(0..=operands.len() - 1)? {
             // Pick an operand and make it a fixed reg.
-            let fixed_reg = PReg::new(u.int_in_range(0..=30)?, RegClass::Int);
+            let fixed_reg = PReg::new(u.int_in_range(0..=62)?, RegClass::Int);
             if fixed.contains(&fixed_reg) {
                 break;
             }
@@ -602,15 +602,18 @@ impl std::fmt::Debug for Func {
 }
 
 pub fn machine_env() -> MachineEnv {
-    // Reg 31 is the scratch reg.
-    let regs: Vec<PReg> = (0..31).map(|i| PReg::new(i, RegClass::Int)).collect();
-    let preferred_regs_by_class: [Vec<PReg>; 2] = [regs.iter().cloned().take(24).collect(), vec![]];
-    let non_preferred_regs_by_class: [Vec<PReg>; 2] =
-        [regs.iter().cloned().skip(24).collect(), vec![]];
-    let scratch_by_class: [PReg; 2] = [PReg::new(31, RegClass::Int), PReg::new(0, RegClass::Float)];
+    // Reg 63 is the scratch reg.
+    fn regs(r: std::ops::Range<usize>) -> Vec<PReg> {
+        r.map(|i| PReg::new(i, RegClass::Int)).collect()
+    }
+    let preferred_regs_by_class: [Vec<PReg>; 2] = [regs(0..24), vec![]];
+    let non_preferred_regs_by_class: [Vec<PReg>; 2] = [regs(24..32), vec![]];
+    let scratch_by_class: [PReg; 2] = [PReg::new(63, RegClass::Int), PReg::new(0, RegClass::Float)];
+    let fixed_stack_slots = regs(32..63);
     MachineEnv {
         preferred_regs_by_class,
         non_preferred_regs_by_class,
        scratch_by_class,
+        fixed_stack_slots,
     }
 }
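The new `fixed_stack_slots` list is how a client describes stack locations to the allocator. Below is a minimal sketch of a backend-style `MachineEnv` in the same spirit as the fuzzer's `machine_env()` above; the concrete register numbering is only an assumption for illustration and is not part of this patch.

    use regalloc2::{MachineEnv, PReg, RegClass};

    // Hypothetical layout: pregs 0..8 are real integer registers, preg 15 is
    // the integer scratch register, and pregs 32..40 stand for fixed
    // argument/return-value stack slots.
    fn example_env() -> MachineEnv {
        let int = |i| PReg::new(i, RegClass::Int);
        MachineEnv {
            preferred_regs_by_class: [(0..4).map(int).collect(), vec![]],
            non_preferred_regs_by_class: [(4..8).map(int).collect(), vec![]],
            scratch_by_class: [int(15), PReg::new(0, RegClass::Float)],
            // PRegs listed here must not also appear in the allocatable or
            // scratch sets; operands constrained to them are treated as
            // stack locations by the allocator.
            fixed_stack_slots: (32..40).map(int).collect(),
        }
    }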
diff --git a/src/ion/data_structures.rs b/src/ion/data_structures.rs
index 748d846..09f7834 100644
--- a/src/ion/data_structures.rs
+++ b/src/ion/data_structures.rs
@@ -266,6 +266,16 @@ pub struct VRegData {
 pub struct PRegData {
     pub reg: PReg,
     pub allocations: LiveRangeSet,
+    pub is_stack: bool,
+}
+
+#[derive(Clone, Debug)]
+pub struct MultiFixedRegFixup {
+    pub pos: ProgPoint,
+    pub from_slot: u8,
+    pub to_slot: u8,
+    pub to_preg: PRegIndex,
+    pub vreg: VRegIndex,
 }
 
 #[derive(Clone, Debug)]
@@ -329,9 +339,7 @@ pub struct Env<'a, F: Function> {
     // the register available. When we produce the final edit-list, we
     // will insert a copy from wherever the VReg's primary allocation
     // was to the approprate PReg.
-    //
-    // (progpoint, copy-from-preg, copy-to-preg, to-slot)
-    pub multi_fixed_reg_fixups: Vec<(ProgPoint, PRegIndex, PRegIndex, VRegIndex, usize)>,
+    pub multi_fixed_reg_fixups: Vec<MultiFixedRegFixup>,
 
     pub inserted_moves: Vec<InsertedMove>,
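To make the new `MultiFixedRegFixup` record concrete: it is created when a single instruction pins the same vreg to more than one fixed location at the same program point. At the public API level that situation looks roughly like the sketch below; `Operand::new` with `(vreg, constraint, kind, pos)` is the constructor this patch itself uses, while the helper name, registers, and the choice of kind/position are hypothetical. The allocator honors one constraint through the normal allocation and satisfies the other with an extra copy described by the fixup (`from_slot` names the operand slot whose allocation is copied from; `to_slot`/`to_preg` name the slot and fixed preg that receive the copy).

    use regalloc2::{Operand, OperandConstraint, OperandKind, OperandPos, PReg, VReg};

    // Hypothetical: one call-like instruction wants `v` both in a fixed
    // register and in a fixed stack slot (a PReg from MachineEnv::fixed_stack_slots).
    fn duplicate_fixed_uses(v: VReg, reg_arg: PReg, stack_arg: PReg) -> [Operand; 2] {
        [
            Operand::new(
                v,
                OperandConstraint::FixedReg(reg_arg),
                OperandKind::Use,
                OperandPos::Early,
            ),
            Operand::new(
                v,
                OperandConstraint::FixedReg(stack_arg),
                OperandKind::Use,
                OperandPos::Early,
            ),
        ]
    }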
diff --git a/src/ion/liveranges.rs b/src/ion/liveranges.rs
index de100c9..404b438 100644
--- a/src/ion/liveranges.rs
+++ b/src/ion/liveranges.rs
@@ -18,11 +18,13 @@ use super::{
     SpillSetIndex, Use, VRegData, VRegIndex, SLOT_NONE,
 };
 use crate::indexset::IndexSet;
+use crate::ion::data_structures::MultiFixedRegFixup;
 use crate::{
     Allocation, Block, Function, Inst, InstPosition, Operand, OperandConstraint, OperandKind,
     OperandPos, PReg, ProgPoint, RegAllocError, VReg,
 };
 use fxhash::FxHashSet;
+use slice_group_by::GroupByMut;
 use smallvec::{smallvec, SmallVec};
 use std::collections::{HashSet, VecDeque};
@@ -98,10 +100,11 @@ impl<'a, F: Function> Env<'a, F> {
     pub fn create_pregs_and_vregs(&mut self) {
         // Create PRegs from the env.
         self.pregs.resize(
-            PReg::MAX_INDEX,
+            PReg::NUM_INDEX,
             PRegData {
                 reg: PReg::invalid(),
                 allocations: LiveRangeSet::new(),
+                is_stack: false,
             },
         );
         for i in 0..=PReg::MAX {
@@ -110,6 +113,9 @@ impl<'a, F: Function> Env<'a, F> {
             let preg_float = PReg::new(i, RegClass::Float);
             self.pregs[preg_float.index()].reg = preg_float;
         }
+        for &preg in &self.env.fixed_stack_slots {
+            self.pregs[preg.index()].is_stack = true;
+        }
         // Create VRegs from the vreg count.
         for idx in 0..self.func.num_vregs() {
             // We'll fill in the real details when we see the def.
@@ -388,6 +394,10 @@ impl<'a, F: Function> Env<'a, F> {
             return Err(RegAllocError::EntryLivein);
         }
 
+        Ok(())
+    }
+
+    pub fn build_liveranges(&mut self) {
         for &vreg in self.func.reftype_vregs() {
             self.safepoints_per_vreg.insert(vreg.vreg(), HashSet::new());
         }
@@ -1138,104 +1148,6 @@ impl<'a, F: Function> Env<'a, F> {
             }
         }
 
-        // Do a fixed-reg cleanup pass: if there are any LiveRanges with
-        // multiple uses (or defs) at the same ProgPoint and there is
-        // more than one FixedReg constraint at that ProgPoint, we
-        // need to record all but one of them in a special fixup list
-        // and handle them later; otherwise, bundle-splitting to
-        // create minimal bundles becomes much more complex (we would
-        // have to split the multiple uses at the same progpoint into
-        // different bundles, which breaks invariants related to
-        // disjoint ranges and bundles).
-        let mut seen_fixed_for_vreg: SmallVec<[VReg; 16]> = smallvec![];
-        let mut first_preg: SmallVec<[PRegIndex; 16]> = smallvec![];
-        let mut extra_clobbers: SmallVec<[(PReg, Inst); 8]> = smallvec![];
-        for vreg in 0..self.vregs.len() {
-            for range_idx in 0..self.vregs[vreg].ranges.len() {
-                let entry = self.vregs[vreg].ranges[range_idx];
-                let range = entry.index;
-                log::trace!(
-                    "multi-fixed-reg cleanup: vreg {:?} range {:?}",
-                    VRegIndex::new(vreg),
-                    range,
-                );
-                let mut last_point = None;
-                let mut fixup_multi_fixed_vregs = |pos: ProgPoint,
-                                                   slot: usize,
-                                                   op: &mut Operand,
-                                                   fixups: &mut Vec<(
-                    ProgPoint,
-                    PRegIndex,
-                    PRegIndex,
-                    VRegIndex,
-                    usize,
-                )>| {
-                    if last_point.is_some() && Some(pos) != last_point {
-                        seen_fixed_for_vreg.clear();
-                        first_preg.clear();
-                    }
-                    last_point = Some(pos);
-
-                    if let OperandConstraint::FixedReg(preg) = op.constraint() {
-                        let vreg_idx = VRegIndex::new(op.vreg().vreg());
-                        let preg_idx = PRegIndex::new(preg.index());
-                        log::trace!(
-                            "at pos {:?}, vreg {:?} has fixed constraint to preg {:?}",
-                            pos,
-                            vreg_idx,
-                            preg_idx
-                        );
-                        if let Some(idx) = seen_fixed_for_vreg.iter().position(|r| *r == op.vreg())
-                        {
-                            let orig_preg = first_preg[idx];
-                            if orig_preg != preg_idx {
-                                log::trace!(" -> duplicate; switching to constraint Reg");
-                                fixups.push((pos, orig_preg, preg_idx, vreg_idx, slot));
-                                *op = Operand::new(
-                                    op.vreg(),
-                                    OperandConstraint::Reg,
-                                    op.kind(),
-                                    op.pos(),
-                                );
-                                log::trace!(
-                                    " -> extra clobber {} at inst{}",
-                                    preg,
-                                    pos.inst().index()
-                                );
-                                extra_clobbers.push((preg, pos.inst()));
-                            }
-                        } else {
-                            seen_fixed_for_vreg.push(op.vreg());
-                            first_preg.push(preg_idx);
-                        }
-                    }
-                };
-
-                for u in &mut self.ranges[range.index()].uses {
-                    let pos = u.pos;
-                    let slot = u.slot as usize;
-                    fixup_multi_fixed_vregs(
-                        pos,
-                        slot,
-                        &mut u.operand,
-                        &mut self.multi_fixed_reg_fixups,
-                    );
-                }
-
-                for &(clobber, inst) in &extra_clobbers {
-                    let range = CodeRange {
-                        from: ProgPoint::before(inst),
-                        to: ProgPoint::before(inst.next()),
-                    };
-                    self.add_liverange_to_preg(range, clobber);
-                }
-
-                extra_clobbers.clear();
-                first_preg.clear();
-                seen_fixed_for_vreg.clear();
-            }
-        }
-
         self.blockparam_ins.sort_unstable();
         self.blockparam_outs.sort_unstable();
         self.prog_move_srcs.sort_unstable_by_key(|(pos, _)| *pos);
@@ -1247,7 +1159,145 @@
         self.stats.initial_liverange_count = self.ranges.len();
         self.stats.blockparam_ins_count = self.blockparam_ins.len();
         self.stats.blockparam_outs_count = self.blockparam_outs.len();
+    }
 
-        Ok(())
+    pub fn fixup_multi_fixed_vregs(&mut self) {
+        // Do a fixed-reg cleanup pass: if there are any LiveRanges with
+        // multiple uses (or defs) at the same ProgPoint and there is
+        // more than one FixedReg constraint at that ProgPoint, we
+        // need to record all but one of them in a special fixup list
+        // and handle them later; otherwise, bundle-splitting to
+        // create minimal bundles becomes much more complex (we would
+        // have to split the multiple uses at the same progpoint into
+        // different bundles, which breaks invariants related to
+        // disjoint ranges and bundles).
+        let mut extra_clobbers: SmallVec<[(PReg, Inst); 8]> = smallvec![];
+        for vreg in 0..self.vregs.len() {
+            for range_idx in 0..self.vregs[vreg].ranges.len() {
+                let entry = self.vregs[vreg].ranges[range_idx];
+                let range = entry.index;
+                log::trace!(
+                    "multi-fixed-reg cleanup: vreg {:?} range {:?}",
+                    VRegIndex::new(vreg),
+                    range,
+                );
+
+                // Find groups of uses that occur at the same program point.
+                for uses in self.ranges[range.index()]
+                    .uses
+                    .linear_group_by_key_mut(|u| u.pos)
+                {
+                    if uses.len() < 2 {
+                        continue;
+                    }
+
+                    // Search for conflicting constraints in the uses.
+                    let mut requires_reg = false;
+                    let mut num_fixed_reg = 0;
+                    let mut num_fixed_stack = 0;
+                    let mut first_reg_slot = None;
+                    let mut first_stack_slot = None;
+                    for u in uses.iter() {
+                        match u.operand.constraint() {
+                            OperandConstraint::Any => {
+                                first_reg_slot.get_or_insert(u.slot);
+                                first_stack_slot.get_or_insert(u.slot);
+                            }
+                            OperandConstraint::Reg | OperandConstraint::Reuse(_) => {
+                                first_reg_slot.get_or_insert(u.slot);
+                                requires_reg = true;
+                            }
+                            OperandConstraint::FixedReg(preg) => {
+                                if self.pregs[preg.index()].is_stack {
+                                    num_fixed_stack += 1;
+                                    first_stack_slot.get_or_insert(u.slot);
+                                } else {
+                                    requires_reg = true;
+                                    num_fixed_reg += 1;
+                                    first_reg_slot.get_or_insert(u.slot);
+                                }
+                            }
+                            // Maybe this could be supported in the future...
+                            OperandConstraint::Stack => panic!(
+                                "multiple uses of vreg with a Stack constraint are not supported"
+                            ),
+                        }
+                    }
+
+                    // Fast path if there are no conflicts.
+                    if num_fixed_reg + num_fixed_stack <= 1
+                        && !(requires_reg && num_fixed_stack != 0)
+                    {
+                        continue;
+                    }
+
+                    // We pick one constraint (in order: FixedReg, Reg, FixedStack)
+                    // and then rewrite any incompatible constraints to Any.
+                    // This allows register allocation to succeed and we will
+                    // later insert moves to satisfy the rewritten constraints.
+                    let source_slot = if requires_reg {
+                        first_reg_slot.unwrap()
+                    } else {
+                        first_stack_slot.unwrap()
+                    };
+                    let mut first_preg = None;
+                    for u in uses.iter_mut() {
+                        if let OperandConstraint::FixedReg(preg) = u.operand.constraint() {
+                            let vreg_idx = VRegIndex::new(u.operand.vreg().vreg());
+                            let preg_idx = PRegIndex::new(preg.index());
+                            log::trace!(
+                                "at pos {:?}, vreg {:?} has fixed constraint to preg {:?}",
+                                u.pos,
+                                vreg_idx,
+                                preg_idx
+                            );
+
+                            // FixedStack is incompatible if there are any
+                            // Reg/FixedReg constraints. FixedReg is
+                            // incompatible if there already is a different
+                            // FixedReg constraint. If either condition is true,
+                            // we edit the constraint below; otherwise, we can
+                            // skip this edit.
+                            if !(requires_reg && self.pregs[preg.index()].is_stack)
+                                && *first_preg.get_or_insert(preg) == preg
+                            {
+                                continue;
+                            }
+
+                            log::trace!(" -> duplicate; switching to constraint Any");
+                            self.multi_fixed_reg_fixups.push(MultiFixedRegFixup {
+                                pos: u.pos,
+                                from_slot: source_slot,
+                                to_slot: u.slot,
+                                to_preg: preg_idx,
+                                vreg: vreg_idx,
+                            });
+                            u.operand = Operand::new(
+                                u.operand.vreg(),
+                                OperandConstraint::Any,
+                                u.operand.kind(),
+                                u.operand.pos(),
+                            );
+                            log::trace!(
+                                " -> extra clobber {} at inst{}",
+                                preg,
+                                u.pos.inst().index()
+                            );
+                            extra_clobbers.push((preg, u.pos.inst()));
+                        }
+                    }
+                }
+
+                for &(clobber, inst) in &extra_clobbers {
+                    let range = CodeRange {
+                        from: ProgPoint::before(inst),
+                        to: ProgPoint::before(inst.next()),
+                    };
+                    self.add_liverange_to_preg(range, clobber);
+                }
+
+                extra_clobbers.clear();
+            }
+        }
     }
 }
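The new `slice-group-by` dependency exists only for the grouping step above: uses within a live range are already ordered by position, and `linear_group_by_key_mut` yields mutable runs of consecutive elements that share a key. A standalone sketch of that behaviour, independent of the allocator's data structures (the tuple data here is made up for illustration):

    use slice_group_by::GroupByMut;

    fn main() {
        // (program point, operand) pairs, already sorted by point.
        let mut uses = vec![(10, 'a'), (10, 'b'), (12, 'c'), (15, 'd'), (15, 'e')];
        for group in uses.linear_group_by_key_mut(|u| u.0) {
            // Each `group` is a mutable slice of consecutive elements with an equal key.
            println!("point {} has {} use(s)", group[0].0, group.len());
        }
    }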
diff --git a/src/ion/merge.rs b/src/ion/merge.rs
index 2f3611b..a8e1fe5 100644
--- a/src/ion/merge.rs
+++ b/src/ion/merge.rs
@@ -13,8 +13,8 @@
 //! Bundle merging.
 
 use super::{
-    Env, LiveBundleIndex, LiveRangeIndex, LiveRangeKey, Requirement, SpillSet, SpillSetIndex,
-    SpillSlotIndex, VRegIndex,
+    Env, LiveBundleIndex, LiveRangeIndex, LiveRangeKey, SpillSet, SpillSetIndex, SpillSlotIndex,
+    VRegIndex,
 };
 use crate::{Function, Inst, OperandConstraint, PReg};
 use smallvec::smallvec;
@@ -99,10 +99,7 @@ impl<'a, F: Function> Env<'a, F> {
             || self.bundles[to.index()].cached_stack()
            || self.bundles[to.index()].cached_fixed()
         {
-            let req = self
-                .compute_requirement(from)
-                .merge(self.compute_requirement(to));
-            if req == Requirement::Conflict {
+            if self.merge_bundle_requirements(from, to).is_err() {
                 log::trace!(" -> conflicting requirements; aborting merge");
                 return false;
             }
diff --git a/src/ion/mod.rs b/src/ion/mod.rs
index aed56dd..5903f58 100644
--- a/src/ion/mod.rs
+++ b/src/ion/mod.rs
@@ -92,6 +92,8 @@ impl<'a, F: Function> Env<'a, F> {
     pub(crate) fn init(&mut self) -> Result<(), RegAllocError> {
         self.create_pregs_and_vregs();
         self.compute_liveness()?;
+        self.build_liveranges();
+        self.fixup_multi_fixed_vregs();
         self.merge_vreg_bundles();
         self.queue_bundles();
         if log::log_enabled!(log::Level::Trace) {
diff --git a/src/ion/moves.rs b/src/ion/moves.rs
index 2b562f7..9f913c4 100644
--- a/src/ion/moves.rs
+++ b/src/ion/moves.rs
@@ -35,6 +35,16 @@ impl<'a, F: Function> Env<'a, F> {
         pos == self.cfginfo.block_exit[block.index()]
     }
 
+    fn allocation_is_stack(&self, alloc: Allocation) -> bool {
+        if alloc.is_stack() {
+            true
+        } else if let Some(preg) = alloc.as_reg() {
+            self.pregs[preg.index()].is_stack
+        } else {
+            false
+        }
+    }
+
     pub fn insert_move(
         &mut self,
         pos: ProgPoint,
@@ -685,27 +695,27 @@
         }
 
         // Handle multi-fixed-reg constraints by copying.
-        for (progpoint, from_preg, to_preg, to_vreg, slot) in
-            std::mem::replace(&mut self.multi_fixed_reg_fixups, vec![])
-        {
+        for fixup in std::mem::replace(&mut self.multi_fixed_reg_fixups, vec![]) {
+            let from_alloc = self.get_alloc(fixup.pos.inst(), fixup.from_slot as usize);
+            let to_alloc = Allocation::reg(self.pregs[fixup.to_preg.index()].reg);
             log::trace!(
-                "multi-fixed-move constraint at {:?} from p{} to p{} for v{}",
-                progpoint,
-                from_preg.index(),
-                to_preg.index(),
-                to_vreg.index(),
+                "multi-fixed-move constraint at {:?} from {} to {} for v{}",
+                fixup.pos,
+                from_alloc,
+                to_alloc,
+                fixup.vreg.index(),
             );
             self.insert_move(
-                progpoint,
+                fixup.pos,
                 InsertMovePrio::MultiFixedReg,
-                Allocation::reg(self.pregs[from_preg.index()].reg),
-                Allocation::reg(self.pregs[to_preg.index()].reg),
-                Some(self.vreg_regs[to_vreg.index()]),
+                from_alloc,
+                to_alloc,
+                Some(self.vreg_regs[fixup.vreg.index()]),
             );
             self.set_alloc(
-                progpoint.inst(),
-                slot,
-                Allocation::reg(self.pregs[to_preg.index()].reg),
+                fixup.pos.inst(),
+                fixup.to_slot as usize,
+                Allocation::reg(self.pregs[fixup.to_preg.index()].reg),
             );
         }
@@ -968,9 +978,9 @@
                 let scratch_used = resolved.iter().any(|&(src, dst, _)| {
                     src == Allocation::reg(scratch) || dst == Allocation::reg(scratch)
                 });
-                let stack_stack_move = resolved
-                    .iter()
-                    .any(|&(src, dst, _)| src.is_stack() && dst.is_stack());
+                let stack_stack_move = resolved.iter().any(|&(src, dst, _)| {
+                    self.allocation_is_stack(src) && self.allocation_is_stack(dst)
+                });
                 let extra_slot = if scratch_used && stack_stack_move {
                     if self.extra_spillslot[regclass as u8 as usize].is_none() {
                         let slot = self.allocate_spillslot(regclass);
@@ -989,7 +999,7 @@
                     if dst == Allocation::reg(scratch) {
                         scratch_used_yet = true;
                     }
-                    if src.is_stack() && dst.is_stack() {
+                    if self.allocation_is_stack(src) && self.allocation_is_stack(dst) {
                         if !scratch_used_yet {
                             self.add_edit(
                                 pos,
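`allocation_is_stack` exists because an `Allocation` that nominally names a register may now refer to a fixed stack slot, and the move resolver must treat such moves as memory-to-memory: most ISAs cannot copy stack-to-stack directly, so the resolver routes them through the scratch register (or an extra spillslot when the scratch is already in use). A hedged sketch of that decomposition; `emit_move` and the `is_stack` predicate are stand-ins, not regalloc2 APIs:

    use regalloc2::{Allocation, PReg};

    fn lower_move(
        src: Allocation,
        dst: Allocation,
        scratch: PReg,
        is_stack: impl Fn(Allocation) -> bool,
        mut emit_move: impl FnMut(Allocation, Allocation),
    ) {
        if is_stack(src) && is_stack(dst) {
            // stack -> scratch -> stack, mirroring the edits inserted above.
            emit_move(src, Allocation::reg(scratch));
            emit_move(Allocation::reg(scratch), dst);
        } else {
            emit_move(src, dst);
        }
    }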
diff --git a/src/ion/process.rs b/src/ion/process.rs
index 123ae30..c45bc5b 100644
--- a/src/ion/process.rs
+++ b/src/ion/process.rs
@@ -18,9 +18,12 @@ use super::{
     Requirement, SpillWeight, UseList,
 };
 use crate::{
-    ion::data_structures::{
-        CodeRange, BUNDLE_MAX_NORMAL_SPILL_WEIGHT, MINIMAL_BUNDLE_SPILL_WEIGHT,
-        MINIMAL_FIXED_BUNDLE_SPILL_WEIGHT,
+    ion::{
+        data_structures::{
+            CodeRange, BUNDLE_MAX_NORMAL_SPILL_WEIGHT, MINIMAL_BUNDLE_SPILL_WEIGHT,
+            MINIMAL_FIXED_BUNDLE_SPILL_WEIGHT,
+        },
+        requirement::RequirementConflictAt,
     },
     Allocation, Function, Inst, InstPosition, OperandConstraint, OperandKind, PReg, ProgPoint,
     RegAllocError,
 };
@@ -367,29 +370,6 @@ impl<'a, F: Function> Env<'a, F> {
         }
     }
 
-    pub fn find_conflict_split_point(&self, bundle: LiveBundleIndex) -> ProgPoint {
-        // Find the first use whose requirement causes the merge up to
-        // this point to go to Conflict.
-        let mut req = Requirement::Unknown;
-        for entry in &self.bundles[bundle.index()].ranges {
-            for u in &self.ranges[entry.index.index()].uses {
-                let this_req = Requirement::from_operand(u.operand);
-                req = req.merge(this_req);
-                if req == Requirement::Conflict {
-                    return u.pos;
-                }
-            }
-        }
-
-        // Fallback: start of bundle.
-        self.bundles[bundle.index()]
-            .ranges
-            .first()
-            .unwrap()
-            .range
-            .from
-    }
-
     pub fn get_or_create_spill_bundle(
         &mut self,
         bundle: LiveBundleIndex,
@@ -752,37 +732,41 @@ impl<'a, F: Function> Env<'a, F> {
         reg_hint: PReg,
     ) -> Result<(), RegAllocError> {
         let class = self.spillsets[self.bundles[bundle.index()].spillset.index()].class;
-        let req = self.compute_requirement(bundle);
 
         // Grab a hint from either the queue or our spillset, if any.
-        let hint_reg = if reg_hint != PReg::invalid() {
+        let mut hint_reg = if reg_hint != PReg::invalid() {
             reg_hint
         } else {
             self.spillsets[self.bundles[bundle.index()].spillset.index()].reg_hint
         };
+        if self.pregs[hint_reg.index()].is_stack {
+            hint_reg = PReg::invalid();
+        }
 
         log::trace!("process_bundle: bundle {:?} hint {:?}", bundle, hint_reg,);
 
-        if let Requirement::Conflict = req {
-            // We have to split right away. We'll find a point to
-            // split that would allow at least the first half of the
-            // split to be conflict-free.
-            assert!(
-                !self.minimal_bundle(bundle),
-                "Minimal bundle with conflict!"
-            );
-            let split_point = self.find_conflict_split_point(bundle);
-            self.split_and_requeue_bundle(
-                bundle,
-                /* split_at_point = */ split_point,
-                reg_hint,
-            );
-            return Ok(());
-        }
+        let req = match self.compute_requirement(bundle) {
+            Ok(req) => req,
+            Err(RequirementConflictAt(split_point)) => {
+                // We have to split right away. We'll find a point to
+                // split that would allow at least the first half of the
+                // split to be conflict-free.
+                assert!(
+                    !self.minimal_bundle(bundle),
+                    "Minimal bundle with conflict!"
+                );
+                self.split_and_requeue_bundle(
+                    bundle,
+                    /* split_at_point = */ split_point,
+                    reg_hint,
+                );
+                return Ok(());
+            }
+        };
 
         // If no requirement at all (because no uses), and *if* a
         // spill bundle is already present, then move the LRs over to
         // the spill bundle right away.
         match req {
-            Requirement::Unknown | Requirement::Any => {
+            Requirement::Any => {
                 if let Some(spill) =
                     self.get_or_create_spill_bundle(bundle, /* create_if_absent = */ false)
                 {
@@ -806,7 +790,7 @@ impl<'a, F: Function> Env<'a, F> {
             debug_assert!(attempts < 100 * self.func.num_insts());
 
             let fixed_preg = match req {
-                Requirement::Fixed(preg) => Some(preg),
+                Requirement::FixedReg(preg) | Requirement::FixedStack(preg) => Some(preg),
                 Requirement::Register => None,
                 Requirement::Stack => {
                     // If we must be on the stack, mark our spillset
@@ -815,14 +799,10 @@ impl<'a, F: Function> Env<'a, F> {
                     return Ok(());
                 }
 
-                Requirement::Any | Requirement::Unknown => {
+                Requirement::Any => {
                     self.spilled_bundles.push(bundle);
                     return Ok(());
                 }
-
-                Requirement::Conflict => {
-                    unreachable!()
-                }
             };
 
             // Scan all pregs, or the one fixed preg, and attempt to allocate.
diff --git a/src/ion/requirement.rs b/src/ion/requirement.rs
index 8aeb824..c14cc38 100644
--- a/src/ion/requirement.rs
+++ b/src/ion/requirement.rs
@@ -13,57 +13,93 @@
 //! Requirements computation.
 
 use super::{Env, LiveBundleIndex};
-use crate::{Function, Operand, OperandConstraint, PReg};
+use crate::{Function, Operand, OperandConstraint, PReg, ProgPoint};
+
+pub struct RequirementConflict;
+
+pub struct RequirementConflictAt(pub ProgPoint);
 
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum Requirement {
-    Unknown,
-    Fixed(PReg),
+    FixedReg(PReg),
+    FixedStack(PReg),
     Register,
     Stack,
     Any,
-    Conflict,
 }
 
 impl Requirement {
     #[inline(always)]
-    pub fn merge(self, other: Requirement) -> Requirement {
+    pub fn merge(self, other: Requirement) -> Result<Requirement, RequirementConflict> {
         match (self, other) {
-            (Requirement::Unknown, other) | (other, Requirement::Unknown) => other,
-            (Requirement::Conflict, _) | (_, Requirement::Conflict) => Requirement::Conflict,
-            (other, Requirement::Any) | (Requirement::Any, other) => other,
-            (Requirement::Stack, Requirement::Stack) => self,
-            (Requirement::Register, Requirement::Fixed(preg))
-            | (Requirement::Fixed(preg), Requirement::Register) => Requirement::Fixed(preg),
-            (Requirement::Register, Requirement::Register) => self,
-            (Requirement::Fixed(a), Requirement::Fixed(b)) if a == b => self,
-            _ => Requirement::Conflict,
-        }
-    }
-    #[inline(always)]
-    pub fn from_operand(op: Operand) -> Requirement {
-        match op.constraint() {
-            OperandConstraint::FixedReg(preg) => Requirement::Fixed(preg),
-            OperandConstraint::Reg | OperandConstraint::Reuse(_) => Requirement::Register,
-            OperandConstraint::Stack => Requirement::Stack,
-            _ => Requirement::Any,
+            (other, Requirement::Any) | (Requirement::Any, other) => Ok(other),
+            (Requirement::Register, Requirement::Register) => Ok(self),
+            (Requirement::Stack, Requirement::Stack) => Ok(self),
+            (Requirement::Register, Requirement::FixedReg(preg))
+            | (Requirement::FixedReg(preg), Requirement::Register) => {
+                Ok(Requirement::FixedReg(preg))
+            }
+            (Requirement::Stack, Requirement::FixedStack(preg))
+            | (Requirement::FixedStack(preg), Requirement::Stack) => {
+                Ok(Requirement::FixedStack(preg))
+            }
+            (Requirement::FixedReg(a), Requirement::FixedReg(b)) if a == b => Ok(self),
+            (Requirement::FixedStack(a), Requirement::FixedStack(b)) if a == b => Ok(self),
+            _ => Err(RequirementConflict),
+        }
+    }
 }
 
 impl<'a, F: Function> Env<'a, F> {
-    pub fn compute_requirement(&self, bundle: LiveBundleIndex) -> Requirement {
-        let mut req = Requirement::Unknown;
+    #[inline(always)]
+    pub fn requirement_from_operand(&self, op: Operand) -> Requirement {
+        match op.constraint() {
+            OperandConstraint::FixedReg(preg) => {
+                if self.pregs[preg.index()].is_stack {
+                    Requirement::FixedStack(preg)
+                } else {
+                    Requirement::FixedReg(preg)
+                }
+            }
+            OperandConstraint::Reg | OperandConstraint::Reuse(_) => Requirement::Register,
+            OperandConstraint::Stack => Requirement::Stack,
+            OperandConstraint::Any => Requirement::Any,
+        }
+    }
+
+    pub fn compute_requirement(
+        &self,
+        bundle: LiveBundleIndex,
+    ) -> Result<Requirement, RequirementConflictAt> {
+        let mut req = Requirement::Any;
         log::trace!("compute_requirement: {:?}", bundle);
-        for entry in &self.bundles[bundle.index()].ranges {
+        let ranges = &self.bundles[bundle.index()].ranges;
+        for entry in ranges {
             log::trace!(" -> LR {:?}", entry.index);
             for u in &self.ranges[entry.index.index()].uses {
                 log::trace!(" -> use {:?}", u);
-                let r = Requirement::from_operand(u.operand);
-                req = req.merge(r);
+                let r = self.requirement_from_operand(u.operand);
+                req = req.merge(r).map_err(|_| {
+                    log::trace!(" -> conflict");
+                    RequirementConflictAt(u.pos)
+                })?;
                 log::trace!(" -> req {:?}", req);
             }
         }
         log::trace!(" -> final: {:?}", req);
-        req
+        Ok(req)
+    }
+
+    pub fn merge_bundle_requirements(
+        &self,
+        a: LiveBundleIndex,
+        b: LiveBundleIndex,
+    ) -> Result<Requirement, RequirementConflict> {
+        let req_a = self
+            .compute_requirement(a)
+            .map_err(|_| RequirementConflict)?;
+        let req_b = self
+            .compute_requirement(b)
+            .map_err(|_| RequirementConflict)?;
+        req_a.merge(req_b)
     }
 }
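The reworked `merge` is a small join operation: `Any` is the identity, `Register`/`Stack` refine to the matching fixed variants, and anything else is a conflict reported through the `Result`. A sketch of a unit test one might add inside `src/ion/requirement.rs` to pin that down (not part of this patch; `RequirementConflict` has no `Debug` impl, hence the `.ok()` comparisons):

    #[cfg(test)]
    mod tests {
        use super::*;
        use crate::{PReg, RegClass};

        #[test]
        fn merge_examples() {
            let r0 = PReg::new(0, RegClass::Int);
            // Any is the identity element.
            assert_eq!(
                Requirement::Any.merge(Requirement::Register).ok(),
                Some(Requirement::Register)
            );
            // Register refines to a compatible fixed register.
            assert_eq!(
                Requirement::Register.merge(Requirement::FixedReg(r0)).ok(),
                Some(Requirement::FixedReg(r0))
            );
            // Register and Stack requirements cannot be reconciled.
            assert!(Requirement::Stack.merge(Requirement::Register).is_err());
        }
    }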
diff --git a/src/lib.rs b/src/lib.rs
index 6cc4354..36b8042 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -64,14 +64,13 @@ pub enum RegClass {
 /// integer registers and indices 64..=127 are the 64 float registers.
 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct PReg {
-    hw_enc: u8,
-    class: RegClass,
+    bits: u8,
 }
 
 impl PReg {
     pub const MAX_BITS: usize = 6;
     pub const MAX: usize = (1 << Self::MAX_BITS) - 1;
-    pub const MAX_INDEX: usize = 1 << (Self::MAX_BITS + 1); // including RegClass bit
+    pub const NUM_INDEX: usize = 1 << (Self::MAX_BITS + 1); // including RegClass bit
 
     /// Create a new PReg. The `hw_enc` range is 6 bits.
     #[inline(always)]
@@ -85,22 +84,24 @@ impl PReg {
         let _ = HW_ENC_MUST_BE_IN_BOUNDS[hw_enc];
 
         PReg {
-            hw_enc: hw_enc as u8,
-            class,
+            bits: ((class as u8) << Self::MAX_BITS) | (hw_enc as u8),
         }
     }
 
     /// The physical register number, as encoded by the ISA for the particular register class.
     #[inline(always)]
     pub fn hw_enc(self) -> usize {
-        let hw_enc = self.hw_enc as usize;
-        hw_enc
+        self.bits as usize & Self::MAX
     }
 
     /// The register class.
     #[inline(always)]
     pub fn class(self) -> RegClass {
-        self.class
+        if self.bits & (1 << Self::MAX_BITS) == 0 {
+            RegClass::Int
+        } else {
+            RegClass::Float
+        }
     }
 
     /// Get an index into the (not necessarily contiguous) index space of
     /// all PRegs and index it efficiently.
     #[inline(always)]
     pub fn index(self) -> usize {
-        ((self.class as u8 as usize) << Self::MAX_BITS) | (self.hw_enc as usize)
+        self.bits as usize
     }
 
     /// Construct a PReg from the value returned from `.index()`.
     #[inline(always)]
     pub fn from_index(index: usize) -> Self {
-        let class = (index >> Self::MAX_BITS) & 1;
-        let class = match class {
-            0 => RegClass::Int,
-            1 => RegClass::Float,
-            _ => unreachable!(),
-        };
-        let index = index & Self::MAX;
-        PReg::new(index, class)
+        PReg {
+            bits: (index & (Self::NUM_INDEX - 1)) as u8,
+        }
     }
 
     /// Return the "invalid PReg", which can be used to initialize
@@ -1154,6 +1150,14 @@ pub struct MachineEnv {
     /// the emission of two machine-code instructions, this lowering
     /// can use the scratch register between them.
     pub scratch_by_class: [PReg; 2],
+
+    /// Some `PReg`s can be designated as locations on the stack rather than
+    /// actual registers. These can be used to tell the register allocator about
+    /// pre-defined stack slots used for function arguments and return values.
+    ///
+    /// `PReg`s in this list cannot be used as a scratch register or as an
+    /// allocatable register.
+    pub fixed_stack_slots: Vec<PReg>,
 }
 
 /// The output of the register allocator.
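Since `PReg` now stores its class bit above the 6-bit `hw_enc` in a single byte, `index()` is just the packed value and `from_index()` only masks it, so the two are exact inverses over the 128-entry index space (`NUM_INDEX`). A small sketch using the public API; the assertions follow directly from `MAX_BITS = 6`:

    use regalloc2::{PReg, RegClass};

    fn main() {
        let r5 = PReg::new(5, RegClass::Int);
        let f5 = PReg::new(5, RegClass::Float);
        // Int registers occupy indices 0..64, Float registers 64..128.
        assert_eq!(r5.index(), 5);
        assert_eq!(f5.index(), 64 + 5);
        // from_index() reuses the stored bits, so the round-trip is lossless.
        assert_eq!(PReg::from_index(f5.index()).index(), f5.index());
        assert_eq!(r5.hw_enc(), 5);
        assert!(matches!(f5.class(), RegClass::Float));
    }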