Cleanup: split allocator implementation into 11 files of more reasonable size.
This commit is contained in:
15
doc/TODO
Normal file
15
doc/TODO
Normal file
@@ -0,0 +1,15 @@
|
||||
# Features
|
||||
|
||||
- Rematerialization
|
||||
- Stack-location constraints that place operands in user-defined stack
|
||||
locations (distinct from SpillSlots) (e.g., stack args)
|
||||
|
||||
# Performance
|
||||
|
||||
- Investigate better register hinting
|
||||
- Investigate more principled cost functions and split locations,
|
||||
especially around loop nests
|
||||
|
||||
# Cleanup
|
||||
|
||||
- Remove support for non-SSA code once no longer necessary
|
||||
531
src/ion/data_structures.rs
Normal file
531
src/ion/data_structures.rs
Normal file
@@ -0,0 +1,531 @@
|
||||
/*
|
||||
* The following license applies to this file, which was initially
|
||||
* derived from the files `js/src/jit/BacktrackingAllocator.h` and
|
||||
* `js/src/jit/BacktrackingAllocator.cpp` in Mozilla Firefox:
|
||||
*
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
*
|
||||
* Since the initial port, the design has been substantially evolved
|
||||
* and optimized.
|
||||
*/
|
||||
|
||||
//! Data structures for backtracking allocator.
|
||||
|
||||
use crate::bitvec::BitVec;
|
||||
use crate::cfg::CFGInfo;
|
||||
use crate::index::ContainerComparator;
|
||||
use crate::{
|
||||
define_index, Allocation, Block, Edit, Function, Inst, MachineEnv, Operand, PReg, ProgPoint,
|
||||
RegClass, SpillSlot, VReg,
|
||||
};
|
||||
use smallvec::SmallVec;
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::fmt::Debug;
|
||||
|
||||
/// A range from `from` (inclusive) to `to` (exclusive).
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct CodeRange {
    /// Inclusive start point.
    pub from: ProgPoint,
    /// Exclusive end point.
    pub to: ProgPoint,
}

impl CodeRange {
    /// An empty range has identical endpoints (`from == to`).
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.from == self.to
    }
    /// Does this range fully contain `other`?
    #[inline(always)]
    pub fn contains(&self, other: &Self) -> bool {
        other.from >= self.from && other.to <= self.to
    }
    /// Is `other` within `[from, to)`? (`to` is exclusive.)
    #[inline(always)]
    pub fn contains_point(&self, other: ProgPoint) -> bool {
        other >= self.from && other < self.to
    }
    /// Do the two half-open ranges share at least one point?
    #[inline(always)]
    pub fn overlaps(&self, other: &Self) -> bool {
        other.to > self.from && other.from < self.to
    }
    /// Length measured in instructions (the before/after slot within an
    /// instruction is ignored: only `inst()` indices are subtracted).
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.to.inst().index() - self.from.inst().index()
    }
}

impl std::cmp::PartialOrd for CodeRange {
    #[inline(always)]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl std::cmp::Ord for CodeRange {
    /// Orders ranges by position, but treats any two OVERLAPPING ranges
    /// as `Equal`. This is not a lawful total order in general (equality
    /// is not transitive across overlaps); it is only coherent when the
    /// compared ranges are non-overlapping, which is how sorted range
    /// lists in this allocator are maintained. Same convention as
    /// `LiveRangeKey` below.
    #[inline(always)]
    fn cmp(&self, other: &Self) -> Ordering {
        if self.to <= other.from {
            Ordering::Less
        } else if self.from >= other.to {
            Ordering::Greater
        } else {
            Ordering::Equal
        }
    }
}
|
||||
|
||||
// Typed newtype indices into the allocator's parallel `Vec` arenas
// (see `Env` below). Generated by the `define_index!` macro.
define_index!(LiveBundleIndex);
define_index!(LiveRangeIndex);
define_index!(SpillSetIndex);
define_index!(UseIndex);
define_index!(VRegIndex);
define_index!(PRegIndex);
define_index!(SpillSlotIndex);

/// Used to carry small sets of bundles, e.g. for conflict sets.
pub type LiveBundleVec = SmallVec<[LiveBundleIndex; 4]>;

/// One entry in a sorted range list: the range itself plus the arena
/// index of the corresponding `LiveRange`. The `CodeRange` is stored
/// inline so scans do not need to chase the index.
#[derive(Clone, Copy, Debug)]
pub struct LiveRangeListEntry {
    pub range: CodeRange,
    pub index: LiveRangeIndex,
}

/// Sorted list of ranges belonging to a bundle or vreg.
pub type LiveRangeList = SmallVec<[LiveRangeListEntry; 4]>;
/// List of uses within a single live range.
pub type UseList = SmallVec<[Use; 2]>;
|
||||
|
||||
/// A single live range: a contiguous span of program points over which
/// a vreg must be in one location.
#[derive(Clone, Debug)]
pub struct LiveRange {
    /// The span of code covered by this range.
    pub range: CodeRange,

    /// Owning vreg.
    pub vreg: VRegIndex,
    /// Bundle this range currently belongs to.
    pub bundle: LiveBundleIndex,
    /// Packed word: bits 31..=29 are `LiveRangeFlag` bits, bits 28..=0
    /// are the cumulative spill weight of the uses (see the accessors
    /// in `impl LiveRange`).
    pub uses_spill_weight_and_flags: u32,

    /// Uses contained in this range.
    pub uses: UseList,

    /// Forwarding index set when this range is merged into another.
    pub merged_into: LiveRangeIndex,
}

/// Flag bits stored in the top three bits of
/// `uses_spill_weight_and_flags`. The discriminant is the bit value
/// BEFORE the common `<< 29` shift applied by the accessors.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u32)]
pub enum LiveRangeFlag {
    StartsAtDef = 1,
}
|
||||
|
||||
impl LiveRange {
|
||||
#[inline(always)]
|
||||
pub fn set_flag(&mut self, flag: LiveRangeFlag) {
|
||||
self.uses_spill_weight_and_flags |= (flag as u32) << 29;
|
||||
}
|
||||
#[inline(always)]
|
||||
pub fn clear_flag(&mut self, flag: LiveRangeFlag) {
|
||||
self.uses_spill_weight_and_flags &= !((flag as u32) << 29);
|
||||
}
|
||||
#[inline(always)]
|
||||
pub fn assign_flag(&mut self, flag: LiveRangeFlag, val: bool) {
|
||||
let bit = if val { (flag as u32) << 29 } else { 0 };
|
||||
self.uses_spill_weight_and_flags &= 0xe000_0000;
|
||||
self.uses_spill_weight_and_flags |= bit;
|
||||
}
|
||||
#[inline(always)]
|
||||
pub fn has_flag(&self, flag: LiveRangeFlag) -> bool {
|
||||
self.uses_spill_weight_and_flags & ((flag as u32) << 29) != 0
|
||||
}
|
||||
#[inline(always)]
|
||||
pub fn flag_word(&self) -> u32 {
|
||||
self.uses_spill_weight_and_flags & 0xe000_0000
|
||||
}
|
||||
#[inline(always)]
|
||||
pub fn merge_flags(&mut self, flag_word: u32) {
|
||||
self.uses_spill_weight_and_flags |= flag_word;
|
||||
}
|
||||
#[inline(always)]
|
||||
pub fn uses_spill_weight(&self) -> u32 {
|
||||
self.uses_spill_weight_and_flags & 0x1fff_ffff
|
||||
}
|
||||
#[inline(always)]
|
||||
pub fn set_uses_spill_weight(&mut self, weight: u32) {
|
||||
assert!(weight < (1 << 29));
|
||||
self.uses_spill_weight_and_flags =
|
||||
(self.uses_spill_weight_and_flags & 0xe000_0000) | weight;
|
||||
}
|
||||
}
|
||||
|
||||
/// A single use (operand occurrence) of a vreg within a live range.
#[derive(Clone, Copy, Debug)]
pub struct Use {
    /// The operand as provided by the client.
    pub operand: Operand,
    /// Program point at which the use occurs.
    pub pos: ProgPoint,
    /// Operand-slot index within the instruction, or `SLOT_NONE` when
    /// the use does not correspond to a real operand slot.
    pub slot: u8,
    /// Spill weight of this use; computed when the use is inserted
    /// into a live range (see the comment in `new`).
    pub weight: u16,
}

impl Use {
    /// Construct a use with a zero weight placeholder.
    #[inline(always)]
    pub fn new(operand: Operand, pos: ProgPoint, slot: u8) -> Self {
        Self {
            operand,
            pos,
            slot,
            // Weight is updated on insertion into LR.
            weight: 0,
        }
    }
}

/// Sentinel `slot` value meaning "no operand slot".
pub const SLOT_NONE: u8 = u8::MAX;
|
||||
|
||||
/// A bundle of live ranges that are allocated together (all ranges in a
/// bundle receive the same `Allocation`).
#[derive(Clone, Debug)]
pub struct LiveBundle {
    /// Sorted, non-overlapping list of member ranges.
    pub ranges: LiveRangeList,
    /// Spill set this bundle belongs to.
    pub spillset: SpillSetIndex,
    /// Current allocation, if decided.
    pub allocation: Allocation,
    pub prio: u32, // recomputed after every bulk update
    /// Packed cache word: bit 31 = "minimal", bit 30 = "fixed",
    /// bit 29 = "stack", bits 28..=0 = spill weight. See accessors.
    pub spill_weight_and_props: u32,
}

impl LiveBundle {
    /// Cache the spill weight and the minimal/fixed/stack property bits
    /// in one packed word.
    #[inline(always)]
    pub fn set_cached_spill_weight_and_props(
        &mut self,
        spill_weight: u32,
        minimal: bool,
        fixed: bool,
        stack: bool,
    ) {
        // NOTE(review): this bound is one tighter than the 29-bit mask in
        // `cached_spill_weight` strictly requires (`< (1 << 29)` would
        // also round-trip) — possibly reserving the all-ones value;
        // confirm before relaxing.
        debug_assert!(spill_weight < ((1 << 29) - 1));
        self.spill_weight_and_props = spill_weight
            | (if minimal { 1 << 31 } else { 0 })
            | (if fixed { 1 << 30 } else { 0 })
            | (if stack { 1 << 29 } else { 0 });
    }

    /// Cached "minimal" property (bit 31).
    #[inline(always)]
    pub fn cached_minimal(&self) -> bool {
        self.spill_weight_and_props & (1 << 31) != 0
    }

    /// Cached "has fixed-register constraint" property (bit 30).
    #[inline(always)]
    pub fn cached_fixed(&self) -> bool {
        self.spill_weight_and_props & (1 << 30) != 0
    }

    /// Cached "has stack constraint" property (bit 29).
    #[inline(always)]
    pub fn cached_stack(&self) -> bool {
        self.spill_weight_and_props & (1 << 29) != 0
    }

    /// Set the cached "fixed" bit (bit 30) without touching the rest.
    #[inline(always)]
    pub fn set_cached_fixed(&mut self) {
        self.spill_weight_and_props |= 1 << 30;
    }

    /// Set the cached "stack" bit (bit 29) without touching the rest.
    #[inline(always)]
    pub fn set_cached_stack(&mut self) {
        self.spill_weight_and_props |= 1 << 29;
    }

    /// The cached 29-bit spill weight, with the property bits masked
    /// out.
    #[inline(always)]
    pub fn cached_spill_weight(&self) -> u32 {
        self.spill_weight_and_props & ((1 << 29) - 1)
    }
}
|
||||
|
||||
/// Unpacked form of the property bits cached on a `LiveBundle`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct BundleProperties {
    pub minimal: bool,
    pub fixed: bool,
}

/// A set of vregs/bundles that can share a single spill slot.
#[derive(Clone, Debug)]
pub struct SpillSet {
    /// Member vregs.
    pub vregs: SmallVec<[VRegIndex; 2]>,
    /// Assigned spill slot (invalid until allocated).
    pub slot: SpillSlotIndex,
    /// Preferred physical register, if any.
    pub reg_hint: PReg,
    /// Register class of all members.
    pub class: RegClass,
    /// Bundle that holds the spilled ranges, if one exists.
    pub spill_bundle: LiveBundleIndex,
    /// Whether a slot is actually required.
    pub required: bool,
    /// Slot size, as reported by `Function::spillslot_size` for `class`.
    pub size: u8,
}

/// Per-virtual-register state.
#[derive(Clone, Debug)]
pub struct VRegData {
    /// Sorted list of this vreg's live ranges.
    pub ranges: LiveRangeList,
    /// Block for which this vreg is a blockparam, if any.
    pub blockparam: Block,
    /// Is this a reference-typed (stackmap-tracked) vreg?
    pub is_ref: bool,
    /// Is this vreg pinned to a particular physical register?
    pub is_pinned: bool,
}

/// Per-physical-register state.
#[derive(Clone, Debug)]
pub struct PRegData {
    pub reg: PReg,
    /// Committed allocations on this register, keyed by range.
    pub allocations: LiveRangeSet,
}
|
||||
|
||||
/// Top-level allocator state for one function: arenas of ranges,
/// bundles, spill sets, per-vreg and per-preg data, work queues, and
/// the output edit/allocation lists.
#[derive(Clone, Debug)]
pub struct Env<'a, F: Function> {
    /// The function being allocated.
    pub func: &'a F,
    /// Machine description (register environment).
    pub env: &'a MachineEnv,
    /// Precomputed control-flow-graph info.
    pub cfginfo: CFGInfo,
    // Liveness bitsets; one `BitVec` per entry (presumably indexed by
    // block — computed in the liveness pass, not shown in this file).
    pub liveins: Vec<BitVec>,
    pub liveouts: Vec<BitVec>,
    /// Blockparam outputs: from-vreg, (end of) from-block, (start of)
    /// to-block, to-vreg. The field order is significant: these are sorted so
    /// that a scan over vregs, then blocks in each range, can scan in
    /// order through this (sorted) list and add allocs to the
    /// half-move list.
    pub blockparam_outs: Vec<(VRegIndex, Block, Block, VRegIndex)>,
    /// Blockparam inputs: to-vreg, (start of) to-block, (end of)
    /// from-block. As above for `blockparam_outs`, field order is
    /// significant.
    pub blockparam_ins: Vec<(VRegIndex, Block, Block)>,
    /// Blockparam allocs: block, idx, vreg, alloc. Info to describe
    /// blockparam locations at block entry, for metadata purposes
    /// (e.g. for the checker).
    pub blockparam_allocs: Vec<(Block, u32, VRegIndex, Allocation)>,

    // Arenas, indexed by the corresponding `define_index!` types.
    pub ranges: Vec<LiveRange>,
    pub bundles: Vec<LiveBundle>,
    pub spillsets: Vec<SpillSet>,
    pub vregs: Vec<VRegData>,
    /// The client-facing `VReg` for each `VRegIndex`.
    pub vreg_regs: Vec<VReg>,
    pub pregs: Vec<PRegData>,
    /// Priority queue of bundles awaiting allocation.
    pub allocation_queue: PrioQueue,
    pub clobbers: Vec<Inst>,   // Sorted list of insts with clobbers.
    pub safepoints: Vec<Inst>, // Sorted list of safepoint insts.
    /// Safepoints crossed by each vreg (keyed by vreg index).
    pub safepoints_per_vreg: HashMap<usize, HashSet<Inst>>,

    // Spill-slot state.
    pub spilled_bundles: Vec<LiveBundleIndex>,
    pub spillslots: Vec<SpillSlotData>,
    pub slots_by_size: Vec<SpillSlotList>,

    /// Scratch spillslot per size class, allocated on demand.
    pub extra_spillslot: Vec<Option<Allocation>>,

    // Program moves: these are moves in the provided program that we
    // handle with our internal machinery, in order to avoid the
    // overhead of ordinary operand processing. We expect the client
    // to not generate any code for instructions that return
    // `Some(..)` for `.is_move()`, and instead use the edits that we
    // provide to implement those moves (or some simplified version of
    // them) post-regalloc.
    //
    // (from-vreg, inst, from-alloc), sorted by (from-vreg, inst)
    pub prog_move_srcs: Vec<((VRegIndex, Inst), Allocation)>,
    // (to-vreg, inst, to-alloc), sorted by (to-vreg, inst)
    pub prog_move_dsts: Vec<((VRegIndex, Inst), Allocation)>,
    // (from-vreg, to-vreg) for bundle-merging.
    pub prog_move_merges: Vec<(LiveRangeIndex, LiveRangeIndex)>,

    // When multiple fixed-register constraints are present on a
    // single VReg at a single program point (this can happen for,
    // e.g., call args that use the same value multiple times), we
    // remove all but one of the fixed-register constraints, make a
    // note here, and add a clobber with that PReg instead to keep
    // the register available. When we produce the final edit-list, we
    // will insert a copy from wherever the VReg's primary allocation
    // was to the appropriate PReg.
    //
    // (progpoint, copy-from-preg, copy-to-preg, to-slot)
    pub multi_fixed_reg_fixups: Vec<(ProgPoint, PRegIndex, PRegIndex, usize)>,

    /// Moves to insert into the final program, accumulated during
    /// allocation.
    pub inserted_moves: Vec<InsertedMove>,

    // Output:
    pub edits: Vec<(u32, InsertMovePrio, Edit)>,
    pub allocs: Vec<Allocation>,
    /// Start offset into `allocs` for each instruction.
    pub inst_alloc_offsets: Vec<u32>,
    pub num_spillslots: u32,
    pub safepoint_slots: Vec<(ProgPoint, SpillSlot)>,

    /// Counters for diagnostics; see `Stats`.
    pub stats: Stats,

    // For debug output only: a list of textual annotations at every
    // ProgPoint to insert into the final allocated program listing.
    pub debug_annotations: std::collections::HashMap<ProgPoint, Vec<String>>,
    pub annotations_enabled: bool,
}
|
||||
|
||||
/// Per-spillslot state.
#[derive(Clone, Debug)]
pub struct SpillSlotData {
    /// Ranges committed to this slot.
    pub ranges: LiveRangeSet,
    pub class: RegClass,
    /// The `Allocation` handed out for this slot.
    pub alloc: Allocation,
    /// Next slot in the same size-class list (intrusive linked list via
    /// indices; see `SpillSlotList`).
    pub next_spillslot: SpillSlotIndex,
}

/// Head/tail of an intrusive linked list of spillslots of one size
/// class (linked through `SpillSlotData::next_spillslot`).
#[derive(Clone, Debug)]
pub struct SpillSlotList {
    pub first_spillslot: SpillSlotIndex,
    pub last_spillslot: SpillSlotIndex,
}

/// Max-heap of bundles to allocate, ordered by `PrioQueueEntry`'s
/// derived `Ord` (priority first).
#[derive(Clone, Debug)]
pub struct PrioQueue {
    pub heap: std::collections::BinaryHeap<PrioQueueEntry>,
}

/// Heap entry. Field order matters: the derived `Ord` compares `prio`
/// first, then `bundle`, then `reg_hint`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct PrioQueueEntry {
    pub prio: u32,
    pub bundle: LiveBundleIndex,
    pub reg_hint: PReg,
}
|
||||
|
||||
/// A set of committed ranges, stored in a BTree keyed by
/// `LiveRangeKey`. Because `LiveRangeKey` compares overlapping keys as
/// equal, lookups find any range overlapping the probe key; the map
/// only behaves coherently while its members are mutually
/// non-overlapping.
#[derive(Clone, Debug)]
pub struct LiveRangeSet {
    pub btree: BTreeMap<LiveRangeKey, LiveRangeIndex>,
}

/// BTree key: a `CodeRange` flattened to raw `ProgPoint` indices.
#[derive(Clone, Copy, Debug)]
pub struct LiveRangeKey {
    pub from: u32,
    pub to: u32,
}
|
||||
|
||||
impl LiveRangeKey {
|
||||
#[inline(always)]
|
||||
pub fn from_range(range: &CodeRange) -> Self {
|
||||
Self {
|
||||
from: range.from.to_index(),
|
||||
to: range.to.to_index(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn to_range(&self) -> CodeRange {
|
||||
CodeRange {
|
||||
from: ProgPoint::from_index(self.from),
|
||||
to: ProgPoint::from_index(self.to),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::cmp::PartialEq for LiveRangeKey {
    /// Two keys are "equal" iff their half-open ranges OVERLAP. This is
    /// deliberately not ordinary equality: it lets a BTreeMap probe with
    /// any range find an overlapping committed range. It is only a
    /// lawful equivalence while the stored keys are mutually
    /// non-overlapping (overlap-equality is not transitive in general).
    #[inline(always)]
    fn eq(&self, other: &Self) -> bool {
        self.to > other.from && self.from < other.to
    }
}
impl std::cmp::Eq for LiveRangeKey {}
impl std::cmp::PartialOrd for LiveRangeKey {
    #[inline(always)]
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl std::cmp::Ord for LiveRangeKey {
    /// Less/Greater for disjoint ranges; Equal for overlapping ones,
    /// matching the `PartialEq` impl above (same convention as
    /// `CodeRange`'s `Ord`).
    #[inline(always)]
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        if self.to <= other.from {
            std::cmp::Ordering::Less
        } else if self.from >= other.to {
            std::cmp::Ordering::Greater
        } else {
            std::cmp::Ordering::Equal
        }
    }
}
|
||||
|
||||
/// Compares bundles by an externally-supplied priority table
/// (`prios[bundle.index()]`), for container implementations that need a
/// `ContainerComparator` rather than an `Ord` on the elements.
pub struct PrioQueueComparator<'a> {
    pub prios: &'a [usize],
}
impl<'a> ContainerComparator for PrioQueueComparator<'a> {
    type Ix = LiveBundleIndex;
    fn compare(&self, a: Self::Ix, b: Self::Ix) -> std::cmp::Ordering {
        self.prios[a.index()].cmp(&self.prios[b.index()])
    }
}
|
||||
|
||||
impl PrioQueue {
|
||||
pub fn new() -> Self {
|
||||
PrioQueue {
|
||||
heap: std::collections::BinaryHeap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn insert(&mut self, bundle: LiveBundleIndex, prio: usize, reg_hint: PReg) {
|
||||
self.heap.push(PrioQueueEntry {
|
||||
prio: prio as u32,
|
||||
bundle,
|
||||
reg_hint,
|
||||
});
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn is_empty(self) -> bool {
|
||||
self.heap.is_empty()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn pop(&mut self) -> Option<(LiveBundleIndex, PReg)> {
|
||||
self.heap.pop().map(|entry| (entry.bundle, entry.reg_hint))
|
||||
}
|
||||
}
|
||||
|
||||
impl LiveRangeSet {
|
||||
pub(crate) fn new() -> Self {
|
||||
Self {
|
||||
btree: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A move the allocator has decided to insert into the final program.
#[derive(Clone, Debug)]
pub struct InsertedMove {
    /// Where the move is inserted.
    pub pos: ProgPoint,
    /// Ordering class among moves at the same point.
    pub prio: InsertMovePrio,
    pub from_alloc: Allocation,
    pub to_alloc: Allocation,
    /// Destination vreg, when known (for metadata purposes).
    pub to_vreg: Option<VReg>,
}

/// Ordering of inserted moves that land at the same program point.
/// Variant declaration order IS the priority: the derived
/// `PartialOrd`/`Ord` compare by declaration order, earliest first.
/// Do not reorder variants without intending to change move ordering.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum InsertMovePrio {
    InEdgeMoves,
    BlockParam,
    Regular,
    PostRegular,
    MultiFixedReg,
    ReusedInput,
    OutEdgeMoves,
}
|
||||
|
||||
/// Counters collected during allocation, for diagnostics and
/// performance analysis. All fields start at zero (`Default`).
#[derive(Clone, Copy, Debug, Default)]
pub struct Stats {
    // Liveness computation.
    pub livein_blocks: usize,
    pub livein_iterations: usize,
    pub initial_liverange_count: usize,
    pub merged_bundle_count: usize,
    // Program-move handling.
    pub prog_moves: usize,
    pub prog_moves_dead_src: usize,
    pub prog_move_merge_attempt: usize,
    pub prog_move_merge_success: usize,
    // Main allocation loop: per-bundle probing.
    pub process_bundle_count: usize,
    pub process_bundle_reg_probes_fixed: usize,
    pub process_bundle_reg_success_fixed: usize,
    pub process_bundle_bounding_range_probe_start_any: usize,
    pub process_bundle_bounding_range_probes_any: usize,
    pub process_bundle_bounding_range_success_any: usize,
    pub process_bundle_reg_probe_start_any: usize,
    pub process_bundle_reg_probes_any: usize,
    pub process_bundle_reg_success_any: usize,
    // Eviction and splitting.
    pub evict_bundle_event: usize,
    pub evict_bundle_count: usize,
    pub splits: usize,
    pub splits_clobbers: usize,
    pub splits_hot: usize,
    pub splits_conflicts: usize,
    pub splits_defs: usize,
    pub splits_all: usize,
    // Final state and spill handling.
    pub final_liverange_count: usize,
    pub final_bundle_count: usize,
    pub spill_bundle_count: usize,
    pub spill_bundle_reg_probes: usize,
    pub spill_bundle_reg_success: usize,
    // Move/edit generation.
    pub blockparam_ins_count: usize,
    pub blockparam_outs_count: usize,
    pub blockparam_allocs_count: usize,
    pub halfmoves_count: usize,
    pub edits_count: usize,
}
|
||||
141
src/ion/dump.rs
Normal file
141
src/ion/dump.rs
Normal file
@@ -0,0 +1,141 @@
|
||||
//! Debugging output.
|
||||
|
||||
use super::Env;
|
||||
use crate::{Function, ProgPoint, Block};
|
||||
|
||||
impl<'a, F: Function> Env<'a, F> {
|
||||
    /// Log (at `debug` level) the full allocator state: every bundle
    /// with its ranges, every vreg with its ranges, and every range
    /// with its uses. Read-only; for debugging only.
    pub fn dump_state(&self) {
        log::debug!("Bundles:");
        for (i, b) in self.bundles.iter().enumerate() {
            log::debug!(
                "bundle{}: spillset={:?} alloc={:?}",
                i,
                b.spillset,
                b.allocation
            );
            for entry in &b.ranges {
                log::debug!(
                    " * range {:?} -- {:?}: range{}",
                    entry.range.from,
                    entry.range.to,
                    entry.index.index()
                );
            }
        }
        log::debug!("VRegs:");
        for (i, v) in self.vregs.iter().enumerate() {
            log::debug!("vreg{}:", i);
            for entry in &v.ranges {
                log::debug!(
                    " * range {:?} -- {:?}: range{}",
                    entry.range.from,
                    entry.range.to,
                    entry.index.index()
                );
            }
        }
        log::debug!("Ranges:");
        for (i, r) in self.ranges.iter().enumerate() {
            log::debug!(
                "range{}: range={:?} vreg={:?} bundle={:?} weight={}",
                i,
                r.range,
                r.vreg,
                r.bundle,
                r.uses_spill_weight(),
            );
            for u in &r.uses {
                log::debug!(" * use at {:?} (slot {}): {:?}", u.pos, u.slot, u.operand);
            }
        }
    }
|
||||
|
||||
pub fn annotate(&mut self, progpoint: ProgPoint, s: String) {
|
||||
if self.annotations_enabled {
|
||||
self.debug_annotations
|
||||
.entry(progpoint)
|
||||
.or_insert_with(|| vec![])
|
||||
.push(s);
|
||||
}
|
||||
}
|
||||
|
||||
    /// Log (at `info` level) the final allocation results as an
    /// annotated program listing: for each block, its succs/preds, and
    /// for each instruction its operands paired with their assigned
    /// allocations, any clobbers, and the pre-/post-instruction debug
    /// annotations collected via `annotate`.
    pub fn dump_results(&self) {
        log::info!("=== REGALLOC RESULTS ===");
        for block in 0..self.func.blocks() {
            let block = Block::new(block);
            log::info!(
                "block{}: [succs {:?} preds {:?}]",
                block.index(),
                self.func
                    .block_succs(block)
                    .iter()
                    .map(|b| b.index())
                    .collect::<Vec<_>>(),
                self.func
                    .block_preds(block)
                    .iter()
                    .map(|b| b.index())
                    .collect::<Vec<_>>()
            );
            for inst in self.func.block_insns(block).iter() {
                // Annotations attached before the instruction.
                for annotation in self
                    .debug_annotations
                    .get(&ProgPoint::before(inst))
                    .map(|v| &v[..])
                    .unwrap_or(&[])
                {
                    log::info!(" inst{}-pre: {}", inst.index(), annotation);
                }
                let ops = self
                    .func
                    .inst_operands(inst)
                    .iter()
                    .map(|op| format!("{}", op))
                    .collect::<Vec<_>>();
                let clobbers = self
                    .func
                    .inst_clobbers(inst)
                    .iter()
                    .map(|preg| format!("{}", preg))
                    .collect::<Vec<_>>();
                // One allocation per operand, in operand order.
                let allocs = (0..ops.len())
                    .map(|i| format!("{}", self.get_alloc(inst, i)))
                    .collect::<Vec<_>>();
                let opname = if self.func.is_branch(inst) {
                    "br"
                } else if self.func.is_call(inst) {
                    "call"
                } else if self.func.is_ret(inst) {
                    "ret"
                } else {
                    "op"
                };
                // Render each operand alongside its allocation.
                let args = ops
                    .iter()
                    .zip(allocs.iter())
                    .map(|(op, alloc)| format!("{} [{}]", op, alloc))
                    .collect::<Vec<_>>();
                let clobbers = if clobbers.is_empty() {
                    "".to_string()
                } else {
                    format!(" [clobber: {}]", clobbers.join(", "))
                };
                log::info!(
                    " inst{}: {} {}{}",
                    inst.index(),
                    opname,
                    args.join(", "),
                    clobbers
                );
                // Annotations attached after the instruction.
                for annotation in self
                    .debug_annotations
                    .get(&ProgPoint::after(inst))
                    .map(|v| &v[..])
                    .unwrap_or(&[])
                {
                    log::info!(" inst{}-post: {}", inst.index(), annotation);
                }
            }
        }
    }
|
||||
}
|
||||
1190
src/ion/liveranges.rs
Normal file
1190
src/ion/liveranges.rs
Normal file
File diff suppressed because it is too large
Load Diff
439
src/ion/merge.rs
Normal file
439
src/ion/merge.rs
Normal file
@@ -0,0 +1,439 @@
|
||||
/*
|
||||
* The following license applies to this file, which was initially
|
||||
* derived from the files `js/src/jit/BacktrackingAllocator.h` and
|
||||
* `js/src/jit/BacktrackingAllocator.cpp` in Mozilla Firefox:
|
||||
*
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
*
|
||||
* Since the initial port, the design has been substantially evolved
|
||||
* and optimized.
|
||||
*/
|
||||
|
||||
//! Bundle merging.
|
||||
|
||||
use super::{
|
||||
Env, LiveBundleIndex, LiveRangeIndex, LiveRangeKey, Requirement, SpillSet, SpillSetIndex,
|
||||
SpillSlotIndex, VRegIndex,
|
||||
};
|
||||
use crate::{Function, Inst, OperandPolicy, PReg};
|
||||
use smallvec::smallvec;
|
||||
|
||||
impl<'a, F: Function> Env<'a, F> {
|
||||
pub fn merge_bundles(&mut self, from: LiveBundleIndex, to: LiveBundleIndex) -> bool {
|
||||
if from == to {
|
||||
// Merge bundle into self -- trivial merge.
|
||||
return true;
|
||||
}
|
||||
log::debug!(
|
||||
"merging from bundle{} to bundle{}",
|
||||
from.index(),
|
||||
to.index()
|
||||
);
|
||||
|
||||
// Both bundles must deal with the same RegClass.
|
||||
let from_rc = self.spillsets[self.bundles[from.index()].spillset.index()].class;
|
||||
let to_rc = self.spillsets[self.bundles[to.index()].spillset.index()].class;
|
||||
if from_rc != to_rc {
|
||||
log::debug!(" -> mismatching reg classes");
|
||||
return false;
|
||||
}
|
||||
|
||||
// If either bundle is already assigned (due to a pinned vreg), don't merge.
|
||||
if !self.bundles[from.index()].allocation.is_none()
|
||||
|| !self.bundles[to.index()].allocation.is_none()
|
||||
{
|
||||
log::debug!("one of the bundles is already assigned (pinned)");
|
||||
return false;
|
||||
}
|
||||
|
||||
#[cfg(debug)]
|
||||
{
|
||||
// Sanity check: both bundles should contain only ranges with appropriate VReg classes.
|
||||
for entry in &self.bundles[from.index()].ranges {
|
||||
let vreg = self.ranges[entry.index.index()].vreg;
|
||||
assert_eq!(rc, self.vregs[vreg.index()].reg.class());
|
||||
}
|
||||
for entry in &self.bundles[to.index()].ranges {
|
||||
let vreg = self.ranges[entry.index.index()].vreg;
|
||||
assert_eq!(rc, self.vregs[vreg.index()].reg.class());
|
||||
}
|
||||
}
|
||||
|
||||
// Check for overlap in LiveRanges and for conflicting
|
||||
// requirements.
|
||||
let ranges_from = &self.bundles[from.index()].ranges[..];
|
||||
let ranges_to = &self.bundles[to.index()].ranges[..];
|
||||
let mut idx_from = 0;
|
||||
let mut idx_to = 0;
|
||||
let mut range_count = 0;
|
||||
while idx_from < ranges_from.len() && idx_to < ranges_to.len() {
|
||||
range_count += 1;
|
||||
if range_count > 200 {
|
||||
log::debug!(
|
||||
"reached merge complexity (range_count = {}); exiting",
|
||||
range_count
|
||||
);
|
||||
// Limit merge complexity.
|
||||
return false;
|
||||
}
|
||||
|
||||
if ranges_from[idx_from].range.from >= ranges_to[idx_to].range.to {
|
||||
idx_to += 1;
|
||||
} else if ranges_to[idx_to].range.from >= ranges_from[idx_from].range.to {
|
||||
idx_from += 1;
|
||||
} else {
|
||||
// Overlap -- cannot merge.
|
||||
log::debug!(
|
||||
" -> overlap between {:?} and {:?}, exiting",
|
||||
ranges_from[idx_from].index,
|
||||
ranges_to[idx_to].index
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Check for a requirements conflict.
|
||||
if self.bundles[from.index()].cached_stack()
|
||||
|| self.bundles[from.index()].cached_fixed()
|
||||
|| self.bundles[to.index()].cached_stack()
|
||||
|| self.bundles[to.index()].cached_fixed()
|
||||
{
|
||||
let req = self
|
||||
.compute_requirement(from)
|
||||
.merge(self.compute_requirement(to));
|
||||
if req == Requirement::Conflict {
|
||||
log::debug!(" -> conflicting requirements; aborting merge");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
log::debug!(" -> committing to merge");
|
||||
|
||||
// If we reach here, then the bundles do not overlap -- merge
|
||||
// them! We do this with a merge-sort-like scan over both
|
||||
// lists, building a new range list and replacing the list on
|
||||
// `to` when we're done.
|
||||
if ranges_from.is_empty() {
|
||||
// `from` bundle is empty -- trivial merge.
|
||||
log::debug!(" -> from bundle{} is empty; trivial merge", from.index());
|
||||
return true;
|
||||
}
|
||||
if ranges_to.is_empty() {
|
||||
// `to` bundle is empty -- just move the list over from
|
||||
// `from` and set `bundle` up-link on all ranges.
|
||||
log::debug!(" -> to bundle{} is empty; trivial merge", to.index());
|
||||
let list = std::mem::replace(&mut self.bundles[from.index()].ranges, smallvec![]);
|
||||
for entry in &list {
|
||||
self.ranges[entry.index.index()].bundle = to;
|
||||
|
||||
if self.annotations_enabled {
|
||||
self.annotate(
|
||||
entry.range.from,
|
||||
format!(
|
||||
" MERGE range{} v{} from bundle{} to bundle{}",
|
||||
entry.index.index(),
|
||||
self.ranges[entry.index.index()].vreg.index(),
|
||||
from.index(),
|
||||
to.index(),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
self.bundles[to.index()].ranges = list;
|
||||
|
||||
if self.bundles[from.index()].cached_stack() {
|
||||
self.bundles[to.index()].set_cached_stack();
|
||||
}
|
||||
if self.bundles[from.index()].cached_fixed() {
|
||||
self.bundles[to.index()].set_cached_fixed();
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
log::debug!(
|
||||
"merging: ranges_from = {:?} ranges_to = {:?}",
|
||||
ranges_from,
|
||||
ranges_to
|
||||
);
|
||||
|
||||
// Two non-empty lists of LiveRanges: concatenate and
|
||||
// sort. This is faster than a mergesort-like merge into a new
|
||||
// list, empirically.
|
||||
let from_list = std::mem::replace(&mut self.bundles[from.index()].ranges, smallvec![]);
|
||||
for entry in &from_list {
|
||||
self.ranges[entry.index.index()].bundle = to;
|
||||
}
|
||||
self.bundles[to.index()]
|
||||
.ranges
|
||||
.extend_from_slice(&from_list[..]);
|
||||
self.bundles[to.index()]
|
||||
.ranges
|
||||
.sort_unstable_by_key(|entry| entry.range.from);
|
||||
|
||||
if self.annotations_enabled {
|
||||
log::debug!("merging: merged = {:?}", self.bundles[to.index()].ranges);
|
||||
let mut last_range = None;
|
||||
for i in 0..self.bundles[to.index()].ranges.len() {
|
||||
let entry = self.bundles[to.index()].ranges[i];
|
||||
if last_range.is_some() {
|
||||
assert!(last_range.unwrap() < entry.range);
|
||||
}
|
||||
last_range = Some(entry.range);
|
||||
|
||||
if self.ranges[entry.index.index()].bundle == from {
|
||||
self.annotate(
|
||||
entry.range.from,
|
||||
format!(
|
||||
" MERGE range{} v{} from bundle{} to bundle{}",
|
||||
entry.index.index(),
|
||||
self.ranges[entry.index.index()].vreg.index(),
|
||||
from.index(),
|
||||
to.index(),
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
log::debug!(
|
||||
" -> merged result for bundle{}: range{}",
|
||||
to.index(),
|
||||
entry.index.index(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if self.bundles[from.index()].spillset != self.bundles[to.index()].spillset {
|
||||
let from_vregs = std::mem::replace(
|
||||
&mut self.spillsets[self.bundles[from.index()].spillset.index()].vregs,
|
||||
smallvec![],
|
||||
);
|
||||
let to_vregs = &mut self.spillsets[self.bundles[to.index()].spillset.index()].vregs;
|
||||
for vreg in from_vregs {
|
||||
if !to_vregs.contains(&vreg) {
|
||||
to_vregs.push(vreg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if self.bundles[from.index()].cached_stack() {
|
||||
self.bundles[to.index()].set_cached_stack();
|
||||
}
|
||||
if self.bundles[from.index()].cached_fixed() {
|
||||
self.bundles[to.index()].set_cached_fixed();
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
pub fn merge_vreg_bundles(&mut self) {
|
||||
// Create a bundle for every vreg, initially.
|
||||
log::debug!("merge_vreg_bundles: creating vreg bundles");
|
||||
for vreg in 0..self.vregs.len() {
|
||||
let vreg = VRegIndex::new(vreg);
|
||||
if self.vregs[vreg.index()].ranges.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// If this is a pinned vreg, go ahead and add it to the
|
||||
// commitment map, and avoid creating a bundle entirely.
|
||||
if self.vregs[vreg.index()].is_pinned {
|
||||
for entry in &self.vregs[vreg.index()].ranges {
|
||||
let preg = self
|
||||
.func
|
||||
.is_pinned_vreg(self.vreg_regs[vreg.index()])
|
||||
.unwrap();
|
||||
let key = LiveRangeKey::from_range(&entry.range);
|
||||
self.pregs[preg.index()]
|
||||
.allocations
|
||||
.btree
|
||||
.insert(key, LiveRangeIndex::invalid());
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
let bundle = self.create_bundle();
|
||||
self.bundles[bundle.index()].ranges = self.vregs[vreg.index()].ranges.clone();
|
||||
log::debug!("vreg v{} gets bundle{}", vreg.index(), bundle.index());
|
||||
for entry in &self.bundles[bundle.index()].ranges {
|
||||
log::debug!(
|
||||
" -> with LR range{}: {:?}",
|
||||
entry.index.index(),
|
||||
entry.range
|
||||
);
|
||||
self.ranges[entry.index.index()].bundle = bundle;
|
||||
}
|
||||
|
||||
let mut fixed = false;
|
||||
let mut stack = false;
|
||||
for entry in &self.bundles[bundle.index()].ranges {
|
||||
for u in &self.ranges[entry.index.index()].uses {
|
||||
if let OperandPolicy::FixedReg(_) = u.operand.policy() {
|
||||
fixed = true;
|
||||
}
|
||||
if let OperandPolicy::Stack = u.operand.policy() {
|
||||
stack = true;
|
||||
}
|
||||
if fixed && stack {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if fixed {
|
||||
self.bundles[bundle.index()].set_cached_fixed();
|
||||
}
|
||||
if stack {
|
||||
self.bundles[bundle.index()].set_cached_stack();
|
||||
}
|
||||
|
||||
// Create a spillslot for this bundle.
|
||||
let ssidx = SpillSetIndex::new(self.spillsets.len());
|
||||
let reg = self.vreg_regs[vreg.index()];
|
||||
let size = self.func.spillslot_size(reg.class()) as u8;
|
||||
self.spillsets.push(SpillSet {
|
||||
vregs: smallvec![vreg],
|
||||
slot: SpillSlotIndex::invalid(),
|
||||
size,
|
||||
required: false,
|
||||
class: reg.class(),
|
||||
reg_hint: PReg::invalid(),
|
||||
spill_bundle: LiveBundleIndex::invalid(),
|
||||
});
|
||||
self.bundles[bundle.index()].spillset = ssidx;
|
||||
}
|
||||
|
||||
for inst in 0..self.func.insts() {
|
||||
let inst = Inst::new(inst);
|
||||
|
||||
// Attempt to merge Reuse-policy operand outputs with the
|
||||
// corresponding inputs.
|
||||
for op in self.func.inst_operands(inst) {
|
||||
if let OperandPolicy::Reuse(reuse_idx) = op.policy() {
|
||||
let src_vreg = op.vreg();
|
||||
let dst_vreg = self.func.inst_operands(inst)[reuse_idx].vreg();
|
||||
if self.vregs[src_vreg.vreg()].is_pinned
|
||||
|| self.vregs[dst_vreg.vreg()].is_pinned
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
log::debug!(
|
||||
"trying to merge reused-input def: src {} to dst {}",
|
||||
src_vreg,
|
||||
dst_vreg
|
||||
);
|
||||
let src_bundle =
|
||||
self.ranges[self.vregs[src_vreg.vreg()].ranges[0].index.index()].bundle;
|
||||
assert!(src_bundle.is_valid());
|
||||
let dest_bundle =
|
||||
self.ranges[self.vregs[dst_vreg.vreg()].ranges[0].index.index()].bundle;
|
||||
assert!(dest_bundle.is_valid());
|
||||
self.merge_bundles(/* from */ dest_bundle, /* to */ src_bundle);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt to merge blockparams with their inputs.
|
||||
for i in 0..self.blockparam_outs.len() {
|
||||
let (from_vreg, _, _, to_vreg) = self.blockparam_outs[i];
|
||||
log::debug!(
|
||||
"trying to merge blockparam v{} with input v{}",
|
||||
to_vreg.index(),
|
||||
from_vreg.index()
|
||||
);
|
||||
let to_bundle = self.ranges[self.vregs[to_vreg.index()].ranges[0].index.index()].bundle;
|
||||
assert!(to_bundle.is_valid());
|
||||
let from_bundle =
|
||||
self.ranges[self.vregs[from_vreg.index()].ranges[0].index.index()].bundle;
|
||||
assert!(from_bundle.is_valid());
|
||||
log::debug!(
|
||||
" -> from bundle{} to bundle{}",
|
||||
from_bundle.index(),
|
||||
to_bundle.index()
|
||||
);
|
||||
self.merge_bundles(from_bundle, to_bundle);
|
||||
}
|
||||
|
||||
// Attempt to merge move srcs/dsts.
|
||||
for i in 0..self.prog_move_merges.len() {
|
||||
let (src, dst) = self.prog_move_merges[i];
|
||||
log::debug!("trying to merge move src LR {:?} to dst LR {:?}", src, dst);
|
||||
let src = self.resolve_merged_lr(src);
|
||||
let dst = self.resolve_merged_lr(dst);
|
||||
log::debug!(
|
||||
"resolved LR-construction merging chains: move-merge is now src LR {:?} to dst LR {:?}",
|
||||
src,
|
||||
dst
|
||||
);
|
||||
|
||||
let dst_vreg = self.vreg_regs[self.ranges[dst.index()].vreg.index()];
|
||||
let src_vreg = self.vreg_regs[self.ranges[src.index()].vreg.index()];
|
||||
if self.vregs[src_vreg.vreg()].is_pinned && self.vregs[dst_vreg.vreg()].is_pinned {
|
||||
continue;
|
||||
}
|
||||
if self.vregs[src_vreg.vreg()].is_pinned {
|
||||
let dest_bundle = self.ranges[dst.index()].bundle;
|
||||
let spillset = self.bundles[dest_bundle.index()].spillset;
|
||||
self.spillsets[spillset.index()].reg_hint =
|
||||
self.func.is_pinned_vreg(src_vreg).unwrap();
|
||||
continue;
|
||||
}
|
||||
if self.vregs[dst_vreg.vreg()].is_pinned {
|
||||
let src_bundle = self.ranges[src.index()].bundle;
|
||||
let spillset = self.bundles[src_bundle.index()].spillset;
|
||||
self.spillsets[spillset.index()].reg_hint =
|
||||
self.func.is_pinned_vreg(dst_vreg).unwrap();
|
||||
continue;
|
||||
}
|
||||
|
||||
let src_bundle = self.ranges[src.index()].bundle;
|
||||
assert!(src_bundle.is_valid());
|
||||
let dest_bundle = self.ranges[dst.index()].bundle;
|
||||
assert!(dest_bundle.is_valid());
|
||||
self.stats.prog_move_merge_attempt += 1;
|
||||
if self.merge_bundles(/* from */ dest_bundle, /* to */ src_bundle) {
|
||||
self.stats.prog_move_merge_success += 1;
|
||||
}
|
||||
}
|
||||
|
||||
log::debug!("done merging bundles");
|
||||
}
|
||||
|
||||
pub fn resolve_merged_lr(&self, mut lr: LiveRangeIndex) -> LiveRangeIndex {
|
||||
let mut iter = 0;
|
||||
while iter < 100 && self.ranges[lr.index()].merged_into.is_valid() {
|
||||
lr = self.ranges[lr.index()].merged_into;
|
||||
iter += 1;
|
||||
}
|
||||
lr
|
||||
}
|
||||
|
||||
pub fn compute_bundle_prio(&self, bundle: LiveBundleIndex) -> u32 {
|
||||
// The priority is simply the total "length" -- the number of
|
||||
// instructions covered by all LiveRanges.
|
||||
let mut total = 0;
|
||||
for entry in &self.bundles[bundle.index()].ranges {
|
||||
total += entry.range.len() as u32;
|
||||
}
|
||||
total
|
||||
}
|
||||
|
||||
pub fn queue_bundles(&mut self) {
|
||||
for bundle in 0..self.bundles.len() {
|
||||
log::debug!("enqueueing bundle{}", bundle);
|
||||
if self.bundles[bundle].ranges.is_empty() {
|
||||
log::debug!(" -> no ranges; skipping");
|
||||
continue;
|
||||
}
|
||||
let bundle = LiveBundleIndex::new(bundle);
|
||||
let prio = self.compute_bundle_prio(bundle);
|
||||
log::debug!(" -> prio {}", prio);
|
||||
self.bundles[bundle.index()].prio = prio;
|
||||
self.recompute_bundle_properties(bundle);
|
||||
self.allocation_queue
|
||||
.insert(bundle, prio as usize, PReg::invalid());
|
||||
}
|
||||
self.stats.merged_bundle_count = self.allocation_queue.heap.len();
|
||||
}
|
||||
}
|
||||
5062
src/ion/mod.rs
5062
src/ion/mod.rs
File diff suppressed because it is too large
Load Diff
1167
src/ion/moves.rs
Normal file
1167
src/ion/moves.rs
Normal file
File diff suppressed because it is too large
Load Diff
1057
src/ion/process.rs
Normal file
1057
src/ion/process.rs
Normal file
File diff suppressed because it is too large
Load Diff
142
src/ion/redundant_moves.rs
Normal file
142
src/ion/redundant_moves.rs
Normal file
@@ -0,0 +1,142 @@
|
||||
//! Redundant-move elimination.
|
||||
|
||||
use crate::{Allocation, VReg};
|
||||
use fxhash::FxHashMap;
|
||||
use smallvec::{smallvec, SmallVec};
|
||||
|
||||
/// What we know about the value currently held by one `Allocation`,
/// as tracked by the redundant-move eliminator.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum RedundantMoveState {
    /// The location holds a copy of the value in another `Allocation`;
    /// if known, the `VReg` whose value this is.
    Copy(Allocation, Option<VReg>),
    /// The location holds the original (defining) value of the given `VReg`.
    Orig(VReg),
    /// Nothing is known about the location's contents.
    None,
}
|
||||
/// Tracks copy relationships between allocations so that moves whose
/// effect is already in place can be elided.
#[derive(Clone, Debug, Default)]
pub struct RedundantMoveEliminator {
    /// Forward map: allocation -> what we know it currently holds.
    allocs: FxHashMap<Allocation, RedundantMoveState>,
    /// Reverse map: source allocation -> all allocations recorded as
    /// copies of it, so those copies can be invalidated when the
    /// source is overwritten.
    reverse_allocs: FxHashMap<Allocation, SmallVec<[Allocation; 4]>>,
}
|
||||
/// The outcome of processing one move through the eliminator.
#[derive(Copy, Clone, Debug)]
pub struct RedundantMoveAction {
    /// If true, the move is redundant and may be omitted.
    pub elide: bool,
    /// If set, the destination allocation now carries the given vreg's
    /// value; the caller should record this (allocation, vreg) def.
    pub def_alloc: Option<(Allocation, VReg)>,
}
|
||||
|
||||
impl RedundantMoveEliminator {
|
||||
pub fn process_move(
|
||||
&mut self,
|
||||
from: Allocation,
|
||||
to: Allocation,
|
||||
to_vreg: Option<VReg>,
|
||||
) -> RedundantMoveAction {
|
||||
// Look up the src and dest.
|
||||
let from_state = self
|
||||
.allocs
|
||||
.get(&from)
|
||||
.map(|&p| p)
|
||||
.unwrap_or(RedundantMoveState::None);
|
||||
let to_state = self
|
||||
.allocs
|
||||
.get(&to)
|
||||
.map(|&p| p)
|
||||
.unwrap_or(RedundantMoveState::None);
|
||||
|
||||
log::debug!(
|
||||
" -> redundant move tracker: from {} to {} to_vreg {:?}",
|
||||
from,
|
||||
to,
|
||||
to_vreg
|
||||
);
|
||||
log::debug!(
|
||||
" -> from_state {:?} to_state {:?}",
|
||||
from_state,
|
||||
to_state
|
||||
);
|
||||
|
||||
if from == to && to_vreg.is_some() {
|
||||
self.clear_alloc(to);
|
||||
self.allocs
|
||||
.insert(to, RedundantMoveState::Orig(to_vreg.unwrap()));
|
||||
return RedundantMoveAction {
|
||||
elide: true,
|
||||
def_alloc: Some((to, to_vreg.unwrap())),
|
||||
};
|
||||
}
|
||||
|
||||
let src_vreg = match from_state {
|
||||
RedundantMoveState::Copy(_, opt_r) => opt_r,
|
||||
RedundantMoveState::Orig(r) => Some(r),
|
||||
_ => None,
|
||||
};
|
||||
log::debug!(" -> src_vreg {:?}", src_vreg);
|
||||
let dst_vreg = to_vreg.or(src_vreg);
|
||||
log::debug!(" -> dst_vreg {:?}", dst_vreg);
|
||||
let existing_dst_vreg = match to_state {
|
||||
RedundantMoveState::Copy(_, opt_r) => opt_r,
|
||||
RedundantMoveState::Orig(r) => Some(r),
|
||||
_ => None,
|
||||
};
|
||||
log::debug!(" -> existing_dst_vreg {:?}", existing_dst_vreg);
|
||||
|
||||
let elide = match (from_state, to_state) {
|
||||
(_, RedundantMoveState::Copy(orig_alloc, _)) if orig_alloc == from => true,
|
||||
(RedundantMoveState::Copy(new_alloc, _), _) if new_alloc == to => true,
|
||||
_ => false,
|
||||
};
|
||||
log::debug!(" -> elide {}", elide);
|
||||
|
||||
let def_alloc = if dst_vreg != existing_dst_vreg && dst_vreg.is_some() {
|
||||
Some((to, dst_vreg.unwrap()))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
log::debug!(" -> def_alloc {:?}", def_alloc);
|
||||
|
||||
// Invalidate all existing copies of `to` if `to` actually changed value.
|
||||
if !elide {
|
||||
self.clear_alloc(to);
|
||||
}
|
||||
|
||||
// Set up forward and reverse mapping. Don't track stack-to-stack copies.
|
||||
if from.is_reg() || to.is_reg() {
|
||||
self.allocs
|
||||
.insert(to, RedundantMoveState::Copy(from, dst_vreg));
|
||||
log::debug!(
|
||||
" -> create mapping {} -> {:?}",
|
||||
to,
|
||||
RedundantMoveState::Copy(from, dst_vreg)
|
||||
);
|
||||
self.reverse_allocs
|
||||
.entry(from)
|
||||
.or_insert_with(|| smallvec![])
|
||||
.push(to);
|
||||
}
|
||||
|
||||
RedundantMoveAction { elide, def_alloc }
|
||||
}
|
||||
|
||||
pub fn clear(&mut self) {
|
||||
log::debug!(" redundant move eliminator cleared");
|
||||
self.allocs.clear();
|
||||
self.reverse_allocs.clear();
|
||||
}
|
||||
|
||||
pub fn clear_alloc(&mut self, alloc: Allocation) {
|
||||
log::debug!(" redundant move eliminator: clear {:?}", alloc);
|
||||
if let Some(ref mut existing_copies) = self.reverse_allocs.get_mut(&alloc) {
|
||||
for to_inval in existing_copies.iter() {
|
||||
log::debug!(" -> clear existing copy: {:?}", to_inval);
|
||||
if let Some(val) = self.allocs.get_mut(to_inval) {
|
||||
match val {
|
||||
RedundantMoveState::Copy(_, Some(vreg)) => {
|
||||
*val = RedundantMoveState::Orig(*vreg);
|
||||
}
|
||||
_ => *val = RedundantMoveState::None,
|
||||
}
|
||||
}
|
||||
self.allocs.remove(to_inval);
|
||||
}
|
||||
existing_copies.clear();
|
||||
}
|
||||
self.allocs.remove(&alloc);
|
||||
}
|
||||
}
|
||||
123
src/ion/reg_traversal.rs
Normal file
123
src/ion/reg_traversal.rs
Normal file
@@ -0,0 +1,123 @@
|
||||
use crate::{MachineEnv, PReg, RegClass};
|
||||
|
||||
/// This iterator represents a traversal through all allocatable
/// registers of a given class, in a certain order designed to
/// minimize allocation contention.
///
/// The order in which we try registers is somewhat complex:
/// - First, if there is a hint, we try that.
/// - Then, we try registers in a traversal order that is based on an
///   "offset" (usually the bundle index) spreading pressure evenly
///   among registers to reduce commitment-map contention.
/// - Within that scan, we try registers in two groups: first,
///   preferred registers; then, non-preferred registers. (In normal
///   usage, these consist of caller-save and callee-save registers
///   respectively, to minimize clobber-saves; but they need not.)
pub struct RegTraversalIter<'a> {
    /// Machine environment supplying the per-class register lists.
    env: &'a MachineEnv,
    /// Register class being traversed, as a `usize` index.
    class: usize,
    /// Up to two hint registers, tried first (slot 0 before slot 1).
    hints: [Option<PReg>; 2],
    /// Next hint slot to try.
    hint_idx: usize,
    /// Scan position within the preferred-register list.
    pref_idx: usize,
    /// Scan position within the non-preferred-register list.
    non_pref_idx: usize,
    /// Rotation offset applied to the preferred-register scan.
    offset_pref: usize,
    /// Rotation offset applied to the non-preferred-register scan.
    offset_non_pref: usize,
    /// If true, yield only `fixed` (once) and nothing else.
    is_fixed: bool,
    /// The single fixed register to yield, if any.
    fixed: Option<PReg>,
}
|
||||
|
||||
impl<'a> RegTraversalIter<'a> {
|
||||
pub fn new(
|
||||
env: &'a MachineEnv,
|
||||
class: RegClass,
|
||||
hint_reg: PReg,
|
||||
hint2_reg: PReg,
|
||||
offset: usize,
|
||||
fixed: Option<PReg>,
|
||||
) -> Self {
|
||||
let mut hint_reg = if hint_reg != PReg::invalid() {
|
||||
Some(hint_reg)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let mut hint2_reg = if hint2_reg != PReg::invalid() {
|
||||
Some(hint2_reg)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if hint_reg.is_none() {
|
||||
hint_reg = hint2_reg;
|
||||
hint2_reg = None;
|
||||
}
|
||||
let hints = [hint_reg, hint2_reg];
|
||||
let class = class as u8 as usize;
|
||||
let offset_pref = if env.preferred_regs_by_class[class].len() > 0 {
|
||||
offset % env.preferred_regs_by_class[class].len()
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let offset_non_pref = if env.non_preferred_regs_by_class[class].len() > 0 {
|
||||
offset % env.non_preferred_regs_by_class[class].len()
|
||||
} else {
|
||||
0
|
||||
};
|
||||
Self {
|
||||
env,
|
||||
class,
|
||||
hints,
|
||||
hint_idx: 0,
|
||||
pref_idx: 0,
|
||||
non_pref_idx: 0,
|
||||
offset_pref,
|
||||
offset_non_pref,
|
||||
is_fixed: fixed.is_some(),
|
||||
fixed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> std::iter::Iterator for RegTraversalIter<'a> {
    type Item = PReg;

    fn next(&mut self) -> Option<PReg> {
        // Fixed-register mode: yield the single fixed register once,
        // then end the iteration.
        if self.is_fixed {
            let ret = self.fixed;
            self.fixed = None;
            return ret;
        }

        // Wrap a rotated index back into `[0, limit)`. Only valid for
        // `idx < 2 * limit`, which holds because both summands below
        // are each less than `limit`.
        fn wrap(idx: usize, limit: usize) -> usize {
            if idx >= limit {
                idx - limit
            } else {
                idx
            }
        }
        // Hints first (slot 0, then slot 1); `new()` packed present
        // hints toward slot 0, so an empty slot ends the hint phase.
        if self.hint_idx < 2 && self.hints[self.hint_idx].is_some() {
            let h = self.hints[self.hint_idx];
            self.hint_idx += 1;
            return h;
        }
        // Preferred registers, scanned starting at `offset_pref` and
        // wrapping around; skip registers already yielded as hints.
        while self.pref_idx < self.env.preferred_regs_by_class[self.class].len() {
            let arr = &self.env.preferred_regs_by_class[self.class][..];
            let r = arr[wrap(self.pref_idx + self.offset_pref, arr.len())];
            self.pref_idx += 1;
            if Some(r) == self.hints[0] || Some(r) == self.hints[1] {
                continue;
            }
            return Some(r);
        }
        // Then non-preferred registers, same rotated scan.
        while self.non_pref_idx < self.env.non_preferred_regs_by_class[self.class].len() {
            let arr = &self.env.non_preferred_regs_by_class[self.class][..];
            let r = arr[wrap(self.non_pref_idx + self.offset_non_pref, arr.len())];
            self.non_pref_idx += 1;
            if Some(r) == self.hints[0] || Some(r) == self.hints[1] {
                continue;
            }
            return Some(r);
        }
        None
    }
}
|
||||
92
src/ion/requirement.rs
Normal file
92
src/ion/requirement.rs
Normal file
@@ -0,0 +1,92 @@
|
||||
//! Requirements computation.
|
||||
|
||||
use super::{Env, LiveBundleIndex};
|
||||
use crate::{Function, Operand, OperandPolicy, PReg, RegClass};
|
||||
|
||||
/// A register-placement requirement, built by merging the operand
/// constraints of all uses in a bundle.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Requirement {
    /// No constraint seen yet; acts as the identity under `merge`.
    Unknown,
    /// Must be placed in the given physical register.
    Fixed(PReg),
    /// Must be placed in some register of the given class.
    Register(RegClass),
    /// Must be placed on the stack, with the given class.
    Stack(RegClass),
    /// Any location (register or stack) of the given class.
    Any(RegClass),
    /// Two constraints could not be reconciled.
    Conflict,
}
|
||||
impl Requirement {
    /// The register class of this requirement.
    ///
    /// Panics on `Unknown` and `Conflict`, which carry no class.
    #[inline(always)]
    pub fn class(self) -> RegClass {
        match self {
            Requirement::Unknown => panic!("No class for unknown Requirement"),
            Requirement::Fixed(preg) => preg.class(),
            Requirement::Register(class) | Requirement::Any(class) | Requirement::Stack(class) => {
                class
            }
            Requirement::Conflict => panic!("No class for conflicted Requirement"),
        }
    }
    /// Combine two requirements into the weakest requirement that
    /// satisfies both, or `Conflict` if none exists.
    ///
    /// NOTE: arm order is significant. `Unknown` and `Conflict` are
    /// handled before the `Any` arm, so `other.class()` in that arm
    /// can never be called on a classless variant.
    #[inline(always)]
    pub fn merge(self, other: Requirement) -> Requirement {
        match (self, other) {
            // Unknown is the identity.
            (Requirement::Unknown, other) | (other, Requirement::Unknown) => other,
            // Conflict absorbs everything.
            (Requirement::Conflict, _) | (_, Requirement::Conflict) => Requirement::Conflict,
            // Any(rc) is satisfied by any same-class requirement.
            (other, Requirement::Any(rc)) | (Requirement::Any(rc), other) => {
                if other.class() == rc {
                    other
                } else {
                    Requirement::Conflict
                }
            }
            (Requirement::Stack(rc1), Requirement::Stack(rc2)) => {
                if rc1 == rc2 {
                    self
                } else {
                    Requirement::Conflict
                }
            }
            // A fixed register satisfies a same-class Register
            // requirement; the result stays Fixed.
            (Requirement::Register(rc), Requirement::Fixed(preg))
            | (Requirement::Fixed(preg), Requirement::Register(rc)) => {
                if rc == preg.class() {
                    Requirement::Fixed(preg)
                } else {
                    Requirement::Conflict
                }
            }
            (Requirement::Register(rc1), Requirement::Register(rc2)) => {
                if rc1 == rc2 {
                    self
                } else {
                    Requirement::Conflict
                }
            }
            // Two identical fixed-register requirements agree; two
            // different ones (and all remaining combinations, e.g.
            // Stack vs. Register) conflict.
            (Requirement::Fixed(a), Requirement::Fixed(b)) if a == b => self,
            _ => Requirement::Conflict,
        }
    }
    /// Translate a single operand's policy into a requirement.
    #[inline(always)]
    pub fn from_operand(op: Operand) -> Requirement {
        match op.policy() {
            OperandPolicy::FixedReg(preg) => Requirement::Fixed(preg),
            // A reused-input def must also land in a register.
            OperandPolicy::Reg | OperandPolicy::Reuse(_) => Requirement::Register(op.class()),
            OperandPolicy::Stack => Requirement::Stack(op.class()),
            _ => Requirement::Any(op.class()),
        }
    }
}
|
||||
|
||||
impl<'a, F: Function> Env<'a, F> {
|
||||
pub fn compute_requirement(&self, bundle: LiveBundleIndex) -> Requirement {
|
||||
let mut req = Requirement::Unknown;
|
||||
log::debug!("compute_requirement: {:?}", bundle);
|
||||
for entry in &self.bundles[bundle.index()].ranges {
|
||||
log::debug!(" -> LR {:?}", entry.index);
|
||||
for u in &self.ranges[entry.index.index()].uses {
|
||||
log::debug!(" -> use {:?}", u);
|
||||
let r = Requirement::from_operand(u.operand);
|
||||
req = req.merge(r);
|
||||
log::debug!(" -> req {:?}", req);
|
||||
}
|
||||
}
|
||||
log::debug!(" -> final: {:?}", req);
|
||||
req
|
||||
}
|
||||
}
|
||||
218
src/ion/spill.rs
Normal file
218
src/ion/spill.rs
Normal file
@@ -0,0 +1,218 @@
|
||||
/*
|
||||
* The following license applies to this file, which was initially
|
||||
* derived from the files `js/src/jit/BacktrackingAllocator.h` and
|
||||
* `js/src/jit/BacktrackingAllocator.cpp` in Mozilla Firefox:
|
||||
*
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
*
|
||||
* Since the initial port, the design has been substantially evolved
|
||||
* and optimized.
|
||||
*/
|
||||
|
||||
//! Spillslot allocation.
|
||||
|
||||
use super::{
|
||||
AllocRegResult, Env, LiveRangeKey, LiveRangeSet, PReg, PRegIndex, RegClass, RegTraversalIter,
|
||||
SpillSetIndex, SpillSlotData, SpillSlotIndex, SpillSlotList,
|
||||
};
|
||||
use crate::{Allocation, Function, SpillSlot};
|
||||
|
||||
impl<'a, F: Function> Env<'a, F> {
    /// Second-chance allocation: for each bundle that was spilled,
    /// try once more to find a physical register; if none fits, mark
    /// the bundle's spillset as actually requiring a stack slot.
    pub fn try_allocating_regs_for_spilled_bundles(&mut self) {
        log::debug!("allocating regs for spilled bundles");
        for i in 0..self.spilled_bundles.len() {
            let bundle = self.spilled_bundles[i]; // don't borrow self

            let class = self.spillsets[self.bundles[bundle.index()].spillset.index()].class;
            let hint = self.spillsets[self.bundles[bundle.index()].spillset.index()].reg_hint;

            // This may be an empty-range bundle whose ranges are not
            // sorted; sort all range-lists again here.
            self.bundles[bundle.index()]
                .ranges
                .sort_unstable_by_key(|entry| entry.range.from);

            let mut success = false;
            self.stats.spill_bundle_reg_probes += 1;
            // Probe registers in contention-spreading order, rotated
            // by the bundle index; stop at the first that fits.
            for preg in
                RegTraversalIter::new(self.env, class, hint, PReg::invalid(), bundle.index(), None)
            {
                log::debug!("trying bundle {:?} to preg {:?}", bundle, preg);
                let preg_idx = PRegIndex::new(preg.index());
                if let AllocRegResult::Allocated(_) =
                    self.try_to_allocate_bundle_to_reg(bundle, preg_idx, None)
                {
                    self.stats.spill_bundle_reg_success += 1;
                    success = true;
                    break;
                }
            }
            if !success {
                log::debug!(
                    "spilling bundle {:?}: marking spillset {:?} as required",
                    bundle,
                    self.bundles[bundle.index()].spillset
                );
                self.spillsets[self.bundles[bundle.index()].spillset.index()].required = true;
            }
        }
    }

    /// Check whether `spillslot` is free over every live range of
    /// every vreg in `spillset` (no overlap in the slot's range map).
    pub fn spillslot_can_fit_spillset(
        &mut self,
        spillslot: SpillSlotIndex,
        spillset: SpillSetIndex,
    ) -> bool {
        for &vreg in &self.spillsets[spillset.index()].vregs {
            for entry in &self.vregs[vreg.index()].ranges {
                if self.spillslots[spillslot.index()]
                    .ranges
                    .btree
                    .contains_key(&LiveRangeKey::from_range(&entry.range))
                {
                    return false;
                }
            }
        }
        true
    }

    /// Commit `spillset` to `spillslot`: record the assignment and
    /// insert every constituent live range into the slot's range map.
    pub fn allocate_spillset_to_spillslot(
        &mut self,
        spillset: SpillSetIndex,
        spillslot: SpillSlotIndex,
    ) {
        self.spillsets[spillset.index()].slot = spillslot;
        for i in 0..self.spillsets[spillset.index()].vregs.len() {
            // don't borrow self
            let vreg = self.spillsets[spillset.index()].vregs[i];
            log::debug!(
                "spillslot {:?} alloc'ed to spillset {:?}: vreg {:?}",
                spillslot,
                spillset,
                vreg,
            );
            for entry in &self.vregs[vreg.index()].ranges {
                log::debug!(
                    "spillslot {:?} getting range {:?} from LR {:?} from vreg {:?}",
                    spillslot,
                    entry.range,
                    entry.index,
                    vreg,
                );
                self.spillslots[spillslot.index()]
                    .ranges
                    .btree
                    .insert(LiveRangeKey::from_range(&entry.range), entry.index);
            }
        }
    }

    /// Assign a spillslot to every required spillset, reusing existing
    /// slots of the same size when their committed ranges don't
    /// overlap, and finally give each spillslot a concrete stack
    /// `Allocation`.
    pub fn allocate_spillslots(&mut self) {
        for spillset in 0..self.spillsets.len() {
            log::debug!("allocate spillslot: {}", spillset);
            let spillset = SpillSetIndex::new(spillset);
            if !self.spillsets[spillset.index()].required {
                continue;
            }
            // Get or create the spillslot list for this size.
            let size = self.spillsets[spillset.index()].size as usize;
            if size >= self.slots_by_size.len() {
                self.slots_by_size.resize(
                    size + 1,
                    SpillSlotList {
                        first_spillslot: SpillSlotIndex::invalid(),
                        last_spillslot: SpillSlotIndex::invalid(),
                    },
                );
            }
            // Try a few existing spillslots. The per-size list is a
            // singly-linked list threaded through
            // `SpillSlotData::next_spillslot`; slots that don't fit
            // are rotated to the back so later queries try fresher
            // candidates first.
            let mut spillslot_iter = self.slots_by_size[size].first_spillslot;
            let mut first_slot = SpillSlotIndex::invalid();
            let mut prev = SpillSlotIndex::invalid();
            let mut success = false;
            for _attempt in 0..10 {
                if spillslot_iter.is_invalid() {
                    break;
                }
                if spillslot_iter == first_slot {
                    // We've started looking at slots we placed at the end; end search.
                    break;
                }
                if first_slot.is_invalid() {
                    first_slot = spillslot_iter;
                }

                if self.spillslot_can_fit_spillset(spillslot_iter, spillset) {
                    self.allocate_spillset_to_spillslot(spillset, spillslot_iter);
                    success = true;
                    break;
                }
                // Remove the slot and place it at the end of the respective list.
                // Unlink `spillslot_iter` from its current position...
                let next = self.spillslots[spillslot_iter.index()].next_spillslot;
                if prev.is_valid() {
                    self.spillslots[prev.index()].next_spillslot = next;
                } else {
                    self.slots_by_size[size].first_spillslot = next;
                }
                if !next.is_valid() {
                    self.slots_by_size[size].last_spillslot = prev;
                }

                // ...and append it after the current tail.
                let last = self.slots_by_size[size].last_spillslot;
                if last.is_valid() {
                    self.spillslots[last.index()].next_spillslot = spillslot_iter;
                } else {
                    self.slots_by_size[size].first_spillslot = spillslot_iter;
                }
                self.slots_by_size[size].last_spillslot = spillslot_iter;

                // NOTE(review): the moved slot's own `next_spillslot`
                // is not reset here, and `prev` is set to the slot
                // that was just moved to the tail rather than to the
                // node actually preceding `next` -- the traversal is
                // driven by the saved `next` so this pass stays
                // consistent, but confirm the list invariants hold
                // across multiple relocations.
                prev = spillslot_iter;
                spillslot_iter = next;
            }

            if !success {
                // Allocate a new spillslot and push it at the front of
                // the per-size list.
                let spillslot = SpillSlotIndex::new(self.spillslots.len());
                let next = self.slots_by_size[size].first_spillslot;
                self.spillslots.push(SpillSlotData {
                    ranges: LiveRangeSet::new(),
                    next_spillslot: next,
                    alloc: Allocation::none(),
                    class: self.spillsets[spillset.index()].class,
                });
                self.slots_by_size[size].first_spillslot = spillslot;
                if !next.is_valid() {
                    self.slots_by_size[size].last_spillslot = spillslot;
                }

                self.allocate_spillset_to_spillslot(spillset, spillslot);
            }
        }

        // Assign actual slot indices to spillslots.
        for i in 0..self.spillslots.len() {
            self.spillslots[i].alloc = self.allocate_spillslot(self.spillslots[i].class);
        }

        log::debug!("spillslot allocator done");
    }

    /// Carve a new stack slot of the size required by `class` out of
    /// the frame, aligned to its own size, and return it as a stack
    /// `Allocation`.
    pub fn allocate_spillslot(&mut self, class: RegClass) -> Allocation {
        let size = self.func.spillslot_size(class) as u32;
        let mut offset = self.num_spillslots;
        // Align up to `size`.
        debug_assert!(size.is_power_of_two());
        offset = (offset + size - 1) & !(size - 1);
        // Some clients name a multi-slot spillslot by its last
        // constituent slot rather than its first.
        let slot = if self.func.multi_spillslot_named_by_last_slot() {
            offset + size - 1
        } else {
            offset
        };
        offset += size;
        self.num_spillslots = offset;
        Allocation::stack(SpillSlot::new(slot as usize, class))
    }
}
|
||||
73
src/ion/stackmap.rs
Normal file
73
src/ion/stackmap.rs
Normal file
@@ -0,0 +1,73 @@
|
||||
/*
|
||||
* The following license applies to this file, which was initially
|
||||
* derived from the files `js/src/jit/BacktrackingAllocator.h` and
|
||||
* `js/src/jit/BacktrackingAllocator.cpp` in Mozilla Firefox:
|
||||
*
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
*
|
||||
* Since the initial port, the design has been substantially evolved
|
||||
* and optimized.
|
||||
*/
|
||||
|
||||
//! Stackmap computation.
|
||||
|
||||
use super::{Env, ProgPoint, VRegIndex};
|
||||
use crate::Function;
|
||||
|
||||
impl<'a, F: Function> Env<'a, F> {
    /// Build the final safepoint/stackmap table: for every ref-typed
    /// vreg, record the spillslot it occupies at each safepoint it is
    /// live over, into `self.safepoint_slots`.
    pub fn compute_stackmaps(&mut self) {
        // For each ref-typed vreg, iterate through ranges and find
        // safepoints in-range. Add the SpillSlot to the stackmap.

        if self.func.reftype_vregs().is_empty() {
            return;
        }

        // Given `safepoints_per_vreg` from the liveness computation,
        // all we have to do is, for each vreg in this map, step
        // through the LiveRanges along with a sorted list of
        // safepoints; and for each safepoint in the current range,
        // emit the allocation into the `safepoint_slots` list.

        log::debug!("safepoints_per_vreg = {:?}", self.safepoints_per_vreg);

        for vreg in self.func.reftype_vregs() {
            log::debug!("generating safepoint info for vreg {}", vreg);
            let vreg = VRegIndex::new(vreg.vreg());
            let mut safepoints: Vec<ProgPoint> = self
                .safepoints_per_vreg
                .get(&vreg.index())
                .unwrap()
                .iter()
                .map(|&inst| ProgPoint::before(inst))
                .collect();
            safepoints.sort_unstable();
            log::debug!(" -> live over safepoints: {:?}", safepoints);

            // Cursor into the sorted safepoint list; it only moves
            // forward as we walk the vreg's ranges (which are assumed
            // sorted by start point -- TODO confirm against the
            // liveness construction).
            let mut safepoint_idx = 0;
            for entry in &self.vregs[vreg.index()].ranges {
                let range = entry.range;
                let alloc = self.get_alloc_for_range(entry.index);
                log::debug!(" -> range {:?}: alloc {}", range, alloc);
                while safepoint_idx < safepoints.len() && safepoints[safepoint_idx] < range.to {
                    // Skip safepoints that fall before this range
                    // (i.e. in a gap where the vreg is not live).
                    if safepoints[safepoint_idx] < range.from {
                        safepoint_idx += 1;
                        continue;
                    }
                    log::debug!(" -> covers safepoint {:?}", safepoints[safepoint_idx]);

                    // Ref-typed values must live on the stack across
                    // safepoints; a register allocation here is a bug.
                    let slot = alloc
                        .as_stack()
                        .expect("Reference-typed value not in spillslot at safepoint");
                    self.safepoint_slots.push((safepoints[safepoint_idx], slot));
                    safepoint_idx += 1;
                }
            }
        }

        // Final table is sorted by (ProgPoint, slot) for consumers.
        self.safepoint_slots.sort_unstable();
        log::debug!("final safepoint slots info: {:?}", self.safepoint_slots);
    }
}
|
||||
Reference in New Issue
Block a user