This commit is contained in:
T0b1
2023-04-12 03:49:50 +02:00
parent f0e9cde328
commit 706c44513e
6 changed files with 942 additions and 17 deletions

9
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,9 @@
{
"editor.formatOnSave": true,
"[rust]": {
"editor.defaultFormatter": "rust-lang.rust-analyzer"
},
"rust-analyzer.cargo.features": [
"default"
]
}

View File

@@ -33,7 +33,7 @@ pub struct CFGInfo {
} }
impl CFGInfo { impl CFGInfo {
pub fn new<F: Function>(f: &F) -> Result<CFGInfo, RegAllocError> { pub fn new<F: Function>(f: &F, needs_loop_depth: bool) -> Result<CFGInfo, RegAllocError> {
let postorder = postorder::calculate(f.num_blocks(), f.entry_block(), |block| { let postorder = postorder::calculate(f.num_blocks(), f.entry_block(), |block| {
f.block_succs(block) f.block_succs(block)
}); });
@@ -98,22 +98,24 @@ impl CFGInfo {
} }
let mut approx_loop_depth = vec![]; let mut approx_loop_depth = vec![];
let mut backedge_stack: SmallVec<[usize; 4]> = smallvec![]; if needs_loop_depth {
let mut cur_depth = 0; let mut backedge_stack: SmallVec<[usize; 4]> = smallvec![];
for block in 0..f.num_blocks() { let mut cur_depth = 0;
if backedge_in[block] > 0 { for block in 0..f.num_blocks() {
cur_depth += 1; if backedge_in[block] > 0 {
backedge_stack.push(backedge_in[block]); cur_depth += 1;
} backedge_stack.push(backedge_in[block]);
}
approx_loop_depth.push(cur_depth); approx_loop_depth.push(cur_depth);
while backedge_stack.len() > 0 && backedge_out[block] > 0 { while backedge_stack.len() > 0 && backedge_out[block] > 0 {
backedge_out[block] -= 1; backedge_out[block] -= 1;
*backedge_stack.last_mut().unwrap() -= 1; *backedge_stack.last_mut().unwrap() -= 1;
if *backedge_stack.last().unwrap() == 0 { if *backedge_stack.last().unwrap() == 0 {
cur_depth -= 1; cur_depth -= 1;
backedge_stack.pop(); backedge_stack.pop();
}
} }
} }
} }

311
src/ion/fast_alloc.rs Normal file
View File

@@ -0,0 +1,311 @@
use alloc::vec;
use alloc::vec::Vec;
use smallvec::{smallvec, SmallVec};
use std::convert::TryFrom;
use crate::{
cfg::CFGInfo, Allocation, Block, Edit, Function, Inst, MachineEnv, Operand, OperandConstraint,
OperandKind, OperandPos, Output, PReg, ProgPoint, RegAllocError, RegClass, SpillSlot, VReg,
};
/// Per-virtual-register allocation state.
#[derive(Default, Clone, Copy)]
struct VRegData {
    /// Physical register currently holding this vreg, if any.
    pub preg: Option<PReg>,
    /// First stack-slot index assigned to this vreg, if a slot has been
    /// allocated (see `FastAllocState::alloc_stack_slot`).
    pub slot_idx: Option<u32>,
    /// True if this vreg holds a reference-typed (GC) value; populated
    /// from `Function::reftype_vregs` in `FastAllocState::init`.
    pub reftype: bool,
}
/// Per-physical-register allocation state.
#[derive(Default, Clone, Copy)]
struct PRegData {
    /// Index (`VReg::vreg()`) of the vreg currently assigned to this
    /// preg, if any.
    pub vreg: Option<u32>,
    /// True for "registers" that are really fixed stack slots; populated
    /// from `MachineEnv::fixed_stack_slots` in `FastAllocState::init`.
    pub stack_pseudo: bool,
}
/// Per-block allocation state.
#[derive(Default, Clone, Copy)]
struct BlockData {
    /// Whether this block has been processed by the allocator.
    // NOTE(review): never read or written in the visible code (WIP) —
    // confirm intended use once the allocation loop is implemented.
    pub allocated: bool,
}
/// Mutable state of the fast (single-pass) register allocator, plus
/// borrowed references to the function being allocated and its
/// environment.
struct FastAllocState<'a, F: Function> {
    /// Per-vreg state, indexed by `VReg::vreg()`.
    pub vregs: Vec<VRegData>,
    /// Per-preg state, indexed by `PReg::index()`.
    pub pregs: Vec<PRegData>,
    /// Per-block state, indexed by block number.
    pub blocks: Vec<BlockData>,
    /// Blocks in depth-first preorder starting at the entry block.
    pub preorder: Vec<Block>,
    /// Integer registers in allocation-priority order (preferred first).
    pub reg_order_int: Vec<PReg>,
    /// Float registers in allocation-priority order (preferred first).
    pub reg_order_float: Vec<PReg>,
    /// Next free stack-slot index; grows monotonically.
    pub cur_stack_slot_idx: u32,
    /// Number of reference-typed vregs currently held in pregs.
    pub reftype_vregs_in_pregs_count: u32,
    /// Slots consumed per Int spillslot (`Function::spillslot_size`).
    pub stack_slot_count_int: u8,
    /// Slots consumed per Float spillslot (`Function::spillslot_size`).
    pub stack_slot_count_float: u8,
    /// Flat per-operand allocation array (see `inst_alloc_offsets`).
    pub allocs: Vec<Allocation>,
    /// Per-instruction offset into `allocs`.
    pub inst_alloc_offsets: Vec<u32>,
    /// Inserted moves, keyed by program point.
    pub edits: Vec<(ProgPoint, Edit)>,
    /// Safepoint slot records for stackmap generation.
    pub safepoint_slots: Vec<(ProgPoint, Allocation)>,
    pub func: &'a F,
    pub mach_env: &'a MachineEnv,
    pub cfg: &'a CFGInfo,
}
impl<'a, F: Function> FastAllocState<'a, F> {
pub fn init(func: &'a F, mach_env: &'a MachineEnv, cfg: &'a CFGInfo) -> Self {
let vregs = {
let mut vregs = Vec::with_capacity(func.num_vregs());
vregs.resize(func.num_vregs(), VRegData::default());
for vreg in func.reftype_vregs() {
vregs[vreg.vreg()].reftype = true;
}
vregs
};
let pregs = {
let mut pregs = Vec::with_capacity(PReg::NUM_INDEX);
pregs.resize(PReg::NUM_INDEX, PRegData::default());
for preg in &mach_env.fixed_stack_slots {
pregs[preg.index()].stack_pseudo = true;
}
pregs
};
let blocks = {
let mut blocks = Vec::with_capacity(func.num_blocks());
blocks.resize(func.num_blocks(), BlockData::default());
blocks
};
let reg_order_int = {
let class = RegClass::Int as usize;
let amount = mach_env.preferred_regs_by_class[class].len()
+ mach_env.non_preferred_regs_by_class[class].len();
let mut reg_order = Vec::with_capacity(amount);
reg_order.extend_from_slice(&mach_env.preferred_regs_by_class[class]);
reg_order.extend_from_slice(&mach_env.non_preferred_regs_by_class[class]);
reg_order
};
let reg_order_float = {
let class = RegClass::Float as usize;
let amount = mach_env.preferred_regs_by_class[class].len()
+ mach_env.non_preferred_regs_by_class[class].len();
let mut reg_order = Vec::with_capacity(amount);
reg_order.extend_from_slice(&mach_env.preferred_regs_by_class[class]);
reg_order.extend_from_slice(&mach_env.non_preferred_regs_by_class[class]);
reg_order
};
let mut inst_alloc_offsets = Vec::with_capacity(func.num_insts());
inst_alloc_offsets.resize(func.num_insts(), 0);
Self {
vregs,
pregs,
blocks,
preorder: Self::calc_preorder(func),
reg_order_int,
reg_order_float,
cur_stack_slot_idx: 0,
reftype_vregs_in_pregs_count: 0,
stack_slot_count_int: u8::try_from(func.spillslot_size(RegClass::Int))
.expect("that's a big integer"),
stack_slot_count_float: u8::try_from(func.spillslot_size(RegClass::Float))
.expect("that's a big float"),
allocs: Vec::new(),
inst_alloc_offsets,
edits: Vec::new(),
safepoint_slots: Vec::new(),
func,
mach_env,
cfg,
}
}
pub fn get_or_alloc_stack_slot(&mut self, vreg: VReg) -> u32 {
if let Some(idx) = self.vregs[vreg.vreg()].slot_idx {
return idx;
}
self.alloc_stack_slot(vreg)
}
pub fn alloc_stack_slot(&mut self, vreg: VReg) -> u32 {
let data = &mut self.vregs[vreg.vreg()];
if data.slot_idx.is_some() {
panic!("Trying to allocate already allocated stack slot");
}
let size = if vreg.class() == RegClass::Int {
self.stack_slot_count_int
} else {
self.stack_slot_count_float
};
let idx = self.cur_stack_slot_idx;
self.cur_stack_slot_idx += size as u32;
data.slot_idx = Some(idx);
idx
}
pub fn move_to_preg(&mut self, vreg: VReg, preg: PReg, pos: ProgPoint) {
if let Some(vreg) = &self.pregs[preg.index()].vreg {
let vdata = &mut self.vregs[*vreg as usize];
debug_assert!(vdata.preg.is_some());
debug_assert_eq!(vdata.preg.unwrap(), preg);
vdata.preg = None;
}
if let Some(preg) = &self.vregs[vreg.vreg()].preg {
// TODO: allow multiple pregs for a single vreg?
let pdata = &mut self.pregs[preg.index()];
debug_assert!(pdata.vreg.is_some());
debug_assert_eq!(pdata.vreg.unwrap(), vreg.vreg() as u32);
pdata.vreg = None;
}
let vdata = &mut self.vregs[vreg.vreg()];
let pdata = &mut self.pregs[preg.index()];
if vdata.slot_idx.is_none() {
panic!("Trying to move from vreg that has no stack slot to preg");
}
self.edits.push((
pos,
Edit::Move {
from: Allocation::stack(SpillSlot::new(vdata.slot_idx.unwrap() as usize)),
to: Allocation::reg(preg),
},
));
vdata.preg = Some(preg);
pdata.vreg = Some(vreg.vreg() as u32);
if vdata.reftype {
self.reftype_vregs_in_pregs_count += 1;
}
}
pub fn move_to_stack(&mut self, preg: PReg, vreg: VReg, pos: ProgPoint) {
let vdata = &mut self.vregs[vreg.vreg()];
let pdata = &mut self.pregs[preg.index()];
if pdata.vreg.is_none() || vdata.preg.is_none() {
panic!("Trying to move from unallocated preg/vreg to stack");
}
debug_assert_eq!(vdata.preg.unwrap(), preg);
debug_assert_eq!(pdata.vreg.unwrap(), vreg.vreg() as u32);
if vdata.slot_idx.is_none() {
panic!("Trying to move to vreg without stack slot");
}
self.edits.push((
pos,
Edit::Move {
from: Allocation::reg(preg),
to: Allocation::stack(SpillSlot::new(vdata.slot_idx.unwrap() as usize)),
},
));
}
pub fn clear_preg(&mut self, preg: PReg) {
let pdata = &mut self.pregs[preg.index()];
if let Some(vreg) = pdata.vreg {
let vdata = &mut self.vregs[vreg as usize];
debug_assert_eq!(vdata.preg.unwrap(), preg);
vdata.preg = None;
pdata.vreg = None;
if vdata.reftype {
self.reftype_vregs_in_pregs_count -= 1;
}
}
}
pub fn clear_vreg_from_reg(&mut self, vreg: VReg) {
let vdata = &mut self.vregs[vreg.vreg()];
if let Some(preg) = vdata.preg {
debug_assert_eq!(self.pregs[preg.index()].vreg.unwrap(), vreg.vreg() as u32);
self.pregs[preg.index()].vreg = None;
vdata.preg = None;
if vdata.reftype {
self.reftype_vregs_in_pregs_count -= 1;
}
}
}
fn calc_preorder(func: &F) -> Vec<Block> {
let entry = func.entry_block();
let mut ret = vec![entry];
struct State<'a> {
block: Block,
succs: &'a [Block],
next_succ: usize,
}
let mut stack: SmallVec<[State; 64]> = smallvec![];
stack.push(State {
block: entry,
succs: func.block_succs(entry),
next_succ: 0,
});
while let Some(ref mut state) = stack.last_mut() {
if state.next_succ >= state.succs.len() {
stack.pop();
continue;
}
let block = state.succs[state.next_succ];
let succs = func.block_succs(block);
ret.push(block);
state.next_succ += 1;
if state.next_succ >= state.succs.len() {
stack.pop();
}
if !succs.is_empty() {
stack.push(State {
block,
succs: func.block_succs(block),
next_succ: 0,
});
}
}
ret
}
}
/// Entry point of the fast (single-pass) register allocator.
///
/// WIP: builds the allocator state and then bails out; the main
/// allocation loop is not implemented yet.
pub fn run<F: Function>(
    func: &F,
    mach_env: &MachineEnv,
    cfg: CFGInfo,
) -> Result<Output, RegAllocError> {
    // Multi-slot spillslots named by their last slot are not supported
    // by this allocator.
    if func.multi_spillslot_named_by_last_slot() {
        panic!("MultiSpillslotIndexPos not supported");
    }
    let mut state = FastAllocState::init(func, mach_env, &cfg);
    todo!("")
}
/// Assign locations to the entry block's parameters and spill them to
/// their stack slots.
///
/// WIP: not implemented yet.
fn setup_entry_params<'a, F: Function>(
    state: &mut FastAllocState<'a, F>,
) -> Result<(), RegAllocError> {
    // we need to set the vreg location for the initial block parameters and copy them to the stack
    let entry = state.func.entry_block();
    todo!("")
}

594
src/ion/fast_alloc.rs.bak Normal file
View File

@@ -0,0 +1,594 @@
use alloc::vec;
use alloc::vec::Vec;
use smallvec::{smallvec, SmallVec};
use crate::{
cfg::CFGInfo, Allocation, Block, Edit, Function, Inst, MachineEnv, Operand, OperandConstraint,
OperandKind, OperandPos, Output, PReg, ProgPoint, RegAllocError, RegClass, SpillSlot, VReg,
};
/// Per-virtual-register state of the first (backed-up) allocator draft.
#[derive(Clone, Copy)]
struct VRegInfo {
    /// First stack-slot index assigned to this vreg, if any.
    pub stack_slot: Option<u32>,
    /// Instruction at which the current register was assigned;
    /// `Inst::invalid()` if never assigned.
    pub alloc_at_inst: Inst,
    /// Physical register currently holding this vreg, if any.
    pub cur_reg: Option<PReg>,
}
// Hand-written because `Inst` provides no `Default`; the "empty" state
// is an invalid instruction and no slot or register.
impl Default for VRegInfo {
    fn default() -> Self {
        Self {
            stack_slot: None,
            alloc_at_inst: Inst::invalid(),
            cur_reg: None,
        }
    }
}
/// Mutable state of the first (backed-up) allocator draft.
struct AllocState {
    /// Per-vreg state, indexed by `VReg::vreg()`.
    pub vregs: Vec<VRegInfo>,
    /// Blocks in depth-first preorder starting at the entry block.
    pub preorder: Vec<Block>,
    /// Flat per-operand allocation array (see `inst_alloc_offsets`).
    pub allocs: Vec<Allocation>,
    /// Per-instruction offset into `allocs`.
    pub inst_alloc_offsets: Vec<u32>,
    /// Inserted moves, keyed by program point.
    pub edits: Vec<(ProgPoint, Edit)>,
    /// Next free stack-slot index; grows monotonically.
    pub cur_stack_slot_idx: u32,
    /// Safepoint slot records for stackmap generation.
    pub safepoint_slots: Vec<(ProgPoint, Allocation)>,
}
impl AllocState {
    /// Build the initial allocation state for `func`.
    pub fn init<F: Function>(func: &F) -> Self {
        Self {
            vregs: vec![VRegInfo::default(); func.num_vregs()],
            preorder: Self::calc_preorder(func),
            allocs: Vec::new(),
            inst_alloc_offsets: vec![0; func.num_insts()],
            edits: Vec::new(),
            cur_stack_slot_idx: 0,
            safepoint_slots: Vec::new(),
        }
    }

    /// Compute a depth-first preorder of all blocks reachable from the
    /// entry block, using an explicit stack.
    ///
    /// A visited set guarantees that each block appears exactly once and
    /// that the walk terminates: the previous version re-expanded blocks
    /// reachable along multiple paths, duplicating them at CFG joins and
    /// looping forever on any cyclic CFG.
    fn calc_preorder<F: Function>(func: &F) -> Vec<Block> {
        let entry = func.entry_block();
        let mut visited = vec![false; func.num_blocks()];
        visited[entry.index()] = true;
        let mut ret = vec![entry];
        // One stack frame per partially-expanded block.
        struct State<'a> {
            succs: &'a [Block],
            next_succ: usize,
        }
        let mut stack: SmallVec<[State; 64]> = smallvec![State {
            succs: func.block_succs(entry),
            next_succ: 0,
        }];
        while let Some(state) = stack.last_mut() {
            if state.next_succ >= state.succs.len() {
                stack.pop();
                continue;
            }
            let block = state.succs[state.next_succ];
            state.next_succ += 1;
            if visited[block.index()] {
                continue;
            }
            visited[block.index()] = true;
            ret.push(block);
            let succs = func.block_succs(block);
            if !succs.is_empty() {
                stack.push(State {
                    succs,
                    next_succ: 0,
                });
            }
        }
        ret
    }
}
/// First (backed-up) draft of the fast single-pass allocator.
///
/// Walks blocks in preorder and, per instruction, assigns allocations
/// in four passes over the operands: (1) fixed-register uses and early
/// defs, (2) remaining uses, (3) fixed-register late defs, (4) the
/// remaining late defs. Defs also receive stack slots, and every
/// register value is spilled back to its slot after the instruction.
///
/// NOTE(review): superseded by `fast_alloc.rs`; unfinished (ends in
/// `todo!`) and has visible open issues flagged inline below.
pub fn run<F: Function>(
    func: &F,
    mach_env: &MachineEnv,
    cfg: CFGInfo,
) -> Result<Output, RegAllocError> {
    let mut state = AllocState::init(func);
    // Integer registers in allocation order: preferred first.
    let reg_order_int = {
        let class = RegClass::Int as usize;
        let amount = mach_env.preferred_regs_by_class[class].len()
            + mach_env.non_preferred_regs_by_class[class].len();
        let mut reg_order = Vec::with_capacity(amount);
        reg_order.extend_from_slice(&mach_env.preferred_regs_by_class[class]);
        reg_order.extend_from_slice(&mach_env.non_preferred_regs_by_class[class]);
        reg_order
    };
    // Float registers in allocation order: preferred first.
    let reg_order_float = {
        let class = RegClass::Float as usize;
        let amount = mach_env.preferred_regs_by_class[class].len()
            + mach_env.non_preferred_regs_by_class[class].len();
        let mut reg_order = Vec::with_capacity(amount);
        reg_order.extend_from_slice(&mach_env.preferred_regs_by_class[class]);
        reg_order.extend_from_slice(&mach_env.non_preferred_regs_by_class[class]);
        reg_order
    };
    // Number of slot indices consumed per spillslot of each class.
    let slot_size_int = func.spillslot_size(RegClass::Int) as u32;
    let slot_size_float = func.spillslot_size(RegClass::Float) as u32;
    // TODO: this currently does not handle multiple defs/uses of the same vreg properly
    // TODO: move to AllocState, needs to also contain if this is a pseudo-preg and refers to the stack
    // Which vreg currently occupies each preg; `VReg::invalid()` = free.
    let mut preg_state: Vec<VReg> = Vec::with_capacity(PReg::NUM_INDEX);
    preg_state.resize(PReg::NUM_INDEX, VReg::invalid());
    for block in &state.preorder {
        for inst in func.block_insns(*block).iter() {
            let operands = func.inst_operands(inst);
            // Reserve this instruction's window in the flat alloc array.
            let alloc_idx = state.allocs.len();
            state.inst_alloc_offsets[inst.index()] = alloc_idx as u32;
            state
                .allocs
                .resize(alloc_idx + operands.len(), Allocation::none());
            // both of these smallvecs could hold 16 entries
            // late uses may not be chosen as a target for writes
            let mut late_use_regs: SmallVec<[PReg; 4]> = smallvec![];
            let mut early_write_regs: SmallVec<[PReg; 4]> = smallvec![];
            // TODO: wouldnt need this if we look up the inst a vreg was allocated at
            let mut regs_allocated: SmallVec<[PReg; 8]> = smallvec![];
            // Cursors into the per-class register orders.
            let mut cur_idx_int = 0;
            let mut cur_idx_float = 0;
            let mut has_early_write = false;
            // allocate uses; fixed reg first, then the others
            // Pass 1: fixed-register uses and fixed-register early defs.
            for (i, op) in operands.iter().enumerate() {
                if op.kind() != OperandKind::Use {
                    // Non-uses only participate here when positioned Early.
                    if op.pos() != OperandPos::Early {
                        continue;
                    }
                    has_early_write = true;
                }
                let vreg = op.vreg();
                match op.constraint() {
                    OperandConstraint::Any | OperandConstraint::Reg | OperandConstraint::Stack => {
                        continue
                    }
                    OperandConstraint::FixedReg(reg) => {
                        // A fixed register already taken by a *different*
                        // vreg cannot be satisfied.
                        if preg_state[reg.index()] != VReg::invalid()
                            && preg_state[reg.index()] != vreg
                        {
                            return Err(RegAllocError::TooManyLiveRegs);
                        }
                        state.allocs[alloc_idx + i] = Allocation::reg(reg);
                        preg_state[reg.index()] = vreg;
                        state.vregs[vreg.vreg()].alloc_at_inst = inst;
                        state.vregs[vreg.vreg()].cur_reg = Some(reg);
                        regs_allocated.push(reg);
                        if op.pos() == OperandPos::Late {
                            late_use_regs.push(reg);
                        }
                        if op.kind() == OperandKind::Use {
                            // Early Defs do not need to be moved
                            // Reload the use from its stack slot; a use
                            // without a slot was never defined.
                            match state.vregs[vreg.vreg()].stack_slot {
                                Some(slot) => {
                                    state.edits.push((
                                        ProgPoint::before(inst),
                                        Edit::Move {
                                            from: Allocation::stack(SpillSlot::new(slot as usize)),
                                            to: Allocation::reg(reg),
                                        },
                                    ));
                                }
                                None => {
                                    return Err(RegAllocError::SSA(vreg, inst));
                                }
                            }
                        } else {
                            // Early def: spill the result to its (possibly
                            // freshly allocated) slot after the instruction.
                            early_write_regs.push(reg);
                            let slot = match state.vregs[vreg.vreg()].stack_slot {
                                Some(slot) => slot,
                                None => {
                                    let size = if op.class() == RegClass::Int {
                                        slot_size_int
                                    } else {
                                        slot_size_float
                                    };
                                    let slot = state.cur_stack_slot_idx;
                                    state.cur_stack_slot_idx += size;
                                    state.vregs[vreg.vreg()].stack_slot = Some(slot);
                                    slot
                                }
                            };
                            state.edits.push((
                                ProgPoint::after(inst),
                                Edit::Move {
                                    from: Allocation::reg(reg),
                                    to: Allocation::stack(SpillSlot::new(slot as usize)),
                                },
                            ));
                        }
                    }
                    OperandConstraint::Reuse(_) => panic!("Reuse constraint for Use/Early-Write"),
                }
            }
            // Pass 2: remaining (non-fixed) uses.
            for (i, op) in operands.iter().enumerate() {
                if op.kind() != OperandKind::Use {
                    // NOTE(review): both branches fall through to
                    // `continue`, so every non-use is skipped; the inner
                    // `if` looks like a leftover — confirm intent.
                    if op.pos() != OperandPos::Early {
                        continue;
                    }
                    continue;
                }
                let vreg = op.vreg();
                let vreg_idx = vreg.vreg();
                match op.constraint() {
                    OperandConstraint::Stack => {
                        // values need to be on stack already
                        match state.vregs[vreg_idx].stack_slot {
                            Some(slot) => {
                                state.allocs[alloc_idx + i] =
                                    Allocation::stack(SpillSlot::new(slot as usize));
                            }
                            None => {
                                if op.kind() == OperandKind::Use {
                                    return Err(RegAllocError::SSA(vreg, inst));
                                }
                                // alloc stack slot
                                let size = if op.class() == RegClass::Int {
                                    slot_size_int
                                } else {
                                    slot_size_float
                                };
                                let slot = state.cur_stack_slot_idx;
                                state.cur_stack_slot_idx += size;
                                state.allocs[alloc_idx + i] =
                                    Allocation::stack(SpillSlot::new(slot as usize));
                                state.vregs[vreg_idx].stack_slot = Some(slot);
                            }
                        }
                    }
                    // TODO: do any on stack?
                    OperandConstraint::Any | OperandConstraint::Reg => {
                        // check if vreg has been allocated before
                        // Reuse the allocation of an earlier operand for
                        // the same vreg, if any.
                        let mut alloc = None;
                        for (j, op) in operands.iter().enumerate() {
                            if j >= i {
                                break;
                            }
                            if op.vreg() == vreg {
                                alloc = Some(state.allocs[alloc_idx + j].clone());
                                break;
                            }
                        }
                        if let Some(a) = alloc {
                            state.allocs[alloc_idx + i] = a;
                            continue;
                        }
                        // find first non-allocated register
                        let reg_order = if op.class() == RegClass::Int {
                            &reg_order_int
                        } else {
                            &reg_order_float
                        };
                        let reg_order_idx = if op.class() == RegClass::Int {
                            &mut cur_idx_int
                        } else {
                            &mut cur_idx_float
                        };
                        // NOTE(review): after a successful allocation this
                        // loop neither breaks nor continues the outer
                        // loop; it keeps scanning, exhausts the order and
                        // falls into the "No register available" path
                        // below — looks like a missing `break`. Verify.
                        loop {
                            if *reg_order_idx >= reg_order.len() {
                                break;
                            }
                            if preg_state[reg_order[*reg_order_idx].index()] != VReg::invalid() {
                                *reg_order_idx += 1;
                                continue;
                            }
                            let reg = reg_order[*reg_order_idx];
                            // TODO: helper func for alloc
                            state.allocs[alloc_idx + i] = Allocation::reg(reg);
                            preg_state[reg.index()] = vreg;
                            state.vregs[vreg.vreg()].alloc_at_inst = inst;
                            state.vregs[vreg.vreg()].cur_reg = Some(reg);
                            regs_allocated.push(reg);
                            if op.pos() == OperandPos::Late {
                                late_use_regs.push(reg);
                            }
                            if op.kind() == OperandKind::Use {
                                match state.vregs[vreg_idx].stack_slot {
                                    Some(slot) => {
                                        state.edits.push((
                                            ProgPoint::before(inst),
                                            Edit::Move {
                                                from: Allocation::stack(SpillSlot::new(
                                                    slot as usize,
                                                )),
                                                to: Allocation::reg(reg),
                                            },
                                        ));
                                    }
                                    None => {
                                        return Err(RegAllocError::SSA(vreg, inst));
                                    }
                                }
                            } else {
                                early_write_regs.push(reg);
                                let slot = match state.vregs[vreg.vreg()].stack_slot {
                                    Some(slot) => slot,
                                    None => {
                                        let size = if op.class() == RegClass::Int {
                                            slot_size_int
                                        } else {
                                            slot_size_float
                                        };
                                        let slot = state.cur_stack_slot_idx;
                                        state.cur_stack_slot_idx += size;
                                        state.vregs[vreg.vreg()].stack_slot = Some(slot);
                                        slot
                                    }
                                };
                                state.edits.push((
                                    ProgPoint::after(inst),
                                    Edit::Move {
                                        from: Allocation::reg(reg),
                                        to: Allocation::stack(SpillSlot::new(slot as usize)),
                                    },
                                ));
                            }
                        }
                        // No register available
                        // TODO: try to evict vreg that does not need to be in a preg
                        if op.constraint() == OperandConstraint::Reg {
                            return Err(RegAllocError::TooManyLiveRegs);
                        }
                        // any can live on the stack
                        match state.vregs[vreg_idx].stack_slot {
                            Some(slot) => {
                                state.allocs[alloc_idx + i] =
                                    Allocation::stack(SpillSlot::new(slot as usize));
                            }
                            None => {
                                if op.kind() == OperandKind::Use {
                                    return Err(RegAllocError::SSA(vreg, inst));
                                }
                                // alloc stack slot
                                let size = if op.class() == RegClass::Int {
                                    slot_size_int
                                } else {
                                    slot_size_float
                                };
                                let slot = state.cur_stack_slot_idx;
                                state.cur_stack_slot_idx += size;
                                state.allocs[alloc_idx + i] =
                                    Allocation::stack(SpillSlot::new(slot as usize));
                                state.vregs[vreg_idx].stack_slot = Some(slot);
                            }
                        }
                    }
                    OperandConstraint::FixedReg(_) => continue,
                    OperandConstraint::Reuse(_) => unreachable!(),
                }
            }
            // Handle writes
            // Pass 3: fixed-register late defs.
            for (i, op) in operands.iter().enumerate() {
                if op.kind() != OperandKind::Def || op.pos() == OperandPos::Early {
                    continue;
                }
                let vreg = op.vreg();
                let vreg_idx = vreg.vreg();
                match op.constraint() {
                    OperandConstraint::FixedReg(reg) => {
                        let reg_idx = reg.index();
                        if preg_state[reg_idx] != VReg::invalid() {
                            // if the register is occupied by a late use we abort
                            // TODO: move the allocation, if possible
                            if late_use_regs.contains(&reg) || early_write_regs.contains(&reg) {
                                todo!("trying to allocate fixed reg def on late use/early write");
                            }
                            // overwrite
                            let vreg = preg_state[reg_idx].vreg();
                            state.vregs[vreg].cur_reg = None;
                        }
                        preg_state[reg_idx] = vreg;
                        state.vregs[vreg_idx].cur_reg = Some(reg);
                        state.allocs[alloc_idx + i] = Allocation::reg(reg);
                        match state.vregs[vreg_idx].stack_slot {
                            None => {
                                // alloc stack slot
                                let size = if op.class() == RegClass::Int {
                                    slot_size_int
                                } else {
                                    slot_size_float
                                };
                                let slot = state.cur_stack_slot_idx;
                                state.cur_stack_slot_idx += size;
                                state.vregs[vreg_idx].stack_slot = Some(slot);
                                // move to stack after inst
                                state.edits.push((
                                    ProgPoint::after(inst),
                                    Edit::Move {
                                        from: Allocation::reg(reg),
                                        to: Allocation::stack(SpillSlot::new(slot as usize)),
                                    },
                                ));
                            }
                            Some(_) => {
                                // A def whose vreg already has a slot was
                                // defined before: SSA violation.
                                return Err(RegAllocError::SSA(vreg, inst));
                            }
                        }
                    }
                    _ => continue,
                }
            }
            // Restart the register-order cursors for the def pass.
            cur_idx_int = 0;
            cur_idx_float = 0;
            // Pass 4: remaining late defs.
            for (i, op) in operands.iter().enumerate() {
                if op.kind() != OperandKind::Def || op.pos() == OperandPos::Early {
                    continue;
                }
                let vreg = op.vreg();
                let vreg_idx = vreg.vreg();
                match op.constraint() {
                    OperandConstraint::Reg => {
                        // find first non-allocated register
                        let reg_order = if op.class() == RegClass::Int {
                            &reg_order_int
                        } else {
                            &reg_order_float
                        };
                        let reg_order_idx = if op.class() == RegClass::Int {
                            &mut cur_idx_int
                        } else {
                            &mut cur_idx_float
                        };
                        // NOTE(review): same missing-exit pattern as in
                        // pass 2 — after a successful allocation the loop
                        // keeps iterating on the same register and the
                        // code below unconditionally returns
                        // TooManyLiveRegs. Verify.
                        loop {
                            if *reg_order_idx >= reg_order.len() {
                                break;
                            }
                            let reg = reg_order[*reg_order_idx];
                            if late_use_regs.contains(&reg) || early_write_regs.contains(&reg) {
                                *reg_order_idx += 1;
                                continue;
                            }
                            // remove allocated
                            if preg_state[reg.index()] != VReg::invalid() {
                                let vreg_idx = preg_state[reg.index()].vreg();
                                state.vregs[vreg_idx].cur_reg = None;
                            } else {
                                regs_allocated.push(reg);
                            }
                            // TODO: helper func for alloc
                            state.allocs[alloc_idx + i] = Allocation::reg(reg);
                            preg_state[reg.index()] = vreg;
                            state.vregs[vreg.vreg()].alloc_at_inst = inst;
                            state.vregs[vreg.vreg()].cur_reg = Some(reg);
                            // alloc stack slot
                            let size = if op.class() == RegClass::Int {
                                slot_size_int
                            } else {
                                slot_size_float
                            };
                            let slot = state.cur_stack_slot_idx;
                            state.cur_stack_slot_idx += size;
                            state.vregs[vreg_idx].stack_slot = Some(slot);
                            // The slot was just assigned above, so the
                            // None arm here is unreachable in practice.
                            match state.vregs[vreg_idx].stack_slot {
                                Some(slot) => {
                                    state.edits.push((
                                        ProgPoint::after(inst),
                                        Edit::Move {
                                            from: Allocation::reg(reg),
                                            to: Allocation::stack(SpillSlot::new(slot as usize)),
                                        },
                                    ));
                                }
                                None => {
                                    return Err(RegAllocError::SSA(vreg, inst));
                                }
                            }
                        }
                        // No register available
                        // TODO: try to evict vreg that does not need to be in a preg
                        return Err(RegAllocError::TooManyLiveRegs);
                    }
                    OperandConstraint::Any | OperandConstraint::Stack => {
                        // alloc stack slot
                        let size = if op.class() == RegClass::Int {
                            slot_size_int
                        } else {
                            slot_size_float
                        };
                        let slot = state.cur_stack_slot_idx;
                        state.cur_stack_slot_idx += size;
                        state.vregs[vreg_idx].stack_slot = Some(slot);
                        state.allocs[alloc_idx + i] =
                            Allocation::stack(SpillSlot::new(slot as usize));
                    }
                    OperandConstraint::Reuse(op_idx) => {
                        // Take over the allocation of the reused operand;
                        // its vreg loses the location.
                        let alloc = state.allocs[alloc_idx + op_idx];
                        if alloc.is_none() || operands[op_idx].pos() == OperandPos::Late {
                            panic!("Invalid reuse");
                        }
                        state.allocs[alloc_idx + i] = alloc;
                        if let Some(alloc) = alloc.as_stack() {
                            state.vregs[vreg_idx].stack_slot = Some(alloc.index() as u32);
                            state.vregs[operands[op_idx].vreg().vreg()].stack_slot = None;
                        } else if let Some(reg) = alloc.as_reg() {
                            state.vregs[operands[op_idx].vreg().vreg()].cur_reg = None;
                            state.vregs[vreg_idx].cur_reg = Some(reg);
                            // alloc stack slot
                            let size = if op.class() == RegClass::Int {
                                slot_size_int
                            } else {
                                slot_size_float
                            };
                            let slot = state.cur_stack_slot_idx;
                            state.cur_stack_slot_idx += size;
                            state.vregs[vreg_idx].stack_slot = Some(slot);
                            // move to stack after inst
                            state.edits.push((
                                ProgPoint::after(inst),
                                Edit::Move {
                                    from: Allocation::reg(reg),
                                    to: Allocation::stack(SpillSlot::new(slot as usize)),
                                },
                            ));
                        }
                    }
                    OperandConstraint::FixedReg(_) => continue,
                }
            }
        }
    }
    todo!("")
}

View File

@@ -39,6 +39,7 @@ pub(crate) mod dump;
pub(crate) mod moves; pub(crate) mod moves;
pub(crate) mod spill; pub(crate) mod spill;
pub(crate) mod stackmap; pub(crate) mod stackmap;
mod fast_alloc;
impl<'a, F: Function> Env<'a, F> { impl<'a, F: Function> Env<'a, F> {
pub(crate) fn new( pub(crate) fn new(
@@ -120,13 +121,18 @@ pub fn run<F: Function>(
mach_env: &MachineEnv, mach_env: &MachineEnv,
enable_annotations: bool, enable_annotations: bool,
enable_ssa_checker: bool, enable_ssa_checker: bool,
use_fast_alloc: bool,
) -> Result<Output, RegAllocError> { ) -> Result<Output, RegAllocError> {
let cfginfo = CFGInfo::new(func)?; let cfginfo = CFGInfo::new(func, !use_fast_alloc)?;
if enable_ssa_checker { if enable_ssa_checker {
validate_ssa(func, &cfginfo)?; validate_ssa(func, &cfginfo)?;
} }
if use_fast_alloc {
return fast_alloc::run(func, mach_env, cfginfo);
}
let mut env = Env::new(func, mach_env, cfginfo, enable_annotations); let mut env = Env::new(func, mach_env, cfginfo, enable_annotations);
env.init()?; env.init()?;

View File

@@ -1491,7 +1491,7 @@ pub fn run<F: Function>(
env: &MachineEnv, env: &MachineEnv,
options: &RegallocOptions, options: &RegallocOptions,
) -> Result<Output, RegAllocError> { ) -> Result<Output, RegAllocError> {
ion::run(func, env, options.verbose_log, options.validate_ssa) ion::run(func, env, options.verbose_log, options.validate_ssa, options.fast_alloc)
} }
/// Options for allocation. /// Options for allocation.
@@ -1502,4 +1502,7 @@ pub struct RegallocOptions {
/// Run the SSA validator before allocating registers. /// Run the SSA validator before allocating registers.
pub validate_ssa: bool, pub validate_ssa: bool,
/// Use a fast algorithm that may return suboptimal results.
pub fast_alloc: bool,
} }