Replace assert! with debug_assert! in production code paths.
This allows the assertions to be disabled in release builds, making the code faster and smaller at the expense of skipping the checks. Assertions can be re-enabled in release builds by setting the `debug-assertions` flag in Cargo.toml, as the top-level Cargo.toml file already does.
This commit is contained in:
@@ -211,7 +211,7 @@ def gen_instruction_data_impl(fmt):
|
||||
if f.has_value_list:
|
||||
fmt.line(n + ' { ref mut args, .. } => args,')
|
||||
fmt.line('_ => panic!("No value list: {:?}", self),')
|
||||
fmt.line('assert!(args.is_empty(), "Value list already in use");')
|
||||
fmt.line('debug_assert!(args.is_empty(), "Value list already in use");')
|
||||
fmt.line('*args = vlist;')
|
||||
|
||||
|
||||
|
||||
@@ -348,7 +348,8 @@ def gen_xform(xform, fmt, type_sets):
|
||||
# Delete the original instruction if we didn't have an opportunity to
|
||||
# replace it.
|
||||
if not replace_inst:
|
||||
fmt.line('assert_eq!(pos.remove_inst(), inst);')
|
||||
fmt.line('let removed = pos.remove_inst();')
|
||||
fmt.line('debug_assert_eq!(removed, inst);')
|
||||
fmt.line('return true;')
|
||||
|
||||
|
||||
|
||||
@@ -245,7 +245,7 @@ def gen_constructor(sgrp, parent, fmt):
|
||||
'pub fn new({}) -> Flags {{'.format(args), '}'):
|
||||
fmt.line('let bvec = builder.state_for("{}");'.format(sgrp.name))
|
||||
fmt.line('let mut bytes = [0; {}];'.format(sgrp.byte_size()))
|
||||
fmt.line('assert_eq!(bvec.len(), {});'.format(sgrp.settings_size))
|
||||
fmt.line('debug_assert_eq!(bvec.len(), {});'.format(sgrp.settings_size))
|
||||
with fmt.indented(
|
||||
'for (i, b) in bvec.iter().enumerate() {', '}'):
|
||||
fmt.line('bytes[i] = *b;')
|
||||
|
||||
@@ -150,7 +150,7 @@ pub fn legalize_abi_value(have: Type, arg: &AbiParam) -> ValueConversion {
|
||||
match have_bits.cmp(&arg_bits) {
|
||||
// We have fewer bits than the ABI argument.
|
||||
Ordering::Less => {
|
||||
assert!(
|
||||
debug_assert!(
|
||||
have.is_int() && arg.value_type.is_int(),
|
||||
"Can only extend integer values"
|
||||
);
|
||||
@@ -163,8 +163,8 @@ pub fn legalize_abi_value(have: Type, arg: &AbiParam) -> ValueConversion {
|
||||
// We have the same number of bits as the argument.
|
||||
Ordering::Equal => {
|
||||
// This must be an integer vector that is split and then extended.
|
||||
assert!(arg.value_type.is_int());
|
||||
assert!(have.is_vector());
|
||||
debug_assert!(arg.value_type.is_int());
|
||||
debug_assert!(have.is_vector());
|
||||
ValueConversion::VectorSplit
|
||||
}
|
||||
// We have more bits than the argument.
|
||||
|
||||
@@ -316,7 +316,8 @@ impl<F: Forest> Path<F> {
|
||||
// Now that we have a not-full node, it must be possible to insert.
|
||||
match ins_node {
|
||||
None => {
|
||||
assert!(pool[node].try_leaf_insert(entry, key, value));
|
||||
let inserted = pool[node].try_leaf_insert(entry, key, value);
|
||||
debug_assert!(inserted);
|
||||
// If we inserted at the front of the new rhs_node leaf, we need to propagate
|
||||
// the inserted key as the critical key instead of the previous front key.
|
||||
if entry == 0 && node == rhs_node {
|
||||
@@ -324,7 +325,8 @@ impl<F: Forest> Path<F> {
|
||||
}
|
||||
}
|
||||
Some(n) => {
|
||||
assert!(pool[node].try_inner_insert(entry, key, n));
|
||||
let inserted = pool[node].try_inner_insert(entry, key, n);
|
||||
debug_assert!(inserted);
|
||||
// The lower level was moved to the new RHS node, so make sure that is
|
||||
// reflected here.
|
||||
if n == self.node[level + 1] {
|
||||
|
||||
@@ -110,7 +110,7 @@ where
|
||||
let mut divert = RegDiversions::new();
|
||||
for ebb in func.layout.ebbs() {
|
||||
divert.clear();
|
||||
assert_eq!(func.offsets[ebb], sink.offset());
|
||||
debug_assert_eq!(func.offsets[ebb], sink.offset());
|
||||
for inst in func.layout.ebb_insts(ebb) {
|
||||
emit_inst(func, inst, &mut divert, sink);
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ pub fn relax_branches(func: &mut Function, isa: &TargetIsa) -> Result<CodeOffset
|
||||
while let Some(ebb) = cur.next_ebb() {
|
||||
// Record the offset for `ebb` and make sure we iterate until offsets are stable.
|
||||
if cur.func.offsets[ebb] != offset {
|
||||
assert!(
|
||||
debug_assert!(
|
||||
cur.func.offsets[ebb] < offset,
|
||||
"Code shrinking during relaxation"
|
||||
);
|
||||
@@ -111,7 +111,7 @@ fn fallthroughs(func: &mut Function) {
|
||||
Opcode::Fallthrough => {
|
||||
// Somebody used a fall-through instruction before the branch relaxation pass.
|
||||
// Make sure it is correct, i.e. the destination is the layout successor.
|
||||
assert_eq!(destination, succ, "Illegal fall-through in {}", ebb)
|
||||
debug_assert_eq!(destination, succ, "Illegal fall-through in {}", ebb)
|
||||
}
|
||||
Opcode::Jump => {
|
||||
// If this is a jump to the successor EBB, change it to a fall-through.
|
||||
|
||||
@@ -36,8 +36,8 @@ where
|
||||
|
||||
/// Check if this BitSet contains the number num
|
||||
pub fn contains(&self, num: u8) -> bool {
|
||||
assert!((num as usize) < Self::bits());
|
||||
assert!((num as usize) < Self::max_bits());
|
||||
debug_assert!((num as usize) < Self::bits());
|
||||
debug_assert!((num as usize) < Self::max_bits());
|
||||
self.0.into() & (1 << num) != 0
|
||||
}
|
||||
|
||||
@@ -62,8 +62,8 @@ where
|
||||
|
||||
/// Construct a BitSet with the half-open range [lo,hi) filled in
|
||||
pub fn from_range(lo: u8, hi: u8) -> Self {
|
||||
assert!(lo <= hi);
|
||||
assert!((hi as usize) <= Self::bits());
|
||||
debug_assert!(lo <= hi);
|
||||
debug_assert!((hi as usize) <= Self::bits());
|
||||
let one: T = T::from(1);
|
||||
// I can't just do (one << hi) - one here as the shift may overflow
|
||||
let hi_rng = if hi >= 1 {
|
||||
|
||||
@@ -256,7 +256,7 @@ pub trait Cursor {
|
||||
/// Go to a specific instruction which must be inserted in the layout.
|
||||
/// New instructions will be inserted before `inst`.
|
||||
fn goto_inst(&mut self, inst: ir::Inst) {
|
||||
assert!(self.layout().inst_ebb(inst).is_some());
|
||||
debug_assert!(self.layout().inst_ebb(inst).is_some());
|
||||
self.set_position(CursorPosition::At(inst));
|
||||
}
|
||||
|
||||
@@ -287,14 +287,14 @@ pub trait Cursor {
|
||||
/// At this position, instructions cannot be inserted, but `next_inst()` will move to the first
|
||||
/// instruction in `ebb`.
|
||||
fn goto_top(&mut self, ebb: ir::Ebb) {
|
||||
assert!(self.layout().is_ebb_inserted(ebb));
|
||||
debug_assert!(self.layout().is_ebb_inserted(ebb));
|
||||
self.set_position(CursorPosition::Before(ebb));
|
||||
}
|
||||
|
||||
/// Go to the bottom of `ebb` which must be inserted into the layout.
|
||||
/// At this position, inserted instructions will be appended to `ebb`.
|
||||
fn goto_bottom(&mut self, ebb: ir::Ebb) {
|
||||
assert!(self.layout().is_ebb_inserted(ebb));
|
||||
debug_assert!(self.layout().is_ebb_inserted(ebb));
|
||||
self.set_position(CursorPosition::After(ebb));
|
||||
}
|
||||
|
||||
|
||||
@@ -43,8 +43,8 @@ pub struct MS64 {
|
||||
// The actual "magic number" generators follow.
|
||||
|
||||
pub fn magicU32(d: u32) -> MU32 {
|
||||
assert_ne!(d, 0);
|
||||
assert_ne!(d, 1); // d==1 generates out of range shifts.
|
||||
debug_assert_ne!(d, 0);
|
||||
debug_assert_ne!(d, 1); // d==1 generates out of range shifts.
|
||||
|
||||
let mut do_add: bool = false;
|
||||
let mut p: i32 = 31;
|
||||
@@ -89,8 +89,8 @@ pub fn magicU32(d: u32) -> MU32 {
|
||||
}
|
||||
|
||||
pub fn magicU64(d: u64) -> MU64 {
|
||||
assert_ne!(d, 0);
|
||||
assert_ne!(d, 1); // d==1 generates out of range shifts.
|
||||
debug_assert_ne!(d, 0);
|
||||
debug_assert_ne!(d, 1); // d==1 generates out of range shifts.
|
||||
|
||||
let mut do_add: bool = false;
|
||||
let mut p: i32 = 63;
|
||||
@@ -135,9 +135,9 @@ pub fn magicU64(d: u64) -> MU64 {
|
||||
}
|
||||
|
||||
pub fn magicS32(d: i32) -> MS32 {
|
||||
assert_ne!(d, -1);
|
||||
assert_ne!(d, 0);
|
||||
assert_ne!(d, 1);
|
||||
debug_assert_ne!(d, -1);
|
||||
debug_assert_ne!(d, 0);
|
||||
debug_assert_ne!(d, 1);
|
||||
let two31: u32 = 0x80000000u32;
|
||||
let mut p: i32 = 31;
|
||||
let ad: u32 = i32::wrapping_abs(d) as u32;
|
||||
@@ -178,9 +178,9 @@ pub fn magicS32(d: i32) -> MS32 {
|
||||
}
|
||||
|
||||
pub fn magicS64(d: i64) -> MS64 {
|
||||
assert_ne!(d, -1);
|
||||
assert_ne!(d, 0);
|
||||
assert_ne!(d, 1);
|
||||
debug_assert_ne!(d, -1);
|
||||
debug_assert_ne!(d, 0);
|
||||
debug_assert_ne!(d, 1);
|
||||
let two63: u64 = 0x8000000000000000u64;
|
||||
let mut p: i32 = 63;
|
||||
let ad: u64 = i64::wrapping_abs(d) as u64;
|
||||
|
||||
@@ -197,7 +197,7 @@ impl DominatorTree {
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(a.0, b.0, "Unreachable block passed to common_dominator?");
|
||||
debug_assert_eq!(a.0, b.0, "Unreachable block passed to common_dominator?");
|
||||
|
||||
// We're in the same EBB. The common dominator is the earlier instruction.
|
||||
if layout.cmp(a.1, b.1) == Ordering::Less {
|
||||
@@ -241,7 +241,7 @@ impl DominatorTree {
|
||||
pub fn clear(&mut self) {
|
||||
self.nodes.clear();
|
||||
self.postorder.clear();
|
||||
assert!(self.stack.is_empty());
|
||||
debug_assert!(self.stack.is_empty());
|
||||
self.valid = false;
|
||||
}
|
||||
|
||||
@@ -539,7 +539,7 @@ impl DominatorTreePreorder {
|
||||
/// Recompute this data structure to match `domtree`.
|
||||
pub fn compute(&mut self, domtree: &DominatorTree, layout: &Layout) {
|
||||
self.nodes.clear();
|
||||
assert_eq!(self.stack.len(), 0);
|
||||
debug_assert_eq!(self.stack.len(), 0);
|
||||
|
||||
// Step 1: Populate the child and sibling links.
|
||||
//
|
||||
@@ -557,7 +557,7 @@ impl DominatorTreePreorder {
|
||||
}
|
||||
|
||||
// Step 2. Assign pre-order numbers from a DFS of the dominator tree.
|
||||
assert!(self.stack.len() <= 1);
|
||||
debug_assert!(self.stack.len() <= 1);
|
||||
let mut n = 0;
|
||||
while let Some(ebb) = self.stack.pop() {
|
||||
n += 1;
|
||||
|
||||
@@ -219,8 +219,8 @@ impl<T: EntityRef> ListPool<T> {
|
||||
to_sclass: SizeClass,
|
||||
elems_to_copy: usize,
|
||||
) -> usize {
|
||||
assert!(elems_to_copy <= sclass_size(from_sclass));
|
||||
assert!(elems_to_copy <= sclass_size(to_sclass));
|
||||
debug_assert!(elems_to_copy <= sclass_size(from_sclass));
|
||||
debug_assert!(elems_to_copy <= sclass_size(to_sclass));
|
||||
let new_block = self.alloc(to_sclass);
|
||||
|
||||
if elems_to_copy > 0 {
|
||||
@@ -301,7 +301,7 @@ impl<T: EntityRef> EntityList<T> {
|
||||
pub fn clear(&mut self, pool: &mut ListPool<T>) {
|
||||
let idx = self.index as usize;
|
||||
match pool.len_of(self) {
|
||||
None => assert_eq!(idx, 0, "Invalid pool"),
|
||||
None => debug_assert_eq!(idx, 0, "Invalid pool"),
|
||||
Some(len) => pool.free(idx - 1, sclass_for_length(len)),
|
||||
}
|
||||
// Switch back to the empty list representation which has no storage.
|
||||
@@ -322,7 +322,7 @@ impl<T: EntityRef> EntityList<T> {
|
||||
match pool.len_of(self) {
|
||||
None => {
|
||||
// This is an empty list. Allocate a block and set length=1.
|
||||
assert_eq!(idx, 0, "Invalid pool");
|
||||
debug_assert_eq!(idx, 0, "Invalid pool");
|
||||
let block = pool.alloc(sclass_for_length(1));
|
||||
pool.data[block] = T::new(1);
|
||||
pool.data[block + 1] = element;
|
||||
@@ -358,7 +358,7 @@ impl<T: EntityRef> EntityList<T> {
|
||||
match pool.len_of(self) {
|
||||
None => {
|
||||
// This is an empty list. Allocate a block.
|
||||
assert_eq!(idx, 0, "Invalid pool");
|
||||
debug_assert_eq!(idx, 0, "Invalid pool");
|
||||
if count == 0 {
|
||||
return &mut [];
|
||||
}
|
||||
@@ -409,7 +409,7 @@ impl<T: EntityRef> EntityList<T> {
|
||||
}
|
||||
tail[0] = element;
|
||||
} else {
|
||||
assert_eq!(index, seq.len());
|
||||
debug_assert_eq!(index, seq.len());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -419,7 +419,7 @@ impl<T: EntityRef> EntityList<T> {
|
||||
{
|
||||
let seq = self.as_mut_slice(pool);
|
||||
len = seq.len();
|
||||
assert!(index < len);
|
||||
debug_assert!(index < len);
|
||||
|
||||
// Copy elements down.
|
||||
for i in index..len - 1 {
|
||||
@@ -449,7 +449,7 @@ impl<T: EntityRef> EntityList<T> {
|
||||
/// the list.
|
||||
pub fn swap_remove(&mut self, index: usize, pool: &mut ListPool<T>) {
|
||||
let len = self.len(pool);
|
||||
assert!(index < len);
|
||||
debug_assert!(index < len);
|
||||
if index == len - 1 {
|
||||
self.remove(index, pool);
|
||||
} else {
|
||||
|
||||
@@ -61,7 +61,7 @@ macro_rules! entity_impl {
|
||||
($entity:ident) => {
|
||||
impl $crate::entity::EntityRef for $entity {
|
||||
fn new(index: usize) -> Self {
|
||||
assert!(index < (::std::u32::MAX as usize));
|
||||
debug_assert!(index < (::std::u32::MAX as usize));
|
||||
$entity(index as u32)
|
||||
}
|
||||
|
||||
|
||||
@@ -149,7 +149,7 @@ where
|
||||
|
||||
// There was no previous entry for `key`. Add it to the end of `dense`.
|
||||
let idx = self.dense.len();
|
||||
assert!(idx <= u32::MAX as usize, "SparseMap overflow");
|
||||
debug_assert!(idx <= u32::MAX as usize, "SparseMap overflow");
|
||||
self.dense.push(value);
|
||||
self.sparse[key] = idx as u32;
|
||||
None
|
||||
|
||||
@@ -217,11 +217,11 @@ impl DataFlowGraph {
|
||||
///
|
||||
/// The `dest` value can't be attached to an instruction or EBB.
|
||||
pub fn change_to_alias(&mut self, dest: Value, src: Value) {
|
||||
assert!(!self.value_is_attached(dest));
|
||||
debug_assert!(!self.value_is_attached(dest));
|
||||
// Try to create short alias chains by finding the original source value.
|
||||
// This also avoids the creation of loops.
|
||||
let original = self.resolve_aliases(src);
|
||||
assert_ne!(
|
||||
debug_assert_ne!(
|
||||
dest,
|
||||
original,
|
||||
"Aliasing {} to {} would create a loop",
|
||||
@@ -229,7 +229,7 @@ impl DataFlowGraph {
|
||||
src
|
||||
);
|
||||
let ty = self.value_type(original);
|
||||
assert_eq!(
|
||||
debug_assert_eq!(
|
||||
self.value_type(dest),
|
||||
ty,
|
||||
"Aliasing {} to {} would change its type {} to {}",
|
||||
@@ -273,7 +273,7 @@ impl DataFlowGraph {
|
||||
{
|
||||
let original = src;
|
||||
let ty = self.value_type(original);
|
||||
assert_eq!(
|
||||
debug_assert_eq!(
|
||||
self.value_type(dest),
|
||||
ty,
|
||||
"Aliasing {} to {} would change its type {} to {}",
|
||||
@@ -498,9 +498,9 @@ impl DataFlowGraph {
|
||||
/// This is a very low-level operation. Usually, instruction results with the correct types are
|
||||
/// created automatically. The `res` value must not be attached to anything else.
|
||||
pub fn attach_result(&mut self, inst: Inst, res: Value) {
|
||||
assert!(!self.value_is_attached(res));
|
||||
debug_assert!(!self.value_is_attached(res));
|
||||
let num = self.results[inst].push(res, &mut self.value_lists);
|
||||
assert!(num <= u16::MAX as usize, "Too many result values");
|
||||
debug_assert!(num <= u16::MAX as usize, "Too many result values");
|
||||
let ty = self.value_type(res);
|
||||
self.values[res] = ValueData::Inst {
|
||||
ty,
|
||||
@@ -533,7 +533,7 @@ impl DataFlowGraph {
|
||||
.expect("Replacing detached result"),
|
||||
new_value,
|
||||
);
|
||||
assert_eq!(
|
||||
debug_assert_eq!(
|
||||
attached,
|
||||
old_value,
|
||||
"{} wasn't detached from {}",
|
||||
@@ -547,7 +547,7 @@ impl DataFlowGraph {
|
||||
pub fn append_result(&mut self, inst: Inst, ty: Type) -> Value {
|
||||
let res = self.values.next_key();
|
||||
let num = self.results[inst].push(res, &mut self.value_lists);
|
||||
assert!(num <= u16::MAX as usize, "Too many result values");
|
||||
debug_assert!(num <= u16::MAX as usize, "Too many result values");
|
||||
self.make_value(ValueData::Inst {
|
||||
ty,
|
||||
inst,
|
||||
@@ -684,7 +684,7 @@ impl DataFlowGraph {
|
||||
pub fn append_ebb_param(&mut self, ebb: Ebb, ty: Type) -> Value {
|
||||
let param = self.values.next_key();
|
||||
let num = self.ebbs[ebb].params.push(param, &mut self.value_lists);
|
||||
assert!(num <= u16::MAX as usize, "Too many parameters on EBB");
|
||||
debug_assert!(num <= u16::MAX as usize, "Too many parameters on EBB");
|
||||
self.make_value(ValueData::Param {
|
||||
ty,
|
||||
num: num as u16,
|
||||
@@ -761,9 +761,9 @@ impl DataFlowGraph {
|
||||
///
|
||||
/// In almost all cases, you should be using `append_ebb_param()` instead of this method.
|
||||
pub fn attach_ebb_param(&mut self, ebb: Ebb, param: Value) {
|
||||
assert!(!self.value_is_attached(param));
|
||||
debug_assert!(!self.value_is_attached(param));
|
||||
let num = self.ebbs[ebb].params.push(param, &mut self.value_lists);
|
||||
assert!(num <= u16::MAX as usize, "Too many parameters on EBB");
|
||||
debug_assert!(num <= u16::MAX as usize, "Too many parameters on EBB");
|
||||
let ty = self.value_type(param);
|
||||
self.values[param] = ValueData::Param {
|
||||
ty,
|
||||
@@ -859,7 +859,7 @@ impl DataFlowGraph {
|
||||
/// to create invalid values for index padding which may be reassigned later.
|
||||
#[cold]
|
||||
fn set_value_type_for_parser(&mut self, v: Value, t: Type) {
|
||||
debug_assert!(
|
||||
assert!(
|
||||
self.value_type(v) == types::VOID,
|
||||
"this function is only for assigning types to previously invalid values"
|
||||
);
|
||||
@@ -882,7 +882,7 @@ impl DataFlowGraph {
|
||||
) -> usize {
|
||||
// Get the call signature if this is a function call.
|
||||
if let Some(sig) = self.call_signature(inst) {
|
||||
debug_assert_eq!(self.insts[inst].opcode().constraints().fixed_results(), 0);
|
||||
assert_eq!(self.insts[inst].opcode().constraints().fixed_results(), 0);
|
||||
for res_idx in 0..self.signatures[sig].returns.len() {
|
||||
let ty = self.signatures[sig].returns[res_idx].value_type;
|
||||
if let Some(v) = reuse.get(res_idx) {
|
||||
|
||||
@@ -490,7 +490,7 @@ fn parse_float(s: &str, w: u8, t: u8) -> Result<u64, &'static str> {
|
||||
significand <<= adjust;
|
||||
exponent -= i32::from(adjust);
|
||||
}
|
||||
assert_eq!(significand >> t, 1);
|
||||
debug_assert_eq!(significand >> t, 1);
|
||||
|
||||
// Trailing significand excludes the high bit.
|
||||
let t_bits = significand & ((1 << t) - 1);
|
||||
|
||||
@@ -560,7 +560,7 @@ impl OpcodeConstraints {
|
||||
/// Get the value type of result number `n`, having resolved the controlling type variable to
|
||||
/// `ctrl_type`.
|
||||
pub fn result_type(self, n: usize, ctrl_type: Type) -> Type {
|
||||
assert!(n < self.fixed_results(), "Invalid result index");
|
||||
debug_assert!(n < self.fixed_results(), "Invalid result index");
|
||||
if let ResolvedConstraint::Bound(t) =
|
||||
OPERAND_CONSTRAINTS[self.constraint_offset() + n].resolve(ctrl_type)
|
||||
{
|
||||
@@ -576,7 +576,7 @@ impl OpcodeConstraints {
|
||||
/// Unlike results, it is possible for some input values to vary freely within a specific
|
||||
/// `ValueTypeSet`. This is represented with the `ArgumentConstraint::Free` variant.
|
||||
pub fn value_argument_constraint(self, n: usize, ctrl_type: Type) -> ResolvedConstraint {
|
||||
assert!(
|
||||
debug_assert!(
|
||||
n < self.fixed_value_arguments(),
|
||||
"Invalid value argument index"
|
||||
);
|
||||
|
||||
@@ -88,7 +88,7 @@ const LOCAL_LIMIT: SequenceNumber = 100 * MINOR_STRIDE;
|
||||
// Compute the midpoint between `a` and `b`.
|
||||
// Return `None` if the midpoint would be equal to either.
|
||||
fn midpoint(a: SequenceNumber, b: SequenceNumber) -> Option<SequenceNumber> {
|
||||
assert!(a < b);
|
||||
debug_assert!(a < b);
|
||||
// Avoid integer overflow.
|
||||
let m = a + (b - a) / 2;
|
||||
if m > a { Some(m) } else { None }
|
||||
@@ -148,7 +148,7 @@ impl Layout {
|
||||
/// Assign a valid sequence number to `ebb` such that the numbers are still monotonic. This may
|
||||
/// require renumbering.
|
||||
fn assign_ebb_seq(&mut self, ebb: Ebb) {
|
||||
assert!(self.is_ebb_inserted(ebb));
|
||||
debug_assert!(self.is_ebb_inserted(ebb));
|
||||
|
||||
// Get the sequence number immediately before `ebb`, or 0.
|
||||
let prev_seq = self.ebbs[ebb]
|
||||
@@ -334,13 +334,13 @@ impl Layout {
|
||||
|
||||
/// Insert `ebb` as the last EBB in the layout.
|
||||
pub fn append_ebb(&mut self, ebb: Ebb) {
|
||||
assert!(
|
||||
debug_assert!(
|
||||
!self.is_ebb_inserted(ebb),
|
||||
"Cannot append EBB that is already in the layout"
|
||||
);
|
||||
{
|
||||
let node = &mut self.ebbs[ebb];
|
||||
assert!(node.first_inst.is_none() && node.last_inst.is_none());
|
||||
debug_assert!(node.first_inst.is_none() && node.last_inst.is_none());
|
||||
node.prev = self.last_ebb.into();
|
||||
node.next = None.into();
|
||||
}
|
||||
@@ -355,11 +355,11 @@ impl Layout {
|
||||
|
||||
/// Insert `ebb` in the layout before the existing EBB `before`.
|
||||
pub fn insert_ebb(&mut self, ebb: Ebb, before: Ebb) {
|
||||
assert!(
|
||||
debug_assert!(
|
||||
!self.is_ebb_inserted(ebb),
|
||||
"Cannot insert EBB that is already in the layout"
|
||||
);
|
||||
assert!(
|
||||
debug_assert!(
|
||||
self.is_ebb_inserted(before),
|
||||
"EBB Insertion point not in the layout"
|
||||
);
|
||||
@@ -379,11 +379,11 @@ impl Layout {
|
||||
|
||||
/// Insert `ebb` in the layout *after* the existing EBB `after`.
|
||||
pub fn insert_ebb_after(&mut self, ebb: Ebb, after: Ebb) {
|
||||
assert!(
|
||||
debug_assert!(
|
||||
!self.is_ebb_inserted(ebb),
|
||||
"Cannot insert EBB that is already in the layout"
|
||||
);
|
||||
assert!(
|
||||
debug_assert!(
|
||||
self.is_ebb_inserted(after),
|
||||
"EBB Insertion point not in the layout"
|
||||
);
|
||||
@@ -403,8 +403,8 @@ impl Layout {
|
||||
|
||||
/// Remove `ebb` from the layout.
|
||||
pub fn remove_ebb(&mut self, ebb: Ebb) {
|
||||
assert!(self.is_ebb_inserted(ebb), "EBB not in the layout");
|
||||
assert!(self.first_inst(ebb).is_none(), "EBB must be empty.");
|
||||
debug_assert!(self.is_ebb_inserted(ebb), "EBB not in the layout");
|
||||
debug_assert!(self.first_inst(ebb).is_none(), "EBB must be empty.");
|
||||
|
||||
// Clear the `ebb` node and extract links.
|
||||
let prev;
|
||||
@@ -521,8 +521,8 @@ impl Layout {
|
||||
|
||||
/// Append `inst` to the end of `ebb`.
|
||||
pub fn append_inst(&mut self, inst: Inst, ebb: Ebb) {
|
||||
assert_eq!(self.inst_ebb(inst), None);
|
||||
assert!(
|
||||
debug_assert_eq!(self.inst_ebb(inst), None);
|
||||
debug_assert!(
|
||||
self.is_ebb_inserted(ebb),
|
||||
"Cannot append instructions to EBB not in layout"
|
||||
);
|
||||
@@ -532,7 +532,7 @@ impl Layout {
|
||||
let inst_node = &mut self.insts[inst];
|
||||
inst_node.ebb = ebb.into();
|
||||
inst_node.prev = ebb_node.last_inst;
|
||||
assert!(inst_node.next.is_none());
|
||||
debug_assert!(inst_node.next.is_none());
|
||||
}
|
||||
if ebb_node.first_inst.is_none() {
|
||||
ebb_node.first_inst = inst.into();
|
||||
@@ -566,7 +566,7 @@ impl Layout {
|
||||
|
||||
/// Insert `inst` before the instruction `before` in the same EBB.
|
||||
pub fn insert_inst(&mut self, inst: Inst, before: Inst) {
|
||||
assert_eq!(self.inst_ebb(inst), None);
|
||||
debug_assert_eq!(self.inst_ebb(inst), None);
|
||||
let ebb = self.inst_ebb(before).expect(
|
||||
"Instruction before insertion point not in the layout",
|
||||
);
|
||||
@@ -645,7 +645,7 @@ impl Layout {
|
||||
let old_ebb = self.inst_ebb(before).expect(
|
||||
"The `before` instruction must be in the layout",
|
||||
);
|
||||
assert!(!self.is_ebb_inserted(new_ebb));
|
||||
debug_assert!(!self.is_ebb_inserted(new_ebb));
|
||||
|
||||
// Insert new_ebb after old_ebb.
|
||||
let next_ebb = self.ebbs[old_ebb].next;
|
||||
|
||||
@@ -19,7 +19,7 @@ pub struct ProgramPoint(u32);
|
||||
impl From<Inst> for ProgramPoint {
|
||||
fn from(inst: Inst) -> ProgramPoint {
|
||||
let idx = inst.index();
|
||||
assert!(idx < (u32::MAX / 2) as usize);
|
||||
debug_assert!(idx < (u32::MAX / 2) as usize);
|
||||
ProgramPoint((idx * 2) as u32)
|
||||
}
|
||||
}
|
||||
@@ -27,7 +27,7 @@ impl From<Inst> for ProgramPoint {
|
||||
impl From<Ebb> for ProgramPoint {
|
||||
fn from(ebb: Ebb) -> ProgramPoint {
|
||||
let idx = ebb.index();
|
||||
assert!(idx < (u32::MAX / 2) as usize);
|
||||
debug_assert!(idx < (u32::MAX / 2) as usize);
|
||||
ProgramPoint((idx * 2 + 1) as u32)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -244,7 +244,7 @@ impl StackSlots {
|
||||
/// Create a stack slot representing an incoming function argument.
|
||||
pub fn make_incoming_arg(&mut self, ty: Type, offset: StackOffset) -> StackSlot {
|
||||
let mut data = StackSlotData::new(StackSlotKind::IncomingArg, ty.bytes());
|
||||
assert!(offset <= StackOffset::max_value() - data.size as StackOffset);
|
||||
debug_assert!(offset <= StackOffset::max_value() - data.size as StackOffset);
|
||||
data.offset = Some(offset);
|
||||
self.push(data)
|
||||
}
|
||||
@@ -269,7 +269,7 @@ impl StackSlots {
|
||||
|
||||
// No existing slot found. Make one and insert it into `outgoing`.
|
||||
let mut data = StackSlotData::new(StackSlotKind::OutgoingArg, size);
|
||||
assert!(offset <= StackOffset::max_value() - size as StackOffset);
|
||||
debug_assert!(offset <= StackOffset::max_value() - size as StackOffset);
|
||||
data.offset = Some(offset);
|
||||
let ss = self.slots.push(data);
|
||||
self.outgoing.insert(inspos, ss);
|
||||
|
||||
@@ -107,7 +107,7 @@ impl ArgAssigner for Args {
|
||||
// Assign a stack location.
|
||||
let loc = ArgumentLoc::Stack(self.offset as i32);
|
||||
self.offset += self.pointer_bytes;
|
||||
assert!(self.offset <= i32::MAX as u32);
|
||||
debug_assert!(self.offset <= i32::MAX as u32);
|
||||
loc.into()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ impl ArgAssigner for Args {
|
||||
// Assign a stack location.
|
||||
let loc = ArgumentLoc::Stack(self.offset as i32);
|
||||
self.offset += self.pointer_bytes;
|
||||
assert!(self.offset <= i32::MAX as u32);
|
||||
debug_assert!(self.offset <= i32::MAX as u32);
|
||||
loc.into()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -133,7 +133,7 @@ fn put_sb<CS: CodeSink + ?Sized>(bits: u16, imm: i64, rs1: RegUnit, rs2: RegUnit
|
||||
let rs1 = u32::from(rs1) & 0x1f;
|
||||
let rs2 = u32::from(rs2) & 0x1f;
|
||||
|
||||
assert!(is_signed_int(imm, 13, 1), "SB out of range {:#x}", imm);
|
||||
debug_assert!(is_signed_int(imm, 13, 1), "SB out of range {:#x}", imm);
|
||||
let imm = imm as u32;
|
||||
|
||||
// 0-6: opcode
|
||||
@@ -164,7 +164,7 @@ fn put_uj<CS: CodeSink + ?Sized>(bits: u16, imm: i64, rd: RegUnit, sink: &mut CS
|
||||
let opcode5 = bits & 0x1f;
|
||||
let rd = u32::from(rd) & 0x1f;
|
||||
|
||||
assert!(is_signed_int(imm, 21, 1), "UJ out of range {:#x}", imm);
|
||||
debug_assert!(is_signed_int(imm, 21, 1), "UJ out of range {:#x}", imm);
|
||||
let imm = imm as u32;
|
||||
|
||||
// 0-6: opcode
|
||||
|
||||
@@ -85,15 +85,15 @@ fn legalize_entry_params(func: &mut Function, entry: Ebb) {
|
||||
ArgumentPurpose::FramePointer => {}
|
||||
ArgumentPurpose::CalleeSaved => {}
|
||||
ArgumentPurpose::StructReturn => {
|
||||
assert!(!has_sret, "Multiple sret arguments found");
|
||||
debug_assert!(!has_sret, "Multiple sret arguments found");
|
||||
has_sret = true;
|
||||
}
|
||||
ArgumentPurpose::VMContext => {
|
||||
assert!(!has_vmctx, "Multiple vmctx arguments found");
|
||||
debug_assert!(!has_vmctx, "Multiple vmctx arguments found");
|
||||
has_vmctx = true;
|
||||
}
|
||||
ArgumentPurpose::SignatureId => {
|
||||
assert!(!has_sigid, "Multiple sigid arguments found");
|
||||
debug_assert!(!has_sigid, "Multiple sigid arguments found");
|
||||
has_sigid = true;
|
||||
}
|
||||
_ => panic!("Unexpected special-purpose arg {}", abi_type),
|
||||
@@ -103,7 +103,7 @@ fn legalize_entry_params(func: &mut Function, entry: Ebb) {
|
||||
// Compute the value we want for `arg` from the legalized ABI parameters.
|
||||
let mut get_arg = |func: &mut Function, ty| {
|
||||
let abi_type = func.signature.params[abi_arg];
|
||||
assert_eq!(
|
||||
debug_assert_eq!(
|
||||
abi_type.purpose,
|
||||
ArgumentPurpose::Normal,
|
||||
"Can't legalize special-purpose argument"
|
||||
@@ -118,7 +118,7 @@ fn legalize_entry_params(func: &mut Function, entry: Ebb) {
|
||||
let converted = convert_from_abi(&mut pos, arg_type, Some(arg), &mut get_arg);
|
||||
// The old `arg` is no longer an attached EBB argument, but there are probably still
|
||||
// uses of the value.
|
||||
assert_eq!(pos.func.dfg.resolve_aliases(arg), converted);
|
||||
debug_assert_eq!(pos.func.dfg.resolve_aliases(arg), converted);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -138,19 +138,19 @@ fn legalize_entry_params(func: &mut Function, entry: Ebb) {
|
||||
}
|
||||
// These can be meaningfully added by `legalize_signature()`.
|
||||
ArgumentPurpose::Link => {
|
||||
assert!(!has_link, "Multiple link parameters found");
|
||||
debug_assert!(!has_link, "Multiple link parameters found");
|
||||
has_link = true;
|
||||
}
|
||||
ArgumentPurpose::StructReturn => {
|
||||
assert!(!has_sret, "Multiple sret parameters found");
|
||||
debug_assert!(!has_sret, "Multiple sret parameters found");
|
||||
has_sret = true;
|
||||
}
|
||||
ArgumentPurpose::VMContext => {
|
||||
assert!(!has_vmctx, "Multiple vmctx parameters found");
|
||||
debug_assert!(!has_vmctx, "Multiple vmctx parameters found");
|
||||
has_vmctx = true;
|
||||
}
|
||||
ArgumentPurpose::SignatureId => {
|
||||
assert!(!has_sigid, "Multiple sigid parameters found");
|
||||
debug_assert!(!has_sigid, "Multiple sigid parameters found");
|
||||
has_sigid = true;
|
||||
}
|
||||
}
|
||||
@@ -180,7 +180,7 @@ where
|
||||
// We theoretically allow for call instructions that return a number of fixed results before
|
||||
// the call return values. In practice, it doesn't happen.
|
||||
let fixed_results = pos.func.dfg[call].opcode().constraints().fixed_results();
|
||||
assert_eq!(fixed_results, 0, "Fixed results on calls not supported");
|
||||
debug_assert_eq!(fixed_results, 0, "Fixed results on calls not supported");
|
||||
|
||||
let results = pos.func.dfg.detach_results(call);
|
||||
let mut next_res = 0;
|
||||
@@ -209,7 +209,7 @@ where
|
||||
}
|
||||
};
|
||||
let v = convert_from_abi(pos, res_type, Some(res), &mut get_res);
|
||||
assert_eq!(pos.func.dfg.resolve_aliases(res), v);
|
||||
debug_assert_eq!(pos.func.dfg.resolve_aliases(res), v);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -238,7 +238,7 @@ where
|
||||
let arg_type = match get_arg(pos.func, ty) {
|
||||
Ok(v) => {
|
||||
debug_assert_eq!(pos.func.dfg.value_type(v), ty);
|
||||
assert_eq!(into_result, None);
|
||||
debug_assert_eq!(into_result, None);
|
||||
return v;
|
||||
}
|
||||
Err(t) => t,
|
||||
@@ -274,7 +274,7 @@ where
|
||||
}
|
||||
// Construct a `ty` by bit-casting from an integer type.
|
||||
ValueConversion::IntBits => {
|
||||
assert!(!ty.is_int());
|
||||
debug_assert!(!ty.is_int());
|
||||
let abi_ty = Type::int(ty.bits()).expect("Invalid type for conversion");
|
||||
let arg = convert_from_abi(pos, abi_ty, None, get_arg);
|
||||
pos.ins().with_results([into_result]).bitcast(ty, arg)
|
||||
@@ -340,7 +340,7 @@ fn convert_to_abi<PutArg>(
|
||||
convert_to_abi(pos, cfg, hi, put_arg);
|
||||
}
|
||||
ValueConversion::IntBits => {
|
||||
assert!(!ty.is_int());
|
||||
debug_assert!(!ty.is_int());
|
||||
let abi_ty = Type::int(ty.bits()).expect("Invalid type for conversion");
|
||||
let arg = pos.ins().bitcast(abi_ty, value);
|
||||
convert_to_abi(pos, cfg, arg, put_arg);
|
||||
@@ -555,7 +555,7 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph
|
||||
legalize_inst_arguments(pos, cfg, abi_args, |func, abi_arg| {
|
||||
func.signature.returns[abi_arg]
|
||||
});
|
||||
assert_eq!(pos.func.dfg.inst_variable_args(inst).len(), abi_args);
|
||||
debug_assert_eq!(pos.func.dfg.inst_variable_args(inst).len(), abi_args);
|
||||
|
||||
// Append special return arguments for any `sret`, `link`, and `vmctx` return values added to
|
||||
// the legalized signature. These values should simply be propagated from the entry block
|
||||
|
||||
@@ -18,7 +18,7 @@ pub fn expand_global_addr(
|
||||
// Unpack the instruction.
|
||||
let gv = match func.dfg[inst] {
|
||||
ir::InstructionData::UnaryGlobalVar { opcode, global_var } => {
|
||||
assert_eq!(opcode, ir::Opcode::GlobalAddr);
|
||||
debug_assert_eq!(opcode, ir::Opcode::GlobalAddr);
|
||||
global_var
|
||||
}
|
||||
_ => panic!("Wanted global_addr: {}", func.dfg.display_inst(inst, None)),
|
||||
|
||||
@@ -24,7 +24,7 @@ pub fn expand_heap_addr(
|
||||
arg,
|
||||
imm,
|
||||
} => {
|
||||
assert_eq!(opcode, ir::Opcode::HeapAddr);
|
||||
debug_assert_eq!(opcode, ir::Opcode::HeapAddr);
|
||||
(heap, arg, imm.into())
|
||||
}
|
||||
_ => panic!("Wanted heap_addr: {}", func.dfg.display_inst(inst, None)),
|
||||
|
||||
@@ -248,7 +248,7 @@ fn expand_fconst(
|
||||
_isa: &TargetIsa,
|
||||
) {
|
||||
let ty = func.dfg.value_type(func.dfg.first_result(inst));
|
||||
assert!(!ty.is_vector(), "Only scalar fconst supported: {}", ty);
|
||||
debug_assert!(!ty.is_vector(), "Only scalar fconst supported: {}", ty);
|
||||
|
||||
// In the future, we may want to generate constant pool entries for these constants, but for
|
||||
// now use an `iconst` and a bit cast.
|
||||
|
||||
@@ -127,7 +127,7 @@ fn split_any(
|
||||
while let Some(repair) = repairs.pop() {
|
||||
for (_, inst) in cfg.pred_iter(repair.ebb) {
|
||||
let branch_opc = pos.func.dfg[inst].opcode();
|
||||
assert!(
|
||||
debug_assert!(
|
||||
branch_opc.is_branch(),
|
||||
"Predecessor not a branch: {}",
|
||||
pos.func.dfg.display_inst(inst, None)
|
||||
@@ -198,7 +198,7 @@ fn split_value(
|
||||
// This is an instruction result. See if the value was created by a `concat`
|
||||
// instruction.
|
||||
if let InstructionData::Binary { opcode, args, .. } = pos.func.dfg[inst] {
|
||||
assert_eq!(num, 0);
|
||||
debug_assert_eq!(num, 0);
|
||||
if opcode == concat {
|
||||
reuse = Some((args[0], args[1]));
|
||||
}
|
||||
|
||||
@@ -192,10 +192,10 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
// U32 div, rem by a power-of-2
|
||||
&DivRemByConstInfo::DivU32(n1, d) |
|
||||
&DivRemByConstInfo::RemU32(n1, d) if d.is_power_of_two() => {
|
||||
assert!(d >= 2);
|
||||
debug_assert!(d >= 2);
|
||||
// compute k where d == 2^k
|
||||
let k = d.trailing_zeros();
|
||||
assert!(k >= 1 && k <= 31);
|
||||
debug_assert!(k >= 1 && k <= 31);
|
||||
if isRem {
|
||||
let mask = (1u64 << k) - 1;
|
||||
pos.func.dfg.replace(inst).band_imm(n1, mask as i64);
|
||||
@@ -207,7 +207,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
// U32 div, rem by non-power-of-2
|
||||
&DivRemByConstInfo::DivU32(n1, d) |
|
||||
&DivRemByConstInfo::RemU32(n1, d) => {
|
||||
assert!(d >= 3);
|
||||
debug_assert!(d >= 3);
|
||||
let MU32 {
|
||||
mulBy,
|
||||
doAdd,
|
||||
@@ -217,7 +217,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
let q0 = pos.ins().iconst(I32, mulBy as i64);
|
||||
let q1 = pos.ins().umulhi(n1, q0);
|
||||
if doAdd {
|
||||
assert!(shiftBy >= 1 && shiftBy <= 32);
|
||||
debug_assert!(shiftBy >= 1 && shiftBy <= 32);
|
||||
let t1 = pos.ins().isub(n1, q1);
|
||||
let t2 = pos.ins().ushr_imm(t1, 1);
|
||||
let t3 = pos.ins().iadd(t2, q1);
|
||||
@@ -226,7 +226,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
debug_assert!(shiftBy != 1);
|
||||
qf = pos.ins().ushr_imm(t3, (shiftBy - 1) as i64);
|
||||
} else {
|
||||
assert!(shiftBy >= 0 && shiftBy <= 31);
|
||||
debug_assert!(shiftBy >= 0 && shiftBy <= 31);
|
||||
// Whereas there are known cases here for shiftBy == 0.
|
||||
if shiftBy > 0 {
|
||||
qf = pos.ins().ushr_imm(q1, shiftBy as i64);
|
||||
@@ -264,10 +264,10 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
// U64 div, rem by a power-of-2
|
||||
&DivRemByConstInfo::DivU64(n1, d) |
|
||||
&DivRemByConstInfo::RemU64(n1, d) if d.is_power_of_two() => {
|
||||
assert!(d >= 2);
|
||||
debug_assert!(d >= 2);
|
||||
// compute k where d == 2^k
|
||||
let k = d.trailing_zeros();
|
||||
assert!(k >= 1 && k <= 63);
|
||||
debug_assert!(k >= 1 && k <= 63);
|
||||
if isRem {
|
||||
let mask = (1u64 << k) - 1;
|
||||
pos.func.dfg.replace(inst).band_imm(n1, mask as i64);
|
||||
@@ -279,7 +279,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
// U64 div, rem by non-power-of-2
|
||||
&DivRemByConstInfo::DivU64(n1, d) |
|
||||
&DivRemByConstInfo::RemU64(n1, d) => {
|
||||
assert!(d >= 3);
|
||||
debug_assert!(d >= 3);
|
||||
let MU64 {
|
||||
mulBy,
|
||||
doAdd,
|
||||
@@ -289,7 +289,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
let q0 = pos.ins().iconst(I64, mulBy as i64);
|
||||
let q1 = pos.ins().umulhi(n1, q0);
|
||||
if doAdd {
|
||||
assert!(shiftBy >= 1 && shiftBy <= 64);
|
||||
debug_assert!(shiftBy >= 1 && shiftBy <= 64);
|
||||
let t1 = pos.ins().isub(n1, q1);
|
||||
let t2 = pos.ins().ushr_imm(t1, 1);
|
||||
let t3 = pos.ins().iadd(t2, q1);
|
||||
@@ -298,7 +298,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
debug_assert!(shiftBy != 1);
|
||||
qf = pos.ins().ushr_imm(t3, (shiftBy - 1) as i64);
|
||||
} else {
|
||||
assert!(shiftBy >= 0 && shiftBy <= 63);
|
||||
debug_assert!(shiftBy >= 0 && shiftBy <= 63);
|
||||
// Whereas there are known cases here for shiftBy == 0.
|
||||
if shiftBy > 0 {
|
||||
qf = pos.ins().ushr_imm(q1, shiftBy as i64);
|
||||
@@ -339,7 +339,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
&DivRemByConstInfo::RemS32(n1, d) => {
|
||||
if let Some((isNeg, k)) = isPowerOf2_S32(d) {
|
||||
// k can be 31 only in the case that d is -2^31.
|
||||
assert!(k >= 1 && k <= 31);
|
||||
debug_assert!(k >= 1 && k <= 31);
|
||||
let t1 = if k - 1 == 0 {
|
||||
n1
|
||||
} else {
|
||||
@@ -363,7 +363,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
}
|
||||
} else {
|
||||
// S32 div, rem by a non-power-of-2
|
||||
assert!(d < -2 || d > 2);
|
||||
debug_assert!(d < -2 || d > 2);
|
||||
let MS32 { mulBy, shiftBy } = magicS32(d);
|
||||
let q0 = pos.ins().iconst(I32, mulBy as i64);
|
||||
let q1 = pos.ins().smulhi(n1, q0);
|
||||
@@ -374,7 +374,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
} else {
|
||||
q1
|
||||
};
|
||||
assert!(shiftBy >= 0 && shiftBy <= 31);
|
||||
debug_assert!(shiftBy >= 0 && shiftBy <= 31);
|
||||
let q3 = if shiftBy == 0 {
|
||||
q2
|
||||
} else {
|
||||
@@ -416,7 +416,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
&DivRemByConstInfo::RemS64(n1, d) => {
|
||||
if let Some((isNeg, k)) = isPowerOf2_S64(d) {
|
||||
// k can be 63 only in the case that d is -2^63.
|
||||
assert!(k >= 1 && k <= 63);
|
||||
debug_assert!(k >= 1 && k <= 63);
|
||||
let t1 = if k - 1 == 0 {
|
||||
n1
|
||||
} else {
|
||||
@@ -440,7 +440,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
}
|
||||
} else {
|
||||
// S64 div, rem by a non-power-of-2
|
||||
assert!(d < -2 || d > 2);
|
||||
debug_assert!(d < -2 || d > 2);
|
||||
let MS64 { mulBy, shiftBy } = magicS64(d);
|
||||
let q0 = pos.ins().iconst(I64, mulBy);
|
||||
let q1 = pos.ins().smulhi(n1, q0);
|
||||
@@ -451,7 +451,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
} else {
|
||||
q1
|
||||
};
|
||||
assert!(shiftBy >= 0 && shiftBy <= 63);
|
||||
debug_assert!(shiftBy >= 0 && shiftBy <= 63);
|
||||
let q3 = if shiftBy == 0 {
|
||||
q2
|
||||
} else {
|
||||
|
||||
@@ -238,7 +238,7 @@ impl<'a> Context<'a> {
|
||||
// 1. It is defined in a dominating EBB and live-in to `ebb`.
|
||||
// 2. If is itself a parameter value for `ebb`. This case should already have been
|
||||
// eliminated by `isolate_conflicting_params()`.
|
||||
assert!(
|
||||
debug_assert!(
|
||||
lr.def() != ebb.into(),
|
||||
"{} parameter {} was missed by isolate_conflicting_params()",
|
||||
ebb,
|
||||
@@ -494,8 +494,8 @@ impl<'a> Context<'a> {
|
||||
// Second everything else in reverse layout order. Again, short forward branches get merged
|
||||
// first. There can also be backwards branches mixed in here, though, as long as they are
|
||||
// not loop backedges.
|
||||
assert!(self.predecessors.is_empty());
|
||||
assert!(self.backedges.is_empty());
|
||||
debug_assert!(self.predecessors.is_empty());
|
||||
debug_assert!(self.backedges.is_empty());
|
||||
for (pred_ebb, pred_inst) in self.cfg.pred_iter(ebb) {
|
||||
if self.preorder.dominates(ebb, pred_ebb) {
|
||||
self.backedges.push(pred_inst);
|
||||
@@ -957,7 +957,8 @@ impl VirtualCopies {
|
||||
|
||||
/// Indicate that `param` is now fully merged.
|
||||
pub fn merged_param(&mut self, param: Value, func: &Function) {
|
||||
assert_eq!(self.params.pop(), Some(param));
|
||||
let popped = self.params.pop();
|
||||
debug_assert_eq!(popped, Some(param));
|
||||
|
||||
// The domtree pre-order in `self.params` guarantees that all parameters defined at the
|
||||
// same EBB will be adjacent. This means we can see when all parameters at an EBB have been
|
||||
|
||||
@@ -246,7 +246,7 @@ impl<'a> Context<'a> {
|
||||
/// Return the set of remaining allocatable registers after filtering out the dead arguments.
|
||||
fn color_entry_params(&mut self, args: &[LiveValue]) -> AvailableRegs {
|
||||
let sig = &self.cur.func.signature;
|
||||
assert_eq!(sig.params.len(), args.len());
|
||||
debug_assert_eq!(sig.params.len(), args.len());
|
||||
|
||||
let mut regs = AvailableRegs::new(&self.usable_regs);
|
||||
|
||||
@@ -271,7 +271,7 @@ impl<'a> Context<'a> {
|
||||
|
||||
}
|
||||
// The spiller will have assigned an incoming stack slot already.
|
||||
Affinity::Stack => assert!(abi.location.is_stack()),
|
||||
Affinity::Stack => debug_assert!(abi.location.is_stack()),
|
||||
// This is a ghost value, unused in the function. Don't assign it to a location
|
||||
// either.
|
||||
Affinity::None => {}
|
||||
@@ -340,7 +340,7 @@ impl<'a> Context<'a> {
|
||||
} else {
|
||||
// This is a multi-way branch like `br_table`. We only support arguments on
|
||||
// single-destination branches.
|
||||
assert_eq!(
|
||||
debug_assert_eq!(
|
||||
self.cur.func.dfg.inst_variable_args(inst).len(),
|
||||
0,
|
||||
"Can't handle EBB arguments: {}",
|
||||
@@ -586,7 +586,7 @@ impl<'a> Context<'a> {
|
||||
// Now handle the EBB arguments.
|
||||
let br_args = self.cur.func.dfg.inst_variable_args(inst);
|
||||
let dest_args = self.cur.func.dfg.ebb_params(dest);
|
||||
assert_eq!(br_args.len(), dest_args.len());
|
||||
debug_assert_eq!(br_args.len(), dest_args.len());
|
||||
for (&dest_arg, &br_arg) in dest_args.iter().zip(br_args) {
|
||||
// The first time we encounter a branch to `dest`, we get to pick the location. The
|
||||
// following times we see a branch to `dest`, we must follow suit.
|
||||
@@ -631,7 +631,7 @@ impl<'a> Context<'a> {
|
||||
fn color_ebb_params(&mut self, inst: Inst, dest: Ebb) {
|
||||
let br_args = self.cur.func.dfg.inst_variable_args(inst);
|
||||
let dest_args = self.cur.func.dfg.ebb_params(dest);
|
||||
assert_eq!(br_args.len(), dest_args.len());
|
||||
debug_assert_eq!(br_args.len(), dest_args.len());
|
||||
for (&dest_arg, &br_arg) in dest_args.iter().zip(br_args) {
|
||||
match self.cur.func.locations[dest_arg] {
|
||||
ValueLoc::Unassigned => {
|
||||
@@ -741,7 +741,7 @@ impl<'a> Context<'a> {
|
||||
// It's technically possible for a call instruction to have fixed results before the
|
||||
// variable list of results, but we have no known instances of that.
|
||||
// Just assume all results are variable return values.
|
||||
assert_eq!(defs.len(), self.cur.func.dfg.signatures[sig].returns.len());
|
||||
debug_assert_eq!(defs.len(), self.cur.func.dfg.signatures[sig].returns.len());
|
||||
for (i, lv) in defs.iter().enumerate() {
|
||||
let abi = self.cur.func.dfg.signatures[sig].returns[i];
|
||||
if let ArgumentLoc::Reg(reg) = abi.location {
|
||||
@@ -787,7 +787,7 @@ impl<'a> Context<'a> {
|
||||
}
|
||||
|
||||
let ok = self.solver.add_fixed_output(rc, reg);
|
||||
assert!(ok, "Couldn't clear fixed output interference for {}", value);
|
||||
debug_assert!(ok, "Couldn't clear fixed output interference for {}", value);
|
||||
}
|
||||
self.cur.func.locations[value] = ValueLoc::Reg(reg);
|
||||
}
|
||||
@@ -858,11 +858,8 @@ impl<'a> Context<'a> {
|
||||
Ok(regs) => return regs,
|
||||
Err(SolverError::Divert(rc)) => {
|
||||
// Do we have any live-through `rc` registers that are not already variables?
|
||||
assert!(
|
||||
self.try_add_var(rc, throughs),
|
||||
"Ran out of registers in {}",
|
||||
rc
|
||||
);
|
||||
let added = self.try_add_var(rc, throughs);
|
||||
debug_assert!(added, "Ran out of registers in {}", rc);
|
||||
}
|
||||
Err(SolverError::Global(value)) => {
|
||||
dbg!("Not enough global registers for {}, trying as local", value);
|
||||
@@ -941,7 +938,7 @@ impl<'a> Context<'a> {
|
||||
// It is very unlikely (impossible?) that we would need more than one spill per top-level
|
||||
// register class, so avoid allocation by using a fixed array here.
|
||||
let mut slot = [PackedOption::default(); 8];
|
||||
assert!(spills <= slot.len(), "Too many spills ({})", spills);
|
||||
debug_assert!(spills <= slot.len(), "Too many spills ({})", spills);
|
||||
|
||||
for m in self.solver.moves() {
|
||||
match *m {
|
||||
|
||||
@@ -207,7 +207,7 @@ impl LiveValueTracker {
|
||||
let first_arg = self.live.values.len();
|
||||
for &value in dfg.ebb_params(ebb) {
|
||||
let lr = &liveness[value];
|
||||
assert_eq!(lr.def(), ebb.into());
|
||||
debug_assert_eq!(lr.def(), ebb.into());
|
||||
match lr.def_local_end().into() {
|
||||
ExpandedProgramPoint::Inst(endpoint) => {
|
||||
self.live.push(value, endpoint, lr);
|
||||
@@ -215,7 +215,7 @@ impl LiveValueTracker {
|
||||
ExpandedProgramPoint::Ebb(local_ebb) => {
|
||||
// This is a dead EBB parameter which is not even live into the first
|
||||
// instruction in the EBB.
|
||||
assert_eq!(
|
||||
debug_assert_eq!(
|
||||
local_ebb,
|
||||
ebb,
|
||||
"EBB parameter live range ends at wrong EBB header"
|
||||
@@ -273,7 +273,7 @@ impl LiveValueTracker {
|
||||
let first_def = self.live.values.len();
|
||||
for &value in dfg.inst_results(inst) {
|
||||
let lr = &liveness[value];
|
||||
assert_eq!(lr.def(), inst.into());
|
||||
debug_assert_eq!(lr.def(), inst.into());
|
||||
match lr.def_local_end().into() {
|
||||
ExpandedProgramPoint::Inst(endpoint) => {
|
||||
self.live.push(value, endpoint, lr);
|
||||
|
||||
@@ -251,7 +251,7 @@ fn extend_to_use(
|
||||
forest: &mut LiveRangeForest,
|
||||
) {
|
||||
// This is our scratch working space, and we'll leave it empty when we return.
|
||||
assert!(worklist.is_empty());
|
||||
debug_assert!(worklist.is_empty());
|
||||
|
||||
// Extend the range locally in `ebb`.
|
||||
// If there already was a live interval in that block, we're done.
|
||||
@@ -338,7 +338,7 @@ impl Liveness {
|
||||
let old = self.ranges.insert(
|
||||
LiveRange::new(value, def.into(), affinity),
|
||||
);
|
||||
assert!(old.is_none(), "{} already has a live range", value);
|
||||
debug_assert!(old.is_none(), "{} already has a live range", value);
|
||||
}
|
||||
|
||||
/// Move the definition of `value` to `def`.
|
||||
@@ -367,7 +367,7 @@ impl Liveness {
|
||||
debug_assert_eq!(Some(ebb), layout.inst_ebb(user));
|
||||
let lr = self.ranges.get_mut(value).expect("Value has no live range");
|
||||
let livein = lr.extend_in_ebb(ebb, user, layout, &mut self.forest);
|
||||
assert!(!livein, "{} should already be live in {}", value, ebb);
|
||||
debug_assert!(!livein, "{} should already be live in {}", value, ebb);
|
||||
&mut lr.affinity
|
||||
}
|
||||
|
||||
|
||||
@@ -253,7 +253,7 @@ impl<PO: ProgramOrder> GenLiveRange<PO> {
|
||||
order.cmp(to, self.def_begin) != Ordering::Less
|
||||
{
|
||||
let to_pp = to.into();
|
||||
assert_ne!(
|
||||
debug_assert_ne!(
|
||||
to_pp,
|
||||
self.def_begin,
|
||||
"Can't use value in the defining instruction."
|
||||
|
||||
@@ -145,7 +145,7 @@ impl<'a> Context<'a> {
|
||||
);
|
||||
|
||||
if self.cur.func.layout.entry_block() == Some(ebb) {
|
||||
assert_eq!(liveins.len(), 0);
|
||||
debug_assert_eq!(liveins.len(), 0);
|
||||
self.visit_entry_params(ebb, args);
|
||||
} else {
|
||||
self.visit_ebb_params(ebb, args);
|
||||
@@ -155,7 +155,7 @@ impl<'a> Context<'a> {
|
||||
/// Visit the parameters on the entry block.
|
||||
/// These values have ABI constraints from the function signature.
|
||||
fn visit_entry_params(&mut self, ebb: Ebb, args: &[LiveValue]) {
|
||||
assert_eq!(self.cur.func.signature.params.len(), args.len());
|
||||
debug_assert_eq!(self.cur.func.signature.params.len(), args.len());
|
||||
self.cur.goto_first_inst(ebb);
|
||||
|
||||
for (arg_idx, arg) in args.iter().enumerate() {
|
||||
@@ -175,7 +175,7 @@ impl<'a> Context<'a> {
|
||||
}
|
||||
}
|
||||
ArgumentLoc::Stack(_) => {
|
||||
assert!(arg.affinity.is_stack());
|
||||
debug_assert!(arg.affinity.is_stack());
|
||||
}
|
||||
ArgumentLoc::Unassigned => panic!("Unexpected ABI location"),
|
||||
}
|
||||
@@ -203,7 +203,7 @@ impl<'a> Context<'a> {
|
||||
);
|
||||
|
||||
// Identify reload candidates.
|
||||
assert!(self.candidates.is_empty());
|
||||
debug_assert!(self.candidates.is_empty());
|
||||
self.find_candidates(inst, constraints);
|
||||
|
||||
// Insert fill instructions before `inst` and replace `cand.value` with the filled value.
|
||||
@@ -375,7 +375,7 @@ fn handle_abi_args(
|
||||
isa: &TargetIsa,
|
||||
liveness: &Liveness,
|
||||
) {
|
||||
assert_eq!(abi_types.len(), var_args.len());
|
||||
debug_assert_eq!(abi_types.len(), var_args.len());
|
||||
for ((abi, &arg), argidx) in abi_types.iter().zip(var_args).zip(offset..) {
|
||||
if abi.location.is_reg() {
|
||||
let lv = liveness.get(arg).expect("Missing live range for ABI arg");
|
||||
|
||||
@@ -565,7 +565,7 @@ impl Solver {
|
||||
dbg!("-> converting variable {} to a fixed constraint", v);
|
||||
// The spiller is responsible for ensuring that all constraints on the uses of a
|
||||
// value are compatible.
|
||||
assert!(
|
||||
debug_assert!(
|
||||
v.constraint.contains(to),
|
||||
"Incompatible constraints for {}",
|
||||
value
|
||||
@@ -665,7 +665,7 @@ impl Solver {
|
||||
// No variable, then it must be a fixed reassignment.
|
||||
if let Some(a) = self.assignments.get(value) {
|
||||
dbg!("-> already fixed assignment {}", a);
|
||||
assert!(
|
||||
debug_assert!(
|
||||
constraint.contains(a.to),
|
||||
"Incompatible constraints for {}",
|
||||
value
|
||||
@@ -708,7 +708,7 @@ impl Solver {
|
||||
/// Call this method to indicate that there will be no more fixed input reassignments added
|
||||
/// and prepare for the output side constraints.
|
||||
pub fn inputs_done(&mut self) {
|
||||
assert!(!self.has_fixed_input_conflicts());
|
||||
debug_assert!(!self.has_fixed_input_conflicts());
|
||||
|
||||
// At this point, `regs_out` contains the `to` side of the input reassignments, and the
|
||||
// `from` side has already been marked as available in `regs_in`.
|
||||
@@ -746,7 +746,7 @@ impl Solver {
|
||||
// interference constraints on the output side.
|
||||
// Variables representing tied operands will get their `is_output` flag set again later.
|
||||
if let Some(v) = self.vars.iter_mut().find(|v| v.value == value) {
|
||||
assert!(v.is_input);
|
||||
debug_assert!(v.is_input);
|
||||
v.is_output = false;
|
||||
return;
|
||||
}
|
||||
@@ -782,7 +782,7 @@ impl Solver {
|
||||
|
||||
// Check if a variable was created.
|
||||
if let Some(v) = self.vars.iter_mut().find(|v| v.value == value) {
|
||||
assert!(v.is_input);
|
||||
debug_assert!(v.is_input);
|
||||
v.is_output = true;
|
||||
v.is_global = is_global;
|
||||
return None;
|
||||
@@ -1026,7 +1026,7 @@ impl Solver {
|
||||
/// Returns the number of spills that had to be emitted.
|
||||
pub fn schedule_moves(&mut self, regs: &AllocatableSet) -> usize {
|
||||
self.collect_moves();
|
||||
assert!(self.fills.is_empty());
|
||||
debug_assert!(self.fills.is_empty());
|
||||
|
||||
let mut num_spill_slots = 0;
|
||||
let mut avail = regs.clone();
|
||||
|
||||
@@ -242,7 +242,7 @@ impl<'a> Context<'a> {
|
||||
debug_assert_eq!(self.cur.current_ebb(), Some(ebb));
|
||||
|
||||
// We may need to resolve register constraints if there are any noteworthy uses.
|
||||
assert!(self.reg_uses.is_empty());
|
||||
debug_assert!(self.reg_uses.is_empty());
|
||||
self.collect_reg_uses(inst, ebb, constraints);
|
||||
|
||||
// Calls usually have fixed register uses.
|
||||
|
||||
@@ -140,7 +140,7 @@ impl VirtRegs {
|
||||
func: &Function,
|
||||
preorder: &DominatorTreePreorder,
|
||||
) -> VirtReg {
|
||||
assert_eq!(self.get(single), None, "Expected singleton {}", single);
|
||||
debug_assert_eq!(self.get(single), None, "Expected singleton {}", single);
|
||||
|
||||
// Make sure `big` has a vreg.
|
||||
let vreg = self.get(big).unwrap_or_else(|| {
|
||||
@@ -208,7 +208,7 @@ impl VirtRegs {
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
debug_assert_eq!(
|
||||
values.len(),
|
||||
singletons + cleared,
|
||||
"Can't unify partial virtual registers"
|
||||
|
||||
@@ -19,7 +19,7 @@ pub fn layout_stack(frame: &mut StackSlots, alignment: StackSize) -> Result<Stac
|
||||
// Each object and the whole stack frame must fit in 2 GB such that any relative offset within
|
||||
// the frame fits in a `StackOffset`.
|
||||
let max_size = StackOffset::max_value() as StackSize;
|
||||
assert!(alignment.is_power_of_two() && alignment <= max_size);
|
||||
debug_assert!(alignment.is_power_of_two() && alignment <= max_size);
|
||||
|
||||
// We assume a stack that grows toward lower addresses as implemented by modern ISAs. The
|
||||
// stack layout from high to low addresses will be:
|
||||
@@ -70,7 +70,7 @@ pub fn layout_stack(frame: &mut StackSlots, alignment: StackSize) -> Result<Stac
|
||||
// The offset is negative, growing downwards.
|
||||
// Start with the smallest alignments for better packing.
|
||||
let mut offset = incoming_min;
|
||||
assert!(min_align.is_power_of_two());
|
||||
debug_assert!(min_align.is_power_of_two());
|
||||
while min_align <= alignment {
|
||||
for ss in frame.keys() {
|
||||
let slot = frame[ss].clone();
|
||||
|
||||
@@ -187,7 +187,7 @@ mod details {
|
||||
let duration = self.start.elapsed();
|
||||
dbg!("timing: Ending {}", self.pass);
|
||||
let old_cur = CURRENT_PASS.with(|p| p.replace(self.prev));
|
||||
assert_eq!(self.pass, old_cur, "Timing tokens dropped out of order");
|
||||
debug_assert_eq!(self.pass, old_cur, "Timing tokens dropped out of order");
|
||||
PASS_TIME.with(|rc| {
|
||||
let mut table = rc.borrow_mut();
|
||||
table.pass[self.pass.idx()].total += duration;
|
||||
|
||||
@@ -15,14 +15,14 @@ pub struct Variable(u32);
|
||||
impl Variable {
|
||||
/// Create a new Variable with the given index.
|
||||
pub fn with_u32(index: u32) -> Self {
|
||||
assert!(index < u32::MAX);
|
||||
debug_assert!(index < u32::MAX);
|
||||
Variable(index)
|
||||
}
|
||||
}
|
||||
|
||||
impl EntityRef for Variable {
|
||||
fn new(index: usize) -> Self {
|
||||
assert!(index < (u32::MAX as usize));
|
||||
debug_assert!(index < (u32::MAX as usize));
|
||||
Variable(index as u32)
|
||||
}
|
||||
|
||||
|
||||
@@ -1026,7 +1026,7 @@ fn get_heap_addr(
|
||||
use std::cmp::min;
|
||||
|
||||
let guard_size: i64 = builder.func.heaps[heap].guard_size.into();
|
||||
assert!(guard_size > 0, "Heap guard pages currently required");
|
||||
debug_assert!(guard_size > 0, "Heap guard pages currently required");
|
||||
|
||||
// Generate `heap_addr` instructions that are friendly to CSE by checking offsets that are
|
||||
// multiples of the guard size. Add one to make sure that we check the pointer itself is in
|
||||
|
||||
@@ -74,8 +74,8 @@ impl FuncTranslator {
|
||||
func.name,
|
||||
func.signature
|
||||
);
|
||||
assert_eq!(func.dfg.num_ebbs(), 0, "Function must be empty");
|
||||
assert_eq!(func.dfg.num_insts(), 0, "Function must be empty");
|
||||
debug_assert_eq!(func.dfg.num_ebbs(), 0, "Function must be empty");
|
||||
debug_assert_eq!(func.dfg.num_insts(), 0, "Function must be empty");
|
||||
|
||||
// This clears the `ILBuilder`.
|
||||
let mut builder = FunctionBuilder::new(func, &mut self.il_builder);
|
||||
@@ -191,7 +191,7 @@ fn parse_function_body<FE: FuncEnvironment + ?Sized>(
|
||||
environ: &mut FE,
|
||||
) -> CtonResult {
|
||||
// The control stack is initialized with a single block representing the whole function.
|
||||
assert_eq!(state.control_stack.len(), 1, "State not initialized");
|
||||
debug_assert_eq!(state.control_stack.len(), 1, "State not initialized");
|
||||
|
||||
// Keep going until the final `End` operator which pops the outermost block.
|
||||
while !state.control_stack.is_empty() {
|
||||
|
||||
Reference in New Issue
Block a user