diff --git a/cranelift/test-all.sh b/cranelift/test-all.sh index 35d68920d8..4941e04610 100755 --- a/cranelift/test-all.sh +++ b/cranelift/test-all.sh @@ -27,7 +27,7 @@ function banner { # Run rustfmt if we have it. banner "Rust formatting" if type rustfmt > /dev/null; then - if ! "$topdir/format-all.sh" --write-mode=check ; then + if ! "$topdir/format-all.sh" --check ; then echo "Formatting diffs detected! Run \"cargo fmt --all\" to correct." exit 1 fi diff --git a/lib/codegen/src/bforest/path.rs b/lib/codegen/src/bforest/path.rs index e845c6a9a2..c0e4a089af 100644 --- a/lib/codegen/src/bforest/path.rs +++ b/lib/codegen/src/bforest/path.rs @@ -621,7 +621,8 @@ impl Path { /// Update the critical key for the right sibling node at `level`. fn update_right_crit_key(&self, level: usize, crit_key: F::Key, pool: &mut NodePool) { - let bl = self.right_sibling_branch_level(level, pool) + let bl = self + .right_sibling_branch_level(level, pool) .expect("No right sibling exists"); match pool[self.node[bl]] { NodeData::Inner { ref mut keys, .. } => { diff --git a/lib/codegen/src/binemit/relaxation.rs b/lib/codegen/src/binemit/relaxation.rs index 0466e8a615..f23d3218f8 100644 --- a/lib/codegen/src/binemit/relaxation.rs +++ b/lib/codegen/src/binemit/relaxation.rs @@ -151,7 +151,8 @@ fn relax_branch( // Pick the first encoding that can handle the branch range. let dfg = &cur.func.dfg; let ctrl_type = dfg.ctrl_typevar(inst); - if let Some(enc) = isa.legal_encodings(cur.func, &dfg[inst], ctrl_type) + if let Some(enc) = isa + .legal_encodings(cur.func, &dfg[inst], ctrl_type) .find(|&enc| { let range = encinfo.branch_range(enc).expect("Branch with no range"); if !range.contains(offset, dest_offset) { diff --git a/lib/codegen/src/binemit/shrink.rs b/lib/codegen/src/binemit/shrink.rs index dc718e97ca..63b0329278 100644 --- a/lib/codegen/src/binemit/shrink.rs +++ b/lib/codegen/src/binemit/shrink.rs @@ -38,14 +38,15 @@ pub fn shrink_instructions(func: &mut Function, isa: &TargetIsa) { | InstructionData::RegSpill { .. } => { divert.apply(&func.dfg[inst]); continue; - } - _ => () + } + _ => (), } let ctrl_type = func.dfg.ctrl_typevar(inst); // Pick the last encoding with constraints that are satisfied. - let best_enc = isa.legal_encodings(func, &func.dfg[inst], ctrl_type) + let best_enc = isa + .legal_encodings(func, &func.dfg[inst], ctrl_type) .filter(|e| encinfo.constraints[e.recipe()].satisfied(inst, &divert, &func)) .min_by_key(|e| encinfo.bytes(*e)) .unwrap(); diff --git a/lib/codegen/src/cursor.rs b/lib/codegen/src/cursor.rs index 9fb559a79b..6d475331ad 100644 --- a/lib/codegen/src/cursor.rs +++ b/lib/codegen/src/cursor.rs @@ -752,7 +752,8 @@ impl<'c, 'f> ir::InstInserterBase<'c> for &'c mut EncCursor<'f> { // Assign an encoding. // XXX Is there a way to describe this error to the user? #[cfg_attr(feature = "cargo-clippy", allow(match_wild_err_arm))] - match self.isa + match self + .isa .encode(&self.func, &self.func.dfg[inst], ctrl_typevar) { Ok(e) => self.func.encodings[inst] = e, diff --git a/lib/codegen/src/dominator_tree.rs b/lib/codegen/src/dominator_tree.rs index d8789fecb9..bd75a16a9f 100644 --- a/lib/codegen/src/dominator_tree.rs +++ b/lib/codegen/src/dominator_tree.rs @@ -422,7 +422,8 @@ impl DominatorTree { // Get an iterator with just the reachable, already visited predecessors to `ebb`. // Note that during the first pass, `rpo_number` is 1 for reachable blocks that haven't // been visited yet, 0 for unreachable blocks. 
- let mut reachable_preds = cfg.pred_iter(ebb) + let mut reachable_preds = cfg + .pred_iter(ebb) .filter(|&BasicBlock { ebb: pred, .. }| self.nodes[pred].rpo_number > 1); // The RPO must visit at least one predecessor before this node. @@ -453,7 +454,8 @@ impl DominatorTree { } // We use the RPO comparison on the postorder list so we invert the operands of the // comparison - let old_ebb_postorder_index = self.postorder + let old_ebb_postorder_index = self + .postorder .as_slice() .binary_search_by(|probe| self.rpo_cmp_ebb(old_ebb, *probe)) .expect("the old ebb is not declared to the dominator tree"); diff --git a/lib/codegen/src/ir/extfunc.rs b/lib/codegen/src/ir/extfunc.rs index 5b7ac8fc5e..5e002b6a0e 100644 --- a/lib/codegen/src/ir/extfunc.rs +++ b/lib/codegen/src/ir/extfunc.rs @@ -62,7 +62,8 @@ impl Signature { /// Even if there are no stack arguments, this will set `params` to `Some(0)` instead /// of `None`. This indicates that the signature has been legalized. pub fn compute_argument_bytes(&mut self) { - let bytes = self.params + let bytes = self + .params .iter() .filter_map(|arg| match arg.location { ArgumentLoc::Stack(offset) if offset >= 0 => { diff --git a/lib/codegen/src/ir/layout.rs b/lib/codegen/src/ir/layout.rs index 1e1110a3d8..9f03969061 100644 --- a/lib/codegen/src/ir/layout.rs +++ b/lib/codegen/src/ir/layout.rs @@ -182,7 +182,8 @@ impl Layout { /// Assign a valid sequence number to `inst` such that the numbers are still monotonic. This may /// require renumbering. fn assign_inst_seq(&mut self, inst: Inst) { - let ebb = self.inst_ebb(inst) + let ebb = self + .inst_ebb(inst) .expect("inst must be inserted before assigning an seq"); // Get the sequence number immediately before `inst`. @@ -569,7 +570,8 @@ impl Layout { /// Insert `inst` before the instruction `before` in the same EBB. pub fn insert_inst(&mut self, inst: Inst, before: Inst) { debug_assert_eq!(self.inst_ebb(inst), None); - let ebb = self.inst_ebb(before) + let ebb = self + .inst_ebb(before) .expect("Instruction before insertion point not in the layout"); let after = self.insts[before].prev; { @@ -643,7 +645,8 @@ impl Layout { /// i4 /// ``` pub fn split_ebb(&mut self, new_ebb: Ebb, before: Inst) { - let old_ebb = self.inst_ebb(before) + let old_ebb = self + .inst_ebb(before) .expect("The `before` instruction must be in the layout"); debug_assert!(!self.is_ebb_inserted(new_ebb)); diff --git a/lib/codegen/src/ir/stackslot.rs b/lib/codegen/src/ir/stackslot.rs index fd5c02314b..41913ac0f0 100644 --- a/lib/codegen/src/ir/stackslot.rs +++ b/lib/codegen/src/ir/stackslot.rs @@ -309,7 +309,8 @@ impl StackSlots { let size = spill_size(ty); // Find the smallest existing slot that can fit the type. - if let Some(&ss) = self.emergency + if let Some(&ss) = self + .emergency .iter() .filter(|&&ss| self[ss].size >= size && !in_use.contains(&ss.into())) .min_by_key(|&&ss| self[ss].size) @@ -318,7 +319,8 @@ impl StackSlots { } // Alternatively, use the largest available slot and make it larger. 
- if let Some(&ss) = self.emergency + if let Some(&ss) = self + .emergency .iter() .filter(|&&ss| !in_use.contains(&ss.into())) .max_by_key(|&&ss| self[ss].size) diff --git a/lib/codegen/src/isa/x86/enc_tables.rs b/lib/codegen/src/isa/x86/enc_tables.rs index 4c964ad802..03c33f52c8 100644 --- a/lib/codegen/src/isa/x86/enc_tables.rs +++ b/lib/codegen/src/isa/x86/enc_tables.rs @@ -345,7 +345,8 @@ fn expand_fcvt_to_sint( let mut pos = FuncCursor::new(func).after_inst(inst); pos.use_srcloc(inst); - let is_done = pos.ins() + let is_done = pos + .ins() .icmp_imm(IntCC::NotEqual, result, 1 << (ty.lane_bits() - 1)); pos.ins().brnz(is_done, done, &[]); diff --git a/lib/codegen/src/iterators.rs b/lib/codegen/src/iterators.rs index 220b90da0c..71cb8cbdf6 100644 --- a/lib/codegen/src/iterators.rs +++ b/lib/codegen/src/iterators.rs @@ -13,11 +13,7 @@ pub trait IteratorExtras: Iterator { } } -impl<T> IteratorExtras for T -where - T: Iterator, -{ -} +impl<T> IteratorExtras for T where T: Iterator {} /// Adjacent pairs iterator returned by `adjacent_pairs()`. /// diff --git a/lib/codegen/src/legalizer/boundary.rs b/lib/codegen/src/legalizer/boundary.rs index dfc4629566..d89a455454 100644 --- a/lib/codegen/src/legalizer/boundary.rs +++ b/lib/codegen/src/legalizer/boundary.rs @@ -187,7 +187,8 @@ fn legalize_inst_results<ResType>(pos: &mut FuncCursor, mut get_abi_type: ResTyp where ResType: FnMut(&Function, usize) -> AbiParam, { - let call = pos.current_inst() + let call = pos + .current_inst() .expect("Cursor must point to a call instruction"); // We theoretically allow for call instructions that return a number of fixed results before @@ -419,7 +420,8 @@ fn legalize_inst_arguments<ArgType>( ) where ArgType: FnMut(&Function, usize) -> AbiParam, { - let inst = pos.current_inst() + let inst = pos + .current_inst() .expect("Cursor must point to a call instruction"); // Lift the value list out of the call instruction so we modify it. @@ -550,7 +552,8 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph // Count the special-purpose return values (`link`, `sret`, and `vmctx`) that were appended to // the legalized signature. - let special_args = func.signature + let special_args = func + .signature .returns .iter() .rev() @@ -591,7 +594,8 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph // A `link`/`sret`/`vmctx` return value can only appear in a signature that has a // unique matching argument. They are appended at the end, so search the signature from // the end. - let idx = pos.func + let idx = pos + .func .signature .params .iter() @@ -599,7 +603,8 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph .expect("No matching special purpose argument."); // Get the corresponding entry block value and add it to the return instruction's // arguments. - let val = pos.func + let val = pos + .func .dfg .ebb_params(pos.func.layout.entry_block().unwrap())[idx]; debug_assert_eq!(pos.func.dfg.value_type(val), arg.value_type); @@ -641,9 +646,11 @@ fn spill_entry_params(func: &mut Function, entry: Ebb) { /// or calls between writing the stack slots and the call instruction. Writing the slots earlier /// could help reduce register pressure before the call.
fn spill_call_arguments(pos: &mut FuncCursor) -> bool { - let inst = pos.current_inst() + let inst = pos + .current_inst() .expect("Cursor must point to a call instruction"); - let sig_ref = pos.func + let sig_ref = pos + .func .dfg .call_signature(inst) .expect("Call instruction expected."); diff --git a/lib/codegen/src/legalizer/globalvalue.rs b/lib/codegen/src/legalizer/globalvalue.rs index c6f1a1d930..733d10fa92 100644 --- a/lib/codegen/src/legalizer/globalvalue.rs +++ b/lib/codegen/src/legalizer/globalvalue.rs @@ -37,7 +37,8 @@ pub fn expand_global_value( /// Expand a `global_value` instruction for a vmctx global. fn vmctx_addr(inst: ir::Inst, func: &mut ir::Function, offset: i64) { // Get the value representing the `vmctx` argument. - let vmctx = func.special_param(ir::ArgumentPurpose::VMContext) + let vmctx = func + .special_param(ir::ArgumentPurpose::VMContext) .expect("Missing vmctx parameter"); // Simply replace the `global_value` instruction with an `iadd_imm`, reusing the result value. diff --git a/lib/codegen/src/legalizer/heap.rs b/lib/codegen/src/legalizer/heap.rs index e4a04a0bce..c23b5d73b9 100644 --- a/lib/codegen/src/legalizer/heap.rs +++ b/lib/codegen/src/legalizer/heap.rs @@ -61,20 +61,23 @@ fn dynamic_addr( let oob; if access_size == 1 { // `offset > bound - 1` is the same as `offset >= bound`. - oob = pos.ins() + oob = pos + .ins() .icmp(IntCC::UnsignedGreaterThanOrEqual, offset, bound); } else if access_size <= min_size { // We know that bound >= min_size, so here we can compare `offset > bound - access_size` without // wrapping. let adj_bound = pos.ins().iadd_imm(bound, -access_size); - oob = pos.ins() + oob = pos + .ins() .icmp(IntCC::UnsignedGreaterThan, offset, adj_bound); } else { // We need an overflow check for the adjusted offset. let access_size_val = pos.ins().iconst(offset_ty, access_size); let (adj_offset, overflow) = pos.ins().iadd_cout(offset, access_size_val); pos.ins().trapnz(overflow, ir::TrapCode::HeapOutOfBounds); - oob = pos.ins() + oob = pos + .ins() .icmp(IntCC::UnsignedGreaterThan, adj_offset, bound); } pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds); diff --git a/lib/codegen/src/legalizer/split.rs b/lib/codegen/src/legalizer/split.rs index a7cd37dc60..1cce097f26 100644 --- a/lib/codegen/src/legalizer/split.rs +++ b/lib/codegen/src/legalizer/split.rs @@ -139,7 +139,8 @@ fn split_any( .expect("Branches must have value lists."); let num_args = args.len(&pos.func.dfg.value_lists); // Get the old value passed to the EBB argument we're repairing. - let old_arg = args.get(fixed_args + repair.num, &pos.func.dfg.value_lists) + let old_arg = args + .get(fixed_args + repair.num, &pos.func.dfg.value_lists) .expect("Too few branch arguments"); // It's possible that the CFG's predecessor list has duplicates. Detect them here. @@ -153,13 +154,15 @@ fn split_any( let (lo, hi) = split_value(pos, old_arg, repair.concat, &mut repairs); // The `lo` part replaces the original argument. - *args.get_mut(fixed_args + repair.num, &mut pos.func.dfg.value_lists) + *args + .get_mut(fixed_args + repair.num, &mut pos.func.dfg.value_lists) .unwrap() = lo; // The `hi` part goes at the end. Since multiple repairs may have been scheduled to the // same EBB, there could be multiple arguments missing. if num_args > fixed_args + repair.hi_num { - *args.get_mut(fixed_args + repair.hi_num, &mut pos.func.dfg.value_lists) + *args + .get_mut(fixed_args + repair.hi_num, &mut pos.func.dfg.value_lists) .unwrap() = hi; } else { // We need to append one or more arguments. 
If we're adding more than one argument, diff --git a/lib/codegen/src/postopt.rs b/lib/codegen/src/postopt.rs index b850b5ac08..178f143c6f 100644 --- a/lib/codegen/src/postopt.rs +++ b/lib/codegen/src/postopt.rs @@ -334,7 +334,8 @@ pub fn do_postopt(func: &mut Function, isa: &TargetIsa) { optimize_cpu_flags(&mut pos, inst, last_flags_clobber, isa); // Track the most recent seen instruction that clobbers the flags. - if let Some(constraints) = isa.encoding_info() + if let Some(constraints) = isa + .encoding_info() .operand_constraints(pos.func.encodings[inst]) { if constraints.clobbers_flags { diff --git a/lib/codegen/src/regalloc/coalescing.rs b/lib/codegen/src/regalloc/coalescing.rs index 687cb84c71..b9415524a0 100644 --- a/lib/codegen/src/regalloc/coalescing.rs +++ b/lib/codegen/src/regalloc/coalescing.rs @@ -307,7 +307,8 @@ impl<'a> Context<'a> { // Create a live range for the new value. // TODO: Should we handle ghost values? let affinity = Affinity::new( - &self.encinfo + &self + .encinfo .operand_constraints(pos.func.encodings[inst]) .expect("Bad copy encoding") .outs[0], @@ -352,7 +353,8 @@ impl<'a> Context<'a> { // Create a live range for the new value. // TODO: Handle affinity for ghost values. let affinity = Affinity::new( - &self.encinfo + &self + .encinfo .operand_constraints(pos.func.encodings[inst]) .expect("Bad copy encoding") .outs[0], @@ -419,7 +421,8 @@ impl<'a> Context<'a> { let node = Node::value(value, 0, self.func); // Push this value and get the nearest dominating def back. - let parent = match self.forest + let parent = match self + .forest .push_node(node, self.func, self.domtree, self.preorder) { None => continue, diff --git a/lib/codegen/src/regalloc/coloring.rs b/lib/codegen/src/regalloc/coloring.rs index 8bb29c62cc..b824246d5b 100644 --- a/lib/codegen/src/regalloc/coloring.rs +++ b/lib/codegen/src/regalloc/coloring.rs @@ -527,7 +527,8 @@ impl<'a> Context<'a> { /// all values used by the instruction. fn program_complete_input_constraints(&mut self) { let inst = self.cur.current_inst().expect("Not on an instruction"); - let constraints = self.encinfo + let constraints = self + .encinfo .operand_constraints(self.cur.func.encodings[inst]) .expect("Current instruction not encoded") .ins; @@ -643,7 +644,8 @@ impl<'a> Context<'a> { Pred: FnMut(&LiveRange, LiveRangeContext) -> bool, { for rdiv in self.divert.all() { - let lr = self.liveness + let lr = self + .liveness .get(rdiv.value) .expect("Missing live range for diverted register"); if pred(lr, self.liveness.context(&self.cur.func.layout)) { @@ -942,7 +944,8 @@ impl<'a> Context<'a> { .. } => { debug_assert_eq!(slot[to_slot].expand(), None, "Overwriting slot in use"); - let ss = self.cur + let ss = self + .cur .func .stack_slots .get_emergency_slot(self.cur.func.dfg.value_type(value), &slot[0..spills]); diff --git a/lib/codegen/src/regalloc/live_value_tracker.rs b/lib/codegen/src/regalloc/live_value_tracker.rs index 2665c245c1..9fe6e2615a 100644 --- a/lib/codegen/src/regalloc/live_value_tracker.rs +++ b/lib/codegen/src/regalloc/live_value_tracker.rs @@ -187,7 +187,8 @@ impl LiveValueTracker { // If the immediate dominator exits, we must have a stored list for it. This is a // requirement to the order EBBs are visited: All dominators must have been processed // before the current EBB. 
- let idom_live_list = self.idom_sets + let idom_live_list = self + .idom_sets .get(&idom) .expect("No stored live set for dominator"); let ctx = liveness.context(layout); diff --git a/lib/codegen/src/regalloc/liveness.rs b/lib/codegen/src/regalloc/liveness.rs index d169acc459..6e71345b13 100644 --- a/lib/codegen/src/regalloc/liveness.rs +++ b/lib/codegen/src/regalloc/liveness.rs @@ -340,7 +340,8 @@ impl Liveness { where PP: Into, { - let old = self.ranges + let old = self + .ranges .insert(LiveRange::new(value, def.into(), affinity)); debug_assert!(old.is_none(), "{} already has a live range", value); } diff --git a/lib/codegen/src/regalloc/pressure.rs b/lib/codegen/src/regalloc/pressure.rs index ef8ec425aa..a70407bad1 100644 --- a/lib/codegen/src/regalloc/pressure.rs +++ b/lib/codegen/src/regalloc/pressure.rs @@ -114,7 +114,8 @@ impl Pressure { } // Compute per-class limits from `usable`. - for (toprc, rc) in p.toprc + for (toprc, rc) in p + .toprc .iter_mut() .take_while(|t| t.num_toprcs > 0) .zip(reginfo.classes) diff --git a/lib/codegen/src/regalloc/reload.rs b/lib/codegen/src/regalloc/reload.rs index b8ba2166ca..a712616af8 100644 --- a/lib/codegen/src/regalloc/reload.rs +++ b/lib/codegen/src/regalloc/reload.rs @@ -166,7 +166,8 @@ impl<'a> Context<'a> { if arg.affinity.is_stack() { // An incoming register parameter was spilled. Replace the parameter value // with a temporary register value that is immediately spilled. - let reg = self.cur + let reg = self + .cur .func .dfg .replace_ebb_param(arg.value, abi.value_type); @@ -199,7 +200,8 @@ impl<'a> Context<'a> { self.cur.use_srcloc(inst); // Get the operand constraints for `inst` that we are trying to satisfy. - let constraints = self.encinfo + let constraints = self + .encinfo .operand_constraints(encoding) .expect("Missing instruction encoding"); @@ -276,7 +278,8 @@ impl<'a> Context<'a> { // Same thing for spilled call return values. let retvals = &defs[constraints.outs.len()..]; if !retvals.is_empty() { - let sig = self.cur + let sig = self + .cur .func .dfg .call_signature(inst) diff --git a/lib/codegen/src/regalloc/spilling.rs b/lib/codegen/src/regalloc/spilling.rs index 65d43e7cb3..457b07c663 100644 --- a/lib/codegen/src/regalloc/spilling.rs +++ b/lib/codegen/src/regalloc/spilling.rs @@ -125,7 +125,8 @@ impl<'a> Context<'a> { self.process_spills(tracker); while let Some(inst) = self.cur.next_inst() { - if let Some(constraints) = self.encinfo + if let Some(constraints) = self + .encinfo .operand_constraints(self.cur.func.encodings[inst]) { self.visit_inst(inst, ebb, constraints, tracker); @@ -494,7 +495,8 @@ impl<'a> Context<'a> { } // Assign a spill slot for the whole virtual register. - let ss = self.cur + let ss = self + .cur .func .stack_slots .make_spill_slot(self.cur.func.dfg.value_type(value)); diff --git a/lib/codegen/src/regalloc/virtregs.rs b/lib/codegen/src/regalloc/virtregs.rs index ab2b749f7c..0e4bcdf5d0 100644 --- a/lib/codegen/src/regalloc/virtregs.rs +++ b/lib/codegen/src/regalloc/virtregs.rs @@ -152,7 +152,8 @@ impl VirtRegs { }); // Determine the insertion position for `single`. 
- let index = match self.values(vreg) + let index = match self + .values(vreg) .binary_search_by(|&v| preorder.pre_cmp_def(v, single, func)) { Ok(_) => panic!("{} already in {}", single, vreg), diff --git a/lib/codegen/src/stack_layout.rs b/lib/codegen/src/stack_layout.rs index eced3ada0a..9a34e06374 100644 --- a/lib/codegen/src/stack_layout.rs +++ b/lib/codegen/src/stack_layout.rs @@ -49,7 +49,8 @@ pub fn layout_stack(frame: &mut StackSlots, alignment: StackSize) -> CodegenResu incoming_min = min(incoming_min, slot.offset.unwrap()); } StackSlotKind::OutgoingArg => { - let offset = slot.offset + let offset = slot + .offset .unwrap() .checked_add(slot.size as StackOffset) .ok_or(CodegenError::ImplLimitExceeded)?; diff --git a/lib/codegen/src/verifier/flags.rs b/lib/codegen/src/verifier/flags.rs index 04615bacd7..3895cd63a0 100644 --- a/lib/codegen/src/verifier/flags.rs +++ b/lib/codegen/src/verifier/flags.rs @@ -98,7 +98,8 @@ impl<'a> FlagsVerifier<'a> { } // Does the instruction have an encoding that clobbers the CPU flags? - if self.encinfo + if self + .encinfo .as_ref() .and_then(|ei| ei.operand_constraints(self.func.encodings[inst])) .map_or(false, |c| c.clobbers_flags) && live_val.is_some() diff --git a/lib/codegen/src/verifier/locations.rs b/lib/codegen/src/verifier/locations.rs index 97dfbdc6fb..cc393bb46f 100644 --- a/lib/codegen/src/verifier/locations.rs +++ b/lib/codegen/src/verifier/locations.rs @@ -87,7 +87,8 @@ impl<'a> LocationVerifier<'a> { enc: isa::Encoding, divert: &RegDiversions, ) -> VerifierResult<()> { - let constraints = self.encinfo + let constraints = self + .encinfo .operand_constraints(enc) .expect("check_enc_constraints requires a legal encoding"); diff --git a/lib/codegen/src/verifier/mod.rs b/lib/codegen/src/verifier/mod.rs index a25be6bcec..e0ff926bbc 100644 --- a/lib/codegen/src/verifier/mod.rs +++ b/lib/codegen/src/verifier/mod.rs @@ -191,7 +191,8 @@ impl<'a> Verifier<'a> { } if let ir::GlobalValueData::VMContext { .. } = self.func.global_values[cur] { - if self.func + if self + .func .special_param(ir::ArgumentPurpose::VMContext) .is_none() { @@ -253,7 +254,8 @@ impl<'a> Verifier<'a> { let fixed_results = inst_data.opcode().constraints().fixed_results(); // var_results is 0 if we aren't a call instruction - let var_results = dfg.call_signature(inst) + let var_results = dfg + .call_signature(inst) .map_or(0, |sig| dfg.signatures[sig].returns.len()); let total_results = fixed_results + var_results; @@ -498,7 +500,8 @@ impl<'a> Verifier<'a> { } // Defining instruction dominates the instruction that uses the value. if is_reachable { - if !self.expected_domtree + if !self + .expected_domtree .dominates(def_inst, loc_inst, &self.func.layout) { return err!(loc_inst, "uses value from non-dominating {}", def_inst); @@ -529,7 +532,8 @@ impl<'a> Verifier<'a> { } // The defining EBB dominates the instruction using this value. 
if is_reachable - && !self.expected_domtree + && !self + .expected_domtree .dominates(ebb, loc_inst, &self.func.layout) { return err!(loc_inst, "uses value arg from non-dominating {}", ebb); @@ -604,7 +608,8 @@ impl<'a> Verifier<'a> { } // We verify rpo_cmp on pairs of adjacent ebbs in the postorder for (&prev_ebb, &next_ebb) in domtree.cfg_postorder().iter().adjacent_pairs() { - if self.expected_domtree + if self + .expected_domtree .rpo_cmp(prev_ebb, next_ebb, &self.func.layout) != Ordering::Greater { return err!( @@ -743,7 +748,8 @@ impl<'a> Verifier<'a> { fn typecheck_variable_args(&self, inst: Inst) -> VerifierResult<()> { match self.func.dfg.analyze_branch(inst) { BranchInfo::SingleDest(ebb, _) => { - let iter = self.func + let iter = self + .func .dfg .ebb_params(ebb) .iter() @@ -1038,11 +1044,12 @@ impl<'a> Verifier<'a> { let encoding = self.func.encodings[inst]; if encoding.is_legal() { - let mut encodings = isa.legal_encodings( - &self.func, - &self.func.dfg[inst], - self.func.dfg.ctrl_typevar(inst), - ).peekable(); + let mut encodings = + isa.legal_encodings( + &self.func, + &self.func.dfg[inst], + self.func.dfg.ctrl_typevar(inst), + ).peekable(); if encodings.peek().is_none() { return err!( diff --git a/lib/filetests/src/runner.rs b/lib/filetests/src/runner.rs index 80b2e47d15..08ee75430b 100644 --- a/lib/filetests/src/runner.rs +++ b/lib/filetests/src/runner.rs @@ -282,7 +282,8 @@ impl TestRunner { /// Print out a report of slow tests. fn report_slow_tests(&self) { // Collect runtimes of succeeded tests. - let mut times = self.tests + let mut times = self + .tests .iter() .filter_map(|entry| match *entry { QueueEntry { diff --git a/lib/filetests/src/test_binemit.rs b/lib/filetests/src/test_binemit.rs index 0dd2ea6fcf..ef8e93936a 100644 --- a/lib/filetests/src/test_binemit.rs +++ b/lib/filetests/src/test_binemit.rs @@ -116,7 +116,8 @@ impl SubTest for TestBinEmit { let mut func = func.into_owned(); // Fix the stack frame layout so we can test spill/fill encodings. - let min_offset = func.stack_slots + let min_offset = func + .stack_slots .values() .map(|slot| slot.offset.unwrap()) .min(); @@ -133,14 +134,12 @@ impl SubTest for TestBinEmit { // Find an encoding that satisfies both immediate field and register // constraints. if let Some(enc) = { - let mut legal_encodings = isa.legal_encodings( - &func, - &func.dfg[inst], - func.dfg.ctrl_typevar(inst), - ).filter(|e| { - let recipe_constraints = &encinfo.constraints[e.recipe()]; - recipe_constraints.satisfied(inst, &divert, &func) - }); + let mut legal_encodings = isa + .legal_encodings(&func, &func.dfg[inst], func.dfg.ctrl_typevar(inst)) + .filter(|e| { + let recipe_constraints = &encinfo.constraints[e.recipe()]; + recipe_constraints.satisfied(inst, &divert, &func) + }); if opt_level == OptLevel::Best { // Get the smallest legal encoding @@ -207,7 +206,8 @@ impl SubTest for TestBinEmit { // Send legal encodings into the emitter. if enc.is_legal() { // Generate a better error message if output locations are not specified. - if let Some(&v) = func.dfg + if let Some(&v) = func + .dfg .inst_results(inst) .iter() .find(|&&v| !func.locations[v].is_assigned()) @@ -236,7 +236,8 @@ impl SubTest for TestBinEmit { if !enc.is_legal() { // A possible cause of an unencoded instruction is a missing location for // one of the input operands. 
- if let Some(&v) = func.dfg + if let Some(&v) = func + .dfg .inst_args(inst) .iter() .find(|&&v| !func.locations[v].is_assigned()) @@ -249,11 +250,9 @@ impl SubTest for TestBinEmit { } // Do any encodings exist? - let encodings = isa.legal_encodings( - &func, - &func.dfg[inst], - func.dfg.ctrl_typevar(inst), - ).map(|e| encinfo.display(e)) + let encodings = isa + .legal_encodings(&func, &func.dfg[inst], func.dfg.ctrl_typevar(inst)) + .map(|e| encinfo.display(e)) .collect::<Vec<_>>(); if encodings.is_empty() { diff --git a/lib/filetests/src/test_domtree.rs b/lib/filetests/src/test_domtree.rs index 87df48a7a2..93a1b74a9d 100644 --- a/lib/filetests/src/test_domtree.rs +++ b/lib/filetests/src/test_domtree.rs @@ -93,7 +93,8 @@ impl SubTest for TestDomtree { // Now we know that everything in `expected` is consistent with `domtree`. // All other EBB's should be either unreachable or the entry block. - for ebb in func.layout + for ebb in func + .layout .ebbs() .skip(1) .filter(|ebb| !expected.contains_key(ebb)) diff --git a/lib/frontend/src/frontend.rs b/lib/frontend/src/frontend.rs index bccf6cc533..16934282a8 100644 --- a/lib/frontend/src/frontend.rs +++ b/lib/frontend/src/frontend.rs @@ -167,7 +167,8 @@ where // capable of having the same successor appear // multiple times, so we must deduplicate. let mut unique = EntitySet::<Ebb>::new(); - for dest_ebb in self.builder + for dest_ebb in self + .builder .func .jump_tables .get(table) @@ -544,7 +545,8 @@ where Some(entry) => self.position.ebb.unwrap() == entry, }; !is_entry && self.func_ctx.ssa.is_sealed(self.position.ebb.unwrap()) - && !self.func_ctx + && !self + .func_ctx .ssa .has_any_predecessors(self.position.ebb.unwrap()) } diff --git a/lib/frontend/src/ssa.rs b/lib/frontend/src/ssa.rs index 09040bf6fb..f5a3c4bd8a 100644 --- a/lib/frontend/src/ssa.rs +++ b/lib/frontend/src/ssa.rs @@ -103,7 +103,8 @@ impl BlockData { BlockData::EbbHeader(ref mut data) => { // This a linear complexity operation but the number of predecessors is low // in all non-pathological cases - let pred: usize = data.predecessors + let pred: usize = data + .predecessors .iter() .position(|&PredBlock { branch, .. }| branch == inst) .expect("the predecessor you are trying to remove is not declared"); @@ -597,7 +598,8 @@ where } in &mut preds { // We already did a full `use_var` above, so we can do just the fast path.
- let pred_val = self.variables + let pred_val = self + .variables .get(temp_arg_var) .unwrap() .get(*pred_block) diff --git a/lib/module/src/module.rs b/lib/module/src/module.rs index 10008caa51..d5c616db66 100644 --- a/lib/module/src/module.rs +++ b/lib/module/src/module.rs @@ -526,7 +526,8 @@ where "imported data cannot contain references" ); self.backend.write_data_funcaddr( - &mut info.compiled + &mut info + .compiled .as_mut() .expect("`data` must refer to a defined data object"), offset, @@ -549,7 +550,8 @@ where "imported data cannot contain references" ); self.backend.write_data_dataaddr( - &mut info.compiled + &mut info + .compiled .as_mut() .expect("`data` must refer to a defined data object"), offset, diff --git a/lib/reader/src/parser.rs b/lib/reader/src/parser.rs index bef37a86fd..2c73d099d9 100644 --- a/lib/reader/src/parser.rs +++ b/lib/reader/src/parser.rs @@ -644,10 +644,12 @@ impl<'a> Parser<'a> { if let Some(Token::Name(name)) = self.token() { self.consume(); match isa { - Some(isa) => isa.register_info() + Some(isa) => isa + .register_info() .parse_regunit(name) .ok_or_else(|| self.error("invalid register name")), - None => name.parse() + None => name + .parse() .map_err(|_| self.error("invalid register number")), } } else { @@ -1032,7 +1034,8 @@ impl<'a> Parser<'a> { self.parse_jump_table_decl() .and_then(|(jt, dat)| ctx.add_jt(jt, dat, self.loc)) } - Some(Token::Identifier("stack_limit")) => self.parse_stack_limit_decl() + Some(Token::Identifier("stack_limit")) => self + .parse_stack_limit_decl() .and_then(|gv| ctx.set_stack_limit(gv, self.loc)), // More to come.. _ => return Ok(()), @@ -1053,7 +1056,8 @@ impl<'a> Parser<'a> { let kind = self.match_enum("expected stack slot kind")?; // stack-slot-decl ::= StackSlot(ss) "=" stack-slot-kind * Bytes {"," stack-slot-flag} - let bytes: i64 = self.match_imm64("expected byte-size in stack_slot decl")? + let bytes: i64 = self + .match_imm64("expected byte-size in stack_slot decl")? 
.into(); if bytes < 0 { return err!(self.loc, "negative stack slot size"); } @@ -1708,7 +1712,8 @@ impl<'a> Parser<'a> { } if let Some(result_locations) = result_locations { - for (&value, loc) in ctx.function + for (&value, loc) in ctx + .function .dfg .inst_results(inst) .iter() diff --git a/lib/simplejit/src/backend.rs b/lib/simplejit/src/backend.rs index e78c9d1124..0ae65a8a99 100644 --- a/lib/simplejit/src/backend.rs +++ b/lib/simplejit/src/backend.rs @@ -122,7 +122,8 @@ impl<'simple_jit_backend> Backend for SimpleJITBackend { code_size: u32, ) -> ModuleResult { let size = code_size as usize; - let ptr = self.code_memory + let ptr = self + .code_memory .allocate(size) .expect("TODO: handle OOM etc."); let mut reloc_sink = SimpleJITRelocSink::new(); @@ -155,10 +156,12 @@ impl<'simple_jit_backend> Backend for SimpleJITBackend { let size = init.size(); let storage = match writable { - Writability::Readonly => self.writable_memory + Writability::Readonly => self + .writable_memory .allocate(size) .expect("TODO: handle OOM etc."), - Writability::Writable => self.writable_memory + Writability::Writable => self + .writable_memory .allocate(size) .expect("TODO: handle OOM etc."), }; diff --git a/lib/wasm/src/environ/dummy.rs b/lib/wasm/src/environ/dummy.rs index d13fd4367d..68e7e2f556 100644 --- a/lib/wasm/src/environ/dummy.rs +++ b/lib/wasm/src/environ/dummy.rs @@ -210,7 +210,8 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ call_args: &[ir::Value], ) -> WasmResult<ir::Inst> { // Pass the current function's vmctx parameter on to the callee. - let vmctx = pos.func + let vmctx = pos + .func .special_param(ir::ArgumentPurpose::VMContext) .expect("Missing vmctx parameter"); @@ -236,7 +237,8 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ args.extend(call_args.iter().cloned(), &mut pos.func.dfg.value_lists); args.push(vmctx, &mut pos.func.dfg.value_lists); - Ok(pos.ins() + Ok(pos + .ins() .CallIndirect(ir::Opcode::CallIndirect, VOID, sig_ref, args) .0) } @@ -249,7 +251,8 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ call_args: &[ir::Value], ) -> WasmResult<ir::Inst> { // Pass the current function's vmctx parameter on to the callee. - let vmctx = pos.func + let vmctx = pos + .func .special_param(ir::ArgumentPurpose::VMContext) .expect("Missing vmctx parameter"); diff --git a/lib/wasm/src/func_translator.rs b/lib/wasm/src/func_translator.rs index 01481e9b8f..8f764c102a 100644 --- a/lib/wasm/src/func_translator.rs +++ b/lib/wasm/src/func_translator.rs @@ -248,10 +248,8 @@ mod tests { // ) const BODY: [u8; 7] = [ 0x00, // local decl count - 0x20, - 0x00, // get_local 0 - 0x41, - 0x01, // i32.const 1 + 0x20, 0x00, // get_local 0 + 0x41, 0x01, // i32.const 1 0x6a, // i32.add 0x0b, // end ]; @@ -280,10 +278,8 @@ mod tests { // ) const BODY: [u8; 8] = [ 0x00, // local decl count - 0x20, - 0x00, // get_local 0 - 0x41, - 0x01, // i32.const 1 + 0x20, 0x00, // get_local 0 + 0x41, 0x01, // i32.const 1 0x6a, // i32.add 0x0f, // return 0x0b, // end ]; @@ -318,19 +314,13 @@ mod tests { // ) const BODY: [u8; 16] = [ 0x01, // 1 local decl. - 0x01, - 0x7f, // 1 i32 local. - 0x03, - 0x7f, // loop i32 - 0x20, - 0x00, // get_local 0 - 0x41, - 0x01, // i32.const 0 + 0x01, 0x7f, // 1 i32 local.
+ 0x03, 0x7f, // loop i32 + 0x20, 0x00, // get_local 0 + 0x41, 0x01, // i32.const 0 0x6a, // i32.add - 0x21, - 0x00, // set_local 0 - 0x0c, - 0x00, // br 0 + 0x21, 0x00, // set_local 0 + 0x0c, 0x00, // br 0 0x0b, // end 0x0b, // end ]; diff --git a/lib/wasm/src/sections_translator.rs b/lib/wasm/src/sections_translator.rs index 6bfc55372b..6fdeb38719 100644 --- a/lib/wasm/src/sections_translator.rs +++ b/lib/wasm/src/sections_translator.rs @@ -412,9 +412,11 @@ pub fn parse_code_section<'data>( } let mut reader = parser.create_binary_reader(); let size = reader.bytes_remaining(); - environ.define_function_body(reader - .read_bytes(size) - .map_err(WasmError::from_binary_reader_error)?)?; + environ.define_function_body( + reader + .read_bytes(size) + .map_err(WasmError::from_binary_reader_error)?, + )?; } Ok(()) } diff --git a/lib/wasm/src/state.rs b/lib/wasm/src/state.rs index c08c514d3a..ff7ced6b78 100644 --- a/lib/wasm/src/state.rs +++ b/lib/wasm/src/state.rs @@ -282,7 +282,8 @@ impl TranslationState { environ: &mut FE, ) -> GlobalVariable { let index = index as GlobalIndex; - *self.globals + *self + .globals .entry(index) .or_insert_with(|| environ.make_global(func, index)) } @@ -296,7 +297,8 @@ impl TranslationState { environ: &mut FE, ) -> ir::Heap { let index = index as MemoryIndex; - *self.heaps + *self + .heaps .entry(index) .or_insert_with(|| environ.make_heap(func, index)) }