diff --git a/cranelift/codegen/src/isa/x64/inst/emit.rs b/cranelift/codegen/src/isa/x64/inst/emit.rs
index b47e0ee7ff..26fd5a8947 100644
--- a/cranelift/codegen/src/isa/x64/inst/emit.rs
+++ b/cranelift/codegen/src/isa/x64/inst/emit.rs
@@ -1949,9 +1949,9 @@ pub(crate) fn emit(
             //
             // done:
 
-            assert!(src != tmp_gpr1);
-            assert!(src != tmp_gpr2);
-            assert!(tmp_gpr1 != tmp_gpr2);
+            assert_ne!(src, tmp_gpr1);
+            assert_ne!(src, tmp_gpr2);
+            assert_ne!(tmp_gpr1, tmp_gpr2);
 
             let handle_negative = sink.get_label();
             let done = sink.get_label();
@@ -2251,7 +2251,7 @@ pub(crate) fn emit(
             //
             // done:
 
-            assert!(tmp_xmm != src, "tmp_xmm clobbers src!");
+            assert_ne!(tmp_xmm, src, "tmp_xmm clobbers src!");
 
             let (sub_op, cast_op, cmp_op, trunc_op) = if *src_size == OperandSize::Size64 {
                 (
diff --git a/cranelift/codegen/src/isa/x64/lower.rs b/cranelift/codegen/src/isa/x64/lower.rs
index 573cc1da14..2d4a22933e 100644
--- a/cranelift/codegen/src/isa/x64/lower.rs
+++ b/cranelift/codegen/src/isa/x64/lower.rs
@@ -277,7 +277,7 @@ fn emit_vm_call<C: LowerCtx<I = Inst>>(
     abi.emit_stack_pre_adjust(ctx);
 
     let vm_context = if call_conv.extends_baldrdash() { 1 } else { 0 };
-    assert!(inputs.len() + vm_context == abi.num_args());
+    assert_eq!(inputs.len() + vm_context, abi.num_args());
 
     for (i, input) in inputs.iter().enumerate() {
         let arg_reg = input_to_reg(ctx, *input);
@@ -960,8 +960,8 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             Opcode::Call => {
                 let (extname, dist) = ctx.call_target(insn).unwrap();
                 let sig = ctx.call_sig(insn).unwrap();
-                assert!(inputs.len() == sig.params.len());
-                assert!(outputs.len() == sig.returns.len());
+                assert_eq!(inputs.len(), sig.params.len());
+                assert_eq!(outputs.len(), sig.returns.len());
                 (
                     X64ABICall::from_func(sig, &extname, dist, loc)?,
                     &inputs[..],
@@ -971,8 +971,8 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             Opcode::CallIndirect => {
                 let ptr = input_to_reg(ctx, inputs[0]);
                 let sig = ctx.call_sig(insn).unwrap();
-                assert!(inputs.len() - 1 == sig.params.len());
-                assert!(outputs.len() == sig.returns.len());
+                assert_eq!(inputs.len() - 1, sig.params.len());
+                assert_eq!(outputs.len(), sig.returns.len());
                 (X64ABICall::from_ptr(sig, ptr, loc, op)?, &inputs[1..])
             }
 
@@ -980,7 +980,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
             };
 
             abi.emit_stack_pre_adjust(ctx);
-            assert!(inputs.len() == abi.num_args());
+            assert_eq!(inputs.len(), abi.num_args());
             for (i, input) in inputs.iter().enumerate() {
                 let arg_reg = input_to_reg(ctx, *input);
                 abi.emit_copy_reg_to_arg(ctx, i, arg_reg);
@@ -1531,7 +1531,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 | Opcode::Sload16
                 | Opcode::Uload32
                 | Opcode::Sload32 => {
-                    assert!(inputs.len() == 1, "only one input for load operands");
+                    assert_eq!(inputs.len(), 1, "only one input for load operands");
                     let base = input_to_reg(ctx, inputs[0]);
                     Amode::imm_reg(offset as u32, base)
                 }
@@ -1543,8 +1543,9 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 | Opcode::Sload16Complex
                 | Opcode::Uload32Complex
                 | Opcode::Sload32Complex => {
-                    assert!(
-                        inputs.len() == 2,
+                    assert_eq!(
+                        inputs.len(),
+                        2,
                         "can't handle more than two inputs in complex load"
                     );
                     let base = input_to_reg(ctx, inputs[0]);
@@ -1618,10 +1619,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 
             let addr = match op {
                 Opcode::Store | Opcode::Istore8 | Opcode::Istore16 | Opcode::Istore32 => {
-                    assert!(
-                        inputs.len() == 2,
-                        "only one input for store memory operands"
-                    );
+                    assert_eq!(inputs.len(), 2, "only one input for store memory operands");
                    let base = input_to_reg(ctx, inputs[1]);
                     // TODO sign?
                     Amode::imm_reg(offset as u32, base)
@@ -1631,8 +1629,9 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 | Opcode::Istore8Complex
                 | Opcode::Istore16Complex
                 | Opcode::Istore32Complex => {
-                    assert!(
-                        inputs.len() == 3,
+                    assert_eq!(
+                        inputs.len(),
+                        3,
                         "can't handle more than two inputs in complex store"
                     );
                     let base = input_to_reg(ctx, inputs[1]);
@@ -2023,7 +2022,7 @@ impl LowerBackend for X64Backend {
                 _ => unimplemented!("branch opcode"),
             }
         } else {
-            assert!(branches.len() == 1);
+            assert_eq!(branches.len(), 1);
 
             // Must be an unconditional branch or trap.
             let op = ctx.data(branches[0]).opcode();
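
Note on the change itself: `assert_eq!`/`assert_ne!` are equivalent to `assert!` over `==`/`!=`, but on failure they also print both operand values, so a violated invariant like `inputs.len() == abi.num_args()` reports the two mismatched numbers rather than just the stringified condition. A minimal standalone sketch of the difference (the names and values are hypothetical, purely for illustration):

    fn main() {
        // Hypothetical stand-ins for inputs.len() and abi.num_args().
        let inputs_len = 3;
        let num_args = 2;

        // assert!(inputs_len == num_args) would panic with only the
        // stringified condition; assert_eq! additionally reports both
        // operands (left: 3, right: 2), which makes the mismatch
        // obvious in a CI log without rerunning under a debugger.
        assert_eq!(inputs_len, num_args);
    }

Both macros require the operands to implement `PartialEq` and `Debug`; the register and length types touched by this patch evidently satisfy that, since the patched code compiles.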