From b4a2dd37a4ce3c3cf3f44ca145048caebbec7532 Mon Sep 17 00:00:00 2001 From: Benjamin Bouvier Date: Tue, 8 Sep 2020 16:14:27 +0200 Subject: [PATCH] machinst x64: rename input_to_reg to put_input_in_reg; Eventually, we should be able to unify this function's implementation with the aarch64 one; but the latter does much more, and this would require abstractions brought up in another pending PR#2142. --- cranelift/codegen/src/isa/x64/lower.rs | 124 ++++++++++++------------- 1 file changed, 62 insertions(+), 62 deletions(-) diff --git a/cranelift/codegen/src/isa/x64/lower.rs b/cranelift/codegen/src/isa/x64/lower.rs index 6b017c44d6..6d48319477 100644 --- a/cranelift/codegen/src/isa/x64/lower.rs +++ b/cranelift/codegen/src/isa/x64/lower.rs @@ -77,7 +77,7 @@ fn lowerinput_to_reg(ctx: Ctx, input: LowerInput) -> Reg { } /// Put the given input into a register, and mark it as used (side-effect). -fn input_to_reg(ctx: Ctx, spec: InsnInput) -> Reg { +fn put_input_in_reg(ctx: Ctx, spec: InsnInput) -> Reg { let input = ctx.get_input(spec.insn, spec.input); lowerinput_to_reg(ctx, input) } @@ -107,8 +107,8 @@ fn extend_input_to_reg(ctx: Ctx, spec: InsnInput, ext_spec: ExtSpec) -> Reg { }; let ext_mode = match (input_size, requested_size) { - (a, b) if a == b => return input_to_reg(ctx, spec), - (1, 8) => return input_to_reg(ctx, spec), + (a, b) if a == b => return put_input_in_reg(ctx, spec), + (1, 8) => return put_input_in_reg(ctx, spec), (a, 16) | (a, 32) if a == 1 || a == 8 => ExtMode::BL, (a, 64) if a == 1 || a == 8 => ExtMode::BQ, (16, 32) => ExtMode::WL, @@ -191,7 +191,7 @@ fn emit_cmp(ctx: Ctx, insn: IRInst) { let inputs = [InsnInput { insn, input: 0 }, InsnInput { insn, input: 1 }]; // TODO Try to commute the operands (and invert the condition) if one is an immediate. 
- let lhs = input_to_reg(ctx, inputs[0]); + let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = input_to_reg_mem_imm(ctx, inputs[1]); // Cranelift's icmp semantics want to compare lhs - rhs, while Intel gives @@ -267,7 +267,7 @@ fn emit_fcmp(ctx: Ctx, insn: IRInst, mut cond_code: FloatCC, spec: FcmpSpec) -> } else { (inputs[0], inputs[1]) }; - let lhs = input_to_reg(ctx, lhs_input); + let lhs = put_input_in_reg(ctx, lhs_input); let rhs = input_to_reg_mem(ctx, rhs_input); ctx.emit(Inst::xmm_cmp_rm_r(op, rhs, lhs)); @@ -329,7 +329,7 @@ fn emit_vm_call>( assert_eq!(inputs.len() + vm_context, abi.num_args()); for (i, input) in inputs.iter().enumerate() { - let arg_reg = input_to_reg(ctx, *input); + let arg_reg = put_input_in_reg(ctx, *input); abi.emit_copy_reg_to_arg(ctx, i, arg_reg); } if call_conv.extends_baldrdash() { @@ -396,22 +396,22 @@ fn lower_to_amode>(ctx: &mut C, spec: InsnInput, offset: u matches_small_constant_shift(ctx, add_inputs[0]) { ( - input_to_reg(ctx, add_inputs[1]), - input_to_reg(ctx, shift_input), + put_input_in_reg(ctx, add_inputs[1]), + put_input_in_reg(ctx, shift_input), shift_amt, ) } else if let Some((shift_input, shift_amt)) = matches_small_constant_shift(ctx, add_inputs[1]) { ( - input_to_reg(ctx, add_inputs[0]), - input_to_reg(ctx, shift_input), + put_input_in_reg(ctx, add_inputs[0]), + put_input_in_reg(ctx, shift_input), shift_amt, ) } else { ( - input_to_reg(ctx, add_inputs[0]), - input_to_reg(ctx, add_inputs[1]), + put_input_in_reg(ctx, add_inputs[0]), + put_input_in_reg(ctx, add_inputs[1]), 0, ) }; @@ -419,7 +419,7 @@ fn lower_to_amode>(ctx: &mut C, spec: InsnInput, offset: u return Amode::imm_reg_reg_shift(offset, base, index, shift); } - let input = input_to_reg(ctx, spec); + let input = put_input_in_reg(ctx, spec); Amode::imm_reg(offset, input) } @@ -489,7 +489,7 @@ fn lower_insn_to_regs>( }, _ => panic!("Unsupported packed instruction"), }; - let lhs = input_to_reg(ctx, inputs[0]); + let lhs = put_input_in_reg(ctx, inputs[0]); 
let rhs = input_to_reg_mem(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]); @@ -518,16 +518,16 @@ fn lower_insn_to_regs>( // For commutative operations, try to commute operands if one is an // immediate. if let Some(imm) = input_to_sext_imm(ctx, inputs[0]) { - (input_to_reg(ctx, inputs[1]), RegMemImm::imm(imm)) + (put_input_in_reg(ctx, inputs[1]), RegMemImm::imm(imm)) } else { ( - input_to_reg(ctx, inputs[0]), + put_input_in_reg(ctx, inputs[0]), input_to_reg_mem_imm(ctx, inputs[1]), ) } } Opcode::Isub => ( - input_to_reg(ctx, inputs[0]), + put_input_in_reg(ctx, inputs[0]), input_to_reg_mem_imm(ctx, inputs[1]), ), _ => unreachable!(), @@ -547,7 +547,7 @@ fn lower_insn_to_regs>( unimplemented!("bool bnot") } else { let size = ty.bytes() as u8; - let src = input_to_reg(ctx, inputs[0]); + let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]); ctx.emit(Inst::gen_move(dst, src, ty)); ctx.emit(Inst::not(size, dst)); @@ -560,7 +560,7 @@ fn lower_insn_to_regs>( let (size, lhs) = match dst_ty { types::I8 | types::I16 => match op { - Opcode::Ishl => (4, input_to_reg(ctx, inputs[0])), + Opcode::Ishl => (4, put_input_in_reg(ctx, inputs[0])), Opcode::Ushr => ( 4, extend_input_to_reg(ctx, inputs[0], ExtSpec::ZeroExtendTo32), @@ -570,11 +570,11 @@ fn lower_insn_to_regs>( extend_input_to_reg(ctx, inputs[0], ExtSpec::SignExtendTo32), ), Opcode::Rotl | Opcode::Rotr => { - (dst_ty.bytes() as u8, input_to_reg(ctx, inputs[0])) + (dst_ty.bytes() as u8, put_input_in_reg(ctx, inputs[0])) } _ => unreachable!(), }, - types::I32 | types::I64 => (dst_ty.bytes() as u8, input_to_reg(ctx, inputs[0])), + types::I32 | types::I64 => (dst_ty.bytes() as u8, put_input_in_reg(ctx, inputs[0])), _ => unreachable!("unhandled output type for shift/rotates: {}", dst_ty), }; @@ -583,7 +583,7 @@ fn lower_insn_to_regs>( let cst = (cst as u8) & (dst_ty.bits() as u8 - 1); (Some(cst), None) } else { - (None, Some(input_to_reg(ctx, inputs[1]))) + (None, 
Some(put_input_in_reg(ctx, inputs[1]))) }; let dst = get_output_reg(ctx, outputs[0]); @@ -639,7 +639,7 @@ fn lower_insn_to_regs>( )); } else { let size = ty.bytes() as u8; - let src = input_to_reg(ctx, inputs[0]); + let src = put_input_in_reg(ctx, inputs[0]); ctx.emit(Inst::gen_move(dst, src, ty)); ctx.emit(Inst::neg(size, dst)); } @@ -1003,7 +1003,7 @@ fn lower_insn_to_regs>( // Null references are represented by the constant value 0; invalid references are // represented by the constant value -1. See `define_reftypes()` in // `meta/src/isa/x86/encodings.rs` to confirm. - let src = input_to_reg(ctx, inputs[0]); + let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]); let ty = ctx.input_ty(insn, 0); let imm = match op { @@ -1157,12 +1157,12 @@ fn lower_insn_to_regs>( // Determine the operands of the comparison, possibly by flipping them. let (lhs, rhs) = if flip { ( - input_to_reg(ctx, inputs[1]), + put_input_in_reg(ctx, inputs[1]), input_to_reg_mem(ctx, inputs[0]), ) } else { ( - input_to_reg(ctx, inputs[0]), + put_input_in_reg(ctx, inputs[0]), input_to_reg_mem(ctx, inputs[1]), ) }; @@ -1180,7 +1180,7 @@ fn lower_insn_to_regs>( Opcode::FallthroughReturn | Opcode::Return => { for i in 0..ctx.num_inputs(insn) { - let src_reg = input_to_reg(ctx, inputs[i]); + let src_reg = put_input_in_reg(ctx, inputs[i]); let retval_reg = ctx.retval(i); let ty = ctx.input_ty(insn, i); ctx.emit(Inst::gen_move(retval_reg, src_reg, ty)); @@ -1203,7 +1203,7 @@ fn lower_insn_to_regs>( } Opcode::CallIndirect => { - let ptr = input_to_reg(ctx, inputs[0]); + let ptr = put_input_in_reg(ctx, inputs[0]); let sig = ctx.call_sig(insn).unwrap(); assert_eq!(inputs.len() - 1, sig.params.len()); assert_eq!(outputs.len(), sig.returns.len()); @@ -1216,7 +1216,7 @@ fn lower_insn_to_regs>( abi.emit_stack_pre_adjust(ctx); assert_eq!(inputs.len(), abi.num_args()); for (i, input) in inputs.iter().enumerate() { - let arg_reg = input_to_reg(ctx, *input); + let arg_reg = 
put_input_in_reg(ctx, *input); abi.emit_copy_reg_to_arg(ctx, i, arg_reg); } abi.emit_call(ctx); @@ -1336,7 +1336,7 @@ fn lower_insn_to_regs>( } Opcode::Fadd | Opcode::Fsub | Opcode::Fmul | Opcode::Fdiv => { - let lhs = input_to_reg(ctx, inputs[0]); + let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = input_to_reg_mem(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]); let ty = ty.unwrap(); @@ -1387,8 +1387,8 @@ fn lower_insn_to_regs>( } Opcode::Fmin | Opcode::Fmax => { - let lhs = input_to_reg(ctx, inputs[0]); - let rhs = input_to_reg(ctx, inputs[1]); + let lhs = put_input_in_reg(ctx, inputs[0]); + let rhs = put_input_in_reg(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]); let is_min = op == Opcode::Fmin; let output_ty = ty.unwrap(); @@ -1479,7 +1479,7 @@ fn lower_insn_to_regs>( } types::I64 => { - let src = input_to_reg(ctx, inputs[0]); + let src = put_input_in_reg(ctx, inputs[0]); let src_copy = ctx.alloc_tmp(RegClass::I64, types::I64); ctx.emit(Inst::gen_move(src_copy, src, types::I64)); @@ -1500,7 +1500,7 @@ fn lower_insn_to_regs>( } Opcode::FcvtToUint | Opcode::FcvtToUintSat | Opcode::FcvtToSint | Opcode::FcvtToSintSat => { - let src = input_to_reg(ctx, inputs[0]); + let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]); let input_ty = ctx.input_ty(insn, 0); @@ -1545,7 +1545,7 @@ fn lower_insn_to_regs>( let output_ty = ctx.output_ty(insn, 0); match (input_ty, output_ty) { (types::F32, types::I32) => { - let src = input_to_reg(ctx, inputs[0]); + let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]); ctx.emit(Inst::xmm_to_gpr( SseOpcode::Movd, @@ -1565,7 +1565,7 @@ fn lower_insn_to_regs>( )); } (types::F64, types::I64) => { - let src = input_to_reg(ctx, inputs[0]); + let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]); ctx.emit(Inst::xmm_to_gpr( SseOpcode::Movq, @@ -1627,7 +1627,7 @@ fn lower_insn_to_regs>( // Move the `lhs` to the same 
register as `dst`; this may not emit an actual move // but ensures that the registers are the same to match x86's read-write operand // encoding. - let src = input_to_reg(ctx, inputs[0]); + let src = put_input_in_reg(ctx, inputs[0]); ctx.emit(Inst::gen_move(dst, src, output_ty)); // Generate an all 1s constant in an XMM register. This uses CMPPS but could @@ -1668,8 +1668,8 @@ fn lower_insn_to_regs>( Opcode::Fcopysign => { let dst = get_output_reg(ctx, outputs[0]); - let lhs = input_to_reg(ctx, inputs[0]); - let rhs = input_to_reg(ctx, inputs[1]); + let lhs = put_input_in_reg(ctx, inputs[0]); + let rhs = put_input_in_reg(ctx, inputs[1]); let ty = ty.unwrap(); @@ -1823,8 +1823,8 @@ fn lower_insn_to_regs>( 2, "can't handle more than two inputs in complex load" ); - let base = input_to_reg(ctx, inputs[0]); - let index = input_to_reg(ctx, inputs[1]); + let base = put_input_in_reg(ctx, inputs[0]); + let index = put_input_in_reg(ctx, inputs[1]); let shift = 0; Amode::imm_reg_reg_shift(offset as u32, base, index, shift) } @@ -1911,8 +1911,8 @@ fn lower_insn_to_regs>( 3, "can't handle more than two inputs in complex store" ); - let base = input_to_reg(ctx, inputs[1]); - let index = input_to_reg(ctx, inputs[2]); + let base = put_input_in_reg(ctx, inputs[1]); + let index = put_input_in_reg(ctx, inputs[2]); let shift = 0; Amode::imm_reg_reg_shift(offset as u32, base, index, shift) } @@ -1920,7 +1920,7 @@ fn lower_insn_to_regs>( _ => unreachable!(), }; - let src = input_to_reg(ctx, inputs[0]); + let src = put_input_in_reg(ctx, inputs[0]); let srcloc = Some(ctx.srcloc(insn)); @@ -1946,8 +1946,8 @@ fn lower_insn_to_regs>( // left for another day. 
// TODO: filed as https://github.com/bytecodealliance/wasmtime/issues/2153 let dst = get_output_reg(ctx, outputs[0]); - let mut addr = input_to_reg(ctx, inputs[0]); - let mut arg2 = input_to_reg(ctx, inputs[1]); + let mut addr = put_input_in_reg(ctx, inputs[0]); + let mut arg2 = put_input_in_reg(ctx, inputs[1]); let ty_access = ty.unwrap(); assert!(is_valid_atomic_transaction_ty(ty_access)); let memflags = ctx.memflags(insn).expect("memory flags"); @@ -1994,8 +1994,8 @@ fn lower_insn_to_regs>( // `AtomicRmw`, there's no need to zero-extend narrow values here. let dst = get_output_reg(ctx, outputs[0]); let addr = lower_to_amode(ctx, inputs[0], 0); - let expected = input_to_reg(ctx, inputs[1]); - let replacement = input_to_reg(ctx, inputs[2]); + let expected = put_input_in_reg(ctx, inputs[1]); + let replacement = put_input_in_reg(ctx, inputs[2]); let ty_access = ty.unwrap(); assert!(is_valid_atomic_transaction_ty(ty_access)); let memflags = ctx.memflags(insn).expect("memory flags"); @@ -2054,7 +2054,7 @@ fn lower_insn_to_regs>( Opcode::AtomicStore => { // This is a normal store, followed by an `mfence` instruction. - let data = input_to_reg(ctx, inputs[0]); + let data = put_input_in_reg(ctx, inputs[0]); let addr = lower_to_amode(ctx, inputs[1], 0); let ty_access = ctx.input_ty(insn, 0); assert!(is_valid_atomic_transaction_ty(ty_access)); @@ -2139,14 +2139,14 @@ fn lower_insn_to_regs>( }; let ty = ctx.output_ty(insn, 0); - let rhs = input_to_reg(ctx, rhs_input); + let rhs = put_input_in_reg(ctx, rhs_input); let dst = get_output_reg(ctx, outputs[0]); let lhs = if is_int_ty(ty) && ty.bytes() < 4 { // Special case: since the higher bits are undefined per CLIF semantics, we // can just apply a 32-bit cmove here. Force inputs into registers, to // avoid partial spilling out-of-bounds with memory accesses, though. // Sign-extend operands to 32, then do a cmove of size 4. 
- RegMem::reg(input_to_reg(ctx, lhs_input)) + RegMem::reg(put_input_in_reg(ctx, lhs_input)) } else { input_to_reg_mem(ctx, lhs_input) }; @@ -2187,12 +2187,12 @@ fn lower_insn_to_regs>( } else { // The input is a boolean value, compare it against zero. let size = ctx.input_ty(insn, 0).bytes() as u8; - let test = input_to_reg(ctx, inputs[0]); + let test = put_input_in_reg(ctx, inputs[0]); ctx.emit(Inst::cmp_rmi_r(size, RegMemImm::imm(0), test)); CC::NZ }; - let rhs = input_to_reg(ctx, inputs[2]); + let rhs = put_input_in_reg(ctx, inputs[2]); let dst = get_output_reg(ctx, outputs[0]); let ty = ctx.output_ty(insn, 0); @@ -2205,7 +2205,7 @@ fn lower_insn_to_regs>( // can just apply a 32-bit cmove here. Force inputs into registers, to // avoid partial spilling out-of-bounds with memory accesses, though. size = 4; - RegMem::reg(input_to_reg(ctx, inputs[1])) + RegMem::reg(put_input_in_reg(ctx, inputs[1])) } else { input_to_reg_mem(ctx, inputs[1]) }; @@ -2231,7 +2231,7 @@ fn lower_insn_to_regs>( let cc = CC::from_intcc(ctx.data(insn).cond_code().unwrap()); let lhs = input_to_reg_mem(ctx, inputs[1]); - let rhs = input_to_reg(ctx, inputs[2]); + let rhs = put_input_in_reg(ctx, inputs[2]); let dst = get_output_reg(ctx, outputs[0]); let ty = ctx.output_ty(insn, 0); @@ -2268,7 +2268,7 @@ fn lower_insn_to_regs>( let input_ty = ctx.input_ty(insn, 0); let size = input_ty.bytes() as u8; - let dividend = input_to_reg(ctx, inputs[0]); + let dividend = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]); let srcloc = ctx.srcloc(insn); @@ -2286,7 +2286,7 @@ fn lower_insn_to_regs>( // Note it keeps the result in $rax (for divide) or $rdx (for rem), so that // regalloc is aware of the coalescing opportunity between rax/rdx and the // destination register. 
- let divisor = input_to_reg(ctx, inputs[1]); + let divisor = put_input_in_reg(ctx, inputs[1]); let divisor_copy = ctx.alloc_tmp(RegClass::I64, types::I64); ctx.emit(Inst::gen_move(divisor_copy, divisor, types::I64)); @@ -2352,7 +2352,7 @@ fn lower_insn_to_regs>( let input_ty = ctx.input_ty(insn, 0); let size = input_ty.bytes() as u8; - let lhs = input_to_reg(ctx, inputs[0]); + let lhs = put_input_in_reg(ctx, inputs[0]); let rhs = input_to_reg_mem(ctx, inputs[1]); let dst = get_output_reg(ctx, outputs[0]); @@ -2377,7 +2377,7 @@ fn lower_insn_to_regs>( } Opcode::SetPinnedReg => { - let src = input_to_reg(ctx, inputs[0]); + let src = put_input_in_reg(ctx, inputs[0]); ctx.emit(Inst::gen_move( Writable::from_reg(regs::pinned_reg()), src, @@ -2404,7 +2404,7 @@ fn lower_insn_to_regs>( // https://github.com/bytecodealliance/wasmtime/issues/1147). As such, this IR // instruction should emit no machine code but a move is necessary to give the register // allocator a definition for the output virtual register. - let src = input_to_reg(ctx, inputs[0]); + let src = put_input_in_reg(ctx, inputs[0]); let dst = get_output_reg(ctx, outputs[0]); let ty = ty.unwrap(); ctx.emit(Inst::gen_move(dst, src, ty)); @@ -2531,7 +2531,7 @@ impl LowerBackend for X64Backend { FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(), } } else if is_int_ty(src_ty) || is_bool_ty(src_ty) { - let src = input_to_reg( + let src = put_input_in_reg( ctx, InsnInput { insn: branches[0], @@ -2554,7 +2554,7 @@ impl LowerBackend for X64Backend { Opcode::BrIcmp => { let src_ty = ctx.input_ty(branches[0], 0); if is_int_ty(src_ty) || is_bool_ty(src_ty) { - let lhs = input_to_reg( + let lhs = put_input_in_reg( ctx, InsnInput { insn: branches[0],