machinst x64: rename input_to_reg to put_input_in_reg;

Eventually, we should be able to unify this function's implementation
with the aarch64 one; but the latter does much more, and this would
require abstractions brought up in another pending PR #2142.
This commit is contained in:
Benjamin Bouvier
2020-09-08 16:14:27 +02:00
parent cb96d16ac7
commit b4a2dd37a4

View File

@@ -77,7 +77,7 @@ fn lowerinput_to_reg(ctx: Ctx, input: LowerInput) -> Reg {
} }
/// Put the given input into a register, and mark it as used (side-effect). /// Put the given input into a register, and mark it as used (side-effect).
fn input_to_reg(ctx: Ctx, spec: InsnInput) -> Reg { fn put_input_in_reg(ctx: Ctx, spec: InsnInput) -> Reg {
let input = ctx.get_input(spec.insn, spec.input); let input = ctx.get_input(spec.insn, spec.input);
lowerinput_to_reg(ctx, input) lowerinput_to_reg(ctx, input)
} }
@@ -107,8 +107,8 @@ fn extend_input_to_reg(ctx: Ctx, spec: InsnInput, ext_spec: ExtSpec) -> Reg {
}; };
let ext_mode = match (input_size, requested_size) { let ext_mode = match (input_size, requested_size) {
(a, b) if a == b => return input_to_reg(ctx, spec), (a, b) if a == b => return put_input_in_reg(ctx, spec),
(1, 8) => return input_to_reg(ctx, spec), (1, 8) => return put_input_in_reg(ctx, spec),
(a, 16) | (a, 32) if a == 1 || a == 8 => ExtMode::BL, (a, 16) | (a, 32) if a == 1 || a == 8 => ExtMode::BL,
(a, 64) if a == 1 || a == 8 => ExtMode::BQ, (a, 64) if a == 1 || a == 8 => ExtMode::BQ,
(16, 32) => ExtMode::WL, (16, 32) => ExtMode::WL,
@@ -191,7 +191,7 @@ fn emit_cmp(ctx: Ctx, insn: IRInst) {
let inputs = [InsnInput { insn, input: 0 }, InsnInput { insn, input: 1 }]; let inputs = [InsnInput { insn, input: 0 }, InsnInput { insn, input: 1 }];
// TODO Try to commute the operands (and invert the condition) if one is an immediate. // TODO Try to commute the operands (and invert the condition) if one is an immediate.
let lhs = input_to_reg(ctx, inputs[0]); let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem_imm(ctx, inputs[1]); let rhs = input_to_reg_mem_imm(ctx, inputs[1]);
// Cranelift's icmp semantics want to compare lhs - rhs, while Intel gives // Cranelift's icmp semantics want to compare lhs - rhs, while Intel gives
@@ -267,7 +267,7 @@ fn emit_fcmp(ctx: Ctx, insn: IRInst, mut cond_code: FloatCC, spec: FcmpSpec) ->
} else { } else {
(inputs[0], inputs[1]) (inputs[0], inputs[1])
}; };
let lhs = input_to_reg(ctx, lhs_input); let lhs = put_input_in_reg(ctx, lhs_input);
let rhs = input_to_reg_mem(ctx, rhs_input); let rhs = input_to_reg_mem(ctx, rhs_input);
ctx.emit(Inst::xmm_cmp_rm_r(op, rhs, lhs)); ctx.emit(Inst::xmm_cmp_rm_r(op, rhs, lhs));
@@ -329,7 +329,7 @@ fn emit_vm_call<C: LowerCtx<I = Inst>>(
assert_eq!(inputs.len() + vm_context, abi.num_args()); assert_eq!(inputs.len() + vm_context, abi.num_args());
for (i, input) in inputs.iter().enumerate() { for (i, input) in inputs.iter().enumerate() {
let arg_reg = input_to_reg(ctx, *input); let arg_reg = put_input_in_reg(ctx, *input);
abi.emit_copy_reg_to_arg(ctx, i, arg_reg); abi.emit_copy_reg_to_arg(ctx, i, arg_reg);
} }
if call_conv.extends_baldrdash() { if call_conv.extends_baldrdash() {
@@ -396,22 +396,22 @@ fn lower_to_amode<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput, offset: u
matches_small_constant_shift(ctx, add_inputs[0]) matches_small_constant_shift(ctx, add_inputs[0])
{ {
( (
input_to_reg(ctx, add_inputs[1]), put_input_in_reg(ctx, add_inputs[1]),
input_to_reg(ctx, shift_input), put_input_in_reg(ctx, shift_input),
shift_amt, shift_amt,
) )
} else if let Some((shift_input, shift_amt)) = } else if let Some((shift_input, shift_amt)) =
matches_small_constant_shift(ctx, add_inputs[1]) matches_small_constant_shift(ctx, add_inputs[1])
{ {
( (
input_to_reg(ctx, add_inputs[0]), put_input_in_reg(ctx, add_inputs[0]),
input_to_reg(ctx, shift_input), put_input_in_reg(ctx, shift_input),
shift_amt, shift_amt,
) )
} else { } else {
( (
input_to_reg(ctx, add_inputs[0]), put_input_in_reg(ctx, add_inputs[0]),
input_to_reg(ctx, add_inputs[1]), put_input_in_reg(ctx, add_inputs[1]),
0, 0,
) )
}; };
@@ -419,7 +419,7 @@ fn lower_to_amode<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput, offset: u
return Amode::imm_reg_reg_shift(offset, base, index, shift); return Amode::imm_reg_reg_shift(offset, base, index, shift);
} }
let input = input_to_reg(ctx, spec); let input = put_input_in_reg(ctx, spec);
Amode::imm_reg(offset, input) Amode::imm_reg(offset, input)
} }
@@ -489,7 +489,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}, },
_ => panic!("Unsupported packed instruction"), _ => panic!("Unsupported packed instruction"),
}; };
let lhs = input_to_reg(ctx, inputs[0]); let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]); let rhs = input_to_reg_mem(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
@@ -518,16 +518,16 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// For commutative operations, try to commute operands if one is an // For commutative operations, try to commute operands if one is an
// immediate. // immediate.
if let Some(imm) = input_to_sext_imm(ctx, inputs[0]) { if let Some(imm) = input_to_sext_imm(ctx, inputs[0]) {
(input_to_reg(ctx, inputs[1]), RegMemImm::imm(imm)) (put_input_in_reg(ctx, inputs[1]), RegMemImm::imm(imm))
} else { } else {
( (
input_to_reg(ctx, inputs[0]), put_input_in_reg(ctx, inputs[0]),
input_to_reg_mem_imm(ctx, inputs[1]), input_to_reg_mem_imm(ctx, inputs[1]),
) )
} }
} }
Opcode::Isub => ( Opcode::Isub => (
input_to_reg(ctx, inputs[0]), put_input_in_reg(ctx, inputs[0]),
input_to_reg_mem_imm(ctx, inputs[1]), input_to_reg_mem_imm(ctx, inputs[1]),
), ),
_ => unreachable!(), _ => unreachable!(),
@@ -547,7 +547,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
unimplemented!("bool bnot") unimplemented!("bool bnot")
} else { } else {
let size = ty.bytes() as u8; let size = ty.bytes() as u8;
let src = input_to_reg(ctx, inputs[0]); let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(dst, src, ty)); ctx.emit(Inst::gen_move(dst, src, ty));
ctx.emit(Inst::not(size, dst)); ctx.emit(Inst::not(size, dst));
@@ -560,7 +560,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let (size, lhs) = match dst_ty { let (size, lhs) = match dst_ty {
types::I8 | types::I16 => match op { types::I8 | types::I16 => match op {
Opcode::Ishl => (4, input_to_reg(ctx, inputs[0])), Opcode::Ishl => (4, put_input_in_reg(ctx, inputs[0])),
Opcode::Ushr => ( Opcode::Ushr => (
4, 4,
extend_input_to_reg(ctx, inputs[0], ExtSpec::ZeroExtendTo32), extend_input_to_reg(ctx, inputs[0], ExtSpec::ZeroExtendTo32),
@@ -570,11 +570,11 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
extend_input_to_reg(ctx, inputs[0], ExtSpec::SignExtendTo32), extend_input_to_reg(ctx, inputs[0], ExtSpec::SignExtendTo32),
), ),
Opcode::Rotl | Opcode::Rotr => { Opcode::Rotl | Opcode::Rotr => {
(dst_ty.bytes() as u8, input_to_reg(ctx, inputs[0])) (dst_ty.bytes() as u8, put_input_in_reg(ctx, inputs[0]))
} }
_ => unreachable!(), _ => unreachable!(),
}, },
types::I32 | types::I64 => (dst_ty.bytes() as u8, input_to_reg(ctx, inputs[0])), types::I32 | types::I64 => (dst_ty.bytes() as u8, put_input_in_reg(ctx, inputs[0])),
_ => unreachable!("unhandled output type for shift/rotates: {}", dst_ty), _ => unreachable!("unhandled output type for shift/rotates: {}", dst_ty),
}; };
@@ -583,7 +583,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let cst = (cst as u8) & (dst_ty.bits() as u8 - 1); let cst = (cst as u8) & (dst_ty.bits() as u8 - 1);
(Some(cst), None) (Some(cst), None)
} else { } else {
(None, Some(input_to_reg(ctx, inputs[1]))) (None, Some(put_input_in_reg(ctx, inputs[1])))
}; };
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
@@ -639,7 +639,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
)); ));
} else { } else {
let size = ty.bytes() as u8; let size = ty.bytes() as u8;
let src = input_to_reg(ctx, inputs[0]); let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::gen_move(dst, src, ty)); ctx.emit(Inst::gen_move(dst, src, ty));
ctx.emit(Inst::neg(size, dst)); ctx.emit(Inst::neg(size, dst));
} }
@@ -1003,7 +1003,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// Null references are represented by the constant value 0; invalid references are // Null references are represented by the constant value 0; invalid references are
// represented by the constant value -1. See `define_reftypes()` in // represented by the constant value -1. See `define_reftypes()` in
// `meta/src/isa/x86/encodings.rs` to confirm. // `meta/src/isa/x86/encodings.rs` to confirm.
let src = input_to_reg(ctx, inputs[0]); let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let ty = ctx.input_ty(insn, 0); let ty = ctx.input_ty(insn, 0);
let imm = match op { let imm = match op {
@@ -1157,12 +1157,12 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// Determine the operands of the comparison, possibly by flipping them. // Determine the operands of the comparison, possibly by flipping them.
let (lhs, rhs) = if flip { let (lhs, rhs) = if flip {
( (
input_to_reg(ctx, inputs[1]), put_input_in_reg(ctx, inputs[1]),
input_to_reg_mem(ctx, inputs[0]), input_to_reg_mem(ctx, inputs[0]),
) )
} else { } else {
( (
input_to_reg(ctx, inputs[0]), put_input_in_reg(ctx, inputs[0]),
input_to_reg_mem(ctx, inputs[1]), input_to_reg_mem(ctx, inputs[1]),
) )
}; };
@@ -1180,7 +1180,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::FallthroughReturn | Opcode::Return => { Opcode::FallthroughReturn | Opcode::Return => {
for i in 0..ctx.num_inputs(insn) { for i in 0..ctx.num_inputs(insn) {
let src_reg = input_to_reg(ctx, inputs[i]); let src_reg = put_input_in_reg(ctx, inputs[i]);
let retval_reg = ctx.retval(i); let retval_reg = ctx.retval(i);
let ty = ctx.input_ty(insn, i); let ty = ctx.input_ty(insn, i);
ctx.emit(Inst::gen_move(retval_reg, src_reg, ty)); ctx.emit(Inst::gen_move(retval_reg, src_reg, ty));
@@ -1203,7 +1203,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} }
Opcode::CallIndirect => { Opcode::CallIndirect => {
let ptr = input_to_reg(ctx, inputs[0]); let ptr = put_input_in_reg(ctx, inputs[0]);
let sig = ctx.call_sig(insn).unwrap(); let sig = ctx.call_sig(insn).unwrap();
assert_eq!(inputs.len() - 1, sig.params.len()); assert_eq!(inputs.len() - 1, sig.params.len());
assert_eq!(outputs.len(), sig.returns.len()); assert_eq!(outputs.len(), sig.returns.len());
@@ -1216,7 +1216,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
abi.emit_stack_pre_adjust(ctx); abi.emit_stack_pre_adjust(ctx);
assert_eq!(inputs.len(), abi.num_args()); assert_eq!(inputs.len(), abi.num_args());
for (i, input) in inputs.iter().enumerate() { for (i, input) in inputs.iter().enumerate() {
let arg_reg = input_to_reg(ctx, *input); let arg_reg = put_input_in_reg(ctx, *input);
abi.emit_copy_reg_to_arg(ctx, i, arg_reg); abi.emit_copy_reg_to_arg(ctx, i, arg_reg);
} }
abi.emit_call(ctx); abi.emit_call(ctx);
@@ -1336,7 +1336,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} }
Opcode::Fadd | Opcode::Fsub | Opcode::Fmul | Opcode::Fdiv => { Opcode::Fadd | Opcode::Fsub | Opcode::Fmul | Opcode::Fdiv => {
let lhs = input_to_reg(ctx, inputs[0]); let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]); let rhs = input_to_reg_mem(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap(); let ty = ty.unwrap();
@@ -1387,8 +1387,8 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} }
Opcode::Fmin | Opcode::Fmax => { Opcode::Fmin | Opcode::Fmax => {
let lhs = input_to_reg(ctx, inputs[0]); let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg(ctx, inputs[1]); let rhs = put_input_in_reg(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let is_min = op == Opcode::Fmin; let is_min = op == Opcode::Fmin;
let output_ty = ty.unwrap(); let output_ty = ty.unwrap();
@@ -1479,7 +1479,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} }
types::I64 => { types::I64 => {
let src = input_to_reg(ctx, inputs[0]); let src = put_input_in_reg(ctx, inputs[0]);
let src_copy = ctx.alloc_tmp(RegClass::I64, types::I64); let src_copy = ctx.alloc_tmp(RegClass::I64, types::I64);
ctx.emit(Inst::gen_move(src_copy, src, types::I64)); ctx.emit(Inst::gen_move(src_copy, src, types::I64));
@@ -1500,7 +1500,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} }
Opcode::FcvtToUint | Opcode::FcvtToUintSat | Opcode::FcvtToSint | Opcode::FcvtToSintSat => { Opcode::FcvtToUint | Opcode::FcvtToUintSat | Opcode::FcvtToSint | Opcode::FcvtToSintSat => {
let src = input_to_reg(ctx, inputs[0]); let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let input_ty = ctx.input_ty(insn, 0); let input_ty = ctx.input_ty(insn, 0);
@@ -1545,7 +1545,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let output_ty = ctx.output_ty(insn, 0); let output_ty = ctx.output_ty(insn, 0);
match (input_ty, output_ty) { match (input_ty, output_ty) {
(types::F32, types::I32) => { (types::F32, types::I32) => {
let src = input_to_reg(ctx, inputs[0]); let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::xmm_to_gpr( ctx.emit(Inst::xmm_to_gpr(
SseOpcode::Movd, SseOpcode::Movd,
@@ -1565,7 +1565,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
)); ));
} }
(types::F64, types::I64) => { (types::F64, types::I64) => {
let src = input_to_reg(ctx, inputs[0]); let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::xmm_to_gpr( ctx.emit(Inst::xmm_to_gpr(
SseOpcode::Movq, SseOpcode::Movq,
@@ -1627,7 +1627,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// Move the `lhs` to the same register as `dst`; this may not emit an actual move // Move the `lhs` to the same register as `dst`; this may not emit an actual move
// but ensures that the registers are the same to match x86's read-write operand // but ensures that the registers are the same to match x86's read-write operand
// encoding. // encoding.
let src = input_to_reg(ctx, inputs[0]); let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::gen_move(dst, src, output_ty)); ctx.emit(Inst::gen_move(dst, src, output_ty));
// Generate an all 1s constant in an XMM register. This uses CMPPS but could // Generate an all 1s constant in an XMM register. This uses CMPPS but could
@@ -1668,8 +1668,8 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::Fcopysign => { Opcode::Fcopysign => {
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let lhs = input_to_reg(ctx, inputs[0]); let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg(ctx, inputs[1]); let rhs = put_input_in_reg(ctx, inputs[1]);
let ty = ty.unwrap(); let ty = ty.unwrap();
@@ -1823,8 +1823,8 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
2, 2,
"can't handle more than two inputs in complex load" "can't handle more than two inputs in complex load"
); );
let base = input_to_reg(ctx, inputs[0]); let base = put_input_in_reg(ctx, inputs[0]);
let index = input_to_reg(ctx, inputs[1]); let index = put_input_in_reg(ctx, inputs[1]);
let shift = 0; let shift = 0;
Amode::imm_reg_reg_shift(offset as u32, base, index, shift) Amode::imm_reg_reg_shift(offset as u32, base, index, shift)
} }
@@ -1911,8 +1911,8 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
3, 3,
"can't handle more than two inputs in complex store" "can't handle more than two inputs in complex store"
); );
let base = input_to_reg(ctx, inputs[1]); let base = put_input_in_reg(ctx, inputs[1]);
let index = input_to_reg(ctx, inputs[2]); let index = put_input_in_reg(ctx, inputs[2]);
let shift = 0; let shift = 0;
Amode::imm_reg_reg_shift(offset as u32, base, index, shift) Amode::imm_reg_reg_shift(offset as u32, base, index, shift)
} }
@@ -1920,7 +1920,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
_ => unreachable!(), _ => unreachable!(),
}; };
let src = input_to_reg(ctx, inputs[0]); let src = put_input_in_reg(ctx, inputs[0]);
let srcloc = Some(ctx.srcloc(insn)); let srcloc = Some(ctx.srcloc(insn));
@@ -1946,8 +1946,8 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// left for another day. // left for another day.
// TODO: filed as https://github.com/bytecodealliance/wasmtime/issues/2153 // TODO: filed as https://github.com/bytecodealliance/wasmtime/issues/2153
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let mut addr = input_to_reg(ctx, inputs[0]); let mut addr = put_input_in_reg(ctx, inputs[0]);
let mut arg2 = input_to_reg(ctx, inputs[1]); let mut arg2 = put_input_in_reg(ctx, inputs[1]);
let ty_access = ty.unwrap(); let ty_access = ty.unwrap();
assert!(is_valid_atomic_transaction_ty(ty_access)); assert!(is_valid_atomic_transaction_ty(ty_access));
let memflags = ctx.memflags(insn).expect("memory flags"); let memflags = ctx.memflags(insn).expect("memory flags");
@@ -1994,8 +1994,8 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// `AtomicRmw`, there's no need to zero-extend narrow values here. // `AtomicRmw`, there's no need to zero-extend narrow values here.
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let addr = lower_to_amode(ctx, inputs[0], 0); let addr = lower_to_amode(ctx, inputs[0], 0);
let expected = input_to_reg(ctx, inputs[1]); let expected = put_input_in_reg(ctx, inputs[1]);
let replacement = input_to_reg(ctx, inputs[2]); let replacement = put_input_in_reg(ctx, inputs[2]);
let ty_access = ty.unwrap(); let ty_access = ty.unwrap();
assert!(is_valid_atomic_transaction_ty(ty_access)); assert!(is_valid_atomic_transaction_ty(ty_access));
let memflags = ctx.memflags(insn).expect("memory flags"); let memflags = ctx.memflags(insn).expect("memory flags");
@@ -2054,7 +2054,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::AtomicStore => { Opcode::AtomicStore => {
// This is a normal store, followed by an `mfence` instruction. // This is a normal store, followed by an `mfence` instruction.
let data = input_to_reg(ctx, inputs[0]); let data = put_input_in_reg(ctx, inputs[0]);
let addr = lower_to_amode(ctx, inputs[1], 0); let addr = lower_to_amode(ctx, inputs[1], 0);
let ty_access = ctx.input_ty(insn, 0); let ty_access = ctx.input_ty(insn, 0);
assert!(is_valid_atomic_transaction_ty(ty_access)); assert!(is_valid_atomic_transaction_ty(ty_access));
@@ -2139,14 +2139,14 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
}; };
let ty = ctx.output_ty(insn, 0); let ty = ctx.output_ty(insn, 0);
let rhs = input_to_reg(ctx, rhs_input); let rhs = put_input_in_reg(ctx, rhs_input);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let lhs = if is_int_ty(ty) && ty.bytes() < 4 { let lhs = if is_int_ty(ty) && ty.bytes() < 4 {
// Special case: since the higher bits are undefined per CLIF semantics, we // Special case: since the higher bits are undefined per CLIF semantics, we
// can just apply a 32-bit cmove here. Force inputs into registers, to // can just apply a 32-bit cmove here. Force inputs into registers, to
// avoid partial spilling out-of-bounds with memory accesses, though. // avoid partial spilling out-of-bounds with memory accesses, though.
// Sign-extend operands to 32, then do a cmove of size 4. // Sign-extend operands to 32, then do a cmove of size 4.
RegMem::reg(input_to_reg(ctx, lhs_input)) RegMem::reg(put_input_in_reg(ctx, lhs_input))
} else { } else {
input_to_reg_mem(ctx, lhs_input) input_to_reg_mem(ctx, lhs_input)
}; };
@@ -2187,12 +2187,12 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} else { } else {
// The input is a boolean value, compare it against zero. // The input is a boolean value, compare it against zero.
let size = ctx.input_ty(insn, 0).bytes() as u8; let size = ctx.input_ty(insn, 0).bytes() as u8;
let test = input_to_reg(ctx, inputs[0]); let test = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::cmp_rmi_r(size, RegMemImm::imm(0), test)); ctx.emit(Inst::cmp_rmi_r(size, RegMemImm::imm(0), test));
CC::NZ CC::NZ
}; };
let rhs = input_to_reg(ctx, inputs[2]); let rhs = put_input_in_reg(ctx, inputs[2]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let ty = ctx.output_ty(insn, 0); let ty = ctx.output_ty(insn, 0);
@@ -2205,7 +2205,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// can just apply a 32-bit cmove here. Force inputs into registers, to // can just apply a 32-bit cmove here. Force inputs into registers, to
// avoid partial spilling out-of-bounds with memory accesses, though. // avoid partial spilling out-of-bounds with memory accesses, though.
size = 4; size = 4;
RegMem::reg(input_to_reg(ctx, inputs[1])) RegMem::reg(put_input_in_reg(ctx, inputs[1]))
} else { } else {
input_to_reg_mem(ctx, inputs[1]) input_to_reg_mem(ctx, inputs[1])
}; };
@@ -2231,7 +2231,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let cc = CC::from_intcc(ctx.data(insn).cond_code().unwrap()); let cc = CC::from_intcc(ctx.data(insn).cond_code().unwrap());
let lhs = input_to_reg_mem(ctx, inputs[1]); let lhs = input_to_reg_mem(ctx, inputs[1]);
let rhs = input_to_reg(ctx, inputs[2]); let rhs = put_input_in_reg(ctx, inputs[2]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let ty = ctx.output_ty(insn, 0); let ty = ctx.output_ty(insn, 0);
@@ -2268,7 +2268,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let input_ty = ctx.input_ty(insn, 0); let input_ty = ctx.input_ty(insn, 0);
let size = input_ty.bytes() as u8; let size = input_ty.bytes() as u8;
let dividend = input_to_reg(ctx, inputs[0]); let dividend = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let srcloc = ctx.srcloc(insn); let srcloc = ctx.srcloc(insn);
@@ -2286,7 +2286,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// Note it keeps the result in $rax (for divide) or $rdx (for rem), so that // Note it keeps the result in $rax (for divide) or $rdx (for rem), so that
// regalloc is aware of the coalescing opportunity between rax/rdx and the // regalloc is aware of the coalescing opportunity between rax/rdx and the
// destination register. // destination register.
let divisor = input_to_reg(ctx, inputs[1]); let divisor = put_input_in_reg(ctx, inputs[1]);
let divisor_copy = ctx.alloc_tmp(RegClass::I64, types::I64); let divisor_copy = ctx.alloc_tmp(RegClass::I64, types::I64);
ctx.emit(Inst::gen_move(divisor_copy, divisor, types::I64)); ctx.emit(Inst::gen_move(divisor_copy, divisor, types::I64));
@@ -2352,7 +2352,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
let input_ty = ctx.input_ty(insn, 0); let input_ty = ctx.input_ty(insn, 0);
let size = input_ty.bytes() as u8; let size = input_ty.bytes() as u8;
let lhs = input_to_reg(ctx, inputs[0]); let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]); let rhs = input_to_reg_mem(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
@@ -2377,7 +2377,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} }
Opcode::SetPinnedReg => { Opcode::SetPinnedReg => {
let src = input_to_reg(ctx, inputs[0]); let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::gen_move( ctx.emit(Inst::gen_move(
Writable::from_reg(regs::pinned_reg()), Writable::from_reg(regs::pinned_reg()),
src, src,
@@ -2404,7 +2404,7 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
// https://github.com/bytecodealliance/wasmtime/issues/1147). As such, this IR // https://github.com/bytecodealliance/wasmtime/issues/1147). As such, this IR
// instruction should emit no machine code but a move is necessary to give the register // instruction should emit no machine code but a move is necessary to give the register
// allocator a definition for the output virtual register. // allocator a definition for the output virtual register.
let src = input_to_reg(ctx, inputs[0]); let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]); let dst = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap(); let ty = ty.unwrap();
ctx.emit(Inst::gen_move(dst, src, ty)); ctx.emit(Inst::gen_move(dst, src, ty));
@@ -2531,7 +2531,7 @@ impl LowerBackend for X64Backend {
FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(), FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(),
} }
} else if is_int_ty(src_ty) || is_bool_ty(src_ty) { } else if is_int_ty(src_ty) || is_bool_ty(src_ty) {
let src = input_to_reg( let src = put_input_in_reg(
ctx, ctx,
InsnInput { InsnInput {
insn: branches[0], insn: branches[0],
@@ -2554,7 +2554,7 @@ impl LowerBackend for X64Backend {
Opcode::BrIcmp => { Opcode::BrIcmp => {
let src_ty = ctx.input_ty(branches[0], 0); let src_ty = ctx.input_ty(branches[0], 0);
if is_int_ty(src_ty) || is_bool_ty(src_ty) { if is_int_ty(src_ty) || is_bool_ty(src_ty) {
let lhs = input_to_reg( let lhs = put_input_in_reg(
ctx, ctx,
InsnInput { InsnInput {
insn: branches[0], insn: branches[0],