x64: Migrate the return and fallthrough_return lowerings to ISLE (#4518)
https://github.com/bytecodealliance/wasmtime/pull/4518

@@ -1486,8 +1486,34 @@
                  r
                  (OperandSize.Size32)))
 
 ;;;; Helpers for Emitting Loads ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
+;; Generate a move between two registers.
+(decl gen_move (Type WritableReg Reg) MInst)
+(extern constructor gen_move gen_move)
+
+;; Copy a return value to a set of registers.
+(decl copy_to_regs (WritableValueRegs Value) Unit)
+(rule (copy_to_regs dsts val @ (value_type ty))
+      (let ((srcs ValueRegs (put_in_regs val)))
+        (copy_to_regs_range ty (value_regs_range srcs) dsts srcs)))
+
+;; Helper for `copy_to_regs` that uses a range to index into the reg/value
+;; vectors. Fails for the empty range.
+(decl copy_to_regs_range (Type Range WritableValueRegs ValueRegs) Unit)
+
+(rule (copy_to_regs_range ty (range_singleton idx) dsts srcs)
+      (let ((dst WritableReg (writable_regs_get dsts idx))
+            (src Reg (value_regs_get srcs idx)))
+        (emit (gen_move ty dst src))))
+
+(rule (copy_to_regs_range ty (range_unwrap head tail) dsts srcs)
+      (let ((dst WritableReg (writable_regs_get dsts head))
+            (src Reg (value_regs_get srcs head))
+            (_ Unit (emit (gen_move ty dst src))))
+        (copy_to_regs_range ty tail dsts srcs)))
+
 ;; Helper for constructing a LoadExtName instruction.
 (decl load_ext_name (ExternalName i64) Reg)
 (rule (load_ext_name extname offset)
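
A note on how these helpers behave: a value that occupies more than one machine register (for example an i128 return value split across two GPRs) is copied piecewise, one `gen_move` per register, with `copy_to_regs_range` recursing over the register indices. The standalone Rust sketch below mirrors that recursion; plain integers stand in for registers and a vector of (dst, src) pairs stands in for emitted moves. All names and types in it are illustrative only, not Cranelift's API.

// Toy model of the `copy_to_regs_range` recursion above: the singleton case
// emits the final move, the unwrap case emits one move and recurses on the
// tail of the range.
fn copy_to_regs_range(dsts: &[u32], srcs: &[u32], idx: usize, moves: &mut Vec<(u32, u32)>) {
    assert!(idx < srcs.len(), "fails for the empty range, like the ISLE helper");
    moves.push((dsts[idx], srcs[idx])); // one `gen_move` per register
    if idx + 1 < srcs.len() {
        copy_to_regs_range(dsts, srcs, idx + 1, moves); // the `range_unwrap` tail
    }
}

fn main() {
    // An i128-style value held in two source registers, copied into the two
    // registers the ABI expects the return value in.
    let srcs: [u32; 2] = [10, 11];
    let dsts: [u32; 2] = [0, 1];
    let mut moves = Vec::new();
    copy_to_regs_range(&dsts, &srcs, 0, &mut moves);
    let expected: Vec<(u32, u32)> = vec![(0, 10), (1, 11)];
    assert_eq!(moves, expected);
}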

@@ -1443,6 +1443,22 @@
 (rule (lower (resumable_trap code))
       (side_effect (x64_ud2 code)))
 
+;;;; Rules for `return` and `fallthrough_return` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; N.B.: the Ret itself is generated by the ABI.
+(rule (lower (return args))
+      (lower_return (range 0 (value_slice_len args)) args))
+
+(rule (lower (fallthrough_return args))
+      (lower_return (range 0 (value_slice_len args)) args))
+
+(decl lower_return (Range ValueSlice) InstOutput)
+(rule (lower_return (range_empty) _) (output_none))
+(rule (lower_return (range_unwrap head tail) args)
+      (let ((_ Unit (copy_to_regs (retval head) (value_slice_get args head))))
+        (lower_return tail args)))
+
 ;;;; Rules for `icmp` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 
 ;; For GPR-held values we only need to emit `CMP + SETCC`. We rely here on
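
Since ISLE has no loop construct, `lower_return` expresses "for each return value" as recursion over a `Range`: `range_empty` terminates, and `range_unwrap` peels off the next index, copies that value into its retval registers, and recurses. The minimal Rust sketch below follows the same pattern; `range_unwrap` and the collected indices here are illustrative stand-ins, not the generated ISLE bindings.

use std::ops::Range;

// Stand-in for the `range_unwrap` extractor: None for an empty range,
// otherwise the first index plus the remaining tail.
fn range_unwrap(r: Range<usize>) -> Option<(usize, Range<usize>)> {
    if r.is_empty() {
        None
    } else {
        Some((r.start, (r.start + 1)..r.end))
    }
}

// Mirrors `lower_return`: visit each return-value index in order. Pushing the
// index stands in for `(copy_to_regs (retval head) (value_slice_get args head))`.
fn lower_return(range: Range<usize>, copied: &mut Vec<usize>) {
    match range_unwrap(range) {
        None => {} // (range_empty): no return values left, output nothing
        Some((head, tail)) => {
            copied.push(head);
            lower_return(tail, copied);
        }
    }
}

fn main() {
    let mut copied = Vec::new();
    lower_return(0..3, &mut copied); // a function returning three values
    assert_eq!(copied, vec![0, 1, 2]);
}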

@@ -923,29 +923,12 @@ fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         | Opcode::AtomicStore
         | Opcode::Fence
         | Opcode::FuncAddr
-        | Opcode::SymbolValue => {
+        | Opcode::SymbolValue
+        | Opcode::FallthroughReturn
+        | Opcode::Return => {
             implemented_in_isle(ctx);
         }
 
-        Opcode::FallthroughReturn | Opcode::Return => {
-            for i in 0..ctx.num_inputs(insn) {
-                let src_reg = put_input_in_regs(ctx, inputs[i]);
-                let retval_reg = ctx.retval(i);
-                let ty = ctx.input_ty(insn, i);
-                assert!(src_reg.len() == retval_reg.len());
-                let (_, tys) = Inst::rc_for_type(ty)?;
-                for ((&src, &dst), &ty) in src_reg
-                    .regs()
-                    .iter()
-                    .zip(retval_reg.regs().iter())
-                    .zip(tys.iter())
-                {
-                    ctx.emit(Inst::gen_move(dst, src, ty));
-                }
-            }
-            // N.B.: the Ret itself is generated by the ABI.
-        }
-
         Opcode::Call | Opcode::CallIndirect => {
             let caller_conv = ctx.abi().call_conv();
             let (mut abi, inputs) = match op {
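
The deleted arm and the new ISLE rules emit the same moves: for every return value, the old code zipped its source registers against the ABI's retval registers and called `Inst::gen_move` on each pair, which is what `copy_to_regs` and `copy_to_regs_range` now express recursively. A standalone sketch of that double loop, with plain integers standing in for registers (the two-register entry models a value such as an i128):

fn main() {
    // Source and destination registers per return value; illustrative only.
    let src_regs: Vec<Vec<u32>> = vec![vec![10], vec![11, 12]];
    let retval_regs: Vec<Vec<u32>> = vec![vec![0], vec![1, 2]];

    let mut moves = Vec::new();
    // Outer loop: one iteration per return value, like `0..ctx.num_inputs(insn)`.
    for (srcs, dsts) in src_regs.iter().zip(retval_regs.iter()) {
        assert_eq!(srcs.len(), dsts.len());
        // Inner zip: one move per register pair, like the deleted loop's
        // `ctx.emit(Inst::gen_move(dst, src, ty))`.
        for (&src, &dst) in srcs.iter().zip(dsts.iter()) {
            moves.push((dst, src));
        }
    }

    let expected: Vec<(u32, u32)> = vec![(0, 10), (1, 11), (2, 12)];
    assert_eq!(moves, expected);
}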

@@ -26,7 +26,8 @@ use crate::{
         },
     },
     machinst::{
-        isle::*, InsnInput, InsnOutput, LowerCtx, MachAtomicRmwOp, VCodeConstant, VCodeConstantData,
+        isle::*, InsnInput, InsnOutput, LowerCtx, MachAtomicRmwOp, MachInst, VCodeConstant,
+        VCodeConstantData,
     },
 };
 use std::boxed::Box;

@@ -573,6 +574,11 @@ where
     fn atomic_rmw_op_to_mach_atomic_rmw_op(&mut self, op: &AtomicRmwOp) -> MachAtomicRmwOp {
         MachAtomicRmwOp::from(*op)
     }
+
+    #[inline]
+    fn gen_move(&mut self, ty: Type, dst: WritableReg, src: Reg) -> MInst {
+        MInst::gen_move(dst, src, ty)
+    }
 }
 
 // Since x64 doesn't have 8x16 shifts and we must use a 16x8 shift instead, we
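
In the ISLE code above, `(extern constructor gen_move gen_move)` binds the `gen_move` term to this method: ISLE compiles external constructors into calls on a context trait that each backend implements, and the x64 implementation simply forwards to `MInst::gen_move`. The sketch below is a simplified toy of that binding pattern, not Cranelift's generated trait; every name in it is hypothetical.

// The ISLE compiler lowers an `(extern constructor ...)` declaration into a
// method on a context trait; the backend supplies the body.
trait Context {
    type Inst;
    fn gen_move(&mut self, ty: u8, dst: u32, src: u32) -> Self::Inst;
}

struct ToyBackend;

#[derive(Debug, PartialEq)]
struct ToyMove {
    ty: u8,
    dst: u32,
    src: u32,
}

impl Context for ToyBackend {
    type Inst = ToyMove;
    // Forward to the backend's own move constructor, analogous to the real
    // implementation forwarding to `MInst::gen_move(dst, src, ty)`.
    fn gen_move(&mut self, ty: u8, dst: u32, src: u32) -> ToyMove {
        ToyMove { ty, dst, src }
    }
}

fn main() {
    let mut cx = ToyBackend;
    assert_eq!(cx.gen_move(64, 0, 10), ToyMove { ty: 64, dst: 0, src: 10 });
}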