Fully support multiple returns in Wasmtime (#2806)

* Fully support multiple returns in Wasmtime

For quite some time now Wasmtime has "supported" multiple return values,
but only in the most bare-bones ways. Up until recently you couldn't get
a typed version of functions with multiple return values, and never have
you been able to use `Func::wrap` with functions that return multiple
values. Even recently where `Func::typed` can call functions that return
multiple values it uses a double-indirection by calling a trampoline
which calls the real function.

The underlying reason for this lack of support is that cranelift's ABI
for returning multiple values is not possible to write in Rust. For
example if a wasm function returns two `i32` values there is no Rust (or
C!) function you can write to correspond to that. This commit, however,
fixes that.

This commit adds two new ABIs to Cranelift: `WasmtimeSystemV` and
`WasmtimeFastcall`. The intention is that these Wasmtime-specific ABIs
match their corresponding ABI (e.g. `SystemV` or `WindowsFastcall`) for
everything *except* how multiple values are returned. For multiple
return values we simply define our own version of the ABI which Wasmtime
implements, which is that for N return values the first is returned as
if the function only returned that and the latter N-1 return values are
returned via an out-ptr that's the last parameter to the function.

These custom ABIs provide the ability for Wasmtime to bind these in
Rust meaning that `Func::wrap` can now wrap functions that return
multiple values and `Func::typed` no longer uses trampolines when
calling functions that return multiple values. Although there's lots of
internal changes there's no actual changes in the API surface area of
Wasmtime, just a few more impls of more public traits which means that
more types are supported in more places!

Another change made with this PR is a consolidation of how the ABI of
each function in a wasm module is selected. The native `SystemV` ABI,
for example, is more efficient at returning multiple values than the
wasmtime version of the ABI (since more things are in more registers).
To continue to take advantage of this Wasmtime will now classify some
functions in a wasm module with the "fast" ABI. Only functions that are
not reachable externally from the module are classified with the fast
ABI (e.g. those not exported, used in tables, or used with `ref.func`).
This should enable purely internal functions of modules to have a faster
calling convention than those which might be exposed to Wasmtime itself.

Closes #1178

* Tweak some names and add docs

* "fix" lightbeam compile

* Fix TODO with dummy environ

* Unwind info is a property of the target, not the ABI

* Remove lightbeam unused imports

* Attempt to fix arm64

* Document new ABIs aren't stable

* Fix filetests to use the right target

* Don't always do 64-bit stores with cranelift

This was overwriting upper bits when 32-bit registers were being stored
into return values, so fix the code inline to do a sized store instead
of one-size-fits-all store.

* At least get tests passing on the old backend

* Fix a typo

* Add some filetests with mixed abi calls

* Get `multi` example working

* Fix doctests on old x86 backend

* Add a mixture of wasmtime/system_v tests
This commit is contained in:
Alex Crichton
2021-04-07 12:34:26 -05:00
committed by GitHub
parent 7588565078
commit 195bf0e29a
37 changed files with 1116 additions and 459 deletions

View File

@@ -267,13 +267,7 @@ impl Context {
isa: &dyn TargetIsa,
) -> CodegenResult<Option<crate::isa::unwind::UnwindInfo>> {
if let Some(backend) = isa.get_mach_backend() {
use crate::isa::CallConv;
use crate::machinst::UnwindInfoKind;
let unwind_info_kind = match self.func.signature.call_conv {
CallConv::Fast | CallConv::Cold | CallConv::SystemV => UnwindInfoKind::SystemV,
CallConv::WindowsFastcall => UnwindInfoKind::Windows,
_ => UnwindInfoKind::None,
};
let unwind_info_kind = isa.unwind_info_kind();
let result = self.mach_compile_result.as_ref().unwrap();
return backend.emit_unwind_info(result, unwind_info_kind);
}

View File

@@ -197,18 +197,26 @@ impl ABIMachineSpec for AArch64MachineDeps {
next_stack = 16;
}
// Note on return values: on the regular non-baldrdash ABI, we may return values in 8
// registers for V128 and I64 registers independently of the number of register values
// returned in the other class. That is, we can return values in up to 8 integer and 8
// vector registers at once.
// In Baldrdash, we can only use one register for return value for all the register
// classes. That is, we can't return values in both one integer and one vector register;
// only one return value may be in a register.
let (max_per_class_reg_vals, mut remaining_reg_vals) = match args_or_rets {
ArgsOrRets::Args => (8, 16), // x0-x7 and v0-v7
let (max_per_class_reg_vals, mut remaining_reg_vals) = match (args_or_rets, is_baldrdash) {
(ArgsOrRets::Args, _) => (8, 16), // x0-x7 and v0-v7
(ArgsOrRets::Rets, false) => (8, 16), // x0-x7 and v0-v7
(ArgsOrRets::Rets, true) => (1, 1), // x0 or v0, but not both
// Note on return values: on the regular ABI, we may return values
// in 8 registers for V128 and I64 registers independently of the
// number of register values returned in the other class. That is,
// we can return values in up to 8 integer and
// 8 vector registers at once.
//
// In Baldrdash and Wasmtime, we can only use one register for
// return value for all the register classes. That is, we can't
// return values in both one integer and one vector register; only
// one return value may be in a register.
ArgsOrRets::Rets => {
if is_baldrdash || call_conv.extends_wasmtime() {
(1, 1) // x0 or v0, but not both
} else {
(8, 16) // x0-x7 and v0-v7
}
}
};
for i in 0..params.len() {
@@ -282,15 +290,18 @@ impl ABIMachineSpec for AArch64MachineDeps {
// Compute the stack slot's size.
let size = (ty_bits(param.value_type) / 8) as u64;
let size = if call_conv != isa::CallConv::AppleAarch64 {
let size = if call_conv == isa::CallConv::AppleAarch64
|| (call_conv.extends_wasmtime() && args_or_rets == ArgsOrRets::Rets)
{
// MacOS aarch64 and Wasmtime allow stack slots with
// sizes less than 8 bytes. They still need to be
// properly aligned on their natural data alignment,
// though.
size
} else {
// Every arg takes a minimum slot of 8 bytes. (16-byte stack
// alignment happens separately after all args.)
std::cmp::max(size, 8)
} else {
// MacOS aarch64 allows stack slots with sizes less than 8
// bytes. They still need to be properly aligned on their
// natural data alignment, though.
size
};
// Align the stack slot.

View File

@@ -29,6 +29,16 @@ pub enum CallConv {
Baldrdash2020,
/// Specialized convention for the probestack function.
Probestack,
/// Wasmtime equivalent of SystemV, not ABI-stable.
///
/// Currently only differs in how multiple return values are handled,
/// returning the first return value in a register and everything else
/// through a return-pointer.
WasmtimeSystemV,
/// Wasmtime equivalent of WindowsFastcall, not ABI-stable.
///
/// Differs from fastcall in the same way as `WasmtimeSystemV`.
WasmtimeFastcall,
}
impl CallConv {
@@ -63,7 +73,7 @@ impl CallConv {
/// Is the calling convention extending the Windows Fastcall ABI?
pub fn extends_windows_fastcall(self) -> bool {
match self {
Self::WindowsFastcall | Self::BaldrdashWindows => true,
Self::WindowsFastcall | Self::BaldrdashWindows | Self::WasmtimeFastcall => true,
_ => false,
}
}
@@ -75,6 +85,14 @@ impl CallConv {
_ => false,
}
}
/// Is the calling convention extending the Wasmtime ABI?
pub fn extends_wasmtime(self) -> bool {
match self {
Self::WasmtimeSystemV | Self::WasmtimeFastcall => true,
_ => false,
}
}
}
impl fmt::Display for CallConv {
@@ -89,6 +107,8 @@ impl fmt::Display for CallConv {
Self::BaldrdashWindows => "baldrdash_windows",
Self::Baldrdash2020 => "baldrdash_2020",
Self::Probestack => "probestack",
Self::WasmtimeSystemV => "wasmtime_system_v",
Self::WasmtimeFastcall => "wasmtime_fastcall",
})
}
}
@@ -106,6 +126,8 @@ impl str::FromStr for CallConv {
"baldrdash_windows" => Ok(Self::BaldrdashWindows),
"baldrdash_2020" => Ok(Self::Baldrdash2020),
"probestack" => Ok(Self::Probestack),
"wasmtime_system_v" => Ok(Self::WasmtimeSystemV),
"wasmtime_fastcall" => Ok(Self::WasmtimeFastcall),
_ => Err(()),
}
}

View File

@@ -57,7 +57,7 @@ use crate::flowgraph;
use crate::ir;
#[cfg(feature = "unwind")]
use crate::isa::unwind::systemv::RegisterMappingError;
use crate::machinst::MachBackend;
use crate::machinst::{MachBackend, UnwindInfoKind};
use crate::regalloc;
use crate::result::CodegenResult;
use crate::settings;
@@ -68,7 +68,7 @@ use core::any::Any;
use core::fmt;
use core::fmt::{Debug, Formatter};
use core::hash::Hasher;
use target_lexicon::{triple, Architecture, PointerWidth, Triple};
use target_lexicon::{triple, Architecture, OperatingSystem, PointerWidth, Triple};
use thiserror::Error;
#[cfg(feature = "riscv")]
@@ -476,6 +476,18 @@ pub trait TargetIsa: fmt::Display + Send + Sync {
/// IntCC condition for Unsigned Subtraction Overflow (Borrow/Carry).
fn unsigned_sub_overflow_condition(&self) -> ir::condcodes::IntCC;
/// Returns the flavor of unwind information emitted for this target.
fn unwind_info_kind(&self) -> UnwindInfoKind {
match self.triple().operating_system {
#[cfg(feature = "unwind")]
OperatingSystem::Windows => UnwindInfoKind::Windows,
#[cfg(feature = "unwind")]
_ => UnwindInfoKind::SystemV,
#[cfg(not(feature = "unwind"))]
_ => UnwindInfoKind::None,
}
}
/// Creates unwind information for the function.
///
/// Returns `None` if there is no unwind information for the function.

View File

@@ -237,10 +237,20 @@ impl ABIMachineSpec for X64ABIMachineSpec {
extension: param.extension,
});
} else {
// Compute size. Every arg takes a minimum slot of 8 bytes. (16-byte
// stack alignment happens separately after all args.)
// Compute size. For the wasmtime ABI it differs from native
// ABIs in how multiple values are returned, so we take a
// leaf out of arm64's book by not rounding everything up to
// 8 bytes. For all ABI arguments, and other ABI returns,
// though, each slot takes a minimum of 8 bytes.
//
// Note that in all cases 16-byte stack alignment happens
// separately after all args.
let size = (reg_ty.bits() / 8) as u64;
let size = std::cmp::max(size, 8);
let size = if args_or_rets == ArgsOrRets::Rets && call_conv.extends_wasmtime() {
size
} else {
std::cmp::max(size, 8)
};
// Align.
debug_assert!(size.is_power_of_two());
next_stack = align_to(next_stack, size);
@@ -824,15 +834,7 @@ impl From<StackAMode> for SyntheticAmode {
}
fn get_intreg_for_arg(call_conv: &CallConv, idx: usize, arg_idx: usize) -> Option<Reg> {
let is_fastcall = match call_conv {
CallConv::Fast
| CallConv::Cold
| CallConv::SystemV
| CallConv::BaldrdashSystemV
| CallConv::Baldrdash2020 => false,
CallConv::WindowsFastcall => true,
_ => panic!("int args only supported for SysV or Fastcall calling convention"),
};
let is_fastcall = call_conv.extends_windows_fastcall();
// Fastcall counts by absolute argument number; SysV counts by argument of
// this (integer) class.
@@ -853,15 +855,7 @@ fn get_intreg_for_arg(call_conv: &CallConv, idx: usize, arg_idx: usize) -> Optio
}
fn get_fltreg_for_arg(call_conv: &CallConv, idx: usize, arg_idx: usize) -> Option<Reg> {
let is_fastcall = match call_conv {
CallConv::Fast
| CallConv::Cold
| CallConv::SystemV
| CallConv::BaldrdashSystemV
| CallConv::Baldrdash2020 => false,
CallConv::WindowsFastcall => true,
_ => panic!("float args only supported for SysV or Fastcall calling convention"),
};
let is_fastcall = call_conv.extends_windows_fastcall();
// Fastcall counts by absolute argument number; SysV counts by argument of
// this (floating-point) class.
@@ -894,7 +888,10 @@ fn get_intreg_for_retval(
1 => Some(regs::rdx()),
_ => None,
},
CallConv::BaldrdashSystemV | CallConv::Baldrdash2020 => {
CallConv::BaldrdashSystemV
| CallConv::Baldrdash2020
| CallConv::WasmtimeSystemV
| CallConv::WasmtimeFastcall => {
if intreg_idx == 0 && retval_idx == 0 {
Some(regs::rax())
} else {
@@ -922,7 +919,10 @@ fn get_fltreg_for_retval(
1 => Some(regs::xmm1()),
_ => None,
},
CallConv::BaldrdashSystemV | CallConv::Baldrdash2020 => {
CallConv::BaldrdashSystemV
| CallConv::Baldrdash2020
| CallConv::WasmtimeFastcall
| CallConv::WasmtimeSystemV => {
if fltreg_idx == 0 && retval_idx == 0 {
Some(regs::xmm0())
} else {
@@ -992,12 +992,12 @@ fn get_callee_saves(call_conv: &CallConv, regs: &Set<Writable<RealReg>>) -> Vec<
CallConv::BaldrdashWindows => {
todo!("baldrdash windows");
}
CallConv::Fast | CallConv::Cold | CallConv::SystemV => regs
CallConv::Fast | CallConv::Cold | CallConv::SystemV | CallConv::WasmtimeSystemV => regs
.iter()
.cloned()
.filter(|r| is_callee_save_systemv(r.to_reg()))
.collect(),
CallConv::WindowsFastcall => regs
CallConv::WindowsFastcall | CallConv::WasmtimeFastcall => regs
.iter()
.cloned()
.filter(|r| is_callee_save_fastcall(r.to_reg()))

View File

@@ -1122,11 +1122,16 @@ impl Inst {
pub(crate) fn store(ty: Type, from_reg: Reg, to_addr: impl Into<SyntheticAmode>) -> Inst {
let rc = from_reg.get_class();
match rc {
RegClass::I64 => {
// Always store the full register, to ensure that the high bits are properly set
// when doing a full reload.
Inst::mov_r_m(OperandSize::Size64, from_reg, to_addr)
}
RegClass::I64 => Inst::mov_r_m(
match ty {
types::B1 => OperandSize::Size8,
types::I32 | types::R32 => OperandSize::Size32,
types::I64 | types::R64 => OperandSize::Size64,
_ => unimplemented!("integer store of type: {}", ty),
},
from_reg,
to_addr,
),
RegClass::V128 => {
let opcode = match ty {
types::F32 => SseOpcode::Movss,

View File

@@ -503,10 +503,12 @@ fn callee_saved_regs_used(isa: &dyn TargetIsa, func: &ir::Function) -> RegisterS
pub fn prologue_epilogue(func: &mut ir::Function, isa: &dyn TargetIsa) -> CodegenResult<()> {
match func.signature.call_conv {
// For now, just translate fast and cold as system_v.
CallConv::Fast | CallConv::Cold | CallConv::SystemV => {
CallConv::Fast | CallConv::Cold | CallConv::SystemV | CallConv::WasmtimeSystemV => {
system_v_prologue_epilogue(func, isa)
}
CallConv::WindowsFastcall => fastcall_prologue_epilogue(func, isa),
CallConv::WindowsFastcall | CallConv::WasmtimeFastcall => {
fastcall_prologue_epilogue(func, isa)
}
CallConv::BaldrdashSystemV | CallConv::BaldrdashWindows => {
baldrdash_prologue_epilogue(func, isa)
}
@@ -1084,16 +1086,17 @@ pub fn create_unwind_info(
isa: &dyn TargetIsa,
) -> CodegenResult<Option<crate::isa::unwind::UnwindInfo>> {
use crate::isa::unwind::UnwindInfo;
use crate::machinst::UnwindInfoKind;
// Assumption: RBP is being used as the frame pointer for both calling conventions
// In the future, we should be omitting frame pointer as an optimization, so this will change
Ok(match func.signature.call_conv {
CallConv::Fast | CallConv::Cold | CallConv::SystemV => {
Ok(match isa.unwind_info_kind() {
UnwindInfoKind::SystemV => {
super::unwind::systemv::create_unwind_info(func, isa)?.map(|u| UnwindInfo::SystemV(u))
}
CallConv::WindowsFastcall => {
UnwindInfoKind::Windows => {
super::unwind::winx64::create_unwind_info(func, isa)?.map(|u| UnwindInfo::WindowsX64(u))
}
_ => None,
UnwindInfoKind::None => None,
})
}

View File

@@ -3,7 +3,7 @@
use crate::ir::Function;
use crate::isa::{
unwind::systemv::{RegisterMappingError, UnwindInfo},
CallConv, RegUnit, TargetIsa,
RegUnit, TargetIsa,
};
use crate::result::CodegenResult;
use gimli::{write::CommonInformationEntry, Encoding, Format, Register, X86_64};
@@ -97,8 +97,8 @@ pub(crate) fn create_unwind_info(
isa: &dyn TargetIsa,
) -> CodegenResult<Option<UnwindInfo>> {
// Only System V-like calling conventions are supported
match func.signature.call_conv {
CallConv::Fast | CallConv::Cold | CallConv::SystemV => {}
match isa.unwind_info_kind() {
crate::machinst::UnwindInfoKind::SystemV => {}
_ => return Ok(None),
}

View File

@@ -189,9 +189,6 @@ pub trait ABICallee {
from_slot: SpillSlot,
ty: Option<Type>,
) -> Self::I;
/// Desired unwind info type.
fn unwind_info_kind(&self) -> UnwindInfoKind;
}
/// Trait implemented by an object that tracks ABI-related state and can

View File

@@ -647,7 +647,8 @@ impl<M: ABIMachineSpec> ABICalleeImpl<M> {
|| call_conv == isa::CallConv::Cold
|| call_conv.extends_baldrdash()
|| call_conv.extends_windows_fastcall()
|| call_conv == isa::CallConv::AppleAarch64,
|| call_conv == isa::CallConv::AppleAarch64
|| call_conv == isa::CallConv::WasmtimeSystemV,
"Unsupported calling convention: {:?}",
call_conv
);
@@ -1370,18 +1371,6 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
.next()
.unwrap()
}
fn unwind_info_kind(&self) -> UnwindInfoKind {
match self.sig.call_conv {
#[cfg(feature = "unwind")]
isa::CallConv::Fast | isa::CallConv::Cold | isa::CallConv::SystemV => {
UnwindInfoKind::SystemV
}
#[cfg(feature = "unwind")]
isa::CallConv::WindowsFastcall => UnwindInfoKind::Windows,
_ => UnwindInfoKind::None,
}
}
}
fn abisig_to_uses_and_defs<M: ABIMachineSpec>(sig: &ABISig) -> (Vec<Reg>, Vec<Writable<Reg>>) {

View File

@@ -0,0 +1,344 @@
test compile
target x86_64 machinst
;; system_v has first param in %rdi, fastcall in %rcx
function %one_arg(i32) system_v {
sig0 = (i32) windows_fastcall
block0(v0: i32):
; check: movq %rdi, %rcx
; nextln: call *%rdi
call_indirect sig0, v0(v0)
return
}
;; system_v has params in %rdi, %xmm0, fastcall in %rcx, %xmm1
function %two_args(i32, f32) system_v {
sig0 = (i32, f32) windows_fastcall
sig1 = (i32, f32) system_v
block0(v0: i32, v1: f32):
; check: movq %rdi, %rsi
; check: movaps %xmm0, %xmm6
; check: movq %rsi, %rcx
; nextln: movaps %xmm6, %xmm1
; nextln: call *%rsi
call_indirect sig0, v0(v0, v1)
; check: movq %rsi, %rdi
; nextln: movaps %xmm6, %xmm0
; nextln: call *%rsi
call_indirect sig1, v0(v0, v1)
return
}
;; fastcall preserves xmm6+, rbx, rbp, rdi, rsi, r12-r15
;; system_v preserves no xmm registers, rbx, rbp, r12-r15
function %fastcall_to_systemv(i32) windows_fastcall {
sig0 = () system_v
block0(v0: i32):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$176, %rsp
; nextln: movdqu %xmm6, 0(%rsp)
; nextln: movdqu %xmm7, 16(%rsp)
; nextln: movdqu %xmm8, 32(%rsp)
; nextln: movdqu %xmm9, 48(%rsp)
; nextln: movdqu %xmm10, 64(%rsp)
; nextln: movdqu %xmm11, 80(%rsp)
; nextln: movdqu %xmm12, 96(%rsp)
; nextln: movdqu %xmm13, 112(%rsp)
; nextln: movdqu %xmm14, 128(%rsp)
; nextln: movdqu %xmm15, 144(%rsp)
; nextln: movq %rsi, 160(%rsp)
; nextln: movq %rdi, 168(%rsp)
; nextln: call *%rcx
; nextln: movdqu 0(%rsp), %xmm6
; nextln: movdqu 16(%rsp), %xmm7
; nextln: movdqu 32(%rsp), %xmm8
; nextln: movdqu 48(%rsp), %xmm9
; nextln: movdqu 64(%rsp), %xmm10
; nextln: movdqu 80(%rsp), %xmm11
; nextln: movdqu 96(%rsp), %xmm12
; nextln: movdqu 112(%rsp), %xmm13
; nextln: movdqu 128(%rsp), %xmm14
; nextln: movdqu 144(%rsp), %xmm15
; nextln: movq 160(%rsp), %rsi
; nextln: movq 168(%rsp), %rdi
; nextln: addq $$176, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0()
return
}
function %many_args(
;; rdi, rsi, rdx, rcx, r8, r9,
i64, i64, i64, i64, i64, i64,
;; xmm0-7
f64, f64, f64, f64, f64, f64, f64, f64,
;; stack args
i64, i32, f32, f64
) system_v {
sig0 = (
i64, i64, i64, i64, i64, i64, f64, f64, f64, f64, f64, f64, f64, f64, i64,
i32, f32, f64
) windows_fastcall
block0(
v0: i64, v1:i64, v2:i64, v3:i64,
v4:i64, v5:i64,
v6: f64, v7: f64, v8:f64, v9:f64, v10:f64, v11:f64, v12:f64, v13:f64,
v14:i64, v15:i32, v16:f32, v17:f64
):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$32, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %r13, 8(%rsp)
; nextln: movq %r14, 16(%rsp)
; nextln: movq %rdx, %rax
; nextln: movq %rcx, %r10
; nextln: movq %r8, %r11
; nextln: movq %r9, %r12
; nextln: movq 16(%rbp), %r13
; nextln: movslq 24(%rbp), %r14
; nextln: movss 32(%rbp), %xmm8
; nextln: movsd 40(%rbp), %xmm9
; nextln: subq $$144, %rsp
; nextln: virtual_sp_offset_adjust 144
; nextln: movq %rdi, %rcx
; nextln: movq %rsi, %rdx
; nextln: movq %rax, %r8
; nextln: movq %r10, %r9
; nextln: movq %r11, 32(%rsp)
; nextln: movq %r12, 40(%rsp)
; nextln: movsd %xmm0, 48(%rsp)
; nextln: movsd %xmm1, 56(%rsp)
; nextln: movsd %xmm2, 64(%rsp)
; nextln: movsd %xmm3, 72(%rsp)
; nextln: movsd %xmm4, 80(%rsp)
; nextln: movsd %xmm5, 88(%rsp)
; nextln: movsd %xmm6, 96(%rsp)
; nextln: movsd %xmm7, 104(%rsp)
; nextln: movq %r13, 112(%rsp)
; nextln: movl %r14d, 120(%rsp)
; nextln: movss %xmm8, 128(%rsp)
; nextln: movsd %xmm9, 136(%rsp)
; nextln: call *%rdi
; nextln: addq $$144, %rsp
; nextln: virtual_sp_offset_adjust -144
; nextln: movq 0(%rsp), %r12
; nextln: movq 8(%rsp), %r13
; nextln: movq 16(%rsp), %r14
; nextln: addq $$32, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0(
v0, v1, v2, v3,
v4, v5, v6, v7,
v8, v9, v10, v11,
v12, v13, v14, v15,
v16, v17
)
return
}
; rdi => rcx
; rsi => rdx
; rdx => r8
; rcx => r9
; r8 => stack
function %many_ints(i64, i64, i64, i64, i64) system_v {
sig0 = (i64, i64, i64, i64, i64) windows_fastcall
block0(v0: i64, v1:i64, v2:i64, v3:i64, v4:i64):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movq %rdx, %rax
; nextln: movq %rcx, %r9
; nextln: movq %r8, %r10
; nextln: subq $$48, %rsp
; nextln: virtual_sp_offset_adjust 48
; nextln: movq %rdi, %rcx
; nextln: movq %rsi, %rdx
; nextln: movq %rax, %r8
; nextln: movq %r10, 32(%rsp)
; nextln: call *%rdi
; nextln: addq $$48, %rsp
; nextln: virtual_sp_offset_adjust -48
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0(v0, v1, v2, v3, v4)
return
}
function %many_args2(i32, f32, i64, f64, i32, i32, i32, f32, f64, f32, f64) system_v {
sig0 = (i32, f32, i64, f64, i32, i32, i32, f32, f64, f32, f64) windows_fastcall
block0(v0: i32, v1: f32, v2: i64, v3: f64, v4: i32, v5: i32, v6: i32, v7: f32, v8: f64, v9: f32, v10: f64):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movaps %xmm1, %xmm6
; nextln: movq %rcx, %rax
; nextln: movq %r8, %r9
; nextln: movaps %xmm3, %xmm7
; nextln: subq $$96, %rsp
; nextln: virtual_sp_offset_adjust 96
; nextln: movq %rdi, %rcx
; nextln: movaps %xmm0, %xmm1
; nextln: movq %rsi, %r8
; nextln: movaps %xmm6, %xmm3
; nextln: movl %edx, 32(%rsp)
; nextln: movl %eax, 40(%rsp)
; nextln: movl %r9d, 48(%rsp)
; nextln: movss %xmm2, 56(%rsp)
; nextln: movsd %xmm7, 64(%rsp)
; nextln: movss %xmm4, 72(%rsp)
; nextln: movsd %xmm5, 80(%rsp)
; nextln: call *%rdi
; nextln: addq $$96, %rsp
; nextln: virtual_sp_offset_adjust -96
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10)
return
}
function %wasmtime_mix1(i32) wasmtime_system_v {
sig0 = (i32) system_v
block0(v0: i32):
; check: movq %rdi, %rsi
; nextln: movq %rsi, %rdi
; nextln: call *%rsi
call_indirect sig0, v0(v0)
return
}
function %wasmtime_mix2(i32) system_v {
sig0 = (i32) wasmtime_system_v
block0(v0: i32):
; check: movq %rdi, %rsi
; nextln: movq %rsi, %rdi
; nextln: call *%rsi
call_indirect sig0, v0(v0)
return
}
function %wasmtime_mix2() -> i32, i32 system_v {
sig0 = () -> i32, i32 wasmtime_system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movl $$1, %esi
; nextln: subq $$16, %rsp
; nextln: virtual_sp_offset_adjust 16
; nextln: lea 0(%rsp), %rdi
; nextln: call *%rsi
; nextln: movslq 0(%rsp), %rsi
; nextln: addq $$16, %rsp
; nextln: virtual_sp_offset_adjust -16
; nextln: movq %rsi, %rdx
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v2 = iconst.i32 1
v0, v1 = call_indirect sig0, v2()
return v0, v1
}
function %wasmtime_mix3() -> i32, i32 wasmtime_system_v {
sig0 = () -> i32, i32 system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdi, %r12
; nextln: movl $$1, %esi
; nextln: call *%rsi
; nextln: movl %edx, 0(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v2 = iconst.i32 1
v0, v1 = call_indirect sig0, v2()
return v0, v1
}
function %wasmtime_mix4() -> i32, i64, i32 wasmtime_system_v {
sig0 = () -> i32, i64, i32 system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdi, %r12
; nextln: movl $$1, %esi
; nextln: subq $$16, %rsp
; nextln: virtual_sp_offset_adjust 16
; nextln: lea 0(%rsp), %rdi
; nextln: call *%rsi
; nextln: movslq 0(%rsp), %rsi
; nextln: addq $$16, %rsp
; nextln: virtual_sp_offset_adjust -16
; nextln: movq %rdx, 0(%r12)
; nextln: movl %esi, 8(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v3 = iconst.i32 1
v0, v1, v2 = call_indirect sig0, v3()
return v0, v1, v2
}
function %wasmtime_mix5() -> f32, i64, i32, f32 wasmtime_system_v {
sig0 = () -> f32, i64, i32, f32 system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdi, %r12
; nextln: movl $$1, %esi
; nextln: call *%rsi
; nextln: movq %rax, 0(%r12)
; nextln: movl %edx, 8(%r12)
; nextln: movss %xmm1, 12(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v5 = iconst.i32 1
v0, v1, v2, v3 = call_indirect sig0, v5()
return v0, v1, v2, v3
}
function %wasmtime_mix6(f32, i64, i32, f32) -> f32, i64, i32, f32 wasmtime_system_v {
sig0 = (f32, i64, i32, f32) -> f32, i64, i32, f32 system_v
block0(v0: f32, v1: i64, v2: i32, v3: f32):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdx, %r12
; nextln: movl $$1, %eax
; nextln: call *%rax
; nextln: movq %rax, 0(%r12)
; nextln: movl %edx, 8(%r12)
; nextln: movss %xmm1, 12(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v4 = iconst.i32 1
v5, v6, v7, v8 = call_indirect sig0, v4(v0, v1, v2, v3)
return v5, v6, v7, v8
}

View File

@@ -1,7 +1,7 @@
test unwind
set opt_level=speed_and_size
set is_pic
target x86_64 legacy haswell
target x86_64-linux legacy haswell
; check the unwind information with a function with no args
function %no_args() system_v {

View File

@@ -1,7 +1,7 @@
test unwind
set opt_level=speed_and_size
set is_pic
target x86_64 legacy haswell
target x86_64-windows legacy haswell
; check the unwind information with a leaf function with no args
function %no_args_leaf() windows_fastcall {

View File

@@ -15,12 +15,13 @@ use crate::translation_utils::{
DataIndex, DefinedFuncIndex, ElemIndex, FuncIndex, Global, GlobalIndex, Memory, MemoryIndex,
Table, TableIndex, TypeIndex,
};
use crate::WasmType;
use core::convert::TryFrom;
use cranelift_codegen::cursor::FuncCursor;
use cranelift_codegen::ir::immediates::{Offset32, Uimm64};
use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{self, InstBuilder};
use cranelift_codegen::isa::TargetFrontendConfig;
use cranelift_codegen::isa::{CallConv, TargetFrontendConfig};
use cranelift_entity::{EntityRef, PrimaryMap, SecondaryMap};
use cranelift_frontend::FunctionBuilder;
use std::boxed::Box;
@@ -660,7 +661,25 @@ impl TargetEnvironment for DummyEnvironment {
}
impl<'data> ModuleEnvironment<'data> for DummyEnvironment {
fn declare_type_func(&mut self, _wasm: WasmFuncType, sig: ir::Signature) -> WasmResult<()> {
fn declare_type_func(&mut self, wasm: WasmFuncType) -> WasmResult<()> {
let mut sig = ir::Signature::new(CallConv::Fast);
let mut cvt = |ty: &WasmType| {
let reference_type = match self.pointer_type() {
ir::types::I32 => ir::types::R32,
ir::types::I64 => ir::types::R64,
_ => panic!("unsupported pointer type"),
};
ir::AbiParam::new(match ty {
WasmType::I32 => ir::types::I32,
WasmType::I64 => ir::types::I64,
WasmType::F32 => ir::types::F32,
WasmType::F64 => ir::types::F64,
WasmType::V128 => ir::types::I8X16,
WasmType::FuncRef | WasmType::ExternRef | WasmType::ExnRef => reference_type,
})
};
sig.params.extend(wasm.params.iter().map(&mut cvt));
sig.returns.extend(wasm.returns.iter().map(&mut cvt));
self.info.signatures.push(sig);
Ok(())
}

View File

@@ -702,11 +702,7 @@ pub trait ModuleEnvironment<'data>: TargetEnvironment {
}
/// Declares a function signature to the environment.
fn declare_type_func(
&mut self,
wasm_func_type: WasmFuncType,
sig: ir::Signature,
) -> WasmResult<()>;
fn declare_type_func(&mut self, wasm_func_type: WasmFuncType) -> WasmResult<()>;
/// Declares a module type signature to the environment.
fn declare_type_module(

View File

@@ -18,7 +18,6 @@ use crate::wasm_unsupported;
use core::convert::TryFrom;
use core::convert::TryInto;
use cranelift_codegen::ir::immediates::V128Imm;
use cranelift_codegen::ir::{self, AbiParam, Signature};
use cranelift_entity::packed_option::ReservedValue;
use cranelift_entity::EntityRef;
use std::boxed::Box;
@@ -110,18 +109,7 @@ pub fn parse_type_section<'a>(
for entry in types {
match entry? {
TypeDef::Func(wasm_func_ty) => {
let mut sig = Signature::new(environ.target_config().default_call_conv);
sig.params.extend(wasm_func_ty.params.iter().map(|ty| {
let cret_arg: ir::Type = type_to_type(*ty, environ)
.expect("only numeric types are supported in function signatures");
AbiParam::new(cret_arg)
}));
sig.returns.extend(wasm_func_ty.returns.iter().map(|ty| {
let cret_arg: ir::Type = type_to_type(*ty, environ)
.expect("only numeric types are supported in function signatures");
AbiParam::new(cret_arg)
}));
environ.declare_type_func(wasm_func_ty.clone().try_into()?, sig)?;
environ.declare_type_func(wasm_func_ty.clone().try_into()?)?;
module_translation_state
.wasm_types
.push((wasm_func_ty.params, wasm_func_ty.returns));

View File

@@ -19,6 +19,7 @@ entity_impl!(FuncIndex);
/// Index type of a defined function inside the WebAssembly module.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct DefinedFuncIndex(u32);
entity_impl!(DefinedFuncIndex);