Fully support multiple returns in Wasmtime (#2806)

* Fully support multiple returns in Wasmtime

For quite some time now Wasmtime has "supported" multiple return values,
but only in the most bare-bones of ways. Until recently you couldn't get
a typed version of functions with multiple return values, and you have
never been able to use `Func::wrap` with functions that return multiple
values. Even now that `Func::typed` can call functions that return
multiple values, it does so through a double indirection: it calls a
trampoline which in turn calls the real function.

The underlying reason for this lack of support is that Cranelift's ABI
for returning multiple values is not possible to express in Rust. For
example, if a wasm function returns two `i32` values, there is no Rust
(or C!) function signature you can write that corresponds to it. This
commit, however, fixes that.

This commit adds two new ABIs to Cranelift: `WasmtimeSystemV` and
`WasmtimeFastcall`. The intention is that these Wasmtime-specific ABIs
match their corresponding ABI (e.g. `SystemV` or `WindowsFastcall`) for
everything *except* how multiple values are returned. For multiple
return values we define our own version of the ABI, which Wasmtime
implements: for N return values the first is returned as if it were the
function's only return value, and the remaining N-1 values are written
through a return pointer passed as the function's last parameter.
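
As a rough sketch (illustrative only, not code from this commit; the
names `ExtraRets` and `three_results` are made up), a wasm function of
type `(i32) -> (i32, i32, i32)` becomes expressible as a plain Rust/C
signature under the new ABI, including the two vmctx pointers that
Wasmtime prepends to every wasm signature:

    #[repr(C)]
    struct ExtraRets {
        ret1: i32,
        ret2: i32,
    }

    // The first result comes back as the normal native return value;
    // results 2..N are written through an out-pointer passed as the
    // function's last parameter.
    unsafe extern "C" fn three_results(
        _vmctx: *mut u8,        // callee vmctx
        _caller_vmctx: *mut u8, // caller vmctx
        arg: i32,               // the declared wasm parameter
        rets: *mut ExtraRets,   // storage for the 2nd and 3rd results
    ) -> i32 {
        unsafe {
            (*rets).ret1 = arg + 1;
            (*rets).ret2 = arg + 2;
        }
        arg
    }

    fn main() {
        let mut extra = ExtraRets { ret1: 0, ret2: 0 };
        let first =
            unsafe { three_results(std::ptr::null_mut(), std::ptr::null_mut(), 1, &mut extra) };
        println!("results: ({}, {}, {})", first, extra.ret1, extra.ret2);
    }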

These custom ABIs give Wasmtime the ability to bind such functions in
Rust, meaning that `Func::wrap` can now wrap functions that return
multiple values and `Func::typed` no longer uses trampolines when
calling functions that return multiple values. Although there are lots
of internal changes, there are no changes to Wasmtime's public API
surface, just a few more impls of existing public traits, which means
that more types are supported in more places!
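
From the embedder's perspective this looks roughly like the following
sketch (written against the wasmtime crate of this era, circa 0.26;
exact method signatures may differ in other versions):

    use wasmtime::{Func, Store};

    fn main() -> anyhow::Result<()> {
        let store = Store::default();

        // `Func::wrap` now accepts closures returning a tuple of wasm types.
        let f = Func::wrap(&store, |x: i32| -> (i32, f64) { (x + 1, f64::from(x) * 2.0) });

        // `Func::typed` calls multi-value functions directly, with no
        // trampoline-based double indirection.
        let (a, b) = f.typed::<i32, (i32, f64)>()?.call(3)?;
        println!("{} {}", a, b);
        Ok(())
    }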

Another change made in this PR is a consolidation of how the ABI of
each function in a wasm module is selected. The native `SystemV` ABI,
for example, is more efficient at returning multiple values than the
Wasmtime version of the ABI (since more values travel in registers). To
continue to take advantage of this, Wasmtime will now classify some
functions in a wasm module with the "fast" ABI. Only functions that are
not reachable from outside the module are classified with the fast ABI
(i.e. functions that are not exported, not placed in tables, and not
referenced by `ref.func`). This lets purely internal functions of a
module use a faster calling convention than those which might be
exposed to Wasmtime itself.
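
The selection itself is small; the `func_signature` helper added later
in this diff implements it, and it boils down to roughly this sketch
(the enum and function here are illustrative, not the actual code):

    #[derive(Debug)]
    enum Abi {
        Fast,             // Cranelift's internal fast calling convention
        WasmtimeSystemV,  // the new Wasmtime ABI on SysV-style targets
        WasmtimeFastcall, // the new Wasmtime ABI on Windows
    }

    // A defined function that is never exported, never placed in a table,
    // and never referenced by `ref.func` can only be called from inside
    // the module, so it is free to use the fast convention. Everything
    // else uses the Wasmtime ABI that the embedder knows how to call.
    fn pick_abi(possibly_exported: bool, target_is_windows: bool) -> Abi {
        if !possibly_exported {
            Abi::Fast
        } else if target_is_windows {
            Abi::WasmtimeFastcall
        } else {
            Abi::WasmtimeSystemV
        }
    }

    fn main() {
        println!("{:?}", pick_abi(false, false)); // Fast
        println!("{:?}", pick_abi(true, true));   // WasmtimeFastcall
    }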

Closes #1178

* Tweak some names and add docs

* "fix" lightbeam compile

* Fix TODO with dummy environ

* Unwind info is a property of the target, not the ABI

* Remove lightbeam unused imports

* Attempt to fix arm64

* Document new ABIs aren't stable

* Fix filetests to use the right target

* Don't always do 64-bit stores with cranelift

This was overwriting upper bits when 32-bit registers were being stored
into return values, so fix the code inline to do a sized store instead
of a one-size-fits-all store.

* At least get tests passing on the old backend

* Fix a typo

* Add some filetests with mixed abi calls

* Get `multi` example working

* Fix doctests on old x86 backend

* Add a mixture of wasmtime/system_v tests
Alex Crichton, 2021-04-07 12:34:26 -05:00 (committed by GitHub)
parent 7588565078
commit 195bf0e29a
37 changed files with 1116 additions and 459 deletions

Cargo.lock (generated)

@@ -3310,6 +3310,7 @@ dependencies = [
  "cranelift-entity",
  "cranelift-frontend",
  "cranelift-wasm",
+ "target-lexicon",
  "wasmparser",
  "wasmtime-environ",
 ]


@@ -98,7 +98,7 @@ all-arch = ["wasmtime/all-arch"]
 experimental_x64 = []
 # Use the old x86 backend.
-old-x86-backend = ["wasmtime-jit/old-x86-backend"]
+old-x86-backend = ["wasmtime/old-x86-backend"]
 [badges]
 maintenance = { status = "actively-developed" }


@@ -267,13 +267,7 @@ impl Context {
         isa: &dyn TargetIsa,
     ) -> CodegenResult<Option<crate::isa::unwind::UnwindInfo>> {
         if let Some(backend) = isa.get_mach_backend() {
-            use crate::isa::CallConv;
-            use crate::machinst::UnwindInfoKind;
-            let unwind_info_kind = match self.func.signature.call_conv {
-                CallConv::Fast | CallConv::Cold | CallConv::SystemV => UnwindInfoKind::SystemV,
-                CallConv::WindowsFastcall => UnwindInfoKind::Windows,
-                _ => UnwindInfoKind::None,
-            };
+            let unwind_info_kind = isa.unwind_info_kind();
             let result = self.mach_compile_result.as_ref().unwrap();
             return backend.emit_unwind_info(result, unwind_info_kind);
         }


@@ -197,18 +197,26 @@ impl ABIMachineSpec for AArch64MachineDeps {
next_stack = 16; next_stack = 16;
} }
// Note on return values: on the regular non-baldrdash ABI, we may return values in 8 let (max_per_class_reg_vals, mut remaining_reg_vals) = match args_or_rets {
// registers for V128 and I64 registers independently of the number of register values ArgsOrRets::Args => (8, 16), // x0-x7 and v0-v7
// returned in the other class. That is, we can return values in up to 8 integer and 8
// vector registers at once.
// In Baldrdash, we can only use one register for return value for all the register
// classes. That is, we can't return values in both one integer and one vector register;
// only one return value may be in a register.
let (max_per_class_reg_vals, mut remaining_reg_vals) = match (args_or_rets, is_baldrdash) { // Note on return values: on the regular ABI, we may return values
(ArgsOrRets::Args, _) => (8, 16), // x0-x7 and v0-v7 // in 8 registers for V128 and I64 registers independently of the
(ArgsOrRets::Rets, false) => (8, 16), // x0-x7 and v0-v7 // number of register values returned in the other class. That is,
(ArgsOrRets::Rets, true) => (1, 1), // x0 or v0, but not both // we can return values in up to 8 integer and
// 8 vector registers at once.
//
// In Baldrdash and Wasmtime, we can only use one register for
// return value for all the register classes. That is, we can't
// return values in both one integer and one vector register; only
// one return value may be in a register.
ArgsOrRets::Rets => {
if is_baldrdash || call_conv.extends_wasmtime() {
(1, 1) // x0 or v0, but not both
} else {
(8, 16) // x0-x7 and v0-v7
}
}
}; };
for i in 0..params.len() { for i in 0..params.len() {
@@ -282,15 +290,18 @@ impl ABIMachineSpec for AArch64MachineDeps {
// Compute the stack slot's size. // Compute the stack slot's size.
let size = (ty_bits(param.value_type) / 8) as u64; let size = (ty_bits(param.value_type) / 8) as u64;
let size = if call_conv != isa::CallConv::AppleAarch64 { let size = if call_conv == isa::CallConv::AppleAarch64
|| (call_conv.extends_wasmtime() && args_or_rets == ArgsOrRets::Rets)
{
// MacOS aarch64 and Wasmtime allow stack slots with
// sizes less than 8 bytes. They still need to be
// properly aligned on their natural data alignment,
// though.
size
} else {
// Every arg takes a minimum slot of 8 bytes. (16-byte stack // Every arg takes a minimum slot of 8 bytes. (16-byte stack
// alignment happens separately after all args.) // alignment happens separately after all args.)
std::cmp::max(size, 8) std::cmp::max(size, 8)
} else {
// MacOS aarch64 allows stack slots with sizes less than 8
// bytes. They still need to be properly aligned on their
// natural data alignment, though.
size
}; };
// Align the stack slot. // Align the stack slot.


@@ -29,6 +29,16 @@ pub enum CallConv {
     Baldrdash2020,
     /// Specialized convention for the probestack function.
     Probestack,
+    /// Wasmtime equivalent of SystemV, not ABI-stable.
+    ///
+    /// Currently only differs in how multiple return values are handled,
+    /// returning the first return value in a register and everything else
+    /// through a return-pointer.
+    WasmtimeSystemV,
+    /// Wasmtime equivalent of WindowsFastcall, not ABI-stable.
+    ///
+    /// Differs from fastcall in the same way as `WasmtimeSystemV`.
+    WasmtimeFastcall,
 }

 impl CallConv {
@@ -63,7 +73,7 @@ impl CallConv {
     /// Is the calling convention extending the Windows Fastcall ABI?
     pub fn extends_windows_fastcall(self) -> bool {
         match self {
-            Self::WindowsFastcall | Self::BaldrdashWindows => true,
+            Self::WindowsFastcall | Self::BaldrdashWindows | Self::WasmtimeFastcall => true,
             _ => false,
         }
     }
@@ -75,6 +85,14 @@ impl CallConv {
             _ => false,
         }
     }
+
+    /// Is the calling convention extending the Wasmtime ABI?
+    pub fn extends_wasmtime(self) -> bool {
+        match self {
+            Self::WasmtimeSystemV | Self::WasmtimeFastcall => true,
+            _ => false,
+        }
+    }
 }

 impl fmt::Display for CallConv {
@@ -89,6 +107,8 @@ impl fmt::Display for CallConv {
             Self::BaldrdashWindows => "baldrdash_windows",
             Self::Baldrdash2020 => "baldrdash_2020",
             Self::Probestack => "probestack",
+            Self::WasmtimeSystemV => "wasmtime_system_v",
+            Self::WasmtimeFastcall => "wasmtime_fastcall",
         })
     }
 }
@@ -106,6 +126,8 @@ impl str::FromStr for CallConv {
             "baldrdash_windows" => Ok(Self::BaldrdashWindows),
             "baldrdash_2020" => Ok(Self::Baldrdash2020),
             "probestack" => Ok(Self::Probestack),
+            "wasmtime_system_v" => Ok(Self::WasmtimeSystemV),
+            "wasmtime_fastcall" => Ok(Self::WasmtimeFastcall),
             _ => Err(()),
         }
     }


@@ -57,7 +57,7 @@ use crate::flowgraph;
 use crate::ir;
 #[cfg(feature = "unwind")]
 use crate::isa::unwind::systemv::RegisterMappingError;
-use crate::machinst::MachBackend;
+use crate::machinst::{MachBackend, UnwindInfoKind};
 use crate::regalloc;
 use crate::result::CodegenResult;
 use crate::settings;
@@ -68,7 +68,7 @@ use core::any::Any;
 use core::fmt;
 use core::fmt::{Debug, Formatter};
 use core::hash::Hasher;
-use target_lexicon::{triple, Architecture, PointerWidth, Triple};
+use target_lexicon::{triple, Architecture, OperatingSystem, PointerWidth, Triple};
 use thiserror::Error;
 #[cfg(feature = "riscv")]
@@ -476,6 +476,18 @@ pub trait TargetIsa: fmt::Display + Send + Sync {
     /// IntCC condition for Unsigned Subtraction Overflow (Borrow/Carry).
     fn unsigned_sub_overflow_condition(&self) -> ir::condcodes::IntCC;

+    /// Returns the flavor of unwind information emitted for this target.
+    fn unwind_info_kind(&self) -> UnwindInfoKind {
+        match self.triple().operating_system {
+            #[cfg(feature = "unwind")]
+            OperatingSystem::Windows => UnwindInfoKind::Windows,
+            #[cfg(feature = "unwind")]
+            _ => UnwindInfoKind::SystemV,
+            #[cfg(not(feature = "unwind"))]
+            _ => UnwindInfoKind::None,
+        }
+    }
+
     /// Creates unwind information for the function.
     ///
     /// Returns `None` if there is no unwind information for the function.


@@ -237,10 +237,20 @@ impl ABIMachineSpec for X64ABIMachineSpec {
extension: param.extension, extension: param.extension,
}); });
} else { } else {
// Compute size. Every arg takes a minimum slot of 8 bytes. (16-byte // Compute size. For the wasmtime ABI it differs from native
// stack alignment happens separately after all args.) // ABIs in how multiple values are returned, so we take a
// leaf out of arm64's book by not rounding everything up to
// 8 bytes. For all ABI arguments, and other ABI returns,
// though, each slot takes a minimum of 8 bytes.
//
// Note that in all cases 16-byte stack alignment happens
// separately after all args.
let size = (reg_ty.bits() / 8) as u64; let size = (reg_ty.bits() / 8) as u64;
let size = std::cmp::max(size, 8); let size = if args_or_rets == ArgsOrRets::Rets && call_conv.extends_wasmtime() {
size
} else {
std::cmp::max(size, 8)
};
// Align. // Align.
debug_assert!(size.is_power_of_two()); debug_assert!(size.is_power_of_two());
next_stack = align_to(next_stack, size); next_stack = align_to(next_stack, size);
@@ -824,15 +834,7 @@ impl From<StackAMode> for SyntheticAmode {
} }
fn get_intreg_for_arg(call_conv: &CallConv, idx: usize, arg_idx: usize) -> Option<Reg> { fn get_intreg_for_arg(call_conv: &CallConv, idx: usize, arg_idx: usize) -> Option<Reg> {
let is_fastcall = match call_conv { let is_fastcall = call_conv.extends_windows_fastcall();
CallConv::Fast
| CallConv::Cold
| CallConv::SystemV
| CallConv::BaldrdashSystemV
| CallConv::Baldrdash2020 => false,
CallConv::WindowsFastcall => true,
_ => panic!("int args only supported for SysV or Fastcall calling convention"),
};
// Fastcall counts by absolute argument number; SysV counts by argument of // Fastcall counts by absolute argument number; SysV counts by argument of
// this (integer) class. // this (integer) class.
@@ -853,15 +855,7 @@ fn get_intreg_for_arg(call_conv: &CallConv, idx: usize, arg_idx: usize) -> Optio
} }
fn get_fltreg_for_arg(call_conv: &CallConv, idx: usize, arg_idx: usize) -> Option<Reg> { fn get_fltreg_for_arg(call_conv: &CallConv, idx: usize, arg_idx: usize) -> Option<Reg> {
let is_fastcall = match call_conv { let is_fastcall = call_conv.extends_windows_fastcall();
CallConv::Fast
| CallConv::Cold
| CallConv::SystemV
| CallConv::BaldrdashSystemV
| CallConv::Baldrdash2020 => false,
CallConv::WindowsFastcall => true,
_ => panic!("float args only supported for SysV or Fastcall calling convention"),
};
// Fastcall counts by absolute argument number; SysV counts by argument of // Fastcall counts by absolute argument number; SysV counts by argument of
// this (floating-point) class. // this (floating-point) class.
@@ -894,7 +888,10 @@ fn get_intreg_for_retval(
1 => Some(regs::rdx()), 1 => Some(regs::rdx()),
_ => None, _ => None,
}, },
CallConv::BaldrdashSystemV | CallConv::Baldrdash2020 => { CallConv::BaldrdashSystemV
| CallConv::Baldrdash2020
| CallConv::WasmtimeSystemV
| CallConv::WasmtimeFastcall => {
if intreg_idx == 0 && retval_idx == 0 { if intreg_idx == 0 && retval_idx == 0 {
Some(regs::rax()) Some(regs::rax())
} else { } else {
@@ -922,7 +919,10 @@ fn get_fltreg_for_retval(
1 => Some(regs::xmm1()), 1 => Some(regs::xmm1()),
_ => None, _ => None,
}, },
CallConv::BaldrdashSystemV | CallConv::Baldrdash2020 => { CallConv::BaldrdashSystemV
| CallConv::Baldrdash2020
| CallConv::WasmtimeFastcall
| CallConv::WasmtimeSystemV => {
if fltreg_idx == 0 && retval_idx == 0 { if fltreg_idx == 0 && retval_idx == 0 {
Some(regs::xmm0()) Some(regs::xmm0())
} else { } else {
@@ -992,12 +992,12 @@ fn get_callee_saves(call_conv: &CallConv, regs: &Set<Writable<RealReg>>) -> Vec<
CallConv::BaldrdashWindows => { CallConv::BaldrdashWindows => {
todo!("baldrdash windows"); todo!("baldrdash windows");
} }
CallConv::Fast | CallConv::Cold | CallConv::SystemV => regs CallConv::Fast | CallConv::Cold | CallConv::SystemV | CallConv::WasmtimeSystemV => regs
.iter() .iter()
.cloned() .cloned()
.filter(|r| is_callee_save_systemv(r.to_reg())) .filter(|r| is_callee_save_systemv(r.to_reg()))
.collect(), .collect(),
CallConv::WindowsFastcall => regs CallConv::WindowsFastcall | CallConv::WasmtimeFastcall => regs
.iter() .iter()
.cloned() .cloned()
.filter(|r| is_callee_save_fastcall(r.to_reg())) .filter(|r| is_callee_save_fastcall(r.to_reg()))


@@ -1122,11 +1122,16 @@ impl Inst {
     pub(crate) fn store(ty: Type, from_reg: Reg, to_addr: impl Into<SyntheticAmode>) -> Inst {
         let rc = from_reg.get_class();
         match rc {
-            RegClass::I64 => {
-                // Always store the full register, to ensure that the high bits are properly set
-                // when doing a full reload.
-                Inst::mov_r_m(OperandSize::Size64, from_reg, to_addr)
-            }
+            RegClass::I64 => Inst::mov_r_m(
+                match ty {
+                    types::B1 => OperandSize::Size8,
+                    types::I32 | types::R32 => OperandSize::Size32,
+                    types::I64 | types::R64 => OperandSize::Size64,
+                    _ => unimplemented!("integer store of type: {}", ty),
+                },
+                from_reg,
+                to_addr,
+            ),
             RegClass::V128 => {
                 let opcode = match ty {
                     types::F32 => SseOpcode::Movss,


@@ -503,10 +503,12 @@ fn callee_saved_regs_used(isa: &dyn TargetIsa, func: &ir::Function) -> RegisterS
pub fn prologue_epilogue(func: &mut ir::Function, isa: &dyn TargetIsa) -> CodegenResult<()> { pub fn prologue_epilogue(func: &mut ir::Function, isa: &dyn TargetIsa) -> CodegenResult<()> {
match func.signature.call_conv { match func.signature.call_conv {
// For now, just translate fast and cold as system_v. // For now, just translate fast and cold as system_v.
CallConv::Fast | CallConv::Cold | CallConv::SystemV => { CallConv::Fast | CallConv::Cold | CallConv::SystemV | CallConv::WasmtimeSystemV => {
system_v_prologue_epilogue(func, isa) system_v_prologue_epilogue(func, isa)
} }
CallConv::WindowsFastcall => fastcall_prologue_epilogue(func, isa), CallConv::WindowsFastcall | CallConv::WasmtimeFastcall => {
fastcall_prologue_epilogue(func, isa)
}
CallConv::BaldrdashSystemV | CallConv::BaldrdashWindows => { CallConv::BaldrdashSystemV | CallConv::BaldrdashWindows => {
baldrdash_prologue_epilogue(func, isa) baldrdash_prologue_epilogue(func, isa)
} }
@@ -1084,16 +1086,17 @@ pub fn create_unwind_info(
isa: &dyn TargetIsa, isa: &dyn TargetIsa,
) -> CodegenResult<Option<crate::isa::unwind::UnwindInfo>> { ) -> CodegenResult<Option<crate::isa::unwind::UnwindInfo>> {
use crate::isa::unwind::UnwindInfo; use crate::isa::unwind::UnwindInfo;
use crate::machinst::UnwindInfoKind;
// Assumption: RBP is being used as the frame pointer for both calling conventions // Assumption: RBP is being used as the frame pointer for both calling conventions
// In the future, we should be omitting frame pointer as an optimization, so this will change // In the future, we should be omitting frame pointer as an optimization, so this will change
Ok(match func.signature.call_conv { Ok(match isa.unwind_info_kind() {
CallConv::Fast | CallConv::Cold | CallConv::SystemV => { UnwindInfoKind::SystemV => {
super::unwind::systemv::create_unwind_info(func, isa)?.map(|u| UnwindInfo::SystemV(u)) super::unwind::systemv::create_unwind_info(func, isa)?.map(|u| UnwindInfo::SystemV(u))
} }
CallConv::WindowsFastcall => { UnwindInfoKind::Windows => {
super::unwind::winx64::create_unwind_info(func, isa)?.map(|u| UnwindInfo::WindowsX64(u)) super::unwind::winx64::create_unwind_info(func, isa)?.map(|u| UnwindInfo::WindowsX64(u))
} }
_ => None, UnwindInfoKind::None => None,
}) })
} }


@@ -3,7 +3,7 @@
 use crate::ir::Function;
 use crate::isa::{
     unwind::systemv::{RegisterMappingError, UnwindInfo},
-    CallConv, RegUnit, TargetIsa,
+    RegUnit, TargetIsa,
 };
 use crate::result::CodegenResult;
 use gimli::{write::CommonInformationEntry, Encoding, Format, Register, X86_64};
@@ -97,8 +97,8 @@ pub(crate) fn create_unwind_info(
     isa: &dyn TargetIsa,
 ) -> CodegenResult<Option<UnwindInfo>> {
     // Only System V-like calling conventions are supported
-    match func.signature.call_conv {
-        CallConv::Fast | CallConv::Cold | CallConv::SystemV => {}
+    match isa.unwind_info_kind() {
+        crate::machinst::UnwindInfoKind::SystemV => {}
         _ => return Ok(None),
     }


@@ -189,9 +189,6 @@ pub trait ABICallee {
from_slot: SpillSlot, from_slot: SpillSlot,
ty: Option<Type>, ty: Option<Type>,
) -> Self::I; ) -> Self::I;
/// Desired unwind info type.
fn unwind_info_kind(&self) -> UnwindInfoKind;
} }
/// Trait implemented by an object that tracks ABI-related state and can /// Trait implemented by an object that tracks ABI-related state and can


@@ -647,7 +647,8 @@ impl<M: ABIMachineSpec> ABICalleeImpl<M> {
|| call_conv == isa::CallConv::Cold || call_conv == isa::CallConv::Cold
|| call_conv.extends_baldrdash() || call_conv.extends_baldrdash()
|| call_conv.extends_windows_fastcall() || call_conv.extends_windows_fastcall()
|| call_conv == isa::CallConv::AppleAarch64, || call_conv == isa::CallConv::AppleAarch64
|| call_conv == isa::CallConv::WasmtimeSystemV,
"Unsupported calling convention: {:?}", "Unsupported calling convention: {:?}",
call_conv call_conv
); );
@@ -1370,18 +1371,6 @@ impl<M: ABIMachineSpec> ABICallee for ABICalleeImpl<M> {
.next() .next()
.unwrap() .unwrap()
} }
fn unwind_info_kind(&self) -> UnwindInfoKind {
match self.sig.call_conv {
#[cfg(feature = "unwind")]
isa::CallConv::Fast | isa::CallConv::Cold | isa::CallConv::SystemV => {
UnwindInfoKind::SystemV
}
#[cfg(feature = "unwind")]
isa::CallConv::WindowsFastcall => UnwindInfoKind::Windows,
_ => UnwindInfoKind::None,
}
}
} }
fn abisig_to_uses_and_defs<M: ABIMachineSpec>(sig: &ABISig) -> (Vec<Reg>, Vec<Writable<Reg>>) { fn abisig_to_uses_and_defs<M: ABIMachineSpec>(sig: &ABISig) -> (Vec<Reg>, Vec<Writable<Reg>>) {


@@ -0,0 +1,344 @@
test compile
target x86_64 machinst
;; system_v has first param in %rdi, fastcall in %rcx
function %one_arg(i32) system_v {
sig0 = (i32) windows_fastcall
block0(v0: i32):
; check: movq %rdi, %rcx
; nextln: call *%rdi
call_indirect sig0, v0(v0)
return
}
;; system_v has params in %rdi, %xmm0, fastcall in %rcx, %xmm1
function %two_args(i32, f32) system_v {
sig0 = (i32, f32) windows_fastcall
sig1 = (i32, f32) system_v
block0(v0: i32, v1: f32):
; check: movq %rdi, %rsi
; check: movaps %xmm0, %xmm6
; check: movq %rsi, %rcx
; nextln: movaps %xmm6, %xmm1
; nextln: call *%rsi
call_indirect sig0, v0(v0, v1)
; check: movq %rsi, %rdi
; nextln: movaps %xmm6, %xmm0
; nextln: call *%rsi
call_indirect sig1, v0(v0, v1)
return
}
;; fastcall preserves xmm6+, rbx, rbp, rdi, rsi, r12-r15
;; system_v preserves no xmm registers, rbx, rbp, r12-r15
function %fastcall_to_systemv(i32) windows_fastcall {
sig0 = () system_v
block0(v0: i32):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$176, %rsp
; nextln: movdqu %xmm6, 0(%rsp)
; nextln: movdqu %xmm7, 16(%rsp)
; nextln: movdqu %xmm8, 32(%rsp)
; nextln: movdqu %xmm9, 48(%rsp)
; nextln: movdqu %xmm10, 64(%rsp)
; nextln: movdqu %xmm11, 80(%rsp)
; nextln: movdqu %xmm12, 96(%rsp)
; nextln: movdqu %xmm13, 112(%rsp)
; nextln: movdqu %xmm14, 128(%rsp)
; nextln: movdqu %xmm15, 144(%rsp)
; nextln: movq %rsi, 160(%rsp)
; nextln: movq %rdi, 168(%rsp)
; nextln: call *%rcx
; nextln: movdqu 0(%rsp), %xmm6
; nextln: movdqu 16(%rsp), %xmm7
; nextln: movdqu 32(%rsp), %xmm8
; nextln: movdqu 48(%rsp), %xmm9
; nextln: movdqu 64(%rsp), %xmm10
; nextln: movdqu 80(%rsp), %xmm11
; nextln: movdqu 96(%rsp), %xmm12
; nextln: movdqu 112(%rsp), %xmm13
; nextln: movdqu 128(%rsp), %xmm14
; nextln: movdqu 144(%rsp), %xmm15
; nextln: movq 160(%rsp), %rsi
; nextln: movq 168(%rsp), %rdi
; nextln: addq $$176, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0()
return
}
function %many_args(
;; rdi, rsi, rdx, rcx, r8, r9,
i64, i64, i64, i64, i64, i64,
;; xmm0-7
f64, f64, f64, f64, f64, f64, f64, f64,
;; stack args
i64, i32, f32, f64
) system_v {
sig0 = (
i64, i64, i64, i64, i64, i64, f64, f64, f64, f64, f64, f64, f64, f64, i64,
i32, f32, f64
) windows_fastcall
block0(
v0: i64, v1:i64, v2:i64, v3:i64,
v4:i64, v5:i64,
v6: f64, v7: f64, v8:f64, v9:f64, v10:f64, v11:f64, v12:f64, v13:f64,
v14:i64, v15:i32, v16:f32, v17:f64
):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$32, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %r13, 8(%rsp)
; nextln: movq %r14, 16(%rsp)
; nextln: movq %rdx, %rax
; nextln: movq %rcx, %r10
; nextln: movq %r8, %r11
; nextln: movq %r9, %r12
; nextln: movq 16(%rbp), %r13
; nextln: movslq 24(%rbp), %r14
; nextln: movss 32(%rbp), %xmm8
; nextln: movsd 40(%rbp), %xmm9
; nextln: subq $$144, %rsp
; nextln: virtual_sp_offset_adjust 144
; nextln: movq %rdi, %rcx
; nextln: movq %rsi, %rdx
; nextln: movq %rax, %r8
; nextln: movq %r10, %r9
; nextln: movq %r11, 32(%rsp)
; nextln: movq %r12, 40(%rsp)
; nextln: movsd %xmm0, 48(%rsp)
; nextln: movsd %xmm1, 56(%rsp)
; nextln: movsd %xmm2, 64(%rsp)
; nextln: movsd %xmm3, 72(%rsp)
; nextln: movsd %xmm4, 80(%rsp)
; nextln: movsd %xmm5, 88(%rsp)
; nextln: movsd %xmm6, 96(%rsp)
; nextln: movsd %xmm7, 104(%rsp)
; nextln: movq %r13, 112(%rsp)
; nextln: movl %r14d, 120(%rsp)
; nextln: movss %xmm8, 128(%rsp)
; nextln: movsd %xmm9, 136(%rsp)
; nextln: call *%rdi
; nextln: addq $$144, %rsp
; nextln: virtual_sp_offset_adjust -144
; nextln: movq 0(%rsp), %r12
; nextln: movq 8(%rsp), %r13
; nextln: movq 16(%rsp), %r14
; nextln: addq $$32, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0(
v0, v1, v2, v3,
v4, v5, v6, v7,
v8, v9, v10, v11,
v12, v13, v14, v15,
v16, v17
)
return
}
; rdi => rcx
; rsi => rdx
; rdx => r8
; rcx => r9
; r8 => stack
function %many_ints(i64, i64, i64, i64, i64) system_v {
sig0 = (i64, i64, i64, i64, i64) windows_fastcall
block0(v0: i64, v1:i64, v2:i64, v3:i64, v4:i64):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movq %rdx, %rax
; nextln: movq %rcx, %r9
; nextln: movq %r8, %r10
; nextln: subq $$48, %rsp
; nextln: virtual_sp_offset_adjust 48
; nextln: movq %rdi, %rcx
; nextln: movq %rsi, %rdx
; nextln: movq %rax, %r8
; nextln: movq %r10, 32(%rsp)
; nextln: call *%rdi
; nextln: addq $$48, %rsp
; nextln: virtual_sp_offset_adjust -48
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0(v0, v1, v2, v3, v4)
return
}
function %many_args2(i32, f32, i64, f64, i32, i32, i32, f32, f64, f32, f64) system_v {
sig0 = (i32, f32, i64, f64, i32, i32, i32, f32, f64, f32, f64) windows_fastcall
block0(v0: i32, v1: f32, v2: i64, v3: f64, v4: i32, v5: i32, v6: i32, v7: f32, v8: f64, v9: f32, v10: f64):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movaps %xmm1, %xmm6
; nextln: movq %rcx, %rax
; nextln: movq %r8, %r9
; nextln: movaps %xmm3, %xmm7
; nextln: subq $$96, %rsp
; nextln: virtual_sp_offset_adjust 96
; nextln: movq %rdi, %rcx
; nextln: movaps %xmm0, %xmm1
; nextln: movq %rsi, %r8
; nextln: movaps %xmm6, %xmm3
; nextln: movl %edx, 32(%rsp)
; nextln: movl %eax, 40(%rsp)
; nextln: movl %r9d, 48(%rsp)
; nextln: movss %xmm2, 56(%rsp)
; nextln: movsd %xmm7, 64(%rsp)
; nextln: movss %xmm4, 72(%rsp)
; nextln: movsd %xmm5, 80(%rsp)
; nextln: call *%rdi
; nextln: addq $$96, %rsp
; nextln: virtual_sp_offset_adjust -96
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10)
return
}
function %wasmtime_mix1(i32) wasmtime_system_v {
sig0 = (i32) system_v
block0(v0: i32):
; check: movq %rdi, %rsi
; nextln: movq %rsi, %rdi
; nextln: call *%rsi
call_indirect sig0, v0(v0)
return
}
function %wasmtime_mix2(i32) system_v {
sig0 = (i32) wasmtime_system_v
block0(v0: i32):
; check: movq %rdi, %rsi
; nextln: movq %rsi, %rdi
; nextln: call *%rsi
call_indirect sig0, v0(v0)
return
}
function %wasmtime_mix2() -> i32, i32 system_v {
sig0 = () -> i32, i32 wasmtime_system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movl $$1, %esi
; nextln: subq $$16, %rsp
; nextln: virtual_sp_offset_adjust 16
; nextln: lea 0(%rsp), %rdi
; nextln: call *%rsi
; nextln: movslq 0(%rsp), %rsi
; nextln: addq $$16, %rsp
; nextln: virtual_sp_offset_adjust -16
; nextln: movq %rsi, %rdx
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v2 = iconst.i32 1
v0, v1 = call_indirect sig0, v2()
return v0, v1
}
function %wasmtime_mix3() -> i32, i32 wasmtime_system_v {
sig0 = () -> i32, i32 system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdi, %r12
; nextln: movl $$1, %esi
; nextln: call *%rsi
; nextln: movl %edx, 0(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v2 = iconst.i32 1
v0, v1 = call_indirect sig0, v2()
return v0, v1
}
function %wasmtime_mix4() -> i32, i64, i32 wasmtime_system_v {
sig0 = () -> i32, i64, i32 system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdi, %r12
; nextln: movl $$1, %esi
; nextln: subq $$16, %rsp
; nextln: virtual_sp_offset_adjust 16
; nextln: lea 0(%rsp), %rdi
; nextln: call *%rsi
; nextln: movslq 0(%rsp), %rsi
; nextln: addq $$16, %rsp
; nextln: virtual_sp_offset_adjust -16
; nextln: movq %rdx, 0(%r12)
; nextln: movl %esi, 8(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v3 = iconst.i32 1
v0, v1, v2 = call_indirect sig0, v3()
return v0, v1, v2
}
function %wasmtime_mix5() -> f32, i64, i32, f32 wasmtime_system_v {
sig0 = () -> f32, i64, i32, f32 system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdi, %r12
; nextln: movl $$1, %esi
; nextln: call *%rsi
; nextln: movq %rax, 0(%r12)
; nextln: movl %edx, 8(%r12)
; nextln: movss %xmm1, 12(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v5 = iconst.i32 1
v0, v1, v2, v3 = call_indirect sig0, v5()
return v0, v1, v2, v3
}
function %wasmtime_mix6(f32, i64, i32, f32) -> f32, i64, i32, f32 wasmtime_system_v {
sig0 = (f32, i64, i32, f32) -> f32, i64, i32, f32 system_v
block0(v0: f32, v1: i64, v2: i32, v3: f32):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdx, %r12
; nextln: movl $$1, %eax
; nextln: call *%rax
; nextln: movq %rax, 0(%r12)
; nextln: movl %edx, 8(%r12)
; nextln: movss %xmm1, 12(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v4 = iconst.i32 1
v5, v6, v7, v8 = call_indirect sig0, v4(v0, v1, v2, v3)
return v5, v6, v7, v8
}


@@ -1,7 +1,7 @@
 test unwind
 set opt_level=speed_and_size
 set is_pic
-target x86_64 legacy haswell
+target x86_64-linux legacy haswell

 ; check the unwind information with a function with no args
 function %no_args() system_v {


@@ -1,7 +1,7 @@
 test unwind
 set opt_level=speed_and_size
 set is_pic
-target x86_64 legacy haswell
+target x86_64-windows legacy haswell

 ; check the unwind information with a leaf function with no args
 function %no_args_leaf() windows_fastcall {


@@ -15,12 +15,13 @@ use crate::translation_utils::{
DataIndex, DefinedFuncIndex, ElemIndex, FuncIndex, Global, GlobalIndex, Memory, MemoryIndex, DataIndex, DefinedFuncIndex, ElemIndex, FuncIndex, Global, GlobalIndex, Memory, MemoryIndex,
Table, TableIndex, TypeIndex, Table, TableIndex, TypeIndex,
}; };
use crate::WasmType;
use core::convert::TryFrom; use core::convert::TryFrom;
use cranelift_codegen::cursor::FuncCursor; use cranelift_codegen::cursor::FuncCursor;
use cranelift_codegen::ir::immediates::{Offset32, Uimm64}; use cranelift_codegen::ir::immediates::{Offset32, Uimm64};
use cranelift_codegen::ir::types::*; use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{self, InstBuilder}; use cranelift_codegen::ir::{self, InstBuilder};
use cranelift_codegen::isa::TargetFrontendConfig; use cranelift_codegen::isa::{CallConv, TargetFrontendConfig};
use cranelift_entity::{EntityRef, PrimaryMap, SecondaryMap}; use cranelift_entity::{EntityRef, PrimaryMap, SecondaryMap};
use cranelift_frontend::FunctionBuilder; use cranelift_frontend::FunctionBuilder;
use std::boxed::Box; use std::boxed::Box;
@@ -660,7 +661,25 @@ impl TargetEnvironment for DummyEnvironment {
} }
impl<'data> ModuleEnvironment<'data> for DummyEnvironment { impl<'data> ModuleEnvironment<'data> for DummyEnvironment {
fn declare_type_func(&mut self, _wasm: WasmFuncType, sig: ir::Signature) -> WasmResult<()> { fn declare_type_func(&mut self, wasm: WasmFuncType) -> WasmResult<()> {
let mut sig = ir::Signature::new(CallConv::Fast);
let mut cvt = |ty: &WasmType| {
let reference_type = match self.pointer_type() {
ir::types::I32 => ir::types::R32,
ir::types::I64 => ir::types::R64,
_ => panic!("unsupported pointer type"),
};
ir::AbiParam::new(match ty {
WasmType::I32 => ir::types::I32,
WasmType::I64 => ir::types::I64,
WasmType::F32 => ir::types::F32,
WasmType::F64 => ir::types::F64,
WasmType::V128 => ir::types::I8X16,
WasmType::FuncRef | WasmType::ExternRef | WasmType::ExnRef => reference_type,
})
};
sig.params.extend(wasm.params.iter().map(&mut cvt));
sig.returns.extend(wasm.returns.iter().map(&mut cvt));
self.info.signatures.push(sig); self.info.signatures.push(sig);
Ok(()) Ok(())
} }


@@ -702,11 +702,7 @@ pub trait ModuleEnvironment<'data>: TargetEnvironment {
     }

     /// Declares a function signature to the environment.
-    fn declare_type_func(
-        &mut self,
-        wasm_func_type: WasmFuncType,
-        sig: ir::Signature,
-    ) -> WasmResult<()>;
+    fn declare_type_func(&mut self, wasm_func_type: WasmFuncType) -> WasmResult<()>;

     /// Declares a module type signature to the environment.
     fn declare_type_module(


@@ -18,7 +18,6 @@ use crate::wasm_unsupported;
use core::convert::TryFrom; use core::convert::TryFrom;
use core::convert::TryInto; use core::convert::TryInto;
use cranelift_codegen::ir::immediates::V128Imm; use cranelift_codegen::ir::immediates::V128Imm;
use cranelift_codegen::ir::{self, AbiParam, Signature};
use cranelift_entity::packed_option::ReservedValue; use cranelift_entity::packed_option::ReservedValue;
use cranelift_entity::EntityRef; use cranelift_entity::EntityRef;
use std::boxed::Box; use std::boxed::Box;
@@ -110,18 +109,7 @@ pub fn parse_type_section<'a>(
for entry in types { for entry in types {
match entry? { match entry? {
TypeDef::Func(wasm_func_ty) => { TypeDef::Func(wasm_func_ty) => {
let mut sig = Signature::new(environ.target_config().default_call_conv); environ.declare_type_func(wasm_func_ty.clone().try_into()?)?;
sig.params.extend(wasm_func_ty.params.iter().map(|ty| {
let cret_arg: ir::Type = type_to_type(*ty, environ)
.expect("only numeric types are supported in function signatures");
AbiParam::new(cret_arg)
}));
sig.returns.extend(wasm_func_ty.returns.iter().map(|ty| {
let cret_arg: ir::Type = type_to_type(*ty, environ)
.expect("only numeric types are supported in function signatures");
AbiParam::new(cret_arg)
}));
environ.declare_type_func(wasm_func_ty.clone().try_into()?, sig)?;
module_translation_state module_translation_state
.wasm_types .wasm_types
.push((wasm_func_ty.params, wasm_func_ty.returns)); .push((wasm_func_ty.params, wasm_func_ty.returns));


@@ -19,6 +19,7 @@ entity_impl!(FuncIndex);

 /// Index type of a defined function inside the WebAssembly module.
 #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
+#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
 pub struct DefinedFuncIndex(u32);
 entity_impl!(DefinedFuncIndex);


@@ -18,3 +18,4 @@ cranelift-codegen = { path = "../../cranelift/codegen", version = "0.73.0" }
 cranelift-frontend = { path = "../../cranelift/frontend", version = "0.73.0" }
 cranelift-entity = { path = "../../cranelift/entity", version = "0.73.0" }
 wasmparser = "0.77.0"
+target-lexicon = "0.12"


@@ -4,20 +4,20 @@ use cranelift_codegen::ir::condcodes::*;
use cranelift_codegen::ir::immediates::{Offset32, Uimm64}; use cranelift_codegen::ir::immediates::{Offset32, Uimm64};
use cranelift_codegen::ir::types::*; use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{AbiParam, ArgumentPurpose, Function, InstBuilder, Signature}; use cranelift_codegen::ir::{AbiParam, ArgumentPurpose, Function, InstBuilder, Signature};
use cranelift_codegen::isa::{self, TargetFrontendConfig}; use cranelift_codegen::isa::{self, TargetFrontendConfig, TargetIsa};
use cranelift_entity::{EntityRef, PrimaryMap}; use cranelift_entity::EntityRef;
use cranelift_frontend::FunctionBuilder; use cranelift_frontend::FunctionBuilder;
use cranelift_frontend::Variable; use cranelift_frontend::Variable;
use cranelift_wasm::{ use cranelift_wasm::{
self, FuncIndex, FuncTranslationState, GlobalIndex, GlobalVariable, MemoryIndex, self, FuncIndex, FuncTranslationState, GlobalIndex, GlobalVariable, MemoryIndex, TableIndex,
SignatureIndex, TableIndex, TargetEnvironment, TypeIndex, WasmError, WasmResult, WasmType, TargetEnvironment, TypeIndex, WasmError, WasmResult, WasmType,
}; };
use std::convert::TryFrom; use std::convert::TryFrom;
use std::mem; use std::mem;
use wasmparser::Operator; use wasmparser::Operator;
use wasmtime_environ::{ use wasmtime_environ::{
BuiltinFunctionIndex, MemoryPlan, MemoryStyle, Module, TableStyle, Tunables, VMOffsets, BuiltinFunctionIndex, MemoryPlan, MemoryStyle, Module, TableStyle, Tunables, TypeTables,
INTERRUPTED, WASM_PAGE_SIZE, VMOffsets, INTERRUPTED, WASM_PAGE_SIZE,
}; };
/// Compute an `ir::ExternalName` for a given wasm function index. /// Compute an `ir::ExternalName` for a given wasm function index.
@@ -109,14 +109,9 @@ wasmtime_environ::foreach_builtin_function!(declare_function_signatures);
/// The `FuncEnvironment` implementation for use by the `ModuleEnvironment`. /// The `FuncEnvironment` implementation for use by the `ModuleEnvironment`.
pub struct FuncEnvironment<'module_environment> { pub struct FuncEnvironment<'module_environment> {
/// Target-specified configuration. isa: &'module_environment (dyn TargetIsa + 'module_environment),
target_config: TargetFrontendConfig,
/// The module-level environment which this function-level environment belongs to.
module: &'module_environment Module, module: &'module_environment Module,
types: &'module_environment TypeTables,
/// The native signatures for each type signature in this module
native_signatures: &'module_environment PrimaryMap<SignatureIndex, ir::Signature>,
/// The Cranelift global holding the vmctx address. /// The Cranelift global holding the vmctx address.
vmctx: Option<ir::GlobalValue>, vmctx: Option<ir::GlobalValue>,
@@ -146,27 +141,27 @@ pub struct FuncEnvironment<'module_environment> {
impl<'module_environment> FuncEnvironment<'module_environment> { impl<'module_environment> FuncEnvironment<'module_environment> {
pub fn new( pub fn new(
target_config: TargetFrontendConfig, isa: &'module_environment (dyn TargetIsa + 'module_environment),
module: &'module_environment Module, module: &'module_environment Module,
native_signatures: &'module_environment PrimaryMap<SignatureIndex, ir::Signature>, types: &'module_environment TypeTables,
tunables: &'module_environment Tunables, tunables: &'module_environment Tunables,
) -> Self { ) -> Self {
let builtin_function_signatures = BuiltinFunctionSignatures::new( let builtin_function_signatures = BuiltinFunctionSignatures::new(
target_config.pointer_type(), isa.pointer_type(),
match target_config.pointer_type() { match isa.pointer_type() {
ir::types::I32 => ir::types::R32, ir::types::I32 => ir::types::R32,
ir::types::I64 => ir::types::R64, ir::types::I64 => ir::types::R64,
_ => panic!(), _ => panic!(),
}, },
target_config.default_call_conv, crate::wasmtime_call_conv(isa),
); );
Self { Self {
target_config, isa,
module, module,
native_signatures, types,
vmctx: None, vmctx: None,
builtin_function_signatures, builtin_function_signatures,
offsets: VMOffsets::new(target_config.pointer_bytes(), module), offsets: VMOffsets::new(isa.pointer_bytes(), module),
tunables, tunables,
fuel_var: Variable::new(0), fuel_var: Variable::new(0),
vminterrupts_ptr: Variable::new(0), vminterrupts_ptr: Variable::new(0),
@@ -178,7 +173,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
} }
fn pointer_type(&self) -> ir::Type { fn pointer_type(&self) -> ir::Type {
self.target_config.pointer_type() self.isa.pointer_type()
} }
fn vmctx(&mut self, func: &mut Function) -> ir::GlobalValue { fn vmctx(&mut self, func: &mut Function) -> ir::GlobalValue {
@@ -680,7 +675,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environment> { impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environment> {
fn target_config(&self) -> TargetFrontendConfig { fn target_config(&self) -> TargetFrontendConfig {
self.target_config self.isa.frontend_config()
} }
fn reference_type(&self, ty: WasmType) -> ir::Type { fn reference_type(&self, ty: WasmType) -> ir::Type {
@@ -1339,7 +1334,8 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
index: TypeIndex, index: TypeIndex,
) -> WasmResult<ir::SigRef> { ) -> WasmResult<ir::SigRef> {
let index = self.module.types[index].unwrap_function(); let index = self.module.types[index].unwrap_function();
Ok(func.import_signature(self.native_signatures[index].clone())) let sig = crate::indirect_signature(self.isa, self.types, index);
Ok(func.import_signature(sig))
} }
fn make_direct_func( fn make_direct_func(
@@ -1347,8 +1343,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
func: &mut ir::Function, func: &mut ir::Function,
index: FuncIndex, index: FuncIndex,
) -> WasmResult<ir::FuncRef> { ) -> WasmResult<ir::FuncRef> {
let sig_index = self.module.functions[index]; let sig = crate::func_signature(self.isa, self.module, self.types, index);
let sig = self.native_signatures[sig_index].clone();
let signature = func.import_signature(sig); let signature = func.import_signature(sig);
let name = get_func_name(index); let name = get_func_name(index);
Ok(func.import_function(ir::ExtFuncData { Ok(func.import_function(ir::ExtFuncData {


@@ -90,16 +90,18 @@
use crate::func_environ::{get_func_name, FuncEnvironment}; use crate::func_environ::{get_func_name, FuncEnvironment};
use cranelift_codegen::ir::{self, ExternalName}; use cranelift_codegen::ir::{self, ExternalName};
use cranelift_codegen::isa::{CallConv, TargetIsa};
use cranelift_codegen::machinst::buffer::MachSrcLoc; use cranelift_codegen::machinst::buffer::MachSrcLoc;
use cranelift_codegen::print_errors::pretty_error; use cranelift_codegen::print_errors::pretty_error;
use cranelift_codegen::{binemit, isa, Context}; use cranelift_codegen::{binemit, isa, Context};
use cranelift_wasm::{DefinedFuncIndex, FuncIndex, FuncTranslator}; use cranelift_wasm::{DefinedFuncIndex, FuncIndex, FuncTranslator, SignatureIndex, WasmType};
use std::convert::TryFrom; use std::convert::TryFrom;
use std::sync::Mutex; use std::sync::Mutex;
use target_lexicon::CallingConvention;
use wasmtime_environ::{ use wasmtime_environ::{
CompileError, CompiledFunction, Compiler, FunctionAddressMap, FunctionBodyData, CompileError, CompiledFunction, Compiler, FunctionAddressMap, FunctionBodyData,
InstructionAddressMap, ModuleTranslation, Relocation, RelocationTarget, StackMapInformation, InstructionAddressMap, Module, ModuleTranslation, Relocation, RelocationTarget,
TrapInformation, Tunables, TypeTables, StackMapInformation, TrapInformation, Tunables, TypeTables,
}; };
mod func_environ; mod func_environ;
@@ -354,18 +356,12 @@ impl Compiler for Cranelift {
let func_index = module.func_index(func_index); let func_index = module.func_index(func_index);
let mut context = Context::new(); let mut context = Context::new();
context.func.name = get_func_name(func_index); context.func.name = get_func_name(func_index);
let sig_index = module.functions[func_index]; context.func.signature = func_signature(isa, module, types, func_index);
context.func.signature = types.native_signatures[sig_index].clone();
if tunables.generate_native_debuginfo { if tunables.generate_native_debuginfo {
context.func.collect_debug_info(); context.func.collect_debug_info();
} }
let mut func_env = FuncEnvironment::new( let mut func_env = FuncEnvironment::new(isa, module, types, tunables);
isa.frontend_config(),
module,
&types.native_signatures,
tunables,
);
// We use these as constant offsets below in // We use these as constant offsets below in
// `stack_limit_from_arguments`, so assert their values here. This // `stack_limit_from_arguments`, so assert their values here. This
@@ -457,3 +453,83 @@ impl Compiler for Cranelift {
}) })
} }
} }
pub fn blank_sig(isa: &dyn TargetIsa, call_conv: CallConv) -> ir::Signature {
let pointer_type = isa.pointer_type();
let mut sig = ir::Signature::new(call_conv);
// Add the caller/callee `vmctx` parameters.
sig.params.push(ir::AbiParam::special(
pointer_type,
ir::ArgumentPurpose::VMContext,
));
sig.params.push(ir::AbiParam::new(pointer_type));
return sig;
}
pub fn wasmtime_call_conv(isa: &dyn TargetIsa) -> CallConv {
match isa.triple().default_calling_convention() {
Ok(CallingConvention::SystemV) | Ok(CallingConvention::AppleAarch64) | Err(()) => {
CallConv::WasmtimeSystemV
}
Ok(CallingConvention::WindowsFastcall) => CallConv::WasmtimeFastcall,
Ok(unimp) => unimplemented!("calling convention: {:?}", unimp),
}
}
pub fn push_types(
isa: &dyn TargetIsa,
sig: &mut ir::Signature,
types: &TypeTables,
index: SignatureIndex,
) {
let wasm = &types.wasm_signatures[index];
let cvt = |ty: &WasmType| {
ir::AbiParam::new(match ty {
WasmType::I32 => ir::types::I32,
WasmType::I64 => ir::types::I64,
WasmType::F32 => ir::types::F32,
WasmType::F64 => ir::types::F64,
WasmType::V128 => ir::types::I8X16,
WasmType::FuncRef | WasmType::ExternRef => {
wasmtime_environ::reference_type(*ty, isa.pointer_type())
}
WasmType::ExnRef => unimplemented!(),
})
};
sig.params.extend(wasm.params.iter().map(&cvt));
sig.returns.extend(wasm.returns.iter().map(&cvt));
}
pub fn indirect_signature(
isa: &dyn TargetIsa,
types: &TypeTables,
index: SignatureIndex,
) -> ir::Signature {
let mut sig = blank_sig(isa, wasmtime_call_conv(isa));
push_types(isa, &mut sig, types, index);
return sig;
}
pub fn func_signature(
isa: &dyn TargetIsa,
module: &Module,
types: &TypeTables,
index: FuncIndex,
) -> ir::Signature {
let call_conv = match module.defined_func_index(index) {
// If this is a defined function in the module and it's never possibly
// exported, then we can optimize this function to use the fastest
// calling convention since it's purely an internal implementation
// detail of the module itself.
Some(idx) if !module.possibly_exported_funcs.contains(&idx) => CallConv::Fast,
// ... otherwise if it's an imported function or if it's a possibly
// exported function then we use the default ABI wasmtime would
// otherwise select.
_ => wasmtime_call_conv(isa),
};
let mut sig = blank_sig(isa, call_conv);
push_types(isa, &mut sig, types, module.functions[index]);
return sig;
}


@@ -2,12 +2,11 @@
use crate::tunables::Tunables; use crate::tunables::Tunables;
use crate::WASM_MAX_PAGES; use crate::WASM_MAX_PAGES;
use cranelift_codegen::ir;
use cranelift_entity::{EntityRef, PrimaryMap}; use cranelift_entity::{EntityRef, PrimaryMap};
use cranelift_wasm::*; use cranelift_wasm::*;
use indexmap::IndexMap; use indexmap::IndexMap;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::HashMap; use std::collections::{HashMap, HashSet};
use std::sync::Arc; use std::sync::Arc;
/// Implemenation styles for WebAssembly linear memory. /// Implemenation styles for WebAssembly linear memory.
@@ -367,6 +366,10 @@ pub struct Module {
/// The type of each nested wasm module this module contains. /// The type of each nested wasm module this module contains.
pub modules: PrimaryMap<ModuleIndex, ModuleTypeIndex>, pub modules: PrimaryMap<ModuleIndex, ModuleTypeIndex>,
/// The set of defined functions within this module which are located in
/// element segments.
pub possibly_exported_funcs: HashSet<DefinedFuncIndex>,
} }
/// Initialization routines for creating an instance, encompassing imports, /// Initialization routines for creating an instance, encompassing imports,
@@ -564,7 +567,6 @@ impl Module {
#[allow(missing_docs)] #[allow(missing_docs)]
pub struct TypeTables { pub struct TypeTables {
pub wasm_signatures: PrimaryMap<SignatureIndex, WasmFuncType>, pub wasm_signatures: PrimaryMap<SignatureIndex, WasmFuncType>,
pub native_signatures: PrimaryMap<SignatureIndex, ir::Signature>,
pub module_signatures: PrimaryMap<ModuleTypeIndex, ModuleSignature>, pub module_signatures: PrimaryMap<ModuleTypeIndex, ModuleSignature>,
pub instance_signatures: PrimaryMap<InstanceTypeIndex, InstanceSignature>, pub instance_signatures: PrimaryMap<InstanceTypeIndex, InstanceSignature>,
} }


@@ -4,14 +4,14 @@ use crate::module::{
}; };
use crate::tunables::Tunables; use crate::tunables::Tunables;
use cranelift_codegen::ir; use cranelift_codegen::ir;
use cranelift_codegen::ir::{AbiParam, ArgumentPurpose};
use cranelift_codegen::isa::TargetFrontendConfig; use cranelift_codegen::isa::TargetFrontendConfig;
use cranelift_codegen::packed_option::ReservedValue;
use cranelift_entity::PrimaryMap; use cranelift_entity::PrimaryMap;
use cranelift_wasm::{ use cranelift_wasm::{
self, translate_module, Alias, DataIndex, DefinedFuncIndex, ElemIndex, EntityIndex, EntityType, self, translate_module, Alias, DataIndex, DefinedFuncIndex, ElemIndex, EntityIndex, EntityType,
FuncIndex, Global, GlobalIndex, InstanceIndex, InstanceTypeIndex, Memory, MemoryIndex, FuncIndex, Global, GlobalIndex, GlobalInit, InstanceIndex, InstanceTypeIndex, Memory,
ModuleIndex, ModuleTypeIndex, SignatureIndex, Table, TableIndex, TargetEnvironment, TypeIndex, MemoryIndex, ModuleIndex, ModuleTypeIndex, SignatureIndex, Table, TableIndex,
WasmError, WasmFuncType, WasmResult, TargetEnvironment, TypeIndex, WasmError, WasmFuncType, WasmResult,
}; };
use std::collections::{hash_map::Entry, HashMap}; use std::collections::{hash_map::Entry, HashMap};
use std::convert::TryFrom; use std::convert::TryFrom;
@@ -357,6 +357,15 @@ impl<'data> ModuleEnvironment<'data> {
.module_signatures .module_signatures
.push(ModuleSignature { imports, exports }) .push(ModuleSignature { imports, exports })
} }
fn flag_func_possibly_exported(&mut self, func: FuncIndex) {
if func.is_reserved_value() {
return;
}
if let Some(idx) = self.result.module.defined_func_index(func) {
self.result.module.possibly_exported_funcs.insert(idx);
}
}
} }
impl<'data> TargetEnvironment for ModuleEnvironment<'data> { impl<'data> TargetEnvironment for ModuleEnvironment<'data> {
@@ -375,21 +384,17 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
fn reserve_types(&mut self, num: u32) -> WasmResult<()> { fn reserve_types(&mut self, num: u32) -> WasmResult<()> {
let num = usize::try_from(num).unwrap(); let num = usize::try_from(num).unwrap();
self.result.module.types.reserve(num); self.result.module.types.reserve(num);
self.types.native_signatures.reserve(num);
self.types.wasm_signatures.reserve(num); self.types.wasm_signatures.reserve(num);
Ok(()) Ok(())
} }
fn declare_type_func(&mut self, wasm: WasmFuncType, sig: ir::Signature) -> WasmResult<()> { fn declare_type_func(&mut self, wasm: WasmFuncType) -> WasmResult<()> {
// Deduplicate wasm function signatures through `interned_func_types`, // Deduplicate wasm function signatures through `interned_func_types`,
// which also deduplicates across wasm modules with module linking. // which also deduplicates across wasm modules with module linking.
let sig_index = match self.interned_func_types.get(&wasm) { let sig_index = match self.interned_func_types.get(&wasm) {
Some(idx) => *idx, Some(idx) => *idx,
None => { None => {
let sig = translate_signature(sig, self.pointer_type()); let sig_index = self.types.wasm_signatures.push(wasm.clone());
let sig_index = self.types.native_signatures.push(sig);
let sig_index2 = self.types.wasm_signatures.push(wasm.clone());
debug_assert_eq!(sig_index, sig_index2);
self.interned_func_types.insert(wasm, sig_index); self.interned_func_types.insert(wasm, sig_index);
sig_index sig_index
} }
@@ -641,6 +646,9 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
} }
fn declare_global(&mut self, global: Global) -> WasmResult<()> { fn declare_global(&mut self, global: Global) -> WasmResult<()> {
if let GlobalInit::RefFunc(index) = global.initializer {
self.flag_func_possibly_exported(index);
}
self.result.module.globals.push(global); self.result.module.globals.push(global);
Ok(()) Ok(())
} }
@@ -654,6 +662,7 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
} }
fn declare_func_export(&mut self, func_index: FuncIndex, name: &str) -> WasmResult<()> { fn declare_func_export(&mut self, func_index: FuncIndex, name: &str) -> WasmResult<()> {
self.flag_func_possibly_exported(func_index);
self.declare_export(EntityIndex::Function(func_index), name) self.declare_export(EntityIndex::Function(func_index), name)
} }
@@ -678,6 +687,7 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
} }
fn declare_start_func(&mut self, func_index: FuncIndex) -> WasmResult<()> { fn declare_start_func(&mut self, func_index: FuncIndex) -> WasmResult<()> {
self.flag_func_possibly_exported(func_index);
debug_assert!(self.result.module.start_func.is_none()); debug_assert!(self.result.module.start_func.is_none());
self.result.module.start_func = Some(func_index); self.result.module.start_func = Some(func_index);
Ok(()) Ok(())
@@ -698,6 +708,9 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
offset: usize, offset: usize,
elements: Box<[FuncIndex]>, elements: Box<[FuncIndex]>,
) -> WasmResult<()> { ) -> WasmResult<()> {
for element in elements.iter() {
self.flag_func_possibly_exported(*element);
}
self.result self.result
.module .module
.table_initializers .table_initializers
@@ -715,6 +728,9 @@ impl<'data> cranelift_wasm::ModuleEnvironment<'data> for ModuleEnvironment<'data
elem_index: ElemIndex, elem_index: ElemIndex,
segments: Box<[FuncIndex]>, segments: Box<[FuncIndex]>,
) -> WasmResult<()> { ) -> WasmResult<()> {
for element in segments.iter() {
self.flag_func_possibly_exported(*element);
}
let index = self.result.module.passive_elements.len(); let index = self.result.module.passive_elements.len();
self.result.module.passive_elements.push(segments); self.result.module.passive_elements.push(segments);
let old = self let old = self
@@ -1070,15 +1086,3 @@ and for re-adding support for interface types you can see this issue:
Ok(()) Ok(())
} }
} }
/// Add environment-specific function parameters.
pub fn translate_signature(mut sig: ir::Signature, pointer_type: ir::Type) -> ir::Signature {
// Prepend the vmctx argument.
sig.params.insert(
0,
AbiParam::special(pointer_type, ArgumentPurpose::VMContext),
);
// Prepend the caller vmctx argument.
sig.params.insert(1, AbiParam::new(pointer_type));
sig
}


@@ -50,6 +50,7 @@ pub use crate::instantiate::{
     CompilationArtifacts, CompiledModule, ModuleCode, SetupError, SymbolizeContext, TypeTables,
 };
 pub use crate::link::link_module;
+pub use wasmtime_cranelift::{blank_sig, wasmtime_call_conv};

 /// Version number of this crate.
 pub const VERSION: &str = env!("CARGO_PKG_VERSION");
@@ -11,9 +11,4 @@ pub fn builder_without_flags() -> cranelift_codegen::isa::Builder {
.expect("host machine is not a supported target")
}
- pub fn call_conv() -> cranelift_codegen::isa::CallConv {
-     use target_lexicon::HOST;
-     cranelift_codegen::isa::CallConv::triple_default(&HOST)
- }
pub use cranelift_codegen::isa::lookup;
@@ -8,7 +8,7 @@ use std::collections::BTreeSet;
use wasmtime_debug::DwarfSection;
use wasmtime_environ::isa::{unwind::UnwindInfo, TargetIsa};
use wasmtime_environ::wasm::{FuncIndex, SignatureIndex};
- use wasmtime_environ::{CompiledFunctions, ModuleTranslation, ModuleType, TypeTables};
+ use wasmtime_environ::{CompiledFunctions, ModuleTranslation, TypeTables};
use wasmtime_obj::{ObjectBuilder, ObjectBuilderTarget};
pub use wasmtime_obj::utils;
@@ -42,18 +42,18 @@ pub(crate) fn build_object(
// Build trampolines for every signature that can be used by this module.
let signatures = translation
.module
- .types
- .values()
- .filter_map(|t| match t {
-     ModuleType::Function(f) => Some(*f),
-     _ => None,
+ .functions
+ .iter()
+ .filter_map(|(i, sig)| match translation.module.defined_func_index(i) {
+     Some(i) if !translation.module.possibly_exported_funcs.contains(&i) => None,
+     _ => Some(*sig),
})
.collect::<BTreeSet<_>>();
let mut trampolines = Vec::with_capacity(signatures.len());
let mut cx = FunctionBuilderContext::new();
for i in signatures {
- let native_sig = &types.native_signatures[i];
- let func = build_trampoline(isa, &mut cx, native_sig, std::mem::size_of::<u128>())?;
+ let native_sig = wasmtime_cranelift::indirect_signature(isa, &types, i);
+ let func = build_trampoline(isa, &mut cx, &native_sig, std::mem::size_of::<u128>())?;
// Preserve trampoline function unwind info.
if let Some(info) = &func.unwind_info {
unwind_info.push(ObjectUnwindInfo::Trampoline(i, info.clone()))
@@ -9,7 +9,7 @@ use wasmtime_runtime::{InstantiationError, VMFunctionBody, VMTrampoline};
pub mod ir {
pub(super) use cranelift_codegen::ir::{
- AbiParam, ArgumentPurpose, ConstantOffset, JumpTable, Signature, SourceLoc,
+ AbiParam, ConstantOffset, JumpTable, Signature, SourceLoc,
};
pub use cranelift_codegen::ir::{
ExternalName, Function, InstBuilder, MemFlags, StackSlotData, StackSlotKind,
@@ -52,16 +52,8 @@ pub(crate) fn build_trampoline(
value_size: usize,
) -> Result<CompiledFunction, SetupError> {
let pointer_type = isa.pointer_type();
- let mut wrapper_sig = ir::Signature::new(isa.frontend_config().default_call_conv);
+ let mut wrapper_sig =
+     wasmtime_cranelift::blank_sig(isa, wasmtime_cranelift::wasmtime_call_conv(isa));
- // Add the callee `vmctx` parameter.
- wrapper_sig.params.push(ir::AbiParam::special(
-     pointer_type,
-     ir::ArgumentPurpose::VMContext,
- ));
- // Add the caller `vmctx` parameter.
- wrapper_sig.params.push(ir::AbiParam::new(pointer_type));
// Add the `callee_address` parameter.
wrapper_sig.params.push(ir::AbiParam::new(pointer_type));
@@ -9,12 +9,12 @@ use cranelift_codegen::isa;
use lightbeam::{CodeGenSession, NullOffsetSink, Sinks};
use wasmtime_environ::wasm::{
DefinedFuncIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, FuncIndex,
- GlobalIndex, MemoryIndex, SignatureIndex, TableIndex, TypeIndex,
+ GlobalIndex, MemoryIndex, TableIndex, TypeIndex,
};
use wasmtime_environ::{
- entity::PrimaryMap, BuiltinFunctionIndex, CompileError, CompiledFunction, Compiler,
- FunctionBodyData, Module, ModuleTranslation, Relocation, RelocationTarget, TrapInformation,
- Tunables, TypeTables, VMOffsets,
+ BuiltinFunctionIndex, CompileError, CompiledFunction, Compiler, FunctionBodyData, Module,
+ ModuleTranslation, Relocation, RelocationTarget, TrapInformation, Tunables, TypeTables,
+ VMOffsets,
};
/// A compiler that compiles a WebAssembly module with Lightbeam, directly translating the Wasm file.
@@ -28,14 +28,14 @@ impl Compiler for Lightbeam {
function_body: FunctionBodyData<'_>,
isa: &dyn isa::TargetIsa,
tunables: &Tunables,
- types: &TypeTables,
+ _types: &TypeTables,
) -> Result<CompiledFunction, CompileError> {
if tunables.generate_native_debuginfo {
return Err(CompileError::DebugInfoNotSupported);
}
let func_index = translation.module.func_index(i);
- let env = FuncEnvironment::new(isa.frontend_config().pointer_bytes(), translation, types);
+ let env = FuncEnvironment::new(isa.frontend_config().pointer_bytes(), translation);
let mut codegen_session: CodeGenSession<_> = CodeGenSession::new(
translation.function_body_inputs.len() as u32,
&env,
@@ -174,22 +174,15 @@ struct FuncEnvironment<'module_environment> {
/// The module-level environment which this function-level environment belongs to.
module: &'module_environment Module,
- native_signatures: &'module_environment PrimaryMap<SignatureIndex, ir::Signature>,
/// Offsets to struct fields accessed by JIT code.
offsets: VMOffsets,
}
impl<'module_environment> FuncEnvironment<'module_environment> {
- fn new(
-     pointer_bytes: u8,
-     translation: &'module_environment ModuleTranslation<'_>,
-     types: &'module_environment TypeTables,
- ) -> Self {
+ fn new(pointer_bytes: u8, translation: &'module_environment ModuleTranslation<'_>) -> Self {
Self {
module: &translation.module,
offsets: VMOffsets::new(pointer_bytes, &translation.module),
- native_signatures: &types.native_signatures,
}
}
}
@@ -227,8 +220,8 @@ impl lightbeam::ModuleContext for FuncEnvironment<'_> {
self.module.functions[FuncIndex::from_u32(func_idx)].as_u32()
}
- fn signature(&self, index: u32) -> &Self::Signature {
-     &self.native_signatures[SignatureIndex::from_u32(index)]
+ fn signature(&self, _index: u32) -> &Self::Signature {
+     panic!("not implemented")
}
fn defined_table_index(&self, table_index: u32) -> Option<u32> {
@@ -562,14 +562,14 @@ impl Func {
/// addition to some extra types
///
/// | Rust Return Type | WebAssembly Return Type | Meaning |
- /// |-------------------|-------------------------|-------------------|
+ /// |-------------------|-------------------------|-----------------------|
/// | `()` | nothing | no return value |
- /// | `Result<T, Trap>` | `T` | function may trap |
+ /// | `T` | `T` | a single return value |
+ /// | `(T1, T2, ...)` | `T1 T2 ...` | multiple returns |
///
- /// At this time multi-value returns are not supported, and supporting this
- /// is the subject of [#1178].
- ///
- /// [#1178]: https://github.com/bytecodealliance/wasmtime/issues/1178
+ /// Note that all return types can also be wrapped in `Result<_, Trap>` to
+ /// indicate that the host function can generate a trap as well as possibly
+ /// returning a value.
///
/// Finally you can also optionally take [`Caller`] as the first argument of
/// your closure. If inserted then you're able to inspect the caller's
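As a quick illustration of the table above, here is a hedged sketch of wrapping host functions with multiple return values; the store and closures are made up for the example and are not part of this patch.

```
// Plain tuple return: two results, no trapping.
let min_max = Func::wrap(&store, |a: i32, b: i32| (a.min(b), a.max(b)));

// The same shape wrapped in `Result` so the host function may also trap.
let checked_div = Func::wrap(&store, |a: i32, b: i32| -> Result<(i32, i32), Trap> {
    if b == 0 {
        Err(Trap::new("division by zero"))
    } else {
        Ok((a / b, a % b))
    }
});
```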
@@ -1094,7 +1094,9 @@ impl Func {
/// and similarly if a function has multiple results you can bind that too
///
/// ```
+ /// # #[cfg(not(feature = "old-x86-backend"))]
/// # use wasmtime::*;
+ /// # #[cfg(not(feature = "old-x86-backend"))]
/// # fn foo(add_with_overflow: &Func) -> anyhow::Result<()> {
/// let typed = add_with_overflow.typed::<(u32, u32), (u32, i32)>()?;
/// let (result, overflow) = typed.call((u32::max_value(), 2))?;
@@ -1264,6 +1266,8 @@ pub unsafe trait WasmRet {
// Same as `WasmTy::Abi`.
#[doc(hidden)]
type Abi: Copy;
+ #[doc(hidden)]
+ type Retptr: Copy;
// Same as `WasmTy::compatible_with_store`.
#[doc(hidden)]
@@ -1276,11 +1280,13 @@
// `invoke_wasm_and_catch_traps` is on the stack, and therefore this method
// is unsafe.
#[doc(hidden)]
- unsafe fn into_abi_for_ret(self, store: &Store) -> Result<Self::Abi, Trap>;
+ unsafe fn into_abi_for_ret(self, store: &Store, ptr: Self::Retptr) -> Result<Self::Abi, Trap>;
- // Same as `WasmTy::push`.
#[doc(hidden)]
- fn valtype() -> Option<ValType>;
+ fn func_type(params: impl Iterator<Item = ValType>) -> FuncType;
+ #[doc(hidden)]
+ unsafe fn wrap_trampoline(ptr: *mut u128, f: impl FnOnce(Self::Retptr) -> Self::Abi);
// Utilities used to convert an instance of this type to a `Result`
// explicitly, used when wrapping async functions which always bottom-out
@@ -1293,83 +1299,28 @@ pub unsafe trait WasmRet {
fn fallible_from_trap(trap: Trap) -> Self::Fallible;
}
unsafe impl WasmRet for () {
type Abi = ();
type Fallible = Result<(), Trap>;
#[inline]
fn compatible_with_store(&self, _store: &Store) -> bool {
true
}
#[inline]
unsafe fn into_abi_for_ret(self, _store: &Store) -> Result<(), Trap> {
Ok(())
}
#[inline]
fn valtype() -> Option<ValType> {
None
}
#[inline]
fn into_fallible(self) -> Result<(), Trap> {
Ok(())
}
#[inline]
fn fallible_from_trap(trap: Trap) -> Result<(), Trap> {
Err(trap)
}
}
unsafe impl WasmRet for Result<(), Trap> {
type Abi = ();
type Fallible = Self;
#[inline]
fn compatible_with_store(&self, _store: &Store) -> bool {
true
}
#[inline]
unsafe fn into_abi_for_ret(self, _store: &Store) -> Result<(), Trap> {
self
}
#[inline]
fn valtype() -> Option<ValType> {
None
}
#[inline]
fn into_fallible(self) -> Result<(), Trap> {
self
}
#[inline]
fn fallible_from_trap(trap: Trap) -> Result<(), Trap> {
Err(trap)
}
}
unsafe impl<T> WasmRet for T
where
T: WasmTy,
{
type Abi = <T as WasmTy>::Abi;
+ type Retptr = ();
type Fallible = Result<T, Trap>;
fn compatible_with_store(&self, store: &Store) -> bool {
<Self as WasmTy>::compatible_with_store(self, store)
}
- unsafe fn into_abi_for_ret(self, store: &Store) -> Result<Self::Abi, Trap> {
+ unsafe fn into_abi_for_ret(self, store: &Store, _retptr: ()) -> Result<Self::Abi, Trap> {
Ok(<Self as WasmTy>::into_abi(self, store))
}
- fn valtype() -> Option<ValType> {
-     Some(<Self as WasmTy>::valtype())
+ fn func_type(params: impl Iterator<Item = ValType>) -> FuncType {
+     FuncType::new(params, Some(<Self as WasmTy>::valtype()))
+ }
+ unsafe fn wrap_trampoline(ptr: *mut u128, f: impl FnOnce(Self::Retptr) -> Self::Abi) {
+     *ptr.cast::<Self::Abi>() = f(());
}
fn into_fallible(self) -> Result<T, Trap> {
@@ -1383,24 +1334,33 @@ where
unsafe impl<T> WasmRet for Result<T, Trap>
where
- T: WasmTy,
+ T: WasmRet,
{
- type Abi = <T as WasmTy>::Abi;
+ type Abi = <T as WasmRet>::Abi;
+ type Retptr = <T as WasmRet>::Retptr;
type Fallible = Self;
fn compatible_with_store(&self, store: &Store) -> bool {
match self {
- Ok(x) => <T as WasmTy>::compatible_with_store(x, store),
+ Ok(x) => <T as WasmRet>::compatible_with_store(x, store),
Err(_) => true,
}
}
- unsafe fn into_abi_for_ret(self, store: &Store) -> Result<Self::Abi, Trap> {
-     self.map(|val| <T as WasmTy>::into_abi(val, store))
+ unsafe fn into_abi_for_ret(
+     self,
+     store: &Store,
+     retptr: Self::Retptr,
+ ) -> Result<Self::Abi, Trap> {
+     self.and_then(|val| val.into_abi_for_ret(store, retptr))
}
- fn valtype() -> Option<ValType> {
-     Some(<T as WasmTy>::valtype())
+ fn func_type(params: impl Iterator<Item = ValType>) -> FuncType {
+     T::func_type(params)
+ }
+ unsafe fn wrap_trampoline(ptr: *mut u128, f: impl FnOnce(Self::Retptr) -> Self::Abi) {
+     T::wrap_trampoline(ptr, f)
}
fn into_fallible(self) -> Result<T, Trap> {
@@ -1412,6 +1372,169 @@ where
}
}
macro_rules! impl_wasm_host_results {
($n:tt $($t:ident)*) => (
#[allow(non_snake_case)]
unsafe impl<$($t),*> WasmRet for ($($t,)*)
where
$($t: WasmTy,)*
($($t::Abi,)*): HostAbi,
{
type Abi = <($($t::Abi,)*) as HostAbi>::Abi;
type Retptr = <($($t::Abi,)*) as HostAbi>::Retptr;
type Fallible = Result<Self, Trap>;
#[inline]
fn compatible_with_store(&self, _store: &Store) -> bool {
let ($($t,)*) = self;
$( $t.compatible_with_store(_store) && )* true
}
#[inline]
unsafe fn into_abi_for_ret(self, _store: &Store, ptr: Self::Retptr) -> Result<Self::Abi, Trap> {
let ($($t,)*) = self;
let abi = ($($t.into_abi(_store),)*);
Ok(<($($t::Abi,)*) as HostAbi>::into_abi(abi, ptr))
}
fn func_type(params: impl Iterator<Item = ValType>) -> FuncType {
FuncType::new(
params,
std::array::IntoIter::new([$($t::valtype(),)*]),
)
}
#[allow(unused_assignments)]
unsafe fn wrap_trampoline(mut _ptr: *mut u128, f: impl FnOnce(Self::Retptr) -> Self::Abi) {
let ($($t,)*) = <($($t::Abi,)*) as HostAbi>::call(f);
$(
*_ptr.cast() = $t;
_ptr = _ptr.add(1);
)*
}
#[inline]
fn into_fallible(self) -> Result<Self, Trap> {
Ok(self)
}
#[inline]
fn fallible_from_trap(trap: Trap) -> Result<Self, Trap> {
Err(trap)
}
}
)
}
for_each_function_signature!(impl_wasm_host_results);
// Internal trait representing how to communicate tuples of return values across
// an ABI boundary. This internally corresponds to the "wasmtime" ABI inside of
// cranelift itself. Notably the first element of each tuple is returned via the
// typical system ABI (e.g. systemv or fastcall depending on platform) and all
// other values are returned packed via the stack.
//
// This trait helps to encapsulate all the details of that.
#[doc(hidden)]
pub trait HostAbi {
// A value returned from native functions which return `Self`
type Abi: Copy;
// A return pointer, added to the end of the argument list, for native
// functions that return `Self`. Note that a 0-sized type here should get
// elided at the ABI level.
type Retptr: Copy;
// Converts a value of `self` into its components. Stores necessary values
// into `ptr` and then returns whatever needs to be returned from the
// function.
unsafe fn into_abi(self, ptr: Self::Retptr) -> Self::Abi;
// Calls `f` with a suitably sized return area and requires `f` to return
// the raw abi value of the first element of our tuple. This will then
// unpack the `Retptr` and assemble it with `Self::Abi` to return an
// instance of the whole tuple.
unsafe fn call(f: impl FnOnce(Self::Retptr) -> Self::Abi) -> Self;
}
macro_rules! impl_host_abi {
// Base case, everything is `()`
(0) => {
impl HostAbi for () {
type Abi = ();
type Retptr = ();
unsafe fn into_abi(self, _ptr: Self::Retptr) -> Self::Abi {}
unsafe fn call(f: impl FnOnce(Self::Retptr) -> Self::Abi) -> Self {
f(())
}
}
};
// In the 1-case the retptr is not present, so it's a 0-sized value.
(1 $a:ident) => {
impl<$a: Copy> HostAbi for ($a,) {
type Abi = $a;
type Retptr = ();
unsafe fn into_abi(self, _ptr: Self::Retptr) -> Self::Abi {
self.0
}
unsafe fn call(f: impl FnOnce(Self::Retptr) -> Self::Abi) -> Self {
(f(()),)
}
}
};
// This is where the more interesting case happens. The first element of the
// tuple is returned via `Abi` and all other elements are returned via
// `Retptr`. We create a `TupleRetNN` structure to represent all of the
// return values here.
//
// Also note that this isn't implemented for the old backend right now due
// to the original author not really being sure how to implement this in the
// old backend.
($n:tt $t:ident $($u:ident)*) => {paste::paste!{
#[doc(hidden)]
#[allow(non_snake_case)]
#[repr(C)]
#[cfg(not(feature = "old-x86-backend"))]
pub struct [<TupleRet $n>]<$($u,)*> {
$($u: $u,)*
}
#[cfg(not(feature = "old-x86-backend"))]
#[allow(non_snake_case, unused_assignments)]
impl<$t: Copy, $($u: Copy,)*> HostAbi for ($t, $($u,)*) {
type Abi = $t;
type Retptr = *mut [<TupleRet $n>]<$($u,)*>;
unsafe fn into_abi(self, ptr: Self::Retptr) -> Self::Abi {
let ($t, $($u,)*) = self;
// Store the tail of our tuple into the return pointer...
$((*ptr).$u = $u;)*
// ... and return the head raw.
$t
}
unsafe fn call(f: impl FnOnce(Self::Retptr) -> Self::Abi) -> Self {
// Create space to store all the return values and then invoke
// the function.
let mut space = std::mem::MaybeUninit::uninit();
let t = f(space.as_mut_ptr());
let space = space.assume_init();
// Use the return value as the head of the tuple and unpack our
// return area to get the rest of the tuple.
(t, $(space.$u,)*)
}
}
}};
}
for_each_function_signature!(impl_host_abi);
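To make the convention encoded by the macro above concrete, here is a hand-written, hedged illustration of how a three-element tuple travels across the boundary; the `Tail` struct and function names are invented for the example and do not appear in the patch.

```
// The tail of the tuple lives behind the return pointer in a #[repr(C)]
// struct, while the head is returned by value, mirroring `TupleRetNN`.
#[repr(C)]
struct Tail {
    b: i64,
    c: f32,
}

// Shape of a native function returning (i32, i64, f32) under this scheme.
unsafe fn returns_three(out: *mut Tail) -> i32 {
    (*out).b = 2;
    (*out).c = 3.0;
    1
}

// Caller side: reserve space for the tail, call, then reassemble the tuple,
// which is what `HostAbi::call` does in the generated impls.
unsafe fn call_three() -> (i32, i64, f32) {
    let mut tail = std::mem::MaybeUninit::<Tail>::uninit();
    let head = returns_three(tail.as_mut_ptr());
    let tail = tail.assume_init();
    (head, tail.b, tail.c)
}
```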
/// Internal trait implemented for all arguments that can be passed to
/// [`Func::wrap`] and [`Config::wrap_host_func`](crate::Config::wrap_host_func).
///
@@ -1563,6 +1686,7 @@ macro_rules! impl_into_func {
vmctx: *mut VMContext,
caller_vmctx: *mut VMContext,
$( $args: $args::Abi, )*
+ retptr: R::Retptr,
) -> R::Abi
where
F: Fn(Caller<'_>, $( $args ),*) -> R + 'static,
@@ -1624,7 +1748,7 @@ macro_rules! impl_into_func {
raise_cross_store_trap();
}
- match ret.into_abi_for_ret(&store) {
+ match ret.into_abi_for_ret(&store, retptr) {
Ok(val) => CallResult::Ok(val),
Err(trap) => CallResult::Trap(trap),
}
@@ -1662,6 +1786,7 @@ macro_rules! impl_into_func {
*mut VMContext,
*mut VMContext,
$( $args::Abi, )*
+ R::Retptr,
) -> R::Abi,
>(ptr);
@@ -1670,15 +1795,14 @@ macro_rules! impl_into_func {
let $args = *args.add(_n).cast::<$args::Abi>();
_n += 1;
)*
- let ret = ptr(callee_vmctx, caller_vmctx, $( $args ),*);
- *args.cast::<R::Abi>() = ret;
+ R::wrap_trampoline(args, |retptr| {
+     ptr(callee_vmctx, caller_vmctx, $( $args, )* retptr)
+ });
}
- let ty = FuncType::new(
+ let ty = R::func_type(
None::<ValType>.into_iter()
$(.chain(Some($args::valtype())))*
- ,
- R::valtype(),
);
let trampoline = host_trampoline::<$($args,)* R>;
@@ -1686,7 +1810,7 @@ macro_rules! impl_into_func {
// If not given a registry, use a default signature index that is guaranteed to trap
// if the function is called indirectly without first being associated with a store (a bug condition).
let shared_signature_id = registry
- .map(|r| r.register(ty.as_wasm_func_type(), trampoline))
+ .map(|r| r.register(ty.as_wasm_func_type(), Some(trampoline)))
.unwrap_or(VMSharedSignatureIndex::default());
let instance = unsafe {
@@ -1,10 +1,10 @@
- use super::invoke_wasm_and_catch_traps;
+ use super::{invoke_wasm_and_catch_traps, HostAbi};
use crate::{ExternRef, Func, Store, Trap, ValType};
use anyhow::{bail, Result};
use std::marker;
use std::mem::{self, MaybeUninit};
use std::ptr;
- use wasmtime_runtime::{VMContext, VMFunctionBody, VMTrampoline};
+ use wasmtime_runtime::{VMContext, VMFunctionBody};
/// A statically typed WebAssembly function.
///
@@ -103,7 +103,6 @@ where
let anyfunc = self.func.export.anyfunc.as_ref();
let result = params.invoke::<Results>(
&self.func.instance.store,
- self.func.trampoline,
anyfunc.func_ptr.as_ptr(),
anyfunc.vmctx,
ptr::null_mut(),
@@ -274,7 +273,6 @@ pub unsafe trait WasmParams {
unsafe fn invoke<R: WasmResults>(
self,
store: &Store,
- trampoline: VMTrampoline,
func: *const VMFunctionBody,
vmctx1: *mut VMContext,
vmctx2: *mut VMContext,
@@ -296,12 +294,11 @@ where
unsafe fn invoke<R: WasmResults>(
self,
store: &Store,
- trampoline: VMTrampoline,
func: *const VMFunctionBody,
vmctx1: *mut VMContext,
vmctx2: *mut VMContext,
) -> R {
- <(T,)>::invoke((self,), store, trampoline, func, vmctx1, vmctx2)
+ <(T,)>::invoke((self,), store, func, vmctx1, vmctx2)
}
}
@@ -333,66 +330,30 @@ macro_rules! impl_wasm_params {
unsafe fn invoke<R: WasmResults>(
self,
store: &Store,
- trampoline: VMTrampoline,
func: *const VMFunctionBody,
vmctx1: *mut VMContext,
vmctx2: *mut VMContext,
) -> R {
// Some signatures can go directly into JIT code which uses the
// default platform ABI, but basically only those without
// multiple return values. With multiple return values we can't
// natively in Rust call such a function because there's no way
// to model it (yet).
//
// To work around that we use the trampoline which passes
// arguments/values via the stack which allows us to match the
// expected ABI. Note that this branch, using the trampoline,
// is slower as a result and has an extra indirect function
// call as well. In the future if this is a problem we should
// consider updating JIT code to use an ABI we can call from
// Rust itself.
if R::uses_trampoline() {
R::with_space(|space1| {
// Figure out whether the parameters or the results
// require more space, and use the bigger one as where
// to store arguments and load return values from.
let mut space2 = [0; $n];
let space = if space1.len() < space2.len() {
space2.as_mut_ptr()
} else {
space1.as_mut_ptr()
};
// ... store the ABI for all values into our storage
// area...
let ($($t,)*) = self;
let mut _n = 0;
$(
*space.add(_n).cast::<$t::Abi>() = $t.into_abi(store);
_n += 1;
)*
// ... make the indirect call through the trampoline
// which will read from `space` and also write all the
// results to `space`...
trampoline(vmctx1, vmctx2, func, space);
// ... and then we can decode all the return values
// from `space`.
R::from_storage(space, store)
})
} else {
let fnptr = mem::transmute::<
*const VMFunctionBody,
unsafe extern "C" fn(
*mut VMContext,
*mut VMContext,
$($t::Abi,)*
+ R::Retptr,
) -> R::Abi,
>(func);
let ($($t,)*) = self;
- R::from_abi(fnptr(vmctx1, vmctx2, $($t.into_abi(store),)*), store)
- }
+ // Use the `call` function to acquire a `retptr` which we'll
+ // forward to the native function. Once we have it we also
+ // convert all our arguments to abi arguments to go to the raw
+ // function.
+ //
+ // Upon returning `R::call` will convert all the returns back
+ // into `R`.
+ R::call(store, |retptr| {
+     fnptr(vmctx1, vmctx2, $($t.into_abi(store),)* retptr)
+ })
}
}
};
@@ -408,80 +369,45 @@ for_each_function_signature!(impl_wasm_params);
/// `TypedFunc` is not currently supported.
pub unsafe trait WasmResults: WasmParams {
#[doc(hidden)]
- type Abi;
+ type Abi: Copy;
#[doc(hidden)]
- unsafe fn from_abi(abi: Self::Abi, store: &Store) -> Self;
+ type Retptr: Copy;
#[doc(hidden)]
- fn uses_trampoline() -> bool;
+ unsafe fn call(store: &Store, f: impl FnOnce(Self::Retptr) -> Self::Abi) -> Self;
// Provides a stack-allocated array with enough space to store all these
// result values.
//
// It'd be nice if we didn't have to have this API and could do something
// with const-generics (or something like that), but I couldn't figure it
// out. If a future Rust explorer is able to get something like `const LEN:
// usize` working that'd be great!
#[doc(hidden)]
fn with_space<R>(_: impl FnOnce(&mut [u128]) -> R) -> R;
#[doc(hidden)]
unsafe fn from_storage(ptr: *const u128, store: &Store) -> Self;
}
- unsafe impl<T: WasmTy> WasmResults for T {
-     type Abi = <(T,) as WasmResults>::Abi;
-     unsafe fn from_abi(abi: Self::Abi, store: &Store) -> Self {
-         <(T,) as WasmResults>::from_abi(abi, store).0
-     }
-     fn uses_trampoline() -> bool {
-         <(T,) as WasmResults>::uses_trampoline()
-     }
-     fn with_space<R>(f: impl FnOnce(&mut [u128]) -> R) -> R {
-         <(T,) as WasmResults>::with_space(f)
-     }
-     unsafe fn from_storage(ptr: *const u128, store: &Store) -> Self {
-         <(T,) as WasmResults>::from_storage(ptr, store).0
-     }
- }
- #[doc(hidden)]
- pub enum Void {}
+ // Forwards from a bare type `T` to the 1-tuple type `(T,)`
+ unsafe impl<T: WasmTy> WasmResults for T
+ where
+     (T::Abi,): HostAbi,
+ {
+     type Abi = <(T,) as WasmResults>::Abi;
+     type Retptr = <(T,) as WasmResults>::Retptr;
+     unsafe fn call(store: &Store, f: impl FnOnce(Self::Retptr) -> Self::Abi) -> Self {
+         <(T,) as WasmResults>::call(store, f).0
+     }
+ }
macro_rules! impl_wasm_results {
($n:tt $($t:ident)*) => {
#[allow(non_snake_case, unused_variables)]
- unsafe impl<$($t: WasmTy,)*> WasmResults for ($($t,)*) {
-     type Abi = impl_wasm_results!(@abi $n $($t)*);
-     unsafe fn from_abi(abi: Self::Abi, store: &Store) -> Self {
-         impl_wasm_results!(@from_abi abi store $n $($t)*)
-     }
-     fn uses_trampoline() -> bool {
-         $n > 1
-     }
-     fn with_space<R>(f: impl FnOnce(&mut [u128]) -> R) -> R {
-         f(&mut [0; $n])
-     }
-     unsafe fn from_storage(ptr: *const u128, store: &Store) -> Self {
-         let mut _n = 0;
-         $(
-             let $t = $t::from_abi(*ptr.add(_n).cast::<$t::Abi>(), store);
-             _n += 1;
-         )*
-         ($($t,)*)
+ unsafe impl<$($t: WasmTy,)*> WasmResults for ($($t,)*)
+     where ($($t::Abi,)*): HostAbi
+ {
+     type Abi = <($($t::Abi,)*) as HostAbi>::Abi;
+     type Retptr = <($($t::Abi,)*) as HostAbi>::Retptr;
+     unsafe fn call(store: &Store, f: impl FnOnce(Self::Retptr) -> Self::Abi) -> Self {
+         // Delegate via the host abi to figure out what the actual ABI
+         // for dealing with this tuple type is, and then we can re-tuple
+         // everything and create actual values via `from_abi` after the
+         // call is complete.
+         let ($($t,)*) = <($($t::Abi,)*) as HostAbi>::call(f);
+         ($($t::from_abi($t, store),)*)
}
}
};
// 0/1 return values we can use natively, everything else isn't expressible
// and won't be used so define the abi type to Void.
(@abi 0) => (());
(@abi 1 $t:ident) => ($t::Abi);
(@abi $($t:tt)*) => (Void);
(@from_abi $abi:ident $store:ident 0) => (());
(@from_abi $abi:ident $store:ident 1 $t:ident) => (($t::from_abi($abi, $store),));
(@from_abi $abi:ident $store:ident $($t:tt)*) => ({
debug_assert!(false);
match $abi {}
});
}
for_each_function_signature!(impl_wasm_results);
@@ -29,20 +29,33 @@ struct Entry {
// Note that the code memory for this trampoline is not owned by this
// type, but instead it's expected to be owned by the store that this
// registry lives within.
- trampoline: VMTrampoline,
+ trampoline: Option<VMTrampoline>,
}
impl SignatureRegistry {
/// Register a signature and return its unique index.
+ ///
+ /// Note that `trampoline` can be `None` which indicates that an index is
+ /// desired for this signature but the trampoline for it is not compiled or
+ /// available.
pub fn register(
&mut self,
wasm: &WasmFuncType,
- trampoline: VMTrampoline,
+ trampoline: Option<VMTrampoline>,
) -> VMSharedSignatureIndex {
let len = self.wasm2index.len();
match self.wasm2index.entry(wasm.clone()) {
- hash_map::Entry::Occupied(entry) => *entry.get(),
+ hash_map::Entry::Occupied(entry) => {
+     let ret = *entry.get();
+     let entry = &mut self.index_map[ret.bits() as usize];
+     // If the entry does not previously have a trampoline, then
+     // overwrite it with whatever was specified by this function.
+     if entry.trampoline.is_none() {
+         entry.trampoline = trampoline;
+     }
+     ret
+ }
hash_map::Entry::Vacant(entry) => {
// Keep `signature_hash` len under 2**32 -- VMSharedSignatureIndex::new(std::u32::MAX)
// is reserved for VMSharedSignatureIndex::default().
@@ -75,8 +88,10 @@ impl SignatureRegistry {
&self,
idx: VMSharedSignatureIndex,
) -> Option<(&WasmFuncType, VMTrampoline)> {
- self.index_map
+ let (wasm, trampoline) = self
+     .index_map
.get(idx.bits() as usize)
- .map(|e| (&e.wasm, e.trampoline))
+ .map(|e| (&e.wasm, e.trampoline))?;
+ Some((wasm, trampoline?))
}
}
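The behavior of the occupied-entry branch above (reserve an index first, fill the trampoline in later) can be illustrated with a small standalone analogue; the map and values below are purely illustrative and are not the wasmtime types.

```
use std::collections::HashMap;

// Analogue of `SignatureRegistry::register`: a later registration may supply
// the trampoline that an earlier registration left as `None`.
fn register(map: &mut HashMap<String, Option<u32>>, key: &str, trampoline: Option<u32>) {
    let entry = map.entry(key.to_string()).or_insert(None);
    if entry.is_none() {
        *entry = trampoline;
    }
}

fn main() {
    let mut registry = HashMap::new();
    register(&mut registry, "sig", None);    // index reserved, no trampoline yet
    register(&mut registry, "sig", Some(7)); // compiled trampoline fills the slot
    assert_eq!(registry["sig"], Some(7));
}
```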
@@ -202,7 +202,7 @@ impl Store {
.inner
.signatures
.borrow_mut()
- .register(ty.as_wasm_func_type(), trampoline);
+ .register(ty.as_wasm_func_type(), Some(trampoline));
Box::new(anyfunc)
});
@@ -322,9 +322,21 @@ impl Store {
fn register_signatures(&self, module: &Module) {
let mut signatures = self.signatures().borrow_mut();
let types = module.types();
+ // Register a unique index for all types in this module, even if they
+ // don't have a trampoline.
+ for (_, ty) in module.compiled_module().module().types.iter() {
+     if let wasmtime_environ::ModuleType::Function(index) = ty {
+         let wasm = &types.wasm_signatures[*index];
+         signatures.register(wasm, None);
+     }
+ }
+ // Afterwards register all compiled trampolines for this module with the
+ // signature registry as well.
for (index, trampoline) in module.compiled_module().trampolines() {
let wasm = &types.wasm_signatures[*index];
- signatures.register(wasm, *trampoline);
+ signatures.register(wasm, Some(*trampoline));
}
}
@@ -18,6 +18,7 @@ use wasmtime_jit::trampoline::{
self, binemit, pretty_error, Context, FunctionBuilder, FunctionBuilderContext,
};
use wasmtime_jit::CodeMemory;
+ use wasmtime_jit::{blank_sig, wasmtime_call_conv};
use wasmtime_runtime::{
Imports, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
OnDemandInstanceAllocator, VMContext, VMFunctionBody, VMSharedSignatureIndex, VMTrampoline,
@@ -91,16 +92,7 @@ fn make_trampoline(
// Mostly reverse copy of the similar method from wasmtime's
// wasmtime-jit/src/compiler.rs.
let pointer_type = isa.pointer_type();
- let mut stub_sig = ir::Signature::new(isa.frontend_config().default_call_conv);
+ let mut stub_sig = blank_sig(isa, wasmtime_call_conv(isa));
- // Add the caller/callee `vmctx` parameters.
- stub_sig.params.push(ir::AbiParam::special(
-     pointer_type,
-     ir::ArgumentPurpose::VMContext,
- ));
- // Add the caller `vmctx` parameter.
- stub_sig.params.push(ir::AbiParam::new(pointer_type));
// Add the `values_vec` parameter.
stub_sig.params.push(ir::AbiParam::new(pointer_type));
@@ -220,8 +212,15 @@ fn create_function_trampoline(
// reference types which requires safepoints.
let isa = config.target_isa_with_reference_types();
- let pointer_type = isa.pointer_type();
- let sig = ft.get_wasmtime_signature(pointer_type);
+ let mut sig = blank_sig(&*isa, wasmtime_call_conv(&*isa));
+ sig.params.extend(
+     ft.params()
+         .map(|p| ir::AbiParam::new(p.get_wasmtime_type())),
+ );
+ sig.returns.extend(
+     ft.results()
+         .map(|p| ir::AbiParam::new(p.get_wasmtime_type())),
+ );
let mut fn_builder_ctx = FunctionBuilderContext::new();
let mut module = Module::new();
@@ -271,7 +270,7 @@ pub fn create_function(
// If there is no signature registry, use the default signature index which is
// guaranteed to trap if there is ever an indirect call on the function (should not happen)
let shared_signature_id = registry
- .map(|r| r.register(ft.as_wasm_func_type(), trampoline))
+ .map(|r| r.register(ft.as_wasm_func_type(), Some(trampoline)))
.unwrap_or(VMSharedSignatureIndex::default());
unsafe {
@@ -298,28 +298,6 @@ impl FuncType {
&self.sig
}
/// Get the Cranelift-compatible function signature.
pub(crate) fn get_wasmtime_signature(&self, pointer_type: ir::Type) -> ir::Signature {
use wasmtime_environ::ir::{AbiParam, ArgumentPurpose, Signature};
use wasmtime_jit::native;
let call_conv = native::call_conv();
let mut params = vec![
AbiParam::special(pointer_type, ArgumentPurpose::VMContext),
AbiParam::new(pointer_type),
];
params.extend(self.params().map(|p| AbiParam::new(p.get_wasmtime_type())));
let returns = self
.results()
.map(|p| AbiParam::new(p.get_wasmtime_type()))
.collect::<Vec<_>>();
Signature {
params,
returns,
call_conv,
}
}
pub(crate) fn from_wasm_func_type(sig: &wasm::WasmFuncType) -> FuncType {
FuncType { sig: sig.clone() }
}
@@ -8,9 +8,11 @@
// You can execute this example with `cargo run --example multi`
use anyhow::Result;
+ #[cfg(not(feature = "old-x86-backend"))]
+ fn main() -> Result<()> {
use wasmtime::*;
- fn main() -> Result<()> {
println!("Initializing...");
let engine = Engine::default();
let store = Store::new(&engine);
@@ -68,3 +70,8 @@ fn main() -> Result<()> {
Ok(())
}
#[cfg(feature = "old-x86-backend")]
fn main() -> Result<()> {
Ok(())
}
@@ -550,6 +550,7 @@ fn trampolines_always_valid() -> anyhow::Result<()> {
}
#[test]
+ #[cfg(not(feature = "old-x86-backend"))]
fn typed_multiple_results() -> anyhow::Result<()> {
let store = Store::default();
let module = Module::new(
@@ -619,3 +620,157 @@ fn trap_doesnt_leak() -> anyhow::Result<()> {
assert!(dtor2_run.get());
Ok(())
}
#[test]
#[cfg(not(feature = "old-x86-backend"))]
fn wrap_multiple_results() -> anyhow::Result<()> {
fn test<T>(store: &Store, t: T) -> anyhow::Result<()>
where
T: WasmRet + WasmResults + PartialEq + Copy + std::fmt::Debug + EqualToValues + 'static,
{
let f = Func::wrap(store, move || t);
assert_eq!(f.typed::<(), T>()?.call(())?, t);
assert!(t.eq_values(&f.call(&[])?));
let module = Module::new(store.engine(), &T::gen_wasm())?;
let instance = Instance::new(store, &module, &[f.into()])?;
let f = instance.get_func("foo").unwrap();
assert_eq!(f.typed::<(), T>()?.call(())?, t);
assert!(t.eq_values(&f.call(&[])?));
Ok(())
}
let store = Store::default();
// 0 element
test(&store, ())?;
// 1 element
test(&store, (1i32,))?;
test(&store, (2u32,))?;
test(&store, (3i64,))?;
test(&store, (4u64,))?;
test(&store, (5.0f32,))?;
test(&store, (6.0f64,))?;
// 2 element ...
test(&store, (7i32, 8i32))?;
test(&store, (7i32, 8i64))?;
test(&store, (7i32, 8f32))?;
test(&store, (7i32, 8f64))?;
test(&store, (7i64, 8i32))?;
test(&store, (7i64, 8i64))?;
test(&store, (7i64, 8f32))?;
test(&store, (7i64, 8f64))?;
test(&store, (7f32, 8i32))?;
test(&store, (7f32, 8i64))?;
test(&store, (7f32, 8f32))?;
test(&store, (7f32, 8f64))?;
test(&store, (7f64, 8i32))?;
test(&store, (7f64, 8i64))?;
test(&store, (7f64, 8f32))?;
test(&store, (7f64, 8f64))?;
// and beyond...
test(&store, (1i32, 2i32, 3i32))?;
test(&store, (1i32, 2f32, 3i32))?;
test(&store, (1f64, 2f32, 3i32))?;
test(&store, (1f64, 2i64, 3i32))?;
test(&store, (1f32, 2f32, 3i64, 4f64))?;
test(&store, (1f64, 2i64, 3i32, 4i64, 5f32))?;
test(&store, (1i32, 2f64, 3i64, 4f64, 5f64, 6f32))?;
test(&store, (1i64, 2i32, 3i64, 4f32, 5f32, 6i32, 7u64))?;
test(&store, (1u32, 2f32, 3u64, 4f64, 5i32, 6f32, 7u64, 8u32))?;
test(
&store,
(1f32, 2f64, 3f32, 4i32, 5u32, 6i64, 7f32, 8i32, 9u64),
)?;
return Ok(());
trait EqualToValues {
fn eq_values(&self, values: &[Val]) -> bool;
fn gen_wasm() -> String;
}
macro_rules! equal_tuples {
($($cnt:tt ($($a:ident),*))*) => ($(
#[allow(non_snake_case)]
impl<$($a: EqualToValue,)*> EqualToValues for ($($a,)*) {
fn eq_values(&self, values: &[Val]) -> bool {
let ($($a,)*) = self;
let mut _values = values.iter();
_values.len() == $cnt &&
$($a.eq_value(_values.next().unwrap()) &&)*
true
}
fn gen_wasm() -> String {
let mut wasm = String::new();
wasm.push_str("(module ");
wasm.push_str("(type $t (func (result ");
$(
wasm.push_str($a::wasm_ty());
wasm.push_str(" ");
)*
wasm.push_str(")))");
wasm.push_str("(import \"\" \"\" (func $host (type $t)))");
wasm.push_str("(func (export \"foo\") (type $t)");
wasm.push_str("call $host");
wasm.push_str(")");
wasm.push_str(")");
wasm
}
}
)*)
}
equal_tuples! {
0 ()
1 (A1)
2 (A1, A2)
3 (A1, A2, A3)
4 (A1, A2, A3, A4)
5 (A1, A2, A3, A4, A5)
6 (A1, A2, A3, A4, A5, A6)
7 (A1, A2, A3, A4, A5, A6, A7)
8 (A1, A2, A3, A4, A5, A6, A7, A8)
9 (A1, A2, A3, A4, A5, A6, A7, A8, A9)
}
trait EqualToValue {
fn eq_value(&self, value: &Val) -> bool;
fn wasm_ty() -> &'static str;
}
macro_rules! equal_values {
($a:ident $($ty:ident $wasm:tt $variant:ident $e:expr,)*) => ($(
impl EqualToValue for $ty {
fn eq_value(&self, val: &Val) -> bool {
if let Val::$variant($a) = *val {
return *self == $e;
}
false
}
fn wasm_ty() -> &'static str {
$wasm
}
}
)*)
}
equal_values! {
a
i32 "i32" I32 a,
u32 "i32" I32 a as u32,
i64 "i64" I64 a,
u64 "i64" I64 a as u64,
f32 "f32" F32 f32::from_bits(a),
f64 "f64" F64 f64::from_bits(a),
}
}