Initial support for function, table, memory, and global imports.
@@ -15,14 +15,10 @@ cranelift-entity = { git = "https://github.com/sunfishcode/cranelift.git", branc
cranelift-wasm = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
cranelift-frontend = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
wasmtime-environ = { path = "../environ" }
wasmtime-runtime = { path = "../runtime" }
region = "1.0.0"
lazy_static = "1.2.0"
libc = { version = "0.2.44", default-features = false }
errno = "0.2.4"
memoffset = "0.2.1"
cast = { version = "0.2.2", default-features = false }
failure = "0.1.3"
failure_derive = "0.1.3"
failure = { version = "0.1.3", default-features = false }
failure_derive = { version = "0.1.3", default-features = false }

[build-dependencies]
cmake = "0.1.35"

@@ -1,38 +0,0 @@
extern crate bindgen;
extern crate cmake;
extern crate regex;

use cmake::Config;
use regex::Regex;
use std::env;
use std::path::PathBuf;

fn main() {
    let dst = Config::new("signalhandlers").build();

    println!("cargo:rustc-link-search=native={}", dst.display());
    println!("cargo:rustc-link-lib=static=SignalHandlers");

    let mut bindings_builder = bindgen::Builder::default()
        .header("signalhandlers/SignalHandlers.h")
        .whitelist_type("CodeSegment")
        .whitelist_type("TrapContext")
        .whitelist_type("jmp_buf")
        .whitelist_function("EnsureEagerSignalHandlers");

    // If we're compiling for Darwin, compile in extra Darwin support routines.
    if Regex::new(r"-darwin[[:digit:].]*$")
        .unwrap()
        .is_match(&env::var("TARGET").unwrap())
    {
        bindings_builder = bindings_builder.whitelist_function("EnsureDarwinMachPorts");
    }

    let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());

    bindings_builder
        .generate()
        .expect("Unable to generate bindings")
        .write_to_file(out_path.join("signalhandlers.rs"))
        .expect("Couldn't write bindings!");
}

@@ -1,8 +0,0 @@
cmake_minimum_required(VERSION 3.0)
project(SignalHandlers CXX)

set(CMAKE_CXX_FLAGS "-std=c++11 -fno-exceptions -fno-rtti -fPIC")

add_library(SignalHandlers STATIC SignalHandlers.cpp)

install(TARGETS SignalHandlers DESTINATION .)
@@ -1,823 +0,0 @@
//! This file is largely derived from the code in WasmSignalHandlers.cpp in SpiderMonkey:
//!
//! https://dxr.mozilla.org/mozilla-central/source/js/src/wasm/WasmSignalHandlers.cpp
//!
//! Use of Mach ports on Darwin platforms (the USE_APPLE_MACH_PORTS code below) is
//! currently disabled.

#include "SignalHandlers.h"

#include <stdint.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>

#if defined(_WIN32)
# include <winternl.h> // must include before util/Windows.h's `#undef`s
# include "util/Windows.h"
#elif defined(USE_APPLE_MACH_PORTS)
# include <mach/exc.h>
# include <mach/mach.h>
# include <pthread.h>
#else
# include <signal.h>
#endif

// =============================================================================
// The following pile of macros and includes defines the ToRegisterState() and
// the ContextToPC() functions from the (highly) platform-specific CONTEXT
// struct which is provided to the signal handler.
// =============================================================================

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# include <sys/ucontext.h> // for ucontext_t, mcontext_t
#endif

#if defined(__x86_64__)
# if defined(__DragonFly__)
# include <machine/npx.h> // for union savefpu
# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
       defined(__NetBSD__) || defined(__OpenBSD__)
# include <machine/fpu.h> // for struct savefpu/fxsave64
# endif
#endif

#if defined(_WIN32)
# define EIP_sig(p) ((p)->Eip)
# define EBP_sig(p) ((p)->Ebp)
# define ESP_sig(p) ((p)->Esp)
# define RIP_sig(p) ((p)->Rip)
# define RSP_sig(p) ((p)->Rsp)
# define RBP_sig(p) ((p)->Rbp)
# define R11_sig(p) ((p)->R11)
# define R13_sig(p) ((p)->R13)
# define R14_sig(p) ((p)->R14)
# define R15_sig(p) ((p)->R15)
# define EPC_sig(p) ((p)->Pc)
# define RFP_sig(p) ((p)->Fp)
# define R31_sig(p) ((p)->Sp)
# define RLR_sig(p) ((p)->Lr)
#elif defined(__OpenBSD__)
# define EIP_sig(p) ((p)->sc_eip)
# define EBP_sig(p) ((p)->sc_ebp)
# define ESP_sig(p) ((p)->sc_esp)
# define RIP_sig(p) ((p)->sc_rip)
# define RSP_sig(p) ((p)->sc_rsp)
# define RBP_sig(p) ((p)->sc_rbp)
# define R11_sig(p) ((p)->sc_r11)
# if defined(__arm__)
# define R13_sig(p) ((p)->sc_usr_sp)
# define R14_sig(p) ((p)->sc_usr_lr)
# define R15_sig(p) ((p)->sc_pc)
# else
# define R13_sig(p) ((p)->sc_r13)
# define R14_sig(p) ((p)->sc_r14)
# define R15_sig(p) ((p)->sc_r15)
# endif
# if defined(__aarch64__)
# define EPC_sig(p) ((p)->sc_elr)
# define RFP_sig(p) ((p)->sc_x[29])
# define RLR_sig(p) ((p)->sc_lr)
# define R31_sig(p) ((p)->sc_sp)
# endif
# if defined(__mips__)
# define EPC_sig(p) ((p)->sc_pc)
# define RFP_sig(p) ((p)->sc_regs[30])
# endif
#elif defined(__linux__) || defined(__sun)
# if defined(__linux__)
# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP])
# define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
# define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
# else
# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_PC])
# define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
# define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
# endif
# define RIP_sig(p) ((p)->uc_mcontext.gregs[REG_RIP])
# define RSP_sig(p) ((p)->uc_mcontext.gregs[REG_RSP])
# define RBP_sig(p) ((p)->uc_mcontext.gregs[REG_RBP])
# if defined(__linux__) && defined(__arm__)
# define R11_sig(p) ((p)->uc_mcontext.arm_fp)
# define R13_sig(p) ((p)->uc_mcontext.arm_sp)
# define R14_sig(p) ((p)->uc_mcontext.arm_lr)
# define R15_sig(p) ((p)->uc_mcontext.arm_pc)
# else
# define R11_sig(p) ((p)->uc_mcontext.gregs[REG_R11])
# define R13_sig(p) ((p)->uc_mcontext.gregs[REG_R13])
# define R14_sig(p) ((p)->uc_mcontext.gregs[REG_R14])
# define R15_sig(p) ((p)->uc_mcontext.gregs[REG_R15])
# endif
# if defined(__linux__) && defined(__aarch64__)
# define EPC_sig(p) ((p)->uc_mcontext.pc)
# define RFP_sig(p) ((p)->uc_mcontext.regs[29])
# define RLR_sig(p) ((p)->uc_mcontext.regs[30])
# define R31_sig(p) ((p)->uc_mcontext.regs[31])
# endif
# if defined(__linux__) && defined(__mips__)
# define EPC_sig(p) ((p)->uc_mcontext.pc)
# define RFP_sig(p) ((p)->uc_mcontext.gregs[30])
# define RSP_sig(p) ((p)->uc_mcontext.gregs[29])
# define R31_sig(p) ((p)->uc_mcontext.gregs[31])
# endif
# if defined(__linux__) && (defined(__sparc__) && defined(__arch64__))
# define PC_sig(p) ((p)->uc_mcontext.mc_gregs[MC_PC])
# define FP_sig(p) ((p)->uc_mcontext.mc_fp)
# define SP_sig(p) ((p)->uc_mcontext.mc_i7)
# endif
# if defined(__linux__) && \
     (defined(__ppc64__) || defined (__PPC64__) || defined(__ppc64le__) || defined (__PPC64LE__))
# define R01_sig(p) ((p)->uc_mcontext.gp_regs[1])
# define R32_sig(p) ((p)->uc_mcontext.gp_regs[32])
# endif
#elif defined(__NetBSD__)
# define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP])
# define EBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EBP])
# define ESP_sig(p) ((p)->uc_mcontext.__gregs[_REG_ESP])
# define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP])
# define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP])
# define RBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBP])
# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
# if defined(__aarch64__)
# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC])
# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_X29])
# define RLR_sig(p) ((p)->uc_mcontext.__gregs[_REG_X30])
# define R31_sig(p) ((p)->uc_mcontext.__gregs[_REG_SP])
# endif
# if defined(__mips__)
# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_EPC])
# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_S8])
# endif
#elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# define EIP_sig(p) ((p)->uc_mcontext.mc_eip)
# define EBP_sig(p) ((p)->uc_mcontext.mc_ebp)
# define ESP_sig(p) ((p)->uc_mcontext.mc_esp)
# define RIP_sig(p) ((p)->uc_mcontext.mc_rip)
# define RSP_sig(p) ((p)->uc_mcontext.mc_rsp)
# define RBP_sig(p) ((p)->uc_mcontext.mc_rbp)
# if defined(__FreeBSD__) && defined(__arm__)
# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
# else
# define R11_sig(p) ((p)->uc_mcontext.mc_r11)
# define R13_sig(p) ((p)->uc_mcontext.mc_r13)
# define R14_sig(p) ((p)->uc_mcontext.mc_r14)
# define R15_sig(p) ((p)->uc_mcontext.mc_r15)
# endif
# if defined(__FreeBSD__) && defined(__aarch64__)
# define EPC_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_elr)
# define RFP_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_x[29])
# define RLR_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_lr)
# define R31_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_sp)
# endif
# if defined(__FreeBSD__) && defined(__mips__)
# define EPC_sig(p) ((p)->uc_mcontext.mc_pc)
# define RFP_sig(p) ((p)->uc_mcontext.mc_regs[30])
# endif
#elif defined(USE_APPLE_MACH_PORTS)
# define EIP_sig(p) ((p)->thread.uts.ts32.__eip)
# define EBP_sig(p) ((p)->thread.uts.ts32.__ebp)
# define ESP_sig(p) ((p)->thread.uts.ts32.__esp)
# define RIP_sig(p) ((p)->thread.__rip)
# define RBP_sig(p) ((p)->thread.__rbp)
# define RSP_sig(p) ((p)->thread.__rsp)
# define R11_sig(p) ((p)->thread.__r[11])
# define R13_sig(p) ((p)->thread.__sp)
# define R14_sig(p) ((p)->thread.__lr)
# define R15_sig(p) ((p)->thread.__pc)
#elif defined(__APPLE__)
# define EIP_sig(p) ((p)->uc_mcontext->__ss.__eip)
# define EBP_sig(p) ((p)->uc_mcontext->__ss.__ebp)
# define ESP_sig(p) ((p)->uc_mcontext->__ss.__esp)
# define RIP_sig(p) ((p)->uc_mcontext->__ss.__rip)
# define RBP_sig(p) ((p)->uc_mcontext->__ss.__rbp)
# define RSP_sig(p) ((p)->uc_mcontext->__ss.__rsp)
# define R11_sig(p) ((p)->uc_mcontext->__ss.__r11)
# define R13_sig(p) ((p)->uc_mcontext->__ss.__sp)
# define R14_sig(p) ((p)->uc_mcontext->__ss.__lr)
# define R15_sig(p) ((p)->uc_mcontext->__ss.__pc)
#else
# error "Don't know how to read/write to the thread state via the mcontext_t."
#endif

#if defined(ANDROID)
// Not all versions of the Android NDK define ucontext_t or mcontext_t.
// Detect this and provide custom but compatible definitions. Note that these
// follow the GLibc naming convention to access register values from
// mcontext_t.
//
// See: https://chromiumcodereview.appspot.com/10829122/
// See: http://code.google.com/p/android/issues/detail?id=34784
# if !defined(__BIONIC_HAVE_UCONTEXT_T)
# if defined(__arm__)

// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
# if !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
# include <asm/sigcontext.h>
# endif

typedef struct sigcontext mcontext_t;

typedef struct ucontext {
    uint32_t uc_flags;
    struct ucontext* uc_link;
    stack_t uc_stack;
    mcontext_t uc_mcontext;
    // Other fields are not used so don't define them here.
} ucontext_t;

# elif defined(__mips__)

typedef struct {
    uint32_t regmask;
    uint32_t status;
    uint64_t pc;
    uint64_t gregs[32];
    uint64_t fpregs[32];
    uint32_t acx;
    uint32_t fpc_csr;
    uint32_t fpc_eir;
    uint32_t used_math;
    uint32_t dsp;
    uint64_t mdhi;
    uint64_t mdlo;
    uint32_t hi1;
    uint32_t lo1;
    uint32_t hi2;
    uint32_t lo2;
    uint32_t hi3;
    uint32_t lo3;
} mcontext_t;

typedef struct ucontext {
    uint32_t uc_flags;
    struct ucontext* uc_link;
    stack_t uc_stack;
    mcontext_t uc_mcontext;
    // Other fields are not used so don't define them here.
} ucontext_t;

# elif defined(__i386__)
// x86 version for Android.
typedef struct {
    uint32_t gregs[19];
    void* fpregs;
    uint32_t oldmask;
    uint32_t cr2;
} mcontext_t;

typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
    uint32_t uc_flags;
    struct ucontext* uc_link;
    stack_t uc_stack;
    mcontext_t uc_mcontext;
    // Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EIP = 14 };
# endif // defined(__i386__)
# endif // !defined(__BIONIC_HAVE_UCONTEXT_T)
#endif // defined(ANDROID)

#if defined(USE_APPLE_MACH_PORTS)
# if defined(__x86_64__)
struct macos_x64_context {
    x86_thread_state64_t thread;
    x86_float_state64_t float_;
};
# define CONTEXT macos_x64_context
# elif defined(__i386__)
struct macos_x86_context {
    x86_thread_state_t thread;
    x86_float_state_t float_;
};
# define CONTEXT macos_x86_context
# elif defined(__arm__)
struct macos_arm_context {
    arm_thread_state_t thread;
    arm_neon_state_t float_;
};
# define CONTEXT macos_arm_context
# else
# error Unsupported architecture
# endif
#elif !defined(_WIN32)
# define CONTEXT ucontext_t
#endif

#if defined(_M_X64) || defined(__x86_64__)
# define PC_sig(p) RIP_sig(p)
# define FP_sig(p) RBP_sig(p)
# define SP_sig(p) RSP_sig(p)
#elif defined(_M_IX86) || defined(__i386__)
# define PC_sig(p) EIP_sig(p)
# define FP_sig(p) EBP_sig(p)
# define SP_sig(p) ESP_sig(p)
#elif defined(__arm__)
# define FP_sig(p) R11_sig(p)
# define SP_sig(p) R13_sig(p)
# define LR_sig(p) R14_sig(p)
# define PC_sig(p) R15_sig(p)
#elif defined(_M_ARM64) || defined(__aarch64__)
# define PC_sig(p) EPC_sig(p)
# define FP_sig(p) RFP_sig(p)
# define SP_sig(p) R31_sig(p)
# define LR_sig(p) RLR_sig(p)
#elif defined(__mips__)
# define PC_sig(p) EPC_sig(p)
# define FP_sig(p) RFP_sig(p)
# define SP_sig(p) RSP_sig(p)
# define LR_sig(p) R31_sig(p)
#elif defined(__ppc64__) || defined (__PPC64__) || defined(__ppc64le__) || defined (__PPC64LE__)
# define PC_sig(p) R32_sig(p)
# define SP_sig(p) R01_sig(p)
# define FP_sig(p) R01_sig(p)
#endif

static void
SetContextPC(CONTEXT* context, const uint8_t* pc)
{
#ifdef PC_sig
    PC_sig(context) = reinterpret_cast<uintptr_t>(pc);
#else
    abort();
#endif
}

static const uint8_t*
ContextToPC(CONTEXT* context)
{
#ifdef PC_sig
    return reinterpret_cast<const uint8_t*>(static_cast<uintptr_t>(PC_sig(context)));
#else
    abort();
#endif
}

// =============================================================================
// All signals/exceptions funnel down to this one trap-handling function which
// tests whether the pc is in a wasm module and, if so, whether there is
// actually a trap expected at this pc. These tests both avoid real bugs being
// silently converted to wasm traps and provide the trapping wasm bytecode
// offset we need to report in the error.
//
// Crashing inside wasm trap handling (due to a bug in trap handling or exposed
// during trap handling) must be reported like a normal crash, not cause the
// crash report to be lost. On Windows and non-Mach Unix, a crash during the
// handler reenters the handler, possibly repeatedly until exhausting the stack,
// and so we prevent recursion with the thread-local sAlreadyHandlingTrap. On
// Mach, the wasm exception handler has its own thread and is installed only on
// the thread-level debugging ports of our threads, so a crash on the
// exception handler thread will not recurse; it will bubble up to the
// process-level debugging ports (where Breakpad is installed).
// =============================================================================

static thread_local bool sAlreadyHandlingTrap;

namespace {

struct AutoHandlingTrap
{
    AutoHandlingTrap() {
        assert(!sAlreadyHandlingTrap);
        sAlreadyHandlingTrap = true;
    }

    ~AutoHandlingTrap() {
        assert(sAlreadyHandlingTrap);
        sAlreadyHandlingTrap = false;
    }
};

}

static
#if defined(__GNUC__) || defined(__clang__)
__attribute__ ((warn_unused_result))
#endif
bool
HandleTrap(CONTEXT* context)
{
    assert(sAlreadyHandlingTrap);

    const uint8_t* pc = ContextToPC(context);
    const CodeSegment* codeSegment = LookupCodeSegment(pc);
    if (!codeSegment) {
        return false;
    }

    RecordTrap(pc, codeSegment);

    // Unwind calls longjmp, which bypasses the automatic
    // sAlreadyHandlingTrap cleanup, so reset it manually before doing
    // the longjmp.
    sAlreadyHandlingTrap = false;

#if defined(USE_APPLE_MACH_PORTS)
    // Reroute the PC to run the Unwind function on the main stack after the
    // handler exits. This doesn't yet work for stack overflow traps, because
    // in that case the main thread doesn't have any space left to run.
    SetContextPC(context, reinterpret_cast<const uint8_t*>(&Unwind));
#else
    // For now, just call Unwind directly, rather than redirecting the PC there,
    // so that it runs on the alternate signal handler stack. To run on the main
    // stack, reroute the context PC like this:
    Unwind();
#endif

    return true;
}

// =============================================================================
// The following platform-specific handlers funnel all signals/exceptions into
// the shared HandleTrap() above.
// =============================================================================

#if defined(_WIN32)
// Obtained empirically from thread_local codegen on x86/x64/arm64.
// Compiled in all user binaries, so should be stable over time.
static const unsigned sThreadLocalArrayPointerIndex = 11;

static LONG WINAPI
WasmTrapHandler(LPEXCEPTION_POINTERS exception)
{
    // Make sure TLS is initialized before reading sAlreadyHandlingTrap.
    if (!NtCurrentTeb()->Reserved1[sThreadLocalArrayPointerIndex]) {
        return EXCEPTION_CONTINUE_SEARCH;
    }

    if (sAlreadyHandlingTrap) {
        return EXCEPTION_CONTINUE_SEARCH;
    }
    AutoHandlingTrap aht;

    EXCEPTION_RECORD* record = exception->ExceptionRecord;
    if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION &&
        record->ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION)
    {
        return EXCEPTION_CONTINUE_SEARCH;
    }

    if (!HandleTrap(exception->ContextRecord)) {
        return EXCEPTION_CONTINUE_SEARCH;
    }

    return EXCEPTION_CONTINUE_EXECUTION;
}

#elif defined(USE_APPLE_MACH_PORTS)
// On OSX we are forced to use the lower-level Mach exception mechanism instead
// of Unix signals because breakpad uses Mach exceptions and would otherwise
// report a crash before wasm gets a chance to handle the exception.

// This definition was generated by mig (the Mach Interface Generator) for the
// routine 'exception_raise' (exc.defs).
#pragma pack(4)
typedef struct {
    mach_msg_header_t Head;
    /* start of the kernel processed data */
    mach_msg_body_t msgh_body;
    mach_msg_port_descriptor_t thread;
    mach_msg_port_descriptor_t task;
    /* end of the kernel processed data */
    NDR_record_t NDR;
    exception_type_t exception;
    mach_msg_type_number_t codeCnt;
    int64_t code[2];
} Request__mach_exception_raise_t;
#pragma pack()

// The full Mach message also includes a trailer.
struct ExceptionRequest
{
    Request__mach_exception_raise_t body;
    mach_msg_trailer_t trailer;
};

static bool
HandleMachException(const ExceptionRequest& request)
{
    // Get the port of the thread from the message.
    mach_port_t cxThread = request.body.thread.name;

    // Read out the thread's register state.
    CONTEXT context;
# if defined(__x86_64__)
    unsigned int thread_state_count = x86_THREAD_STATE64_COUNT;
    unsigned int float_state_count = x86_FLOAT_STATE64_COUNT;
    int thread_state = x86_THREAD_STATE64;
    int float_state = x86_FLOAT_STATE64;
# elif defined(__i386__)
    unsigned int thread_state_count = x86_THREAD_STATE_COUNT;
    unsigned int float_state_count = x86_FLOAT_STATE_COUNT;
    int thread_state = x86_THREAD_STATE;
    int float_state = x86_FLOAT_STATE;
# elif defined(__arm__)
    unsigned int thread_state_count = ARM_THREAD_STATE_COUNT;
    unsigned int float_state_count = ARM_NEON_STATE_COUNT;
    int thread_state = ARM_THREAD_STATE;
    int float_state = ARM_NEON_STATE;
# else
# error Unsupported architecture
# endif
    kern_return_t kret;
    kret = thread_get_state(cxThread, thread_state,
                            (thread_state_t)&context.thread, &thread_state_count);
    if (kret != KERN_SUCCESS) {
        return false;
    }
    kret = thread_get_state(cxThread, float_state,
                            (thread_state_t)&context.float_, &float_state_count);
    if (kret != KERN_SUCCESS) {
        return false;
    }

    if (request.body.exception != EXC_BAD_ACCESS &&
        request.body.exception != EXC_BAD_INSTRUCTION)
    {
        return false;
    }

    {
        AutoHandlingTrap aht;
        if (!HandleTrap(&context)) {
            return false;
        }
    }

    // Update the thread state with the new pc and register values.
    kret = thread_set_state(cxThread, float_state, (thread_state_t)&context.float_, float_state_count);
    if (kret != KERN_SUCCESS) {
        return false;
    }
    kret = thread_set_state(cxThread, thread_state, (thread_state_t)&context.thread, thread_state_count);
    if (kret != KERN_SUCCESS) {
        return false;
    }

    return true;
}

static mach_port_t sMachDebugPort = MACH_PORT_NULL;

static void*
MachExceptionHandlerThread(void* arg)
{
    // Taken from mach_exc in /usr/include/mach/mach_exc.defs.
    static const unsigned EXCEPTION_MSG_ID = 2405;

    while (true) {
        ExceptionRequest request;
        kern_return_t kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
                                      sMachDebugPort, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);

        // If we fail even receiving the message, we can't even send a reply!
        // Rather than hanging the faulting thread (hanging the browser), crash.
        if (kret != KERN_SUCCESS) {
            fprintf(stderr, "MachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret);
            abort();
        }

        if (request.body.Head.msgh_id != EXCEPTION_MSG_ID) {
            fprintf(stderr, "Unexpected msg header id %d\n", (int)request.body.Head.msgh_bits);
            abort();
        }

        // Some thread just committed an EXC_BAD_ACCESS and has been suspended by
        // the kernel. The kernel is waiting for us to reply with instructions.
        // Our default is the "not handled" reply (by setting the RetCode field
        // of the reply to KERN_FAILURE) which tells the kernel to continue
        // searching at the process and system level. If this is an
        // expected exception, we handle it and return KERN_SUCCESS.
        bool handled = HandleMachException(request);
        kern_return_t replyCode = handled ? KERN_SUCCESS : KERN_FAILURE;

        // This magic incantation to send a reply back to the kernel was
        // derived from the exc_server generated by
        // 'mig -v /usr/include/mach/mach_exc.defs'.
        __Reply__exception_raise_t reply;
        reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
        reply.Head.msgh_size = sizeof(reply);
        reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
        reply.Head.msgh_local_port = MACH_PORT_NULL;
        reply.Head.msgh_id = request.body.Head.msgh_id + 100;
        reply.NDR = NDR_record;
        reply.RetCode = replyCode;
        mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
                 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    }

    return nullptr;
}

#else // If not Windows or Mac, assume Unix

static struct sigaction sPrevSIGSEGVHandler;
static struct sigaction sPrevSIGBUSHandler;
static struct sigaction sPrevSIGILLHandler;
static struct sigaction sPrevSIGFPEHandler;

static void
WasmTrapHandler(int signum, siginfo_t* info, void* context)
{
    if (!sAlreadyHandlingTrap) {
        AutoHandlingTrap aht;
        assert(signum == SIGSEGV || signum == SIGBUS || signum == SIGFPE || signum == SIGILL);
        if (HandleTrap(static_cast<CONTEXT*>(context))) {
            return;
        }
    }

    struct sigaction* previousSignal = nullptr;
    switch (signum) {
        case SIGSEGV: previousSignal = &sPrevSIGSEGVHandler; break;
        case SIGBUS: previousSignal = &sPrevSIGBUSHandler; break;
        case SIGFPE: previousSignal = &sPrevSIGFPEHandler; break;
        case SIGILL: previousSignal = &sPrevSIGILLHandler; break;
    }
    assert(previousSignal);

    // This signal is not for any JIT code we expect, so we need to forward
    // the signal to the next handler. If there is no next handler (SIG_IGN or
    // SIG_DFL), then it's time to crash. To do this, we set the signal back to
    // its original disposition and return. This will cause the faulting op to
    // be re-executed, which will crash in the normal way. The advantage of
    // doing this over calling _exit() is that we remove ourselves from the crash
    // stack, which improves crash reports. If there is a next handler, call it.
    // It will either crash synchronously, fix up the instruction so that
    // execution can continue and return, or trigger a crash by returning the
    // signal to its original disposition and returning.
    //
    // Note: the order of these tests matters.
    if (previousSignal->sa_flags & SA_SIGINFO) {
        previousSignal->sa_sigaction(signum, info, context);
    } else if (previousSignal->sa_handler == SIG_DFL || previousSignal->sa_handler == SIG_IGN) {
        sigaction(signum, previousSignal, nullptr);
    } else {
        previousSignal->sa_handler(signum);
    }
}
#endif // _WIN32 || __APPLE__ || assume unix

#if defined(ANDROID) && defined(MOZ_LINKER)
extern "C" MFBT_API bool IsSignalHandlingBroken();
#endif

bool
EnsureEagerSignalHandlers()
{
#if defined(ANDROID) && defined(MOZ_LINKER)
    // Signal handling is broken on some Android systems.
    if (IsSignalHandlingBroken()) {
        return false;
    }
#endif

    sAlreadyHandlingTrap = false;

    // Install whatever exception/signal handler is appropriate for the OS.
#if defined(_WIN32)

# if defined(MOZ_ASAN)
    // Under ASan we need to let the ASan runtime's ShadowExceptionHandler stay
    // in the first handler position. This requires some coordination with
    // MemoryProtectionExceptionHandler::isDisabled().
    const bool firstHandler = false;
# else
    // Otherwise, WasmTrapHandler needs to go first, so that we can recover
    // from wasm faults and continue execution without triggering handlers
    // such as MemoryProtectionExceptionHandler that assume we are crashing.
    const bool firstHandler = true;
# endif
    if (!AddVectoredExceptionHandler(firstHandler, WasmTrapHandler)) {
        // Windows has all sorts of random security knobs for disabling things
        // so make this a dynamic failure that disables wasm, not an abort().
        return false;
    }

#elif defined(USE_APPLE_MACH_PORTS)
    // All the Mach setup is in EnsureLazyProcessSignalHandlers.
#else
    // SA_ONSTACK allows us to handle signals on an alternate stack, so that
    // the handler can run in response to running out of stack space on the
    // main stack. Rust installs an alternate stack with sigaltstack, so we
    // rely on that.

    // SA_NODEFER allows us to reenter the signal handler if we crash while
    // handling the signal, and fall through to the Breakpad handler by testing
    // handlingSegFault.

    // Allow handling OOB with signals on all architectures
    struct sigaction faultHandler;
    faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
    faultHandler.sa_sigaction = WasmTrapHandler;
    sigemptyset(&faultHandler.sa_mask);
    if (sigaction(SIGSEGV, &faultHandler, &sPrevSIGSEGVHandler)) {
        perror("unable to install SIGSEGV handler");
        abort();
    }

# if defined(__arm__) || defined(__APPLE__)
    // On ARM, handle Unaligned Accesses.
    // On Darwin, guard page accesses are raised as SIGBUS.
    struct sigaction busHandler;
    busHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
    busHandler.sa_sigaction = WasmTrapHandler;
    sigemptyset(&busHandler.sa_mask);
    if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler)) {
        perror("unable to install SIGBUS handler");
        abort();
    }
# endif

# if !defined(__mips__)
    // Wasm traps for MIPS currently only raise integer overflow fp exception.
    struct sigaction illHandler;
    illHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
    illHandler.sa_sigaction = WasmTrapHandler;
    sigemptyset(&illHandler.sa_mask);
    if (sigaction(SIGILL, &illHandler, &sPrevSIGILLHandler)) {
        perror("unable to install wasm SIGILL handler");
        abort();
    }
# endif

# if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
    // x86 uses SIGFPE to report division by zero, and wasm traps for MIPS
    // currently raise integer overflow fp exception.
    struct sigaction fpeHandler;
    fpeHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
    fpeHandler.sa_sigaction = WasmTrapHandler;
    sigemptyset(&fpeHandler.sa_mask);
    if (sigaction(SIGFPE, &fpeHandler, &sPrevSIGFPEHandler)) {
        perror("unable to install wasm SIGFPE handler");
        abort();
    }
# endif

#endif

    return true;
}

bool
EnsureDarwinMachPorts()
{
#ifdef USE_APPLE_MACH_PORTS
    pthread_attr_t handlerThreadAttr;
    int r = pthread_attr_init(&handlerThreadAttr);
    if (r != 0) {
        return false;
    }

    // Create the port that all of our threads will redirect their traps to.
    kern_return_t kret;
    kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &sMachDebugPort);
    if (kret != KERN_SUCCESS) {
        return false;
    }
    kret = mach_port_insert_right(mach_task_self(), sMachDebugPort, sMachDebugPort,
                                  MACH_MSG_TYPE_MAKE_SEND);
    if (kret != KERN_SUCCESS) {
        return false;
    }

    // Create the thread that will wait on and service sMachDebugPort.
    // It's not useful to destroy this thread on process shutdown so
    // immediately detach on successful start.
    pthread_t handlerThread;
    r = pthread_create(&handlerThread, &handlerThreadAttr, MachExceptionHandlerThread, nullptr);
    if (r != 0) {
        return false;
    }
    r = pthread_detach(handlerThread);
    assert(r == 0);

    // In addition to the process-wide signal handler setup, OSX needs each
    // thread configured to send its exceptions to sMachDebugPort. While there
    // are also task-level (i.e. process-level) exception ports, those are
    // "claimed" by breakpad and chaining Mach exceptions is dark magic that we
    // avoid by instead intercepting exceptions at the thread level before they
    // propagate to the process-level. This works because there are no other
    // uses of thread-level exception ports.
    assert(sMachDebugPort != MACH_PORT_NULL);
    thread_port_t thisThread = mach_thread_self();
    kret = thread_set_exception_ports(thisThread,
                                      EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
                                      sMachDebugPort,
                                      EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
                                      THREAD_STATE_NONE);
    mach_port_deallocate(mach_task_self(), thisThread);
    if (kret != KERN_SUCCESS) {
        return false;
    }

#endif
    return true;
}
@@ -1,55 +0,0 @@
#ifndef signal_handlers_h
#define signal_handlers_h

#include <stdint.h>
#include <setjmp.h>
#ifndef __cplusplus
#include <stdbool.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

struct CodeSegment;

// Record the Trap code and wasm bytecode offset in TLS somewhere
void RecordTrap(const uint8_t* pc, const struct CodeSegment* codeSegment);

// Initiate an unwind.
void Unwind(void);

// Return the CodeSegment containing the given pc, if any exist in the process.
// This method does not take a lock.
const struct CodeSegment*
LookupCodeSegment(const void* pc);

// Trap initialization state.
struct TrapContext {
    bool triedToInstallSignalHandlers;
    bool haveSignalHandlers;
};

// This function performs the low-overhead signal handler initialization that we
// want to do eagerly to ensure a more-deterministic global process state. This
// is especially relevant for signal handlers since handler ordering depends on
// installation order: the wasm signal handler must run *before* the other crash
// handlers and since POSIX signal handlers work LIFO, this function needs to be
// called at the end of the startup process, after other handlers have been
// installed. This function can thus be called multiple times, having no effect
// after the first call.
bool
EnsureEagerSignalHandlers(void);

// Assuming EnsureEagerProcessSignalHandlers() has already been called,
// this function performs the full installation of signal handlers which must
// be performed per-thread. This operation may incur some overhead and
// so should be done only when needed to use wasm.
bool
EnsureDarwinMachPorts(void);

#ifdef __cplusplus
} // extern "C"
#endif

#endif // signal_handlers_h
@@ -1,12 +1,10 @@
//! Memory management for executable code.

use mmap::Mmap;
use region;
use std::cmp;
use std::mem;
use std::string::String;
use std::vec::Vec;
use vmcontext::VMFunctionBody;
use std::{cmp, mem};
use wasmtime_runtime::{Mmap, VMFunctionBody};

/// Memory manager for executable code.
pub struct Code {
@@ -47,7 +45,7 @@ impl Code {
    }

    /// Convert a mutable slice from u8 to VMFunctionBody.
    fn as_mut_vmfunc_slice(slice: &mut [u8]) -> &mut [VMFunctionBody] {
    fn view_as_mut_vmfunc_slice(slice: &mut [u8]) -> &mut [VMFunctionBody] {
        let byte_ptr: *mut [u8] = slice;
        let body_ptr = byte_ptr as *mut [VMFunctionBody];
        unsafe { &mut *body_ptr }
@@ -62,7 +60,7 @@ impl Code {
    ) -> Result<&mut [VMFunctionBody], String> {
        let new = self.allocate(slice.len())?;
        new.copy_from_slice(slice);
        Ok(Self::as_mut_vmfunc_slice(new))
        Ok(Self::view_as_mut_vmfunc_slice(new))
    }

    /// Make all allocated memory executable.

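A standalone sketch of the cast that `view_as_mut_vmfunc_slice` performs, assuming `VMFunctionBody` is a one-byte `#[repr(C)]` wrapper (the stand-in type below is illustrative, not the runtime's actual definition): casting between raw slice pointers preserves the fat pointer's length metadata, so the resulting slice has the same element count as the byte slice.

fn demo_view_as_mut(bytes: &mut [u8]) -> usize {
    // Stand-in for the runtime's one-byte function-body marker type.
    #[repr(C)]
    struct VMFunctionBody(u8);

    let byte_ptr: *mut [u8] = bytes;
    // The `as` cast keeps the slice length; this is only sound here because
    // `VMFunctionBody` has the same size and alignment as `u8`.
    let body_ptr = byte_ptr as *mut [VMFunctionBody];
    let bodies: &mut [VMFunctionBody] = unsafe { &mut *body_ptr };
    bodies.len() // equals bytes.len()
}
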
@@ -1,22 +1,29 @@
use cranelift_codegen::ir;
use cranelift_wasm::Global;
use vmcontext::{VMFunctionBody, VMGlobal, VMMemory, VMTable};
use wasmtime_environ::{MemoryPlan, TablePlan};
use wasmtime_runtime::{
    VMContext, VMFunctionBody, VMGlobalDefinition, VMMemoryDefinition, VMTableDefinition,
};

/// An exported function.
pub struct FunctionExport {
    /// The address of the native-code function.
    pub address: *const VMFunctionBody,
    /// The function signature declaration, used for compatibility checking.
    pub signature: ir::Signature,
}

/// The value of an export passed from one instance to another.
pub enum ExportValue {
pub enum Export {
    /// A function export value.
    Function {
        /// The address of the native-code function.
        address: *const VMFunctionBody,
        /// The function signature declaration, used for compatibility checking.
        signature: ir::Signature,
    },
    Function(FunctionExport),

    /// A table export value.
    Table {
        /// The address of the table descriptor.
        address: *mut VMTable,
        address: *mut VMTableDefinition,
        /// Pointer to the containing VMContext.
        vmctx: *mut VMContext,
        /// The table declaration, used for compatibility checking.
        table: TablePlan,
    },
@@ -24,7 +31,9 @@ pub enum ExportValue {
    /// A memory export value.
    Memory {
        /// The address of the memory descriptor.
        address: *mut VMMemory,
        address: *mut VMMemoryDefinition,
        /// Pointer to the containing VMContext.
        vmctx: *mut VMContext,
        /// The memory declaration, used for compatibility checking.
        memory: MemoryPlan,
    },
@@ -32,45 +41,57 @@ pub enum ExportValue {
    /// A global export value.
    Global {
        /// The address of the global storage.
        address: *mut VMGlobal,
        address: *mut VMGlobalDefinition,
        /// The global declaration, used for compatibility checking.
        global: Global,
    },
}

impl ExportValue {
impl Export {
    /// Construct a function export value.
    pub fn function(address: *const VMFunctionBody, signature: ir::Signature) -> Self {
        ExportValue::Function { address, signature }
        Export::Function(FunctionExport { address, signature })
    }

    /// Construct a table export value.
    pub fn table(address: *mut VMTable, table: TablePlan) -> Self {
        ExportValue::Table { address, table }
    pub fn table(address: *mut VMTableDefinition, vmctx: *mut VMContext, table: TablePlan) -> Self {
        Export::Table {
            address,
            vmctx,
            table,
        }
    }

    /// Construct a memory export value.
    pub fn memory(address: *mut VMMemory, memory: MemoryPlan) -> Self {
        ExportValue::Memory { address, memory }
    pub fn memory(
        address: *mut VMMemoryDefinition,
        vmctx: *mut VMContext,
        memory: MemoryPlan,
    ) -> Self {
        Export::Memory {
            address,
            vmctx,
            memory,
        }
    }

    /// Construct a global export value.
    pub fn global(address: *mut VMGlobal, global: Global) -> Self {
        ExportValue::Global { address, global }
    pub fn global(address: *mut VMGlobalDefinition, global: Global) -> Self {
        Export::Global { address, global }
    }
}

/// Import resolver connects imports with available exported values.
pub trait Resolver {
    /// Resolve the given module/field combo.
    fn resolve(&mut self, module: &str, field: &str) -> Option<ExportValue>;
    fn resolve(&mut self, module: &str, field: &str) -> Option<Export>;
}

/// `Resolver` implementation that always resolves to `None`.
pub struct NullResolver {}

impl Resolver for NullResolver {
    fn resolve(&mut self, _module: &str, _field: &str) -> Option<Export> {
        None
    }
}

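As a usage sketch, an embedder could back `Resolver` with a map of known exports keyed by module and field name; the `MapResolver` type below is hypothetical and not part of this commit:

use std::collections::HashMap;

pub struct MapResolver {
    exports: HashMap<(String, String), Export>,
}

impl Resolver for MapResolver {
    fn resolve(&mut self, module: &str, field: &str) -> Option<Export> {
        // `remove` hands each export out at most once, which suits the
        // move-only `Export` type; a real embedder might instead store
        // values that can be re-materialized on each lookup.
        self.exports.remove(&(module.to_string(), field.to_string()))
    }
}
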
@@ -1,65 +0,0 @@
//! Support for reading the value of a wasm global from outside the module.

use action::{ActionError, RuntimeValue};
use cranelift_codegen::ir;
use cranelift_entity::EntityRef;
use cranelift_wasm::GlobalIndex;
use instance::Instance;
use wasmtime_environ::{Export, Module};

/// Reads the value of the named global variable in `module`.
pub fn get(
    module: &Module,
    instance: &mut Instance,
    global_name: &str,
) -> Result<RuntimeValue, ActionError> {
    let global_index = match module.exports.get(global_name) {
        Some(Export::Global(index)) => *index,
        Some(_) => {
            return Err(ActionError::Kind(format!(
                "exported item \"{}\" is not a global",
                global_name
            )))
        }
        None => {
            return Err(ActionError::Field(format!(
                "no export named \"{}\"",
                global_name
            )))
        }
    };

    get_by_index(module, instance, global_index)
}

/// Reads the value of the indexed global variable in `module`.
pub fn get_by_index(
    module: &Module,
    instance: &mut Instance,
    global_index: GlobalIndex,
) -> Result<RuntimeValue, ActionError> {
    unsafe {
        let vmctx = &mut *instance.vmctx();
        let vmglobal = vmctx.global(global_index);
        let definition = vmglobal.get_definition(module.is_imported_global(global_index));
        Ok(
            match module
                .globals
                .get(global_index)
                .ok_or_else(|| ActionError::Index(global_index.index() as u64))?
                .ty
            {
                ir::types::I32 => RuntimeValue::I32(*definition.as_i32()),
                ir::types::I64 => RuntimeValue::I64(*definition.as_i64()),
                ir::types::F32 => RuntimeValue::F32(*definition.as_f32_bits()),
                ir::types::F64 => RuntimeValue::F64(*definition.as_f64_bits()),
                other => {
                    return Err(ActionError::Type(format!(
                        "global with type {} not supported",
                        other
                    )))
                }
            },
        )
    }
}
@@ -1,30 +0,0 @@
use cranelift_entity::PrimaryMap;
use cranelift_wasm::{FuncIndex, GlobalIndex, MemoryIndex, TableIndex};
use vmcontext::{VMFunctionBody, VMGlobal, VMMemory, VMTable};

/// Resolved import pointers.
#[derive(Debug)]
pub struct Imports {
    /// Resolved addresses for imported functions.
    pub functions: PrimaryMap<FuncIndex, *const VMFunctionBody>,

    /// Resolved addresses for imported tables.
    pub tables: PrimaryMap<TableIndex, *mut VMTable>,

    /// Resolved addresses for imported globals.
    pub globals: PrimaryMap<GlobalIndex, *mut VMGlobal>,

    /// Resolved addresses for imported memories.
    pub memories: PrimaryMap<MemoryIndex, *mut VMMemory>,
}

impl Imports {
    pub fn new() -> Self {
        Self {
            functions: PrimaryMap::new(),
            tables: PrimaryMap::new(),
            globals: PrimaryMap::new(),
            memories: PrimaryMap::new(),
        }
    }
}
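A minimal sketch of how these maps might be filled from a `Resolver` during instantiation; the helper below is hypothetical and omits the signature-compatibility check that real import validation would perform:

fn resolve_function_import(
    resolver: &mut dyn Resolver,
    imports: &mut Imports,
    module: &str,
    field: &str,
) -> Result<FuncIndex, String> {
    match resolver.resolve(module, field) {
        Some(Export::Function(FunctionExport { address, signature })) => {
            // A real implementation would first verify `signature` against
            // the module's declared signature for this import.
            let _ = signature;
            Ok(imports.functions.push(address))
        }
        Some(_) => Err(format!("import {}.{} is not a function", module, field)),
        None => Err(format!("unknown import {}.{}", module, field)),
    }
}
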
@@ -1,235 +0,0 @@
//! An `Instance` contains all the runtime state used by execution of a wasm
//! module.

use cranelift_entity::EntityRef;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::{DefinedFuncIndex, FuncIndex, GlobalIndex, MemoryIndex, TableIndex};
use imports::Imports;
use memory::LinearMemory;
use sig_registry::SignatureRegistry;
use std::ptr;
use std::slice;
use std::string::String;
use table::Table;
use vmcontext::{VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMGlobal, VMMemory, VMTable};
use wasmtime_environ::{DataInitializer, Module};

/// An Instance of a WebAssembly module.
#[derive(Debug)]
pub struct Instance {
    /// WebAssembly linear memory data.
    memories: PrimaryMap<MemoryIndex, LinearMemory>,

    /// WebAssembly table data.
    tables: PrimaryMap<TableIndex, Table>,

    /// Function Signature IDs.
    /// FIXME: This should be shared across instances rather than per-Instance.
    sig_registry: SignatureRegistry,

    /// Memory base address vector pointed to by vmctx.
    vmctx_memories: PrimaryMap<MemoryIndex, VMMemory>,

    /// WebAssembly global variable data.
    vmctx_globals: PrimaryMap<GlobalIndex, VMGlobal>,

    /// Table storage base address vector pointed to by vmctx.
    vmctx_tables: PrimaryMap<TableIndex, VMTable>,

    /// Pointer values for resolved imports.
    imports: Imports,

    /// Pointers to functions in executable memory.
    allocated_functions: PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>,

    /// Context pointer used by JIT code.
    vmctx: VMContext,
}

impl Instance {
    /// Create a new `Instance`. In order to complete instantiation, call
    /// `invoke_start_function`. `allocated_functions` holds the function bodies
    /// which have been placed in executable memory.
    pub fn new(
        module: &Module,
        allocated_functions: PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>,
        data_initializers: &[DataInitializer],
        imports: Imports,
    ) -> Result<Self, String> {
        let mut sig_registry = instantiate_signatures(module);
        let mut memories = instantiate_memories(module, data_initializers)?;
        let mut tables = instantiate_tables(module, &allocated_functions, &mut sig_registry);

        let mut vmctx_memories = memories
            .values_mut()
            .map(LinearMemory::vmmemory)
            .collect::<PrimaryMap<MemoryIndex, _>>();

        let mut vmctx_globals = instantiate_globals(module);

        let mut vmctx_tables = tables
            .values_mut()
            .map(Table::vmtable)
            .collect::<PrimaryMap<TableIndex, _>>();

        let vmctx_memories_ptr = vmctx_memories.values_mut().into_slice().as_mut_ptr();
        let vmctx_globals_ptr = vmctx_globals.values_mut().into_slice().as_mut_ptr();
        let vmctx_tables_ptr = vmctx_tables.values_mut().into_slice().as_mut_ptr();
        let signature_ids_ptr = sig_registry.vmsignature_ids();

        Ok(Self {
            memories,
            tables,
            sig_registry,
            vmctx_memories,
            vmctx_globals,
            vmctx_tables,
            imports,
            allocated_functions,
            vmctx: VMContext::new(
                vmctx_memories_ptr,
                vmctx_globals_ptr,
                vmctx_tables_ptr,
                signature_ids_ptr,
            ),
        })
    }

    /// Return the vmctx pointer to be passed into JIT code.
    pub fn vmctx(&mut self) -> &mut VMContext {
        &mut self.vmctx
    }

    /// Return the offset from the vmctx pointer to its containing Instance.
    pub(crate) fn vmctx_offset() -> isize {
        offset_of!(Self, vmctx) as isize
    }

    /// Return the pointer to executable memory for the given function index.
    pub(crate) fn get_allocated_function(
        &self,
        index: DefinedFuncIndex,
    ) -> Option<&[VMFunctionBody]> {
        self.allocated_functions
            .get(index)
            .map(|(ptr, len)| unsafe { slice::from_raw_parts(*ptr, *len) })
    }

    /// Return the pointer to the executable code of the given imported function.
    pub(crate) fn get_imported_function(&self, index: FuncIndex) -> Option<*const VMFunctionBody> {
        self.imports.functions.get(index).cloned()
    }

    /// Grow memory by the specified amount of pages.
    ///
    /// Returns `None` if memory can't be grown by the specified amount
    /// of pages.
    pub fn memory_grow(&mut self, memory_index: MemoryIndex, delta: u32) -> Option<u32> {
        let result = self
            .memories
            .get_mut(memory_index)
            .unwrap_or_else(|| panic!("no memory for index {}", memory_index.index()))
            .grow(delta);

        // Keep the VMContext pointers used by JIT code current.
        self.vmctx_memories[memory_index] = self.memories[memory_index].vmmemory();

        result
    }

    /// Returns the number of allocated wasm pages.
    pub fn memory_size(&mut self, memory_index: MemoryIndex) -> u32 {
        self.memories
            .get(memory_index)
            .unwrap_or_else(|| panic!("no memory for index {}", memory_index.index()))
            .size()
    }

    /// Returns a slice of the contents of allocated linear memory.
    pub fn inspect_memory(&self, memory_index: MemoryIndex, address: usize, len: usize) -> &[u8] {
        &self
            .memories
            .get(memory_index)
            .unwrap_or_else(|| panic!("no memory for index {}", memory_index.index()))
            .as_ref()[address..address + len]
    }

    /// Returns the value of a global variable.
    pub fn inspect_global(&self, global_index: GlobalIndex) -> &VMGlobal {
        &self.vmctx_globals[global_index]
    }
}

fn instantiate_signatures(module: &Module) -> SignatureRegistry {
    let mut sig_registry = SignatureRegistry::new();
    for (sig_index, sig) in module.signatures.iter() {
        sig_registry.register(sig_index, sig);
    }
    sig_registry
}

/// Allocate memory for just the memories of the current module.
fn instantiate_memories(
    module: &Module,
    data_initializers: &[DataInitializer],
) -> Result<PrimaryMap<MemoryIndex, LinearMemory>, String> {
    let mut memories = PrimaryMap::with_capacity(module.memory_plans.len());
    for plan in module.memory_plans.values() {
        memories.push(LinearMemory::new(&plan)?);
    }

    for init in data_initializers {
        debug_assert!(init.base.is_none(), "globalvar base not supported yet");
        let mem_mut = memories[init.memory_index].as_mut();
        let to_init = &mut mem_mut[init.offset..init.offset + init.data.len()];
        to_init.copy_from_slice(init.data);
    }

    Ok(memories)
}

/// Allocate memory for just the tables of the current module.
fn instantiate_tables(
    module: &Module,
    allocated_functions: &PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>,
    sig_registry: &mut SignatureRegistry,
) -> PrimaryMap<TableIndex, Table> {
    let mut tables = PrimaryMap::with_capacity(module.table_plans.len());
    for table in module.table_plans.values() {
        tables.push(Table::new(table));
    }

    for init in &module.table_elements {
        debug_assert!(init.base.is_none(), "globalvar base not supported yet");
        let slice = tables[init.table_index].as_mut();
        let subslice = &mut slice[init.offset..init.offset + init.elements.len()];
        for (i, func_idx) in init.elements.iter().enumerate() {
            let callee_sig = module.functions[*func_idx];
            let func_ptr = allocated_functions[module
                .defined_func_index(*func_idx)
                .expect("table element initializer with imported function not supported yet")]
            .0;
            let type_id = sig_registry.lookup(callee_sig);
            subslice[i] = VMCallerCheckedAnyfunc { func_ptr, type_id };
        }
    }

    tables
}

/// Allocate memory for just the globals of the current module,
/// without any initializers applied yet.
fn instantiate_globals(module: &Module) -> PrimaryMap<GlobalIndex, VMGlobal> {
    let mut vmctx_globals = PrimaryMap::with_capacity(module.globals.len());

    for (index, global) in module.globals.iter() {
        if module.is_imported_global(index) {
            // FIXME: get the actual import
            vmctx_globals.push(VMGlobal::import(ptr::null_mut()));
        } else {
            vmctx_globals.push(VMGlobal::definition(global));
        }
    }

    vmctx_globals
}
@@ -1,232 +0,0 @@
//! Support for invoking wasm functions from outside a wasm module.

use action::{ActionError, ActionOutcome, RuntimeValue};
use code::Code;
use cranelift_codegen::ir::InstBuilder;
use cranelift_codegen::{binemit, ir, isa, Context};
use cranelift_entity::EntityRef;
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};
use cranelift_wasm::FuncIndex;
use instance::Instance;
use signalhandlers::{ensure_eager_signal_handlers, ensure_full_signal_handlers, TrapContext};
use std::mem;
use std::ptr;
use std::vec::Vec;
use traphandlers::call_wasm;
use vmcontext::{VMContext, VMFunctionBody};
use wasmtime_environ::{CompileError, Export, Module, RelocSink};

/// Calls the given named function, passing the given arguments and returning
/// its results.
pub fn invoke(
    code: &mut Code,
    isa: &isa::TargetIsa,
    module: &Module,
    instance: &mut Instance,
    function: &str,
    args: &[RuntimeValue],
) -> Result<ActionOutcome, ActionError> {
    let fn_index = match module.exports.get(function) {
        Some(Export::Function(index)) => *index,
        Some(_) => {
            return Err(ActionError::Kind(format!(
                "exported item \"{}\" is not a function",
                function
            )))
        }
        None => {
            return Err(ActionError::Field(format!(
                "no export named \"{}\"",
                function
            )))
        }
    };

    invoke_by_index(code, isa, module, instance, fn_index, args)
}

/// Invoke the WebAssembly start function of the instance, if one is present.
pub fn invoke_start_function(
    code: &mut Code,
    isa: &isa::TargetIsa,
    module: &Module,
    instance: &mut Instance,
) -> Result<ActionOutcome, ActionError> {
    if let Some(start_index) = module.start_func {
        invoke_by_index(code, isa, module, instance, start_index, &[])
    } else {
        // No start function; just return nothing.
        Ok(ActionOutcome::Returned { values: vec![] })
    }
}

/// Calls the given indexed function, passing the given arguments and returning
/// its results.
pub fn invoke_by_index(
    code: &mut Code,
    isa: &isa::TargetIsa,
    module: &Module,
    instance: &mut Instance,
    fn_index: FuncIndex,
    args: &[RuntimeValue],
) -> Result<ActionOutcome, ActionError> {
    let exec_code_buf = match module.defined_func_index(fn_index) {
        Some(def_fn_index) => instance
            .get_allocated_function(def_fn_index)
            .ok_or_else(|| ActionError::Index(def_fn_index.index() as u64))?
            .as_ptr(),
        None => instance
            .get_imported_function(fn_index)
            .ok_or_else(|| ActionError::Index(fn_index.index() as u64))?,
    };

    let sig = &module.signatures[module.functions[fn_index]];

    // TODO: Move this out to be done once per thread rather than per call.
    let mut traps = TrapContext {
        triedToInstallSignalHandlers: false,
        haveSignalHandlers: false,
    };

    // Rather than writing inline assembly to jump to the code region, we use the fact that
    // the Rust ABI for calling a function with no arguments and no return values matches the one
    // of the generated code. Thanks to this, we can transmute the code region into a first-class
    // Rust function and call it.
    // Ensure that our signal handlers are ready for action.
    ensure_eager_signal_handlers();
    ensure_full_signal_handlers(&mut traps);
    if !traps.haveSignalHandlers {
        return Err(ActionError::Resource(
            "failed to install signal handlers".to_string(),
        ));
    }

    call_through_wrapper(code, isa, exec_code_buf, instance, args, &sig)
}
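
A typical call through this API might look like the following sketch; the module and instance setup, and the exported function name `add`, are hypothetical and not part of this commit:

    // Sketch: invoke a hypothetical exported `add(i32, i32) -> i32`.
    let args = [RuntimeValue::I32(2), RuntimeValue::I32(3)];
    match invoke(&mut code, &*isa, &module, &mut instance, "add", &args) {
        Ok(ActionOutcome::Returned { values }) => println!("add returned {:?}", values),
        Ok(ActionOutcome::Trapped { message }) => println!("trapped: {}", message),
        Err(error) => println!("invoke failed: {:?}", error),
    }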

fn call_through_wrapper(
    code: &mut Code,
    isa: &isa::TargetIsa,
    callee: *const VMFunctionBody,
    instance: &mut Instance,
    args: &[RuntimeValue],
    sig: &ir::Signature,
) -> Result<ActionOutcome, ActionError> {
    let vmctx = instance.vmctx() as *mut VMContext;

    for (index, value) in args.iter().enumerate() {
        assert_eq!(value.value_type(), sig.params[index].value_type);
    }

    let wrapper_sig = ir::Signature::new(isa.frontend_config().default_call_conv);
    let mut context = Context::new();
    context.func = ir::Function::with_name_signature(ir::ExternalName::user(0, 0), wrapper_sig);

    let value_size = 8;
    let mut results_vec = Vec::new();
    results_vec.resize(sig.returns.len(), 0i64);

    let mut fn_builder_ctx = FunctionBuilderContext::new();
    {
        let mut builder = FunctionBuilder::new(&mut context.func, &mut fn_builder_ctx);
        let block0 = builder.create_ebb();

        builder.append_ebb_params_for_function_params(block0);

        builder.switch_to_block(block0);
        builder.seal_block(block0);

        let mut callee_args = Vec::new();
        let pointer_type = isa.pointer_type();

        let callee_value = builder.ins().iconst(pointer_type, callee as i64);

        for value in args {
            match value {
                RuntimeValue::I32(i) => {
                    callee_args.push(builder.ins().iconst(ir::types::I32, i64::from(*i)))
                }
                RuntimeValue::I64(i) => callee_args.push(builder.ins().iconst(ir::types::I64, *i)),
                RuntimeValue::F32(i) => callee_args.push(
                    builder
                        .ins()
                        .f32const(ir::immediates::Ieee32::with_bits(*i)),
                ),
                RuntimeValue::F64(i) => callee_args.push(
                    builder
                        .ins()
                        .f64const(ir::immediates::Ieee64::with_bits(*i)),
                ),
            }
        }

        let vmctx_value = builder.ins().iconst(pointer_type, vmctx as i64);
        callee_args.push(vmctx_value);

        let new_sig = builder.import_signature(sig.clone());

        // TODO: It's possible to make this a direct call. We just need Cranelift
        // to support functions declared with an immediate integer address.
        let call = builder
            .ins()
            .call_indirect(new_sig, callee_value, &callee_args);

        let results = builder.func.dfg.inst_results(call).to_vec();

        let results_vec_value = builder
            .ins()
            .iconst(pointer_type, results_vec.as_ptr() as i64);

        let mut mflags = ir::MemFlags::new();
        mflags.set_notrap();
        mflags.set_aligned();
        for (i, r) in results.iter().enumerate() {
            builder
                .ins()
                .store(mflags, *r, results_vec_value, (i * value_size) as i32);
        }

        builder.ins().return_(&[]);
    }

    let mut code_buf: Vec<u8> = Vec::new();
    let mut reloc_sink = RelocSink::new();
    let mut trap_sink = binemit::NullTrapSink {};
    context
        .compile_and_emit(isa, &mut code_buf, &mut reloc_sink, &mut trap_sink)
        .map_err(|error| ActionError::Compile(CompileError::Codegen(error)))?;
    assert!(reloc_sink.func_relocs.is_empty());

    let exec_code_buf = code
        .allocate_copy_of_byte_slice(&code_buf)
        .map_err(ActionError::Resource)?
        .as_ptr();
    code.publish();

    let func: fn() = unsafe { mem::transmute(exec_code_buf) };

    Ok(match call_wasm(func) {
        Ok(()) => {
            let mut values = Vec::with_capacity(sig.returns.len());

            for (index, abi_param) in sig.returns.iter().enumerate() {
                let v = unsafe {
                    let ptr = results_vec.as_ptr().add(index * value_size);

                    match abi_param.value_type {
                        ir::types::I32 => RuntimeValue::I32(ptr::read(ptr as *const i32)),
                        ir::types::I64 => RuntimeValue::I64(ptr::read(ptr as *const i64)),
                        ir::types::F32 => RuntimeValue::F32(ptr::read(ptr as *const u32)),
                        ir::types::F64 => RuntimeValue::F64(ptr::read(ptr as *const u64)),
                        other => panic!("unsupported value type {:?}", other),
                    }
                };

                values.push(v);
            }

            ActionOutcome::Returned { values }
        }
        Err(message) => ActionOutcome::Trapped { message },
    })
}
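
The return-value marshaling above gives every result an 8-byte slot regardless of type, so reading an `f32` result reinterprets the low 32 bits of its slot. A standalone sketch of that layout, with hypothetical zeroed values:

    // Each result occupies an 8-byte slot, regardless of its type.
    let results_vec = vec![0i64; 2];
    let base = results_vec.as_ptr() as *const u8;
    let slot1 = unsafe { base.add(1 * 8) }; // the second result's slot
    let bits = unsafe { std::ptr::read(slot1 as *const u32) }; // f32 stored as raw bits
    let value = f32::from_bits(bits);
    println!("{}", value); // 0.0 for this zeroed sketch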

@@ -28,18 +28,12 @@ extern crate cranelift_codegen;
extern crate cranelift_entity;
extern crate cranelift_frontend;
extern crate cranelift_wasm;
extern crate errno;
extern crate region;
extern crate wasmtime_environ;
extern crate wasmtime_runtime;
#[cfg(not(feature = "std"))]
#[macro_use]
extern crate alloc;
#[macro_use]
extern crate lazy_static;
extern crate libc;
#[macro_use]
extern crate memoffset;
extern crate cast;
extern crate failure;
#[macro_use]
extern crate failure_derive;
@@ -47,30 +41,13 @@ extern crate failure_derive;
mod action;
mod code;
mod export;
mod get;
mod imports;
mod instance;
mod invoke;
mod libcalls;
mod link;
mod memory;
mod mmap;
mod sig_registry;
mod signalhandlers;
mod table;
mod traphandlers;
mod vmcontext;
mod world;

pub use action::{ActionError, ActionOutcome, RuntimeValue};
pub use code::Code;
pub use export::{ExportValue, NullResolver, Resolver};
pub use get::{get, get_by_index};
pub use instance::Instance;
pub use invoke::{invoke, invoke_by_index, invoke_start_function};
pub use export::{Export, NullResolver, Resolver};
pub use link::link_module;
pub use traphandlers::{call_wasm, LookupCodeSegment, RecordTrap, Unwind};
pub use vmcontext::{VMContext, VMFunctionBody, VMGlobal, VMMemory, VMTable};
pub use world::InstanceWorld;

#[cfg(not(feature = "std"))]

@@ -1,77 +0,0 @@
//! Runtime library calls. Note that the JIT may sometimes perform these inline
//! rather than calling them, particularly when CPUs have special instructions
//! which compute them directly.

pub extern "C" fn wasmtime_f32_ceil(x: f32) -> f32 {
    x.ceil()
}

pub extern "C" fn wasmtime_f32_floor(x: f32) -> f32 {
    x.floor()
}

pub extern "C" fn wasmtime_f32_trunc(x: f32) -> f32 {
    x.trunc()
}

#[allow(clippy::float_arithmetic, clippy::float_cmp)]
pub extern "C" fn wasmtime_f32_nearest(x: f32) -> f32 {
    // Rust doesn't have a nearest function, so do it manually.
    if x == 0.0 {
        // Preserve the sign of zero.
        x
    } else {
        // Nearest is either ceil or floor depending on which is nearest or even.
        let u = x.ceil();
        let d = x.floor();
        let um = (x - u).abs();
        let dm = (x - d).abs();
        if um < dm
            || (um == dm && {
                let h = u / 2.;
                h.floor() == h
            })
        {
            u
        } else {
            d
        }
    }
}

pub extern "C" fn wasmtime_f64_ceil(x: f64) -> f64 {
    x.ceil()
}

pub extern "C" fn wasmtime_f64_floor(x: f64) -> f64 {
    x.floor()
}

pub extern "C" fn wasmtime_f64_trunc(x: f64) -> f64 {
    x.trunc()
}

#[allow(clippy::float_arithmetic, clippy::float_cmp)]
pub extern "C" fn wasmtime_f64_nearest(x: f64) -> f64 {
    // Rust doesn't have a nearest function, so do it manually.
    if x == 0.0 {
        // Preserve the sign of zero.
        x
    } else {
        // Nearest is either ceil or floor depending on which is nearest or even.
        let u = x.ceil();
        let d = x.floor();
        let um = (x - u).abs();
        let dm = (x - d).abs();
        if um < dm
            || (um == dm && {
                let h = u / 2.;
                h.floor() == h
            })
        {
            u
        } else {
            d
        }
    }
}
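
The `nearest` implementations above round to the nearest integer with ties going to the even neighbor, matching wasm's `fN.nearest`. A quick sanity check of the tie-breaking, as a sketch using the functions above:

    #[test]
    fn f32_nearest_ties_to_even() {
        assert_eq!(wasmtime_f32_nearest(2.5), 2.0); // tie: rounds to the even neighbor
        assert_eq!(wasmtime_f32_nearest(3.5), 4.0); // tie: rounds to the even neighbor
        assert_eq!(wasmtime_f32_nearest(2.4), 2.0); // non-tie: rounds to nearest
    }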

@@ -1,18 +1,16 @@
use cranelift_codegen::binemit::Reloc;
use cranelift_entity::{EntityRef, PrimaryMap};
use cranelift_wasm::{
    DefinedFuncIndex, Global, GlobalInit, Memory, MemoryIndex, Table, TableElementType,
};
use export::{ExportValue, Resolver};
use imports::Imports;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::{DefinedFuncIndex, Global, GlobalInit, Memory, Table, TableElementType};
use export::{Export, FunctionExport, Resolver};
use std::ptr::write_unaligned;
use std::string::String;
use std::vec::Vec;
use vmcontext::VMContext;
use vmcontext::{VMFunctionBody, VMGlobal, VMMemory, VMTable};
use wasmtime_environ::{
    MemoryPlan, MemoryStyle, Module, Relocation, RelocationTarget, Relocations, TablePlan,
    TableStyle,
};
use wasmtime_runtime::libcalls;
use wasmtime_runtime::{Imports, VMFunctionBody, VMGlobalImport, VMMemoryImport, VMTableImport};

/// A link error, such as incompatible or unmatched imports/exports.
#[derive(Fail, Debug)]
@@ -22,29 +20,28 @@ pub struct LinkError(String);
/// Links a module that has been compiled with `compiled_module` in `wasmtime-environ`.
pub fn link_module(
    module: &Module,
    allocated_functions: &PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>,
    allocated_functions: &PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,
    relocations: Relocations,
    resolver: &mut Resolver,
) -> Result<Imports, LinkError> {
    let mut imports = Imports::new();

    let mut function_imports = PrimaryMap::with_capacity(module.imported_funcs.len());
    for (index, (ref module_name, ref field)) in module.imported_funcs.iter() {
        match resolver.resolve(module_name, field) {
            Some(export_value) => match export_value {
                ExportValue::Function { address, signature } => {
                Export::Function(FunctionExport { address, signature }) => {
                    let import_signature = &module.signatures[module.functions[index]];
                    if signature != *import_signature {
                        // TODO: If the difference is in the calling convention,
                        // we could emit a wrapper function to fix it up.
                        return Err(LinkError(
                            format!("{}/{}: exported function with signature {} incompatible with function import with signature {}",
                                module_name, field,
                                signature, import_signature)
                        ));
                    }
                    imports.functions.push(address);
                    function_imports.push(address);
                }
                ExportValue::Table { .. }
                | ExportValue::Memory { .. }
                | ExportValue::Global { .. } => {
                Export::Table { .. } | Export::Memory { .. } | Export::Global { .. } => {
                    return Err(LinkError(format!(
                        "{}/{}: export not compatible with function import",
                        module_name, field
@@ -60,41 +57,15 @@ pub fn link_module(
        }
    }

    for (index, (ref module_name, ref field)) in module.imported_globals.iter() {
        match resolver.resolve(module_name, field) {
            Some(export_value) => match export_value {
                ExportValue::Global { address, global } => {
                    let imported_global = module.globals[index];
                    if !is_global_compatible(&global, &imported_global) {
                        return Err(LinkError(format!(
                            "{}/{}: exported global incompatible with global import",
                            module_name, field
                        )));
                    }
                    imports.globals.push(address as *mut VMGlobal);
                }
                ExportValue::Table { .. }
                | ExportValue::Memory { .. }
                | ExportValue::Function { .. } => {
                    return Err(LinkError(format!(
                        "{}/{}: exported global incompatible with global import",
                        module_name, field
                    )));
                }
            },
            None => {
                return Err(LinkError(format!(
                    "no provided import global for {}/{}",
                    module_name, field
                )))
            }
        }
    }

    let mut table_imports = PrimaryMap::with_capacity(module.imported_tables.len());
    for (index, (ref module_name, ref field)) in module.imported_tables.iter() {
        match resolver.resolve(module_name, field) {
            Some(export_value) => match export_value {
                ExportValue::Table { address, table } => {
                Export::Table {
                    address,
                    vmctx,
                    table,
                } => {
                    let import_table = &module.table_plans[index];
                    if !is_table_compatible(&table, import_table) {
                        return Err(LinkError(format!(
@@ -102,11 +73,12 @@ pub fn link_module(
                            module_name, field,
                        )));
                    }
                    imports.tables.push(address as *mut VMTable);
                    table_imports.push(VMTableImport {
                        from: address,
                        vmctx,
                    });
                }
                ExportValue::Global { .. }
                | ExportValue::Memory { .. }
                | ExportValue::Function { .. } => {
                Export::Global { .. } | Export::Memory { .. } | Export::Function { .. } => {
                    return Err(LinkError(format!(
                        "{}/{}: export not compatible with table import",
                        module_name, field
@@ -122,10 +94,15 @@ pub fn link_module(
        }
    }

    let mut memory_imports = PrimaryMap::with_capacity(module.imported_memories.len());
    for (index, (ref module_name, ref field)) in module.imported_memories.iter() {
        match resolver.resolve(module_name, field) {
            Some(export_value) => match export_value {
                ExportValue::Memory { address, memory } => {
                Export::Memory {
                    address,
                    vmctx,
                    memory,
                } => {
                    let import_memory = &module.memory_plans[index];
                    if !is_memory_compatible(&memory, import_memory) {
                        return Err(LinkError(format!(
@@ -133,11 +110,12 @@ pub fn link_module(
                            module_name, field
                        )));
                    }
                    imports.memories.push(address as *mut VMMemory);
                    memory_imports.push(VMMemoryImport {
                        from: address,
                        vmctx,
                    });
                }
                ExportValue::Table { .. }
                | ExportValue::Global { .. }
                | ExportValue::Function { .. } => {
                Export::Table { .. } | Export::Global { .. } | Export::Function { .. } => {
                    return Err(LinkError(format!(
                        "{}/{}: export not compatible with memory import",
                        module_name, field
@@ -153,6 +131,43 @@ pub fn link_module(
        }
    }

    let mut global_imports = PrimaryMap::with_capacity(module.imported_globals.len());
    for (index, (ref module_name, ref field)) in module.imported_globals.iter() {
        match resolver.resolve(module_name, field) {
            Some(export_value) => match export_value {
                Export::Global { address, global } => {
                    let imported_global = module.globals[index];
                    if !is_global_compatible(&global, &imported_global) {
                        return Err(LinkError(format!(
                            "{}/{}: exported global incompatible with global import",
                            module_name, field
                        )));
                    }
                    global_imports.push(VMGlobalImport { from: address });
                }
                Export::Table { .. } | Export::Memory { .. } | Export::Function { .. } => {
                    return Err(LinkError(format!(
                        "{}/{}: exported global incompatible with global import",
                        module_name, field
                    )));
                }
            },
            None => {
                return Err(LinkError(format!(
                    "no provided import global for {}/{}",
                    module_name, field
                )))
            }
        }
    }

    let imports = Imports::new(
        function_imports,
        table_imports,
        memory_imports,
        global_imports,
    );

    // Apply relocations, now that we have virtual addresses for everything.
    relocate(&imports, allocated_functions, relocations, &module);

@@ -277,22 +292,27 @@ fn is_memory_compatible(exported: &MemoryPlan, imported: &MemoryPlan) -> bool {
/// Performs the relocations inside the function bytecode, provided the necessary metadata.
fn relocate(
    imports: &Imports,
    allocated_functions: &PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>,
    allocated_functions: &PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>,
    relocations: PrimaryMap<DefinedFuncIndex, Vec<Relocation>>,
    module: &Module,
) {
    for (i, function_relocs) in relocations.into_iter() {
        for r in function_relocs {
            use self::libcalls::*;
            let target_func_address: usize = match r.reloc_target {
                RelocationTarget::UserFunc(index) => match module.defined_func_index(index) {
                    Some(f) => allocated_functions[f].0 as usize,
                    Some(f) => {
                        let fatptr: *const [VMFunctionBody] = allocated_functions[f];
                        fatptr as *const VMFunctionBody as usize
                    }
                    None => imports.functions[index] as usize,
                },
                RelocationTarget::MemoryGrow => wasmtime_memory_grow as usize,
                RelocationTarget::MemorySize => wasmtime_memory_size as usize,
                RelocationTarget::Memory32Grow => wasmtime_memory32_grow as usize,
                RelocationTarget::Memory32Size => wasmtime_memory32_size as usize,
                RelocationTarget::ImportedMemory32Grow => wasmtime_imported_memory32_grow as usize,
                RelocationTarget::ImportedMemory32Size => wasmtime_imported_memory32_size as usize,
                RelocationTarget::LibCall(libcall) => {
                    use cranelift_codegen::ir::LibCall::*;
                    use libcalls::*;
                    match libcall {
                        CeilF32 => wasmtime_f32_ceil as usize,
                        FloorF32 => wasmtime_f32_floor as usize,
@@ -308,7 +328,8 @@ fn relocate(
                }
            };

            let body = allocated_functions[i].0;
            let fatptr: *const [VMFunctionBody] = allocated_functions[i];
            let body = fatptr as *const VMFunctionBody;
            match r.reloc {
                #[cfg(target_pointer_width = "64")]
                Reloc::Abs8 => unsafe {
@@ -340,21 +361,3 @@ fn relocate(
extern "C" {
    pub fn __rust_probestack();
}

/// The implementation of memory.grow.
extern "C" fn wasmtime_memory_grow(size: u32, memory_index: u32, vmctx: *mut VMContext) -> u32 {
    let instance = unsafe { (&mut *vmctx).instance() };
    let memory_index = MemoryIndex::new(memory_index as usize);

    instance
        .memory_grow(memory_index, size)
        .unwrap_or(u32::max_value())
}

/// The implementation of memory.size.
extern "C" fn wasmtime_memory_size(memory_index: u32, vmctx: *mut VMContext) -> u32 {
    let instance = unsafe { (&mut *vmctx).instance() };
    let memory_index = MemoryIndex::new(memory_index as usize);

    instance.memory_size(memory_index)
}
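
The truncated hunk above applies each relocation to the function body; for the 64-bit `Reloc::Abs8` case this amounts to an unaligned write of the target address, plus the addend, at the relocation offset. A sketch of that pattern, not the literal code elided from this hunk:

    // Sketch: apply a 64-bit absolute relocation. `body` is the function start;
    // `offset` and `addend` come from the Relocation record.
    unsafe {
        let reloc_address = (body as *const u8).add(offset as usize) as *mut u64;
        write_unaligned(
            reloc_address,
            (target_func_address as u64).wrapping_add(addend as u64),
        );
    }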

@@ -1,143 +0,0 @@
//! Memory management for linear memories.
//!
//! `LinearMemory` is to WebAssembly linear memories what `Table` is to WebAssembly tables.

use mmap::Mmap;
use region;
use std::string::String;
use vmcontext::VMMemory;
use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};

/// A linear memory instance.
#[derive(Debug)]
pub struct LinearMemory {
    mmap: Mmap,
    current: u32,
    maximum: Option<u32>,
    offset_guard_size: usize,
}

impl LinearMemory {
    /// Create a new linear memory instance with specified minimum and maximum number of pages.
    pub fn new(plan: &MemoryPlan) -> Result<Self, String> {
        // Neither the minimum nor the maximum may exceed `WASM_MAX_PAGES` (65536).
        assert!(plan.memory.minimum <= WASM_MAX_PAGES);
        assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= WASM_MAX_PAGES);

        let offset_guard_bytes = plan.offset_guard_size as usize;

        let minimum_pages = match plan.style {
            MemoryStyle::Dynamic => plan.memory.minimum,
            MemoryStyle::Static { bound } => {
                assert!(bound >= plan.memory.minimum);
                bound
            }
        } as usize;
        let minimum_bytes = minimum_pages.checked_mul(WASM_PAGE_SIZE as usize).unwrap();
        let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
        let mapped_pages = plan.memory.minimum as usize;
        let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
        let unmapped_pages = minimum_pages - mapped_pages;
        let unmapped_bytes = unmapped_pages * WASM_PAGE_SIZE as usize;
        let inaccessible_bytes = unmapped_bytes + offset_guard_bytes;

        let mmap = Mmap::with_size(request_bytes)?;

        // Make the unmapped and offset-guard pages inaccessible.
        unsafe {
            region::protect(
                mmap.as_ptr().add(mapped_bytes),
                inaccessible_bytes,
                region::Protection::None,
            )
        }
        .expect("unable to make memory inaccessible");

        Ok(Self {
            mmap,
            current: plan.memory.minimum,
            maximum: plan.memory.maximum,
            offset_guard_size: offset_guard_bytes,
        })
    }

    /// Returns the number of allocated wasm pages.
    pub fn size(&self) -> u32 {
        self.current
    }

    /// Grow memory by the specified number of pages.
    ///
    /// Returns `None` if memory can't be grown by the specified number
    /// of pages.
    pub fn grow(&mut self, delta: u32) -> Option<u32> {
        let new_pages = match self.current.checked_add(delta) {
            Some(new_pages) => new_pages,
            // Linear memory size overflow.
            None => return None,
        };
        let prev_pages = self.current;

        if let Some(maximum) = self.maximum {
            if new_pages > maximum {
                // Linear memory size would exceed the declared maximum.
                return None;
            }
        }

        // Wasm linear memories are never allowed to grow beyond what is
        // indexable. If the memory has no maximum, enforce the greatest
        // limit here.
        if new_pages >= WASM_MAX_PAGES {
            // Linear memory size would exceed the index range.
            return None;
        }

        let new_bytes = new_pages as usize * WASM_PAGE_SIZE as usize;

        if new_bytes > self.mmap.len() - self.offset_guard_size {
            // If we have no maximum, this is a "dynamic" heap, and it's allowed to move.
            assert!(self.maximum.is_none());
            let guard_bytes = self.offset_guard_size;
            let request_bytes = new_bytes.checked_add(guard_bytes)?;

            let mut new_mmap = Mmap::with_size(request_bytes).ok()?;

            // Make the offset-guard pages inaccessible.
            unsafe {
                region::protect(
                    new_mmap.as_ptr().add(new_bytes),
                    guard_bytes,
                    region::Protection::None,
                )
            }
            .expect("unable to make memory inaccessible");

            let copy_len = self.mmap.len() - self.offset_guard_size;
            new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.mmap.as_slice()[..copy_len]);

            self.mmap = new_mmap;
        }

        self.current = new_pages;

        Some(prev_pages)
    }

    /// Return a `VMMemory` for exposing the memory to JIT code.
    pub fn vmmemory(&mut self) -> VMMemory {
        VMMemory::definition(self.mmap.as_mut_ptr(), self.mmap.len())
    }
}

impl AsRef<[u8]> for LinearMemory {
    fn as_ref(&self) -> &[u8] {
        self.mmap.as_slice()
    }
}

impl AsMut<[u8]> for LinearMemory {
    fn as_mut(&mut self) -> &mut [u8] {
        self.mmap.as_mut_slice()
    }
}
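
A usage sketch of the grow protocol: like wasm's `memory.grow`, `grow` returns the previous size in pages on success. The `plan` here is a hypothetical one-page dynamic memory:

    let mut memory = LinearMemory::new(&plan)?;
    assert_eq!(memory.size(), 1);
    assert_eq!(memory.grow(2), Some(1)); // returns the previous size in pages
    assert_eq!(memory.size(), 3);
    assert_eq!(memory.grow(u32::max_value()), None); // overflow is rejected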

@@ -1,136 +0,0 @@
//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.

use errno;
use libc;
use region;
use std::ptr;
use std::slice;
use std::string::String;

/// Round `size` up to the nearest multiple of `page_size`.
/// This relies on `page_size` being a power of two.
fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
    (size + (page_size - 1)) & !(page_size - 1)
}

/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
#[derive(Debug)]
pub struct Mmap {
    ptr: *mut u8,
    len: usize,
}

impl Mmap {
    pub fn new() -> Self {
        Self {
            ptr: ptr::null_mut(),
            len: 0,
        }
    }

    /// Create a new `Mmap` pointing to at least `size` bytes of memory,
    /// suitably sized and aligned for memory protection.
    #[cfg(not(target_os = "windows"))]
    pub fn with_size(size: usize) -> Result<Self, String> {
        let page_size = region::page::size();
        let alloc_size = round_up_to_page_size(size, page_size);
        let ptr = unsafe {
            libc::mmap(
                ptr::null_mut(),
                alloc_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANON,
                -1,
                0,
            )
        };
        if ptr as isize == -1isize {
            Err(errno::errno().to_string())
        } else {
            Ok(Self {
                ptr: ptr as *mut u8,
                len: alloc_size,
            })
        }
    }

    #[cfg(target_os = "windows")]
    pub fn with_size(size: usize) -> Result<Self, String> {
        use winapi::um::memoryapi::VirtualAlloc;
        use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_READWRITE};

        let page_size = region::page::size();

        // VirtualAlloc always rounds up to the next multiple of the page size
        let ptr = unsafe {
            VirtualAlloc(
                ptr::null_mut(),
                size,
                MEM_COMMIT | MEM_RESERVE,
                PAGE_READWRITE,
            )
        };
        if !ptr.is_null() {
            Ok(Self {
                ptr: ptr as *mut u8,
                len: round_up_to_page_size(size, page_size),
            })
        } else {
            Err(errno::errno().to_string())
        }
    }

    pub fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr, self.len) }
    }

    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr, self.len) }
    }

    pub fn as_ptr(&self) -> *const u8 {
        self.ptr
    }

    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.ptr
    }

    pub fn len(&self) -> usize {
        self.len
    }
}

impl Drop for Mmap {
    #[cfg(not(target_os = "windows"))]
    fn drop(&mut self) {
        if !self.ptr.is_null() {
            let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.len) };
            assert_eq!(r, 0, "munmap failed: {}", errno::errno());
        }
    }

    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        if !self.ptr.is_null() {
            use winapi::um::memoryapi::VirtualFree;
            use winapi::um::winnt::MEM_RELEASE;
            // With MEM_RELEASE, the size must be 0, and VirtualFree returns
            // nonzero on success.
            let r = unsafe { VirtualFree(self.ptr as *mut _, 0, MEM_RELEASE) };
            assert_ne!(r, 0);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_up_to_page_size() {
        assert_eq!(round_up_to_page_size(0, 4096), 0);
        assert_eq!(round_up_to_page_size(1, 4096), 4096);
        assert_eq!(round_up_to_page_size(4096, 4096), 4096);
        assert_eq!(round_up_to_page_size(4097, 4096), 8192);
    }
}
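
A usage sketch of `Mmap`, assuming a 4096-byte page size (page size is platform-dependent):

    let mmap = Mmap::with_size(100).expect("allocation failed");
    assert_eq!(mmap.len(), 4096); // rounded up to a whole page
    assert!(mmap.as_slice().iter().all(|&b| b == 0)); // pages start zero-filled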

@@ -1,52 +0,0 @@
//! Implement a registry of function signatures, for fast indirect call
//! signature checking.

use cast;
use cranelift_codegen::ir;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::SignatureIndex;
use std::collections::{hash_map, HashMap};
use vmcontext::VMSignatureId;

#[derive(Debug)]
pub struct SignatureRegistry {
    signature_hash: HashMap<ir::Signature, VMSignatureId>,
    signature_ids: PrimaryMap<SignatureIndex, VMSignatureId>,
}

impl SignatureRegistry {
    pub fn new() -> Self {
        Self {
            signature_hash: HashMap::new(),
            signature_ids: PrimaryMap::new(),
        }
    }

    pub fn vmsignature_ids(&mut self) -> *mut VMSignatureId {
        self.signature_ids.values_mut().into_slice().as_mut_ptr()
    }

    /// Register the given signature.
    pub fn register(&mut self, sig_index: SignatureIndex, sig: &ir::Signature) {
        use cranelift_entity::EntityRef;
        // TODO: Refactor this interface so that we're not passing in redundant
        // information.
        debug_assert_eq!(sig_index.index(), self.signature_ids.len());

        let len = self.signature_hash.len();
        let sig_id = match self.signature_hash.entry(sig.clone()) {
            hash_map::Entry::Occupied(entry) => *entry.get(),
            hash_map::Entry::Vacant(entry) => {
                let sig_id = cast::u32(len).unwrap();
                entry.insert(sig_id);
                sig_id
            }
        };
        self.signature_ids.push(sig_id);
    }

    /// Return the identifying runtime index for the given signature.
    pub fn lookup(&mut self, sig_index: SignatureIndex) -> VMSignatureId {
        self.signature_ids[sig_index]
    }
}
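
How the registry is meant to be used, roughly: each module signature is registered once at instantiation, and indirect calls then compare small `VMSignatureId`s rather than whole signatures. A sketch, where `sig_a` and `sig_b` are hypothetical `ir::Signature` values:

    use cranelift_entity::EntityRef;

    let mut registry = SignatureRegistry::new();
    registry.register(SignatureIndex::new(0), &sig_a);
    registry.register(SignatureIndex::new(1), &sig_b);
    registry.register(SignatureIndex::new(2), &sig_a); // same signature again

    let id0 = registry.lookup(SignatureIndex::new(0));
    let id2 = registry.lookup(SignatureIndex::new(2));
    assert_eq!(id0, id2); // identical signatures share one id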

@@ -1,101 +0,0 @@
//! Interface to low-level signal-handling mechanisms.

#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]

use std::borrow::{Borrow, BorrowMut};
use std::sync::RwLock;

include!(concat!(env!("OUT_DIR"), "/signalhandlers.rs"));

struct InstallState {
    tried: bool,
    success: bool,
}

impl InstallState {
    fn new() -> Self {
        Self {
            tried: false,
            success: false,
        }
    }
}

lazy_static! {
    static ref EAGER_INSTALL_STATE: RwLock<InstallState> = RwLock::new(InstallState::new());
    static ref LAZY_INSTALL_STATE: RwLock<InstallState> = RwLock::new(InstallState::new());
}

/// This function performs the low-overhead signal handler initialization that we
/// want to do eagerly to ensure a more-deterministic global process state. This
/// is especially relevant for signal handlers since handler ordering depends on
/// installation order: the wasm signal handler must run *before* the other crash
/// handlers, and since POSIX signal handlers work LIFO, this function needs to be
/// called at the end of the startup process, after other handlers have been
/// installed. This function can thus be called multiple times, having no effect
/// after the first call.
pub fn ensure_eager_signal_handlers() {
    let mut locked = EAGER_INSTALL_STATE.write().unwrap();
    let state = locked.borrow_mut();

    if state.tried {
        return;
    }

    state.tried = true;
    assert!(!state.success);

    if !unsafe { EnsureEagerSignalHandlers() } {
        return;
    }

    state.success = true;
}

#[cfg(any(target_os = "macos", target_os = "ios"))]
fn ensure_darwin_mach_ports() {
    let mut locked = LAZY_INSTALL_STATE.write().unwrap();
    let state = locked.borrow_mut();

    if state.tried {
        return;
    }

    state.tried = true;
    assert!(!state.success);

    if !unsafe { EnsureDarwinMachPorts() } {
        return;
    }

    state.success = true;
}

/// Assuming `ensure_eager_signal_handlers` has already been called,
/// this function performs the full installation of signal handlers which must
/// be performed per-thread. This operation may incur some overhead and
/// so should be done only when needed to use wasm.
pub fn ensure_full_signal_handlers(cx: &mut TrapContext) {
    if cx.triedToInstallSignalHandlers {
        return;
    }

    cx.triedToInstallSignalHandlers = true;
    assert!(!cx.haveSignalHandlers);

    {
        let locked = EAGER_INSTALL_STATE.read().unwrap();
        let state = locked.borrow();
        assert!(state.tried);
        if !state.success {
            return;
        }
    }

    #[cfg(any(target_os = "macos", target_os = "ios"))]
    ensure_darwin_mach_ports();

    cx.haveSignalHandlers = true;
}

@@ -1,58 +0,0 @@
//! Memory management for tables.
//!
//! `Table` is to WebAssembly tables what `LinearMemory` is to WebAssembly linear memories.

use cranelift_wasm::TableElementType;
use std::vec::Vec;
use vmcontext::{VMCallerCheckedAnyfunc, VMTable};
use wasmtime_environ::{TablePlan, TableStyle};

/// A table instance.
#[derive(Debug)]
pub struct Table {
    vec: Vec<VMCallerCheckedAnyfunc>,
    maximum: Option<u32>,
}

impl Table {
    /// Create a new table instance with specified minimum and maximum number of elements.
    pub fn new(plan: &TablePlan) -> Self {
        match plan.table.ty {
            TableElementType::Func => (),
            TableElementType::Val(ty) => {
                unimplemented!("tables of types other than anyfunc ({})", ty)
            }
        };

        match plan.style {
            TableStyle::CallerChecksSignature => {
                let mut vec = Vec::new();
                vec.resize(
                    plan.table.minimum as usize,
                    VMCallerCheckedAnyfunc::default(),
                );

                Self {
                    vec,
                    maximum: plan.table.maximum,
                }
            }
        }
    }

    /// Return a `VMTable` for exposing the table to JIT code.
    pub fn vmtable(&mut self) -> VMTable {
        VMTable::definition(self.vec.as_mut_ptr() as *mut u8, self.vec.len())
    }
}

impl AsRef<[VMCallerCheckedAnyfunc]> for Table {
    fn as_ref(&self) -> &[VMCallerCheckedAnyfunc] {
        self.vec.as_slice()
    }
}

impl AsMut<[VMCallerCheckedAnyfunc]> for Table {
    fn as_mut(&mut self) -> &mut [VMCallerCheckedAnyfunc] {
        self.vec.as_mut_slice()
    }
}

@@ -1,102 +0,0 @@
//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.

use libc::c_int;
use signalhandlers::{jmp_buf, CodeSegment};
use std::cell::{Cell, RefCell};
use std::mem;
use std::ptr;
use std::string::String;
use std::vec::Vec;

// Currently we use setjmp/longjmp to unwind out of a signal handler
// and back to the point where WebAssembly was called (via `call_wasm`).
// This works because WebAssembly code currently does not use any EH
// or require any cleanups, and we never unwind through non-wasm frames.
// In the future, we'll likely replace this with fancier stack unwinding.
extern "C" {
    fn setjmp(env: *mut jmp_buf) -> c_int;
    fn longjmp(env: *const jmp_buf, val: c_int) -> !;
}

#[derive(Copy, Clone, Debug)]
struct TrapData {
    pc: *const u8,
}

thread_local! {
    static TRAP_DATA: Cell<TrapData> = Cell::new(TrapData { pc: ptr::null() });
    static JMP_BUFS: RefCell<Vec<jmp_buf>> = RefCell::new(Vec::new());
}

/// Record the trap code and wasm bytecode offset in TLS somewhere.
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn RecordTrap(pc: *const u8, _codeSegment: *const CodeSegment) {
    // TODO: Look up the wasm bytecode offset and trap code and record them instead.
    TRAP_DATA.with(|data| data.set(TrapData { pc }));
}

/// Initiate an unwind.
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn Unwind() {
    JMP_BUFS.with(|bufs| {
        let buf = bufs.borrow_mut().pop().unwrap();
        unsafe { longjmp(&buf, 1) };
    })
}

/// Return the CodeSegment containing the given pc, if any exist in the process.
/// This method does not take a lock.
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn LookupCodeSegment(_pc: *const ::std::os::raw::c_void) -> *const CodeSegment {
    // TODO: Implement this.
    -1isize as *const CodeSegment
}

/// A simple guard to ensure that `JMP_BUFS` is reset when we're done.
struct ScopeGuard {
    orig_num_bufs: usize,
}

impl ScopeGuard {
    fn new() -> Self {
        Self {
            orig_num_bufs: JMP_BUFS.with(|bufs| bufs.borrow().len()),
        }
    }
}

impl Drop for ScopeGuard {
    fn drop(&mut self) {
        let orig_num_bufs = self.orig_num_bufs;
        JMP_BUFS.with(|bufs| {
            bufs.borrow_mut()
                .resize(orig_num_bufs, unsafe { mem::zeroed() })
        });
    }
}

/// Call the wasm function pointed to by `f`.
pub fn call_wasm<F>(f: F) -> Result<(), String>
where
    F: FnOnce(),
{
    // In case wasm code calls Rust that panics and unwinds past this point,
    // ensure that JMP_BUFS is unwound to its incoming state.
    let _guard = ScopeGuard::new();

    JMP_BUFS.with(|bufs| {
        let mut buf = unsafe { mem::uninitialized() };
        if unsafe { setjmp(&mut buf) } != 0 {
            return TRAP_DATA.with(|data| Err(format!("wasm trap at {:?}", data.get().pc)));
        }
        bufs.borrow_mut().push(buf);
        f();
        Ok(())
    })
}
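
A usage sketch of `call_wasm`: the transmuted entry point is passed in as the closure, and a trap longjmps back here and surfaces as an `Err`. The `exec_code_buf` pointer is hypothetical:

    let entry: fn() = unsafe { mem::transmute(exec_code_buf) };
    match call_wasm(entry) {
        Ok(()) => println!("wasm returned normally"),
        Err(message) => println!("wasm trapped: {}", message),
    }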

@@ -1,601 +0,0 @@
//! This file declares `VMContext` and several related structs which contain
//! fields that JIT code accesses directly.

use cranelift_entity::EntityRef;
use cranelift_wasm::{Global, GlobalIndex, GlobalInit, MemoryIndex, TableIndex};
use instance::Instance;
use std::fmt;
use std::ptr;

/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
#[repr(C)]
pub struct VMFunctionBody(u8);

#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    #[test]
    fn check_vmfunction_body_offsets() {
        assert_eq!(size_of::<VMFunctionBody>(), 1);
    }
}

/// The fields a JIT needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    base: *mut u8,
    /// The current size of linear memory in bytes.
    current_length: usize,
}

#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use std::mem::size_of;
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmmemory_definition_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.vmmemory_definition_current_length())
        );
    }
}

/// The fields a JIT needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    from: *mut VMMemoryDefinition,
}

#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use std::mem::size_of;
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmmemory_import_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, from),
            usize::from(offsets.vmmemory_import_from())
        );
    }
}

/// The main fields a JIT needs to access to utilize a WebAssembly linear
/// memory. It must know whether the memory is defined within the instance
/// or imported.
#[repr(C)]
pub union VMMemory {
    /// A linear memory defined within the instance.
    definition: VMMemoryDefinition,

    /// An imported linear memory.
    import: VMMemoryImport,
}

#[cfg(test)]
mod test_vmmemory {
    use super::VMMemory;
    use std::mem::size_of;
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmmemory_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(
            size_of::<VMMemory>(),
            usize::from(offsets.size_of_vmmemory())
        );
    }
}

impl VMMemory {
    /// Construct a `VMMemoryDefinition` variant of `VMMemory`.
    pub fn definition(base: *mut u8, current_length: usize) -> Self {
        Self {
            definition: VMMemoryDefinition {
                base,
                current_length,
            },
        }
    }

    /// Construct a `VMMemoryImport` variant of `VMMemory`.
    pub fn import(from: *mut VMMemoryDefinition) -> Self {
        Self {
            import: VMMemoryImport { from },
        }
    }

    /// Get the underlying `VMMemoryDefinition`.
    pub unsafe fn get_definition(&mut self, is_import: bool) -> &mut VMMemoryDefinition {
        if is_import {
            &mut *self.import.from
        } else {
            &mut self.definition
        }
    }
}

impl fmt::Debug for VMMemory {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "VMMemory {{")?;
        write!(f, " definition: {:?},", unsafe { self.definition })?;
        write!(f, " import: {:?},", unsafe { self.import })?;
        write!(f, "}}")?;
        Ok(())
    }
}

/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug, Copy, Clone)]
#[repr(C, align(8))]
pub struct VMGlobalDefinition {
    storage: [u8; 8],
    // If more elements are added here, remember to add offset_of tests below!
}

#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmglobal_definition_alignment() {
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
    }

    #[test]
    fn check_vmglobal_definition_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(
            size_of::<VMGlobalDefinition>(),
            usize::from(offsets.size_of_vmglobal_definition())
        );
    }
}

impl VMGlobalDefinition {
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_i32(&mut self) -> &mut i32 {
        &mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut i32)
    }

    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_i64(&mut self) -> &mut i64 {
        &mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut i64)
    }

    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f32(&mut self) -> &mut f32 {
        &mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut f32)
    }

    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f32_bits(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut u32)
    }

    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f64(&mut self) -> &mut f64 {
        &mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut f64)
    }

    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f64_bits(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut u64)
    }
}
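
Since every global shares the same 8-byte storage, reads and writes go through the typed accessors above. A sketch; constructing the struct directly like this is only possible within this module, since `storage` is private:

    let mut g = VMGlobalDefinition { storage: [0; 8] };
    unsafe {
        *g.as_i32() = 42; // write the low 4 bytes as an i32
        assert_eq!(*g.as_i32(), 42);
        *g.as_f64_bits() = 1.5f64.to_bits(); // reuse the same storage for an f64
        assert_eq!(*g.as_f64(), 1.5);
    }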

/// The fields a JIT needs to access to utilize a WebAssembly global
/// variable imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    from: *mut VMGlobalDefinition,
}

#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use std::mem::size_of;
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmglobal_import_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, from),
            usize::from(offsets.vmglobal_import_from())
        );
    }
}

/// The main fields a JIT needs to access to utilize a WebAssembly global
/// variable. It must know whether the global variable is defined within the
/// instance or imported.
#[repr(C)]
pub union VMGlobal {
    /// A global variable defined within the instance.
    definition: VMGlobalDefinition,

    /// An imported global variable.
    import: VMGlobalImport,
}

#[cfg(test)]
mod test_vmglobal {
    use super::VMGlobal;
    use std::mem::size_of;
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmglobal_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(
            size_of::<VMGlobal>(),
            usize::from(offsets.size_of_vmglobal())
        );
    }
}

impl VMGlobal {
    /// Construct a `VMGlobalDefinition` variant of `VMGlobal`.
    pub fn definition(global: &Global) -> Self {
        let mut result = VMGlobalDefinition { storage: [0; 8] };
        match global.initializer {
            GlobalInit::I32Const(x) => *unsafe { result.as_i32() } = x,
            GlobalInit::I64Const(x) => *unsafe { result.as_i64() } = x,
            GlobalInit::F32Const(x) => *unsafe { result.as_f32_bits() } = x,
            GlobalInit::F64Const(x) => *unsafe { result.as_f64_bits() } = x,
            GlobalInit::GetGlobal(_x) => unimplemented!("globals init with get_global"),
            GlobalInit::Import => panic!("attempting to initialize imported global"),
        }
        Self { definition: result }
    }

    /// Construct a `VMGlobalImport` variant of `VMGlobal`.
    pub fn import(from: *mut VMGlobalDefinition) -> Self {
        Self {
            import: VMGlobalImport { from },
        }
    }

    /// Get the underlying `VMGlobalDefinition`.
    pub unsafe fn get_definition(&mut self, is_import: bool) -> &mut VMGlobalDefinition {
        if is_import {
            &mut *self.import.from
        } else {
            &mut self.definition
        }
    }
}

impl fmt::Debug for VMGlobal {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "VMGlobal {{")?;
        write!(f, " definition: {:?},", unsafe { self.definition })?;
        write!(f, " import: {:?},", unsafe { self.import })?;
        write!(f, "}}")?;
        Ok(())
    }
}

/// The fields a JIT needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    base: *mut u8,
    current_elements: usize,
}

#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use std::mem::size_of;
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmtable_definition_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}

/// The fields a JIT needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    from: *mut VMTableDefinition,
}

#[cfg(test)]
mod test_vmtable_import {
    use super::VMTableImport;
    use std::mem::size_of;
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmtable_import_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, from),
            usize::from(offsets.vmtable_import_from())
        );
    }
}

/// The main fields a JIT needs to access to utilize a WebAssembly table.
/// It must know whether the table is defined within the instance
/// or imported.
#[repr(C)]
pub union VMTable {
    /// A table defined within the instance.
    definition: VMTableDefinition,

    /// An imported table.
    import: VMTableImport,
}

#[cfg(test)]
mod test_vmtable {
    use super::VMTable;
    use std::mem::size_of;
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmtable_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(size_of::<VMTable>(), usize::from(offsets.size_of_vmtable()));
    }
}

impl VMTable {
    /// Construct a `VMTableDefinition` variant of `VMTable`.
    pub fn definition(base: *mut u8, current_elements: usize) -> Self {
        Self {
            definition: VMTableDefinition {
                base,
                current_elements,
            },
        }
    }

    /// Construct a `VMTableImport` variant of `VMTable`.
    pub fn import(from: *mut VMTableDefinition) -> Self {
        Self {
            import: VMTableImport { from },
        }
    }

    /// Get the underlying `VMTableDefinition`.
    pub unsafe fn get_definition(&mut self, is_import: bool) -> &mut VMTableDefinition {
        if is_import {
            &mut *self.import.from
        } else {
            &mut self.definition
        }
    }
}

impl fmt::Debug for VMTable {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "VMTable {{")?;
        write!(f, " definition: {:?},", unsafe { self.definition })?;
        write!(f, " import: {:?},", unsafe { self.import })?;
        write!(f, "}}")?;
        Ok(())
    }
}

/// The type of the `type_id` field in `VMCallerCheckedAnyfunc`.
pub type VMSignatureId = u32;

#[cfg(test)]
mod test_vmsignature_id {
    use super::VMSignatureId;
    use std::mem::size_of;
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmsignature_id_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(
            size_of::<VMSignatureId>(),
            usize::from(offsets.size_of_vmsignature_id())
        );
    }
}

/// The VM caller-checked "anyfunc" record, for caller-side signature checking.
/// It consists of the actual function pointer and a signature id to be checked
/// by the caller.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMCallerCheckedAnyfunc {
    pub func_ptr: *const VMFunctionBody,
    pub type_id: VMSignatureId,
    // If more elements are added here, remember to add offset_of tests below!
}

#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
    use super::VMCallerCheckedAnyfunc;
    use std::mem::size_of;
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmcaller_checked_anyfunc_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(
            size_of::<VMCallerCheckedAnyfunc>(),
            usize::from(offsets.size_of_vmcaller_checked_anyfunc())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, func_ptr),
            usize::from(offsets.vmcaller_checked_anyfunc_func_ptr())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedAnyfunc, type_id),
            usize::from(offsets.vmcaller_checked_anyfunc_type_id())
        );
    }
}

impl Default for VMCallerCheckedAnyfunc {
    fn default() -> Self {
        Self {
            func_ptr: ptr::null(),
            type_id: 0,
        }
    }
}
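
The caller-side check these records enable looks roughly like this in the generated code; a Rust rendering of the idea, not the emitted instruction sequence, where `expected_type_id` comes from the call site's signature:

    // Sketch: caller-side signature check for call_indirect.
    unsafe fn checked_indirect_call(
        anyfunc: &VMCallerCheckedAnyfunc,
        expected_type_id: VMSignatureId,
    ) {
        if anyfunc.type_id != expected_type_id {
            panic!("indirect call type mismatch"); // the real code traps instead
        }
        // Assuming a no-arg, no-result signature for the sketch.
        let f: fn() = std::mem::transmute(anyfunc.func_ptr);
        f();
    }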
|
||||
|
||||
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
|
||||
/// This has pointers to the globals, memories, tables, and other runtime
|
||||
/// state associated with the current instance.
|
||||
///
|
||||
/// TODO: The number of memories, globals, tables, and signature IDs does
|
||||
/// not change dynamically, and pointer arrays are not indexed dynamically,
|
||||
/// so these fields could all be contiguously allocated.
|
||||
#[derive(Debug)]
|
||||
#[repr(C)]
|
||||
pub struct VMContext {
|
||||
/// A pointer to an array of `VMMemory` instances, indexed by
|
||||
/// WebAssembly memory index.
|
||||
memories: *mut VMMemory,
|
||||
/// A pointer to an array of globals.
|
||||
globals: *mut VMGlobal,
|
||||
/// A pointer to an array of `VMTable` instances, indexed by
|
||||
/// WebAssembly table index.
|
||||
tables: *mut VMTable,
|
||||
/// Signature identifiers for signature-checking indirect calls.
|
||||
signature_ids: *mut u32,
|
||||
// If more elements are added here, remember to add offset_of tests below!
|
||||
}
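
Because the struct is `#[repr(C)]`, generated code reaches these fields by raw byte offsets computed in `wasmtime_environ::VMOffsets`, and the tests below pin the Rust layout to those offsets. A hedged sketch of the equivalent pointer arithmetic for reaching global `i`, with `globals_offset` standing in for whatever `offsets.vmctx_globals()` returns:

// Sketch only; real generated code performs the same loads using VMOffsets.
unsafe fn nth_global(vmctx: *const u8, globals_offset: usize, i: usize) -> *mut VMGlobal {
    // Read the `globals` base pointer out of the context at its byte offset...
    let base = *(vmctx.add(globals_offset) as *const *mut VMGlobal);
    // ...then index to the i'th element.
    base.add(i)
}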

#[cfg(test)]
mod test {
    use super::VMContext;
    use std::mem::size_of;
    use wasmtime_environ::VMOffsets;

    #[test]
    fn check_vmctx_offsets() {
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
        assert_eq!(size_of::<VMContext>(), usize::from(offsets.size_of_vmctx()));
        assert_eq!(
            offset_of!(VMContext, memories),
            usize::from(offsets.vmctx_memories())
        );
        assert_eq!(
            offset_of!(VMContext, globals),
            usize::from(offsets.vmctx_globals())
        );
        assert_eq!(
            offset_of!(VMContext, tables),
            usize::from(offsets.vmctx_tables())
        );
        assert_eq!(
            offset_of!(VMContext, signature_ids),
            usize::from(offsets.vmctx_signature_ids())
        );
    }
}

impl VMContext {
    /// Create a new `VMContext` instance.
    pub fn new(
        memories: *mut VMMemory,
        globals: *mut VMGlobal,
        tables: *mut VMTable,
        signature_ids: *mut u32,
    ) -> Self {
        Self {
            memories,
            globals,
            tables,
            signature_ids,
        }
    }

    /// Return a mutable reference to global `index`.
    pub unsafe fn global(&mut self, index: GlobalIndex) -> &mut VMGlobal {
        &mut *self.globals.add(index.index())
    }

    /// Return a mutable reference to linear memory `index`.
    pub unsafe fn memory(&mut self, index: MemoryIndex) -> &mut VMMemory {
        &mut *self.memories.add(index.index())
    }

    /// Return a mutable reference to table `index`.
    pub unsafe fn table(&mut self, index: TableIndex) -> &mut VMTable {
        &mut *self.tables.add(index.index())
    }

    /// Return a mutable reference to the associated `Instance`.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn instance(&mut self) -> &mut Instance {
        &mut *((self as *mut Self as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance)
    }
}
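
The `instance` accessor relies on the `VMContext` being embedded at a fixed offset inside its owning `Instance`, so the container can be recovered by stepping back from the field pointer. The same container-of arithmetic as a free function, under the assumption the method itself makes, namely that `Instance::vmctx_offset()` is the embedding offset in bytes:

// Only sound when `vmctx` genuinely lives inside an `Instance`.
unsafe fn instance_of(vmctx: *mut VMContext) -> *mut Instance {
    (vmctx as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance
}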
@@ -1,17 +1,30 @@
use action::{ActionError, ActionOutcome, RuntimeValue};
use code::Code;
use cranelift_codegen::isa;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::{DefinedFuncIndex, GlobalIndex, MemoryIndex};
use cranelift_codegen::ir::InstBuilder;
use cranelift_codegen::Context;
use cranelift_codegen::{binemit, ir, isa};
use cranelift_entity::{BoxedSlice, EntityRef, PrimaryMap};
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};
use cranelift_wasm::{
    DefinedFuncIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, FuncIndex,
    GlobalIndex, MemoryIndex, TableIndex,
};
use export::Resolver;
use get::get;
use instance::Instance;
use invoke::{invoke, invoke_start_function};
use link::link_module;
use std::str;
use vmcontext::{VMFunctionBody, VMGlobal};
use std::cmp::max;
use std::collections::HashMap;
use std::slice;
use std::string::String;
use std::vec::Vec;
use std::{mem, ptr};
use wasmtime_environ::{
    compile_module, Compilation, CompileError, Module, ModuleEnvironment, Tunables,
    compile_module, Compilation, CompileError, Export, Module, ModuleEnvironment, RelocSink,
    Tunables,
};
use wasmtime_runtime::{
    wasmtime_call_trampoline, wasmtime_init_eager, wasmtime_init_finish, Instance, VMContext,
    VMFunctionBody, VMGlobalDefinition, VMGlobalImport, VMMemoryDefinition, VMMemoryImport,
    VMTableDefinition, VMTableImport,
};

/// A module, an instance of that module, and accompanying compilation artifacts.
@@ -20,10 +33,19 @@ use wasmtime_environ::{
pub struct InstanceWorld {
    module: Module,
    instance: Instance,

    /// Pointers to functions in executable memory.
    finished_functions: BoxedSlice<DefinedFuncIndex, *const VMFunctionBody>,

    /// Trampolines for calling into JIT code.
    trampolines: TrampolinePark,
}

impl InstanceWorld {
    /// Create a new `InstanceWorld` by compiling the wasm module in `data` and instantiating it.
    ///
    /// `finished_functions` holds the function bodies
    /// which have been placed in executable memory and linked.
    pub fn new(
        code: &mut Code,
        isa: &isa::TargetIsa,
@@ -33,57 +55,133 @@ impl InstanceWorld {
        let mut module = Module::new();
        // TODO: Allow the tunables to be overridden.
        let tunables = Tunables::default();
        let instance = {
            // TODO: Untie this.
            let ((mut compilation, relocations), lazy_data_initializers) = {
                let (lazy_function_body_inputs, lazy_data_initializers) = {
                    let environ = ModuleEnvironment::new(isa, &mut module, tunables);
        let (lazy_function_body_inputs, lazy_data_initializers) = {
            let environ = ModuleEnvironment::new(isa, &mut module, tunables);

                    let translation = environ
                        .translate(&data)
                        .map_err(|error| ActionError::Compile(CompileError::Wasm(error)))?;
            let translation = environ
                .translate(&data)
                .map_err(|error| ActionError::Compile(CompileError::Wasm(error)))?;

            (
                translation.lazy.function_body_inputs,
                translation.lazy.data_initializers,
            )
        };

                (
                    compile_module(&module, &lazy_function_body_inputs, isa)
                        .map_err(ActionError::Compile)?,
                    lazy_data_initializers,
                )
            };

            let allocated_functions =
                allocate_functions(code, compilation).map_err(ActionError::Resource)?;

            let resolved = link_module(&module, &allocated_functions, relocations, resolver)
                .map_err(ActionError::Link)?;

            let mut instance = Instance::new(
                &module,
                allocated_functions,
                &lazy_data_initializers,
                resolved,
                (
                    translation.lazy.function_body_inputs,
                    translation.lazy.data_initializers,
                )
            .map_err(ActionError::Resource)?;

            // The WebAssembly spec specifies that the start function is
            // invoked automatically at instantiation time.
            match invoke_start_function(code, isa, &module, &mut instance)? {
                ActionOutcome::Returned { .. } => {}
                ActionOutcome::Trapped { message } => {
                    // Instantiation fails if the start function traps.
                    return Err(ActionError::Start(message));
                }
            }

            instance
        };

        Ok(Self { module, instance })
        let (compilation, relocations) = compile_module(&module, &lazy_function_body_inputs, isa)
            .map_err(ActionError::Compile)?;

        let allocated_functions =
            allocate_functions(code, compilation).map_err(ActionError::Resource)?;

        let imports = link_module(&module, &allocated_functions, relocations, resolver)
            .map_err(ActionError::Link)?;

        let finished_functions: BoxedSlice<DefinedFuncIndex, *const VMFunctionBody> =
            allocated_functions
                .into_iter()
                .map(|(_index, allocated)| {
                    let fatptr: *const [VMFunctionBody] = *allocated;
                    fatptr as *const VMFunctionBody
                })
                .collect::<PrimaryMap<_, _>>()
                .into_boxed_slice();

        let instance = Instance::new(
            &module,
            &finished_functions,
            imports,
            &lazy_data_initializers,
        )
        .map_err(ActionError::Resource)?;

        let fn_builder_ctx = FunctionBuilderContext::new();

        let mut result = Self {
            module,
            instance,
            finished_functions,
            trampolines: TrampolinePark {
                memo: HashMap::new(),
                fn_builder_ctx,
            },
        };

        // The WebAssembly spec specifies that the start function is
        // invoked automatically at instantiation time.
        match result.invoke_start_function(code, isa)? {
            ActionOutcome::Returned { .. } => {}
            ActionOutcome::Trapped { message } => {
                // Instantiation fails if the start function traps.
                return Err(ActionError::Start(message));
            }
        }

        Ok(result)
    }

    fn get_imported_function(&self, index: FuncIndex) -> Option<*const VMFunctionBody> {
        if index.index() < self.module.imported_funcs.len() {
            Some(unsafe { self.instance.vmctx().imported_function(index) })
        } else {
            None
        }
    }

    // TODO: Add an accessor for table elements.
    #[allow(dead_code)]
    fn get_imported_table(&self, index: TableIndex) -> Option<&VMTableImport> {
        if index.index() < self.module.imported_tables.len() {
            Some(unsafe { self.instance.vmctx().imported_table(index) })
        } else {
            None
        }
    }

    fn get_imported_memory(&self, index: MemoryIndex) -> Option<&VMMemoryImport> {
        if index.index() < self.module.imported_memories.len() {
            Some(unsafe { self.instance.vmctx().imported_memory(index) })
        } else {
            None
        }
    }

    fn get_imported_global(&self, index: GlobalIndex) -> Option<&VMGlobalImport> {
        if index.index() < self.module.imported_globals.len() {
            Some(unsafe { self.instance.vmctx().imported_global(index) })
        } else {
            None
        }
    }

    fn get_finished_function(&self, index: DefinedFuncIndex) -> Option<*const VMFunctionBody> {
        self.finished_functions.get(index).cloned()
    }

    // TODO: Add an accessor for table elements.
    #[allow(dead_code)]
    fn get_defined_table(&self, index: DefinedTableIndex) -> Option<&VMTableDefinition> {
        if self.module.table_index(index).index() < self.module.table_plans.len() {
            Some(unsafe { self.instance.vmctx().table(index) })
        } else {
            None
        }
    }

    fn get_defined_memory(&self, index: DefinedMemoryIndex) -> Option<&VMMemoryDefinition> {
        if self.module.memory_index(index).index() < self.module.memory_plans.len() {
            Some(unsafe { self.instance.vmctx().memory(index) })
        } else {
            None
        }
    }

    fn get_defined_global(&self, index: DefinedGlobalIndex) -> Option<&VMGlobalDefinition> {
        if self.module.global_index(index).index() < self.module.globals.len() {
            Some(unsafe { self.instance.vmctx().global(index) })
        } else {
            None
        }
    }

    /// Invoke a function in this `InstanceWorld` by name.
@@ -94,40 +192,362 @@ impl InstanceWorld {
        function_name: &str,
        args: &[RuntimeValue],
    ) -> Result<ActionOutcome, ActionError> {
        invoke(
            code,
            isa,
            &self.module,
            &mut self.instance,
            &function_name,
            args,
        )
        let fn_index = match self.module.exports.get(function_name) {
            Some(Export::Function(index)) => *index,
            Some(_) => {
                return Err(ActionError::Kind(format!(
                    "exported item \"{}\" is not a function",
                    function_name
                )))
            }
            None => {
                return Err(ActionError::Field(format!(
                    "no export named \"{}\"",
                    function_name
                )))
            }
        };

        self.invoke_by_index(code, isa, fn_index, args)
    }

    /// Invoke the WebAssembly start function of the instance, if one is present.
    fn invoke_start_function(
        &mut self,
        code: &mut Code,
        isa: &isa::TargetIsa,
    ) -> Result<ActionOutcome, ActionError> {
        if let Some(start_index) = self.module.start_func {
            self.invoke_by_index(code, isa, start_index, &[])
        } else {
            // No start function, just return nothing.
            Ok(ActionOutcome::Returned { values: vec![] })
        }
    }

    /// Calls the given indexed function, passing the given arguments and returning
    /// its results.
    fn invoke_by_index(
        &mut self,
        code: &mut Code,
        isa: &isa::TargetIsa,
        fn_index: FuncIndex,
        args: &[RuntimeValue],
    ) -> Result<ActionOutcome, ActionError> {
        let callee_address = match self.module.defined_func_index(fn_index) {
            Some(def_fn_index) => self
                .get_finished_function(def_fn_index)
                .ok_or_else(|| ActionError::Index(def_fn_index.index() as u64))?,
            None => self
                .get_imported_function(fn_index)
                .ok_or_else(|| ActionError::Index(fn_index.index() as u64))?,
        };

        // Rather than writing inline assembly to jump to the code region, we use the fact that
        // the Rust ABI for calling a function with no arguments and no return values matches the one
        // of the generated code. Thanks to this, we can transmute the code region into a first-class
        // Rust function and call it.
        // Ensure that our signal handlers are ready for action.
        wasmtime_init_eager();
        wasmtime_init_finish(self.instance.vmctx_mut());

        let signature = &self.module.signatures[self.module.functions[fn_index]];
        let vmctx: *mut VMContext = self.instance.vmctx_mut();

        for (index, value) in args.iter().enumerate() {
            assert_eq!(value.value_type(), signature.params[index].value_type);
        }

        // TODO: Support values larger than u64.
        let mut values_vec: Vec<u64> = Vec::new();
        let value_size = mem::size_of::<u64>();
        values_vec.resize(max(signature.params.len(), signature.returns.len()), 0u64);

        // Store the argument values into `values_vec`.
        for (index, arg) in args.iter().enumerate() {
            unsafe {
                let ptr = values_vec.as_mut_ptr().add(index);

                match arg {
                    RuntimeValue::I32(x) => ptr::write(ptr as *mut i32, *x),
                    RuntimeValue::I64(x) => ptr::write(ptr as *mut i64, *x),
                    RuntimeValue::F32(x) => ptr::write(ptr as *mut u32, *x),
                    RuntimeValue::F64(x) => ptr::write(ptr as *mut u64, *x),
                }
            }
        }

        // Store the vmctx value into `values_vec`.
        unsafe {
            let ptr = values_vec.as_mut_ptr().add(args.len());
            ptr::write(ptr as *mut usize, vmctx as usize)
        }

        // Get the trampoline to call for this function.
        let exec_code_buf =
            self.trampolines
                .get(code, isa, callee_address, &signature, value_size)?;

        // Make all JIT code produced thus far executable.
        code.publish();

        // Call the trampoline.
        if let Err(message) = unsafe {
            wasmtime_call_trampoline(
                exec_code_buf,
                values_vec.as_mut_ptr() as *mut u8,
                self.instance.vmctx_mut(),
            )
        } {
            return Ok(ActionOutcome::Trapped { message });
        }

        // Load the return values out of `values_vec`.
        let values = signature
            .returns
            .iter()
            .enumerate()
            .map(|(index, abi_param)| unsafe {
                let ptr = values_vec.as_ptr().add(index);

                match abi_param.value_type {
                    ir::types::I32 => RuntimeValue::I32(ptr::read(ptr as *const i32)),
                    ir::types::I64 => RuntimeValue::I64(ptr::read(ptr as *const i64)),
                    ir::types::F32 => RuntimeValue::F32(ptr::read(ptr as *const u32)),
                    ir::types::F64 => RuntimeValue::F64(ptr::read(ptr as *const u64)),
                    other => panic!("unsupported value type {:?}", other),
                }
            })
            .collect();

        Ok(ActionOutcome::Returned { values })
    }
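
All of the marshaling above assumes one calling convention: arguments and results share a single array of u64 slots, and the trampoline receives just that array plus `vmctx`. A sketch of the shape being called through (the concrete convention is whatever `make_trampoline` below emits via the ISA's default call conv, not necessarily extern "C", and the real entry point is reached through `wasmtime_call_trampoline` so traps can be caught):

// Hypothetical type alias for illustration.
type Trampoline = unsafe extern "C" fn(values_vec: *mut u8, vmctx: *mut VMContext);

unsafe fn call_through(t: Trampoline, values: &mut [u64], vmctx: *mut VMContext) {
    // On entry `values` holds the arguments; on return, the results.
    t(values.as_mut_ptr() as *mut u8, vmctx);
}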

    /// Read a global in this `InstanceWorld` by name.
    pub fn get(&mut self, global_name: &str) -> Result<RuntimeValue, ActionError> {
        get(&self.module, &mut self.instance, global_name)
    pub fn get(&self, global_name: &str) -> Result<RuntimeValue, ActionError> {
        let global_index = match self.module.exports.get(global_name) {
            Some(Export::Global(index)) => *index,
            Some(_) => {
                return Err(ActionError::Kind(format!(
                    "exported item \"{}\" is not a global",
                    global_name
                )))
            }
            None => {
                return Err(ActionError::Field(format!(
                    "no export named \"{}\"",
                    global_name
                )))
            }
        };

        self.get_by_index(global_index)
    }

    /// Reads the value of the indexed global variable in `module`.
    pub fn get_by_index(&self, global_index: GlobalIndex) -> Result<RuntimeValue, ActionError> {
        let global_address = match self.module.defined_global_index(global_index) {
            Some(def_global_index) => self
                .get_defined_global(def_global_index)
                .ok_or_else(|| ActionError::Index(def_global_index.index() as u64))?,
            None => {
                let from: *const VMGlobalDefinition = self
                    .get_imported_global(global_index)
                    .ok_or_else(|| ActionError::Index(global_index.index() as u64))?
                    .from;
                from
            }
        };
        let global_def = unsafe { &*global_address };

        unsafe {
            Ok(
                match self
                    .module
                    .globals
                    .get(global_index)
                    .ok_or_else(|| ActionError::Index(global_index.index() as u64))?
                    .ty
                {
                    ir::types::I32 => RuntimeValue::I32(*global_def.as_i32()),
                    ir::types::I64 => RuntimeValue::I64(*global_def.as_i64()),
                    ir::types::F32 => RuntimeValue::F32(*global_def.as_f32_bits()),
                    ir::types::F64 => RuntimeValue::F64(*global_def.as_f64_bits()),
                    other => {
                        return Err(ActionError::Type(format!(
                            "global with type {} not supported",
                            other
                        )))
                    }
                },
            )
        }
    }

    /// Returns a slice of the contents of allocated linear memory.
    pub fn inspect_memory(&self, memory_index: MemoryIndex, address: usize, len: usize) -> &[u8] {
        self.instance.inspect_memory(memory_index, address, len)
    }
    pub fn inspect_memory(
        &self,
        memory_index: MemoryIndex,
        address: usize,
        len: usize,
    ) -> Result<&[u8], ActionError> {
        let memory_address = match self.module.defined_memory_index(memory_index) {
            Some(def_memory_index) => self
                .get_defined_memory(def_memory_index)
                .ok_or_else(|| ActionError::Index(def_memory_index.index() as u64))?,
            None => {
                let from: *const VMMemoryDefinition = self
                    .get_imported_memory(memory_index)
                    .ok_or_else(|| ActionError::Index(memory_index.index() as u64))?
                    .from;
                from
            }
        };
        let memory_def = unsafe { &*memory_address };

    /// Shows the value of a global variable.
    pub fn inspect_global(&self, global_index: GlobalIndex) -> &VMGlobal {
        self.instance.inspect_global(global_index)
        Ok(unsafe {
            &slice::from_raw_parts(memory_def.base, memory_def.current_length)
                [address..address + len]
        })
    }
}
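
End to end, a caller drives this API roughly as follows. This is a hypothetical driver, not code from this commit; it assumes `code`, `isa`, `data`, and `resolver` are constructed as elsewhere in the crate, and that the module exports a function named "add":

fn demo(
    code: &mut Code,
    isa: &isa::TargetIsa,
    data: &[u8],
    resolver: &mut Resolver,
) -> Result<(), ActionError> {
    // Compile, link, instantiate, and run the start function.
    let mut world = InstanceWorld::new(code, isa, data, resolver)?;
    // Look up the "add" export by name and call it.
    let args = [RuntimeValue::I32(1), RuntimeValue::I32(2)];
    match world.invoke(code, isa, "add", &args)? {
        ActionOutcome::Returned { values } => println!("returned {:?}", values),
        ActionOutcome::Trapped { message } => println!("trapped: {}", message),
    }
    Ok(())
}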

fn allocate_functions(
    code: &mut Code,
    compilation: Compilation,
) -> Result<PrimaryMap<DefinedFuncIndex, (*mut VMFunctionBody, usize)>, String> {
) -> Result<PrimaryMap<DefinedFuncIndex, *mut [VMFunctionBody]>, String> {
    let mut result = PrimaryMap::with_capacity(compilation.functions.len());
    for (_, body) in compilation.functions.into_iter() {
        let slice = code.allocate_copy_of_byte_slice(body)?;
        result.push((slice.as_mut_ptr(), slice.len()));
        let fatptr: *mut [VMFunctionBody] = code.allocate_copy_of_byte_slice(body)?;
        result.push(fatptr);
    }
    Ok(result)
}
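
The fat pointers returned here (`*mut [VMFunctionBody]`) carry each body's length along with its address; `InstanceWorld::new` above thins them down once only entry addresses are needed. The conversion is a plain cast that drops the length metadata:

// Illustrative helper: a raw slice pointer casts to a thin pointer to its
// first element.
fn entry_point(body: *mut [VMFunctionBody]) -> *const VMFunctionBody {
    body as *const VMFunctionBody
}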

struct TrampolinePark {
    /// Memoized per-function trampolines.
    memo: HashMap<*const VMFunctionBody, *const VMFunctionBody>,

    /// The `FunctionBuilderContext`, shared between function compilations.
    fn_builder_ctx: FunctionBuilderContext,
}

impl TrampolinePark {
    fn get(
        &mut self,
        code: &mut Code,
        isa: &isa::TargetIsa,
        callee_address: *const VMFunctionBody,
        signature: &ir::Signature,
        value_size: usize,
    ) -> Result<*const VMFunctionBody, ActionError> {
        use std::collections::hash_map::Entry::{Occupied, Vacant};
        Ok(match self.memo.entry(callee_address) {
            Occupied(entry) => *entry.get(),
            Vacant(entry) => {
                let body = make_trampoline(
                    &mut self.fn_builder_ctx,
                    code,
                    isa,
                    callee_address,
                    signature,
                    value_size,
                )?;
                entry.insert(body);
                body
            }
        })
    }
}

fn make_trampoline(
    fn_builder_ctx: &mut FunctionBuilderContext,
    code: &mut Code,
    isa: &isa::TargetIsa,
    callee_address: *const VMFunctionBody,
    signature: &ir::Signature,
    value_size: usize,
) -> Result<*const VMFunctionBody, ActionError> {
    let pointer_type = isa.pointer_type();
    let mut wrapper_sig = ir::Signature::new(isa.frontend_config().default_call_conv);

    // Add the `values_vec` parameter.
    wrapper_sig.params.push(ir::AbiParam::new(pointer_type));
    // Add the `vmctx` parameter.
    wrapper_sig.params.push(ir::AbiParam::special(
        pointer_type,
        ir::ArgumentPurpose::VMContext,
    ));

    let mut context = Context::new();
    context.func = ir::Function::with_name_signature(ir::ExternalName::user(0, 0), wrapper_sig);

    {
        let mut builder = FunctionBuilder::new(&mut context.func, fn_builder_ctx);
        let block0 = builder.create_ebb();

        builder.append_ebb_params_for_function_params(block0);
        builder.switch_to_block(block0);
        builder.seal_block(block0);

        let mut callee_args = Vec::new();
        let pointer_type = isa.pointer_type();

        let (values_vec_ptr_val, vmctx_ptr_val) = {
            let params = builder.func.dfg.ebb_params(block0);
            (params[0], params[1])
        };

        // Load the argument values out of `values_vec`.
        let mflags = ir::MemFlags::trusted();
        for (i, r) in signature.params.iter().enumerate() {
            let value = match r.purpose {
                ir::ArgumentPurpose::Normal => builder.ins().load(
                    r.value_type,
                    mflags,
                    values_vec_ptr_val,
                    (i * value_size) as i32,
                ),
                ir::ArgumentPurpose::VMContext => vmctx_ptr_val,
                other => panic!("unsupported argument purpose {}", other),
            };
            callee_args.push(value);
        }

        let new_sig = builder.import_signature(signature.clone());

        // TODO: It's possible to make this a direct call. We just need Cranelift
        // to support functions declared with an immediate integer address.
        // ExternalName::Absolute(u64). Let's do it.
        let callee_value = builder.ins().iconst(pointer_type, callee_address as i64);
        let call = builder
            .ins()
            .call_indirect(new_sig, callee_value, &callee_args);

        let results = builder.func.dfg.inst_results(call).to_vec();

        // Store the return values into `values_vec`.
        let mflags = ir::MemFlags::trusted();
        for (i, r) in results.iter().enumerate() {
            builder
                .ins()
                .store(mflags, *r, values_vec_ptr_val, (i * value_size) as i32);
        }

        builder.ins().return_(&[]);
        builder.finalize()
    }

    let mut code_buf: Vec<u8> = Vec::new();
    let mut reloc_sink = RelocSink::new();
    let mut trap_sink = binemit::NullTrapSink {};
    context
        .compile_and_emit(isa, &mut code_buf, &mut reloc_sink, &mut trap_sink)
        .map_err(|error| ActionError::Compile(CompileError::Codegen(error)))?;
    assert!(reloc_sink.func_relocs.is_empty());

    Ok(code
        .allocate_copy_of_byte_slice(&code_buf)
        .map_err(ActionError::Resource)?
        .as_ptr())
}
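
For a concrete `(i32, i32) -> i32` callee, the code emitted above behaves like this hand-written analogue. Illustrative only: the generated version bakes the callee address in as an `iconst` instead of taking it as a parameter, and uses the ISA's default calling convention rather than extern "C":

unsafe fn trampoline_add(
    values_vec: *mut u8,
    vmctx: *mut VMContext,
    callee: unsafe extern "C" fn(i32, i32, *mut VMContext) -> i32,
) {
    let slots = values_vec as *mut u64;
    // Normal arguments are loaded from consecutive u64 slots.
    let a = *(slots.add(0) as *const i32);
    let b = *(slots.add(1) as *const i32);
    // The VMContext-purpose parameter is forwarded directly.
    let ret = callee(a, b, vmctx);
    // Results are stored back over the slots, starting at index 0.
    *(slots.add(0) as *mut i32) = ret;
}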