Initial support for function, table, memory, and global imports.

Dan Gohman
2018-12-08 17:38:28 -05:00
parent 93f33141e9
commit 56850d481d
45 changed files with 3181 additions and 2181 deletions

38
lib/runtime/Cargo.toml Normal file

@@ -0,0 +1,38 @@
[package]
name = "wasmtime-runtime"
version = "0.0.0"
authors = ["The Cranelift Project Developers"]
publish = false
description = "Runtime library support for Wasmtime"
categories = ["wasm"]
repository = "https://github.com/CraneStation/wasmtime"
license = "Apache-2.0 WITH LLVM-exception"
readme = "README.md"
[dependencies]
cranelift-codegen = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
cranelift-entity = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
cranelift-wasm = { git = "https://github.com/sunfishcode/cranelift.git", branch = "guard-offset" }
wasmtime-environ = { path = "../environ" }
region = "1.0.0"
lazy_static = "1.2.0"
libc = { version = "0.2.44", default-features = false }
errno = "0.2.4"
memoffset = "0.2.1"
cast = { version = "0.2.2", default-features = false }
failure = { version = "0.1.3", default-features = false }
failure_derive = { version = "0.1.3", default-features = false }
[build-dependencies]
cmake = "0.1.35"
bindgen = "0.44.0"
regex = "1.0.6"
[features]
default = ["std"]
std = ["cranelift-codegen/std", "cranelift-wasm/std"]
core = ["cranelift-codegen/core", "cranelift-wasm/core", "wasmtime-environ/core"]
[badges]
maintenance = { status = "experimental" }
travis-ci = { repository = "CraneStation/wasmtime" }

220
lib/runtime/LICENSE Normal file

@@ -0,0 +1,220 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--- LLVM Exceptions to the Apache 2.0 License ----
As an exception, if, as a result of your compiling your source code, portions
of this Software are embedded into an Object form of such source code, you
may redistribute such embedded portions in such Object form without complying
with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
In addition, if you combine or link compiled forms of this Software with
software that is licensed under the GPLv2 ("Combined Software") and if a
court of competent jurisdiction determines that the patent provision (Section
3), the indemnity provision (Section 9) or other Section of the License
conflicts with the conditions of the GPLv2, you may retroactively and
prospectively choose to deem waived or otherwise exclude such Section(s) of
the License, but only in their entirety and only with respect to the Combined
Software.

7
lib/runtime/README.md Normal file

@@ -0,0 +1,7 @@
This is the `wasmtime-runtime` crate, which provides wasm runtime library
support for the wasm ABI used by [`wasmtime-environ`],
[`wasmtime-execute`], and [`wasmtime-obj`].
[`wasmtime-environ`]: https://crates.io/crates/wasmtime-environ
[`wasmtime-execute`]: https://crates.io/crates/wasmtime-execute
[`wasmtime-obj`]: https://crates.io/crates/wasmtime-obj
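
The pieces this crate exposes fit together roughly as follows. This is a minimal sketch: the `module`, `finished_functions`, and `data_initializers` inputs are assumed to come from the compilation crates listed above and are not produced here.

```rust
// Illustrative wiring only; the compilation side (wasmtime-environ /
// wasmtime-execute) that produces these inputs is assumed, not shown.
use cranelift_entity::BoxedSlice;
use cranelift_wasm::DefinedFuncIndex;
use wasmtime_runtime::{Imports, Instance, VMFunctionBody};

fn instantiate(
    module: &wasmtime_environ::Module,
    finished_functions: &BoxedSlice<DefinedFuncIndex, *const VMFunctionBody>,
    data_initializers: &[wasmtime_environ::DataInitializer],
) -> Result<Instance, String> {
    // A module with no imports can use `Imports::none()`; otherwise the
    // embedder resolves each import and builds an `Imports` first.
    Instance::new(module, finished_functions, Imports::none(), data_initializers)
}
```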

38
lib/runtime/build.rs Normal file

@@ -0,0 +1,38 @@
extern crate bindgen;
extern crate cmake;
extern crate regex;
use cmake::Config;
use regex::Regex;
use std::env;
use std::path::PathBuf;
fn main() {
let dst = Config::new("signalhandlers").build();
println!("cargo:rustc-link-search=native={}", dst.display());
println!("cargo:rustc-link-lib=static=SignalHandlers");
let mut bindings_builder = bindgen::Builder::default()
.header("signalhandlers/SignalHandlers.h")
.whitelist_type("CodeSegment")
.whitelist_type("TrapContext")
.whitelist_type("jmp_buf")
.whitelist_function("EnsureEagerSignalHandlers");
// If we're compiling for Darwin, compile in extra Darwin support routines.
if Regex::new(r"-darwin[[:digit:].]*$")
.unwrap()
.is_match(&env::var("TARGET").unwrap())
{
bindings_builder = bindings_builder.whitelist_function("EnsureDarwinMachPorts");
}
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
bindings_builder
.generate()
.expect("Unable to generate bindings")
.write_to_file(out_path.join("signalhandlers.rs"))
.expect("Couldn't write bindings!");
}
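
The generated `signalhandlers.rs` bindings would then typically be pulled into the crate with the usual bindgen include pattern; a sketch of what the `src/signalhandlers.rs` module (not shown in this excerpt) plausibly does:

```rust
// Sketch only: include the bindgen output produced by build.rs from OUT_DIR.
#![allow(non_camel_case_types, non_snake_case, non_upper_case_globals)]

include!(concat!(env!("OUT_DIR"), "/signalhandlers.rs"));
```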

8
lib/runtime/signalhandlers/CMakeLists.txt Normal file

@@ -0,0 +1,8 @@
cmake_minimum_required(VERSION 3.0)
project(SignalHandlers CXX)
set(CMAKE_CXX_FLAGS "-std=c++11 -fno-exceptions -fno-rtti -fPIC")
add_library(SignalHandlers STATIC SignalHandlers.cpp)
install(TARGETS SignalHandlers DESTINATION .)

823
lib/runtime/signalhandlers/SignalHandlers.cpp Normal file

@@ -0,0 +1,823 @@
//! This file is largely derived from the code in WasmSignalHandlers.cpp in SpiderMonkey:
//!
//! https://dxr.mozilla.org/mozilla-central/source/js/src/wasm/WasmSignalHandlers.cpp
//!
//! Use of Mach ports on Darwin platforms (the USE_APPLE_MACH_PORTS code below) is
//! currently disabled.
#include "SignalHandlers.h"
#include <stdint.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#if defined(_WIN32)
# include <winternl.h> // must include before util/Windows.h's `#undef`s
# include "util/Windows.h"
#elif defined(USE_APPLE_MACH_PORTS)
# include <mach/exc.h>
# include <mach/mach.h>
# include <pthread.h>
#else
# include <signal.h>
#endif
// =============================================================================
// This following pile of macros and includes defines the ToRegisterState() and
// the ContextToPC() functions from the (highly) platform-specific CONTEXT
// struct which is provided to the signal handler.
// =============================================================================
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# include <sys/ucontext.h> // for ucontext_t, mcontext_t
#endif
#if defined(__x86_64__)
# if defined(__DragonFly__)
# include <machine/npx.h> // for union savefpu
# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
defined(__NetBSD__) || defined(__OpenBSD__)
# include <machine/fpu.h> // for struct savefpu/fxsave64
# endif
#endif
#if defined(_WIN32)
# define EIP_sig(p) ((p)->Eip)
# define EBP_sig(p) ((p)->Ebp)
# define ESP_sig(p) ((p)->Esp)
# define RIP_sig(p) ((p)->Rip)
# define RSP_sig(p) ((p)->Rsp)
# define RBP_sig(p) ((p)->Rbp)
# define R11_sig(p) ((p)->R11)
# define R13_sig(p) ((p)->R13)
# define R14_sig(p) ((p)->R14)
# define R15_sig(p) ((p)->R15)
# define EPC_sig(p) ((p)->Pc)
# define RFP_sig(p) ((p)->Fp)
# define R31_sig(p) ((p)->Sp)
# define RLR_sig(p) ((p)->Lr)
#elif defined(__OpenBSD__)
# define EIP_sig(p) ((p)->sc_eip)
# define EBP_sig(p) ((p)->sc_ebp)
# define ESP_sig(p) ((p)->sc_esp)
# define RIP_sig(p) ((p)->sc_rip)
# define RSP_sig(p) ((p)->sc_rsp)
# define RBP_sig(p) ((p)->sc_rbp)
# define R11_sig(p) ((p)->sc_r11)
# if defined(__arm__)
# define R13_sig(p) ((p)->sc_usr_sp)
# define R14_sig(p) ((p)->sc_usr_lr)
# define R15_sig(p) ((p)->sc_pc)
# else
# define R13_sig(p) ((p)->sc_r13)
# define R14_sig(p) ((p)->sc_r14)
# define R15_sig(p) ((p)->sc_r15)
# endif
# if defined(__aarch64__)
# define EPC_sig(p) ((p)->sc_elr)
# define RFP_sig(p) ((p)->sc_x[29])
# define RLR_sig(p) ((p)->sc_lr)
# define R31_sig(p) ((p)->sc_sp)
# endif
# if defined(__mips__)
# define EPC_sig(p) ((p)->sc_pc)
# define RFP_sig(p) ((p)->sc_regs[30])
# endif
#elif defined(__linux__) || defined(__sun)
# if defined(__linux__)
# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP])
# define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
# define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
# else
# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_PC])
# define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
# define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
# endif
# define RIP_sig(p) ((p)->uc_mcontext.gregs[REG_RIP])
# define RSP_sig(p) ((p)->uc_mcontext.gregs[REG_RSP])
# define RBP_sig(p) ((p)->uc_mcontext.gregs[REG_RBP])
# if defined(__linux__) && defined(__arm__)
# define R11_sig(p) ((p)->uc_mcontext.arm_fp)
# define R13_sig(p) ((p)->uc_mcontext.arm_sp)
# define R14_sig(p) ((p)->uc_mcontext.arm_lr)
# define R15_sig(p) ((p)->uc_mcontext.arm_pc)
# else
# define R11_sig(p) ((p)->uc_mcontext.gregs[REG_R11])
# define R13_sig(p) ((p)->uc_mcontext.gregs[REG_R13])
# define R14_sig(p) ((p)->uc_mcontext.gregs[REG_R14])
# define R15_sig(p) ((p)->uc_mcontext.gregs[REG_R15])
# endif
# if defined(__linux__) && defined(__aarch64__)
# define EPC_sig(p) ((p)->uc_mcontext.pc)
# define RFP_sig(p) ((p)->uc_mcontext.regs[29])
# define RLR_sig(p) ((p)->uc_mcontext.regs[30])
# define R31_sig(p) ((p)->uc_mcontext.regs[31])
# endif
# if defined(__linux__) && defined(__mips__)
# define EPC_sig(p) ((p)->uc_mcontext.pc)
# define RFP_sig(p) ((p)->uc_mcontext.gregs[30])
# define RSP_sig(p) ((p)->uc_mcontext.gregs[29])
# define R31_sig(p) ((p)->uc_mcontext.gregs[31])
# endif
# if defined(__linux__) && (defined(__sparc__) && defined(__arch64__))
# define PC_sig(p) ((p)->uc_mcontext.mc_gregs[MC_PC])
# define FP_sig(p) ((p)->uc_mcontext.mc_fp)
# define SP_sig(p) ((p)->uc_mcontext.mc_i7)
# endif
# if defined(__linux__) && \
(defined(__ppc64__) || defined (__PPC64__) || defined(__ppc64le__) || defined (__PPC64LE__))
# define R01_sig(p) ((p)->uc_mcontext.gp_regs[1])
# define R32_sig(p) ((p)->uc_mcontext.gp_regs[32])
# endif
#elif defined(__NetBSD__)
# define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP])
# define EBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EBP])
# define ESP_sig(p) ((p)->uc_mcontext.__gregs[_REG_ESP])
# define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP])
# define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP])
# define RBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBP])
# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
# if defined(__aarch64__)
# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC])
# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_X29])
# define RLR_sig(p) ((p)->uc_mcontext.__gregs[_REG_X30])
# define R31_sig(p) ((p)->uc_mcontext.__gregs[_REG_SP])
# endif
# if defined(__mips__)
# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_EPC])
# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_S8])
# endif
#elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# define EIP_sig(p) ((p)->uc_mcontext.mc_eip)
# define EBP_sig(p) ((p)->uc_mcontext.mc_ebp)
# define ESP_sig(p) ((p)->uc_mcontext.mc_esp)
# define RIP_sig(p) ((p)->uc_mcontext.mc_rip)
# define RSP_sig(p) ((p)->uc_mcontext.mc_rsp)
# define RBP_sig(p) ((p)->uc_mcontext.mc_rbp)
# if defined(__FreeBSD__) && defined(__arm__)
# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
# else
# define R11_sig(p) ((p)->uc_mcontext.mc_r11)
# define R13_sig(p) ((p)->uc_mcontext.mc_r13)
# define R14_sig(p) ((p)->uc_mcontext.mc_r14)
# define R15_sig(p) ((p)->uc_mcontext.mc_r15)
# endif
# if defined(__FreeBSD__) && defined(__aarch64__)
# define EPC_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_elr)
# define RFP_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_x[29])
# define RLR_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_lr)
# define R31_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_sp)
# endif
# if defined(__FreeBSD__) && defined(__mips__)
# define EPC_sig(p) ((p)->uc_mcontext.mc_pc)
# define RFP_sig(p) ((p)->uc_mcontext.mc_regs[30])
# endif
#elif defined(USE_APPLE_MACH_PORTS)
# define EIP_sig(p) ((p)->thread.uts.ts32.__eip)
# define EBP_sig(p) ((p)->thread.uts.ts32.__ebp)
# define ESP_sig(p) ((p)->thread.uts.ts32.__esp)
# define RIP_sig(p) ((p)->thread.__rip)
# define RBP_sig(p) ((p)->thread.__rbp)
# define RSP_sig(p) ((p)->thread.__rsp)
# define R11_sig(p) ((p)->thread.__r[11])
# define R13_sig(p) ((p)->thread.__sp)
# define R14_sig(p) ((p)->thread.__lr)
# define R15_sig(p) ((p)->thread.__pc)
#elif defined(__APPLE__)
# define EIP_sig(p) ((p)->uc_mcontext->__ss.__eip)
# define EBP_sig(p) ((p)->uc_mcontext->__ss.__ebp)
# define ESP_sig(p) ((p)->uc_mcontext->__ss.__esp)
# define RIP_sig(p) ((p)->uc_mcontext->__ss.__rip)
# define RBP_sig(p) ((p)->uc_mcontext->__ss.__rbp)
# define RSP_sig(p) ((p)->uc_mcontext->__ss.__rsp)
# define R11_sig(p) ((p)->uc_mcontext->__ss.__r11)
# define R13_sig(p) ((p)->uc_mcontext->__ss.__sp)
# define R14_sig(p) ((p)->uc_mcontext->__ss.__lr)
# define R15_sig(p) ((p)->uc_mcontext->__ss.__pc)
#else
# error "Don't know how to read/write to the thread state via the mcontext_t."
#endif
#if defined(ANDROID)
// Not all versions of the Android NDK define ucontext_t or mcontext_t.
// Detect this and provide custom but compatible definitions. Note that these
// follow the GLibc naming convention to access register values from
// mcontext_t.
//
// See: https://chromiumcodereview.appspot.com/10829122/
// See: http://code.google.com/p/android/issues/detail?id=34784
# if !defined(__BIONIC_HAVE_UCONTEXT_T)
# if defined(__arm__)
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
# if !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
# include <asm/sigcontext.h>
# endif
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used so don't define them here.
} ucontext_t;
# elif defined(__mips__)
typedef struct {
uint32_t regmask;
uint32_t status;
uint64_t pc;
uint64_t gregs[32];
uint64_t fpregs[32];
uint32_t acx;
uint32_t fpc_csr;
uint32_t fpc_eir;
uint32_t used_math;
uint32_t dsp;
uint64_t mdhi;
uint64_t mdlo;
uint32_t hi1;
uint32_t lo1;
uint32_t hi2;
uint32_t lo2;
uint32_t hi3;
uint32_t lo3;
} mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used so don't define them here.
} ucontext_t;
# elif defined(__i386__)
// x86 version for Android.
typedef struct {
uint32_t gregs[19];
void* fpregs;
uint32_t oldmask;
uint32_t cr2;
} mcontext_t;
typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used so don't define them here.
} ucontext_t;
enum { REG_EIP = 14 };
# endif // defined(__i386__)
# endif // !defined(__BIONIC_HAVE_UCONTEXT_T)
#endif // defined(ANDROID)
#if defined(USE_APPLE_MACH_PORTS)
# if defined(__x86_64__)
struct macos_x64_context {
x86_thread_state64_t thread;
x86_float_state64_t float_;
};
# define CONTEXT macos_x64_context
# elif defined(__i386__)
struct macos_x86_context {
x86_thread_state_t thread;
x86_float_state_t float_;
};
# define CONTEXT macos_x86_context
# elif defined(__arm__)
struct macos_arm_context {
arm_thread_state_t thread;
arm_neon_state_t float_;
};
# define CONTEXT macos_arm_context
# else
# error Unsupported architecture
# endif
#elif !defined(_WIN32)
# define CONTEXT ucontext_t
#endif
#if defined(_M_X64) || defined(__x86_64__)
# define PC_sig(p) RIP_sig(p)
# define FP_sig(p) RBP_sig(p)
# define SP_sig(p) RSP_sig(p)
#elif defined(_M_IX86) || defined(__i386__)
# define PC_sig(p) EIP_sig(p)
# define FP_sig(p) EBP_sig(p)
# define SP_sig(p) ESP_sig(p)
#elif defined(__arm__)
# define FP_sig(p) R11_sig(p)
# define SP_sig(p) R13_sig(p)
# define LR_sig(p) R14_sig(p)
# define PC_sig(p) R15_sig(p)
#elif defined(_M_ARM64) || defined(__aarch64__)
# define PC_sig(p) EPC_sig(p)
# define FP_sig(p) RFP_sig(p)
# define SP_sig(p) R31_sig(p)
# define LR_sig(p) RLR_sig(p)
#elif defined(__mips__)
# define PC_sig(p) EPC_sig(p)
# define FP_sig(p) RFP_sig(p)
# define SP_sig(p) RSP_sig(p)
# define LR_sig(p) R31_sig(p)
#elif defined(__ppc64__) || defined (__PPC64__) || defined(__ppc64le__) || defined (__PPC64LE__)
# define PC_sig(p) R32_sig(p)
# define SP_sig(p) R01_sig(p)
# define FP_sig(p) R01_sig(p)
#endif
static void
SetContextPC(CONTEXT* context, const uint8_t* pc)
{
#ifdef PC_sig
PC_sig(context) = reinterpret_cast<uintptr_t>(pc);
#else
abort();
#endif
}
static const uint8_t*
ContextToPC(CONTEXT* context)
{
#ifdef PC_sig
return reinterpret_cast<const uint8_t*>(static_cast<uintptr_t>(PC_sig(context)));
#else
abort();
#endif
}
// =============================================================================
// All signals/exceptions funnel down to this one trap-handling function which
// tests whether the pc is in a wasm module and, if so, whether there is
// actually a trap expected at this pc. These tests both avoid real bugs being
// silently converted to wasm traps and provide the trapping wasm bytecode
// offset we need to report in the error.
//
// Crashing inside wasm trap handling (due to a bug in trap handling or exposed
// during trap handling) must be reported like a normal crash, not cause the
// crash report to be lost. On Windows and non-Mach Unix, a crash during the
// handler reenters the handler, possibly repeatedly until exhausting the stack,
// and so we prevent recursion with the thread-local sAlreadyHandlingTrap. On
// Mach, the wasm exception handler has its own thread and is installed only on
// the thread-level debugging ports of our threads, so a crash on the
// exception handler thread will not recurse; it will bubble up to the
// process-level debugging ports (where Breakpad is installed).
// =============================================================================
static thread_local bool sAlreadyHandlingTrap;
namespace {
struct AutoHandlingTrap
{
AutoHandlingTrap() {
assert(!sAlreadyHandlingTrap);
sAlreadyHandlingTrap = true;
}
~AutoHandlingTrap() {
assert(sAlreadyHandlingTrap);
sAlreadyHandlingTrap = false;
}
};
}
static
#if defined(__GNUC__) || defined(__clang__)
__attribute__ ((warn_unused_result))
#endif
bool
HandleTrap(CONTEXT* context)
{
assert(sAlreadyHandlingTrap);
const uint8_t* pc = ContextToPC(context);
const CodeSegment* codeSegment = LookupCodeSegment(pc);
if (!codeSegment) {
return false;
}
RecordTrap(pc, codeSegment);
// Unwind calls longjmp, which bypasses the automatic
// sAlreadyHandlingTrap cleanup, so reset it manually before doing
// the longjmp.
sAlreadyHandlingTrap = false;
#if defined(USE_APPLE_MACH_PORTS)
// Reroute the PC to run the Unwind function on the main stack after the
// handler exits. This doesn't yet work for stack overflow traps, because
// in that case the main thread doesn't have any space left to run.
SetContextPC(context, reinterpret_cast<const uint8_t*>(&Unwind));
#else
// For now, just call Unwind directly, rather than redirecting the PC there,
// so that it runs on the alternate signal handler stack. To run on the main
// stack, reroute the context PC as in the USE_APPLE_MACH_PORTS branch above.
Unwind();
#endif
return true;
}
// =============================================================================
// The following platform-specific handlers funnel all signals/exceptions into
// the shared HandleTrap() above.
// =============================================================================
#if defined(_WIN32)
// Obtained empirically from thread_local codegen on x86/x64/arm64.
// Compiled in all user binaries, so should be stable over time.
static const unsigned sThreadLocalArrayPointerIndex = 11;
static LONG WINAPI
WasmTrapHandler(LPEXCEPTION_POINTERS exception)
{
// Make sure TLS is initialized before reading sAlreadyHandlingTrap.
if (!NtCurrentTeb()->Reserved1[sThreadLocalArrayPointerIndex]) {
return EXCEPTION_CONTINUE_SEARCH;
}
if (sAlreadyHandlingTrap) {
return EXCEPTION_CONTINUE_SEARCH;
}
AutoHandlingTrap aht;
EXCEPTION_RECORD* record = exception->ExceptionRecord;
if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION &&
record->ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION)
{
return EXCEPTION_CONTINUE_SEARCH;
}
if (!HandleTrap(exception->ContextRecord)) {
return EXCEPTION_CONTINUE_SEARCH;
}
return EXCEPTION_CONTINUE_EXECUTION;
}
#elif defined(USE_APPLE_MACH_PORTS)
// On OSX we are forced to use the lower-level Mach exception mechanism instead
// of Unix signals because breakpad uses Mach exceptions and would otherwise
// report a crash before wasm gets a chance to handle the exception.
// This definition was generated by mig (the Mach Interface Generator) for the
// routine 'exception_raise' (exc.defs).
#pragma pack(4)
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t thread;
mach_msg_port_descriptor_t task;
/* end of the kernel processed data */
NDR_record_t NDR;
exception_type_t exception;
mach_msg_type_number_t codeCnt;
int64_t code[2];
} Request__mach_exception_raise_t;
#pragma pack()
// The full Mach message also includes a trailer.
struct ExceptionRequest
{
Request__mach_exception_raise_t body;
mach_msg_trailer_t trailer;
};
static bool
HandleMachException(const ExceptionRequest& request)
{
// Get the port of the thread from the message.
mach_port_t cxThread = request.body.thread.name;
// Read out the thread's register state.
CONTEXT context;
# if defined(__x86_64__)
unsigned int thread_state_count = x86_THREAD_STATE64_COUNT;
unsigned int float_state_count = x86_FLOAT_STATE64_COUNT;
int thread_state = x86_THREAD_STATE64;
int float_state = x86_FLOAT_STATE64;
# elif defined(__i386__)
unsigned int thread_state_count = x86_THREAD_STATE_COUNT;
unsigned int float_state_count = x86_FLOAT_STATE_COUNT;
int thread_state = x86_THREAD_STATE;
int float_state = x86_FLOAT_STATE;
# elif defined(__arm__)
unsigned int thread_state_count = ARM_THREAD_STATE_COUNT;
unsigned int float_state_count = ARM_NEON_STATE_COUNT;
int thread_state = ARM_THREAD_STATE;
int float_state = ARM_NEON_STATE;
# else
# error Unsupported architecture
# endif
kern_return_t kret;
kret = thread_get_state(cxThread, thread_state,
(thread_state_t)&context.thread, &thread_state_count);
if (kret != KERN_SUCCESS) {
return false;
}
kret = thread_get_state(cxThread, float_state,
(thread_state_t)&context.float_, &float_state_count);
if (kret != KERN_SUCCESS) {
return false;
}
if (request.body.exception != EXC_BAD_ACCESS &&
request.body.exception != EXC_BAD_INSTRUCTION)
{
return false;
}
{
AutoHandlingTrap aht;
if (!HandleTrap(&context)) {
return false;
}
}
// Update the thread state with the new pc and register values.
kret = thread_set_state(cxThread, float_state, (thread_state_t)&context.float_, float_state_count);
if (kret != KERN_SUCCESS) {
return false;
}
kret = thread_set_state(cxThread, thread_state, (thread_state_t)&context.thread, thread_state_count);
if (kret != KERN_SUCCESS) {
return false;
}
return true;
}
static mach_port_t sMachDebugPort = MACH_PORT_NULL;
static void*
MachExceptionHandlerThread(void* arg)
{
// Taken from mach_exc in /usr/include/mach/mach_exc.defs.
static const unsigned EXCEPTION_MSG_ID = 2405;
while (true) {
ExceptionRequest request;
kern_return_t kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
sMachDebugPort, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
// If we fail even receiving the message, we can't even send a reply!
// Rather than hanging the faulting thread (and the whole process), crash.
if (kret != KERN_SUCCESS) {
fprintf(stderr, "MachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret);
abort();
}
if (request.body.Head.msgh_id != EXCEPTION_MSG_ID) {
fprintf(stderr, "Unexpected msg header id %d\n", (int)request.body.Head.msgh_bits);
abort();
}
// Some thread just committed an EXC_BAD_ACCESS and has been suspended by
// the kernel. The kernel is waiting for us to reply with instructions.
// Our default is the "not handled" reply (by setting the RetCode field
// of the reply to KERN_FAILURE) which tells the kernel to continue
// searching at the process and system level. If this is an
// expected exception, we handle it and return KERN_SUCCESS.
bool handled = HandleMachException(request);
kern_return_t replyCode = handled ? KERN_SUCCESS : KERN_FAILURE;
// This magic incantation to send a reply back to the kernel was
// derived from the exc_server generated by
// 'mig -v /usr/include/mach/mach_exc.defs'.
__Reply__exception_raise_t reply;
reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
reply.Head.msgh_size = sizeof(reply);
reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
reply.Head.msgh_local_port = MACH_PORT_NULL;
reply.Head.msgh_id = request.body.Head.msgh_id + 100;
reply.NDR = NDR_record;
reply.RetCode = replyCode;
mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
return nullptr;
}
#else // If not Windows or Mac, assume Unix
static struct sigaction sPrevSIGSEGVHandler;
static struct sigaction sPrevSIGBUSHandler;
static struct sigaction sPrevSIGILLHandler;
static struct sigaction sPrevSIGFPEHandler;
static void
WasmTrapHandler(int signum, siginfo_t* info, void* context)
{
if (!sAlreadyHandlingTrap) {
AutoHandlingTrap aht;
assert(signum == SIGSEGV || signum == SIGBUS || signum == SIGFPE || signum == SIGILL);
if (HandleTrap(static_cast<CONTEXT*>(context))) {
return;
}
}
struct sigaction* previousSignal = nullptr;
switch (signum) {
case SIGSEGV: previousSignal = &sPrevSIGSEGVHandler; break;
case SIGBUS: previousSignal = &sPrevSIGBUSHandler; break;
case SIGFPE: previousSignal = &sPrevSIGFPEHandler; break;
case SIGILL: previousSignal = &sPrevSIGILLHandler; break;
}
assert(previousSignal);
// This signal is not for any JIT code we expect, so we need to forward
// the signal to the next handler. If there is no next handler (SIG_IGN or
// SIG_DFL), then it's time to crash. To do this, we set the signal back to
// its original disposition and return. This will cause the faulting op to
// be re-executed which will crash in the normal way. The advantage of
// doing this over calling _exit() is that we remove ourselves from the crash
// stack which improves crash reports. If there is a next handler, call it.
// It will either crash synchronously, fix up the instruction so that
// execution can continue and return, or trigger a crash by returning the
// signal to its original disposition and returning.
//
// Note: the order of these tests matters.
if (previousSignal->sa_flags & SA_SIGINFO) {
previousSignal->sa_sigaction(signum, info, context);
} else if (previousSignal->sa_handler == SIG_DFL || previousSignal->sa_handler == SIG_IGN) {
sigaction(signum, previousSignal, nullptr);
} else {
previousSignal->sa_handler(signum);
}
}
# endif // _WIN32 || __APPLE__ || assume unix
#if defined(ANDROID) && defined(MOZ_LINKER)
extern "C" MFBT_API bool IsSignalHandlingBroken();
#endif
bool
EnsureEagerSignalHandlers()
{
#if defined(ANDROID) && defined(MOZ_LINKER)
// Signal handling is broken on some android systems.
if (IsSignalHandlingBroken()) {
return false;
}
#endif
sAlreadyHandlingTrap = false;
// Install whatever exception/signal handler is appropriate for the OS.
#if defined(_WIN32)
# if defined(MOZ_ASAN)
// Under ASan we need to let the ASan runtime's ShadowExceptionHandler stay
// in the first handler position. This requires some coordination with
// MemoryProtectionExceptionHandler::isDisabled().
const bool firstHandler = false;
# else
// Otherwise, WasmTrapHandler needs to go first, so that we can recover
// from wasm faults and continue execution without triggering handlers
// such as MemoryProtectionExceptionHandler that assume we are crashing.
const bool firstHandler = true;
# endif
if (!AddVectoredExceptionHandler(firstHandler, WasmTrapHandler)) {
// Windows has all sorts of random security knobs for disabling things
// so make this a dynamic failure that disables wasm, not an abort().
return false;
}
#elif defined(USE_APPLE_MACH_PORTS)
// All the Mach setup is done in EnsureDarwinMachPorts.
#else
// SA_ONSTACK allows us to handle signals on an alternate stack, so that
// the handler can run in response to running out of stack space on the
// main stack. Rust installs an alternate stack with sigaltstack, so we
// rely on that.
// SA_NODEFER allows us to reenter the signal handler if we crash while
// handling the signal, and fall through to the previous handler by testing
// sAlreadyHandlingTrap.
// Allow handling OOB with signals on all architectures
struct sigaction faultHandler;
faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
faultHandler.sa_sigaction = WasmTrapHandler;
sigemptyset(&faultHandler.sa_mask);
if (sigaction(SIGSEGV, &faultHandler, &sPrevSIGSEGVHandler)) {
perror("unable to install SIGSEGV handler");
abort();
}
# if defined(__arm__) || defined(__APPLE__)
// On ARM, handle Unaligned Accesses.
// On Darwin, guard page accesses are raised as SIGBUS.
struct sigaction busHandler;
busHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
busHandler.sa_sigaction = WasmTrapHandler;
sigemptyset(&busHandler.sa_mask);
if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler)) {
perror("unable to install SIGBUS handler");
abort();
}
# endif
# if !defined(__mips__)
// Wasm traps for MIPS currently only raise integer overflow fp exception.
struct sigaction illHandler;
illHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
illHandler.sa_sigaction = WasmTrapHandler;
sigemptyset(&illHandler.sa_mask);
if (sigaction(SIGILL, &illHandler, &sPrevSIGILLHandler)) {
perror("unable to install wasm SIGILL handler");
abort();
}
# endif
# if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
// x86 uses SIGFPE to report division by zero, and wasm traps for MIPS
// currently raise integer overflow fp exception.
struct sigaction fpeHandler;
fpeHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
fpeHandler.sa_sigaction = WasmTrapHandler;
sigemptyset(&fpeHandler.sa_mask);
if (sigaction(SIGFPE, &fpeHandler, &sPrevSIGFPEHandler)) {
perror("unable to install wasm SIGFPE handler");
abort();
}
# endif
#endif
return true;
}
bool
EnsureDarwinMachPorts()
{
#ifdef USE_APPLE_MACH_PORTS
pthread_attr_t handlerThreadAttr;
int r = pthread_attr_init(&handlerThreadAttr);
if (r != 0) {
return false;
}
// Create the port that all of our threads will redirect their traps to.
kern_return_t kret;
kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &sMachDebugPort);
if (kret != KERN_SUCCESS) {
return false;
}
kret = mach_port_insert_right(mach_task_self(), sMachDebugPort, sMachDebugPort,
MACH_MSG_TYPE_MAKE_SEND);
if (kret != KERN_SUCCESS) {
return false;
}
// Create the thread that will wait on and service sMachDebugPort.
// It's not useful to destroy this thread on process shutdown so
// immediately detach on successful start.
pthread_t handlerThread;
r = pthread_create(&handlerThread, &handlerThreadAttr, MachExceptionHandlerThread, nullptr);
if (r != 0) {
return false;
}
r = pthread_detach(handlerThread);
assert(r == 0);
// In addition to the process-wide signal handler setup, OSX needs each
// thread configured to send its exceptions to sMachDebugPort. While there
// are also task-level (i.e. process-level) exception ports, those are
// "claimed" by breakpad and chaining Mach exceptions is dark magic that we
// avoid by instead intercepting exceptions at the thread level before they
// propagate to the process-level. This works because there are no other
// uses of thread-level exception ports.
assert(sMachDebugPort != MACH_PORT_NULL);
thread_port_t thisThread = mach_thread_self();
kret = thread_set_exception_ports(thisThread,
EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
sMachDebugPort,
EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
THREAD_STATE_NONE);
mach_port_deallocate(mach_task_self(), thisThread);
if (kret != KERN_SUCCESS) {
return false;
}
#endif
return true;
}

55
lib/runtime/signalhandlers/SignalHandlers.h Normal file

@@ -0,0 +1,55 @@
#ifndef signal_handlers_h
#define signal_handlers_h
#include <stdint.h>
#include <setjmp.h>
#ifndef __cplusplus
#include <stdbool.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
struct CodeSegment;
// Record the Trap code and wasm bytecode offset in TLS somewhere
void RecordTrap(const uint8_t* pc, const struct CodeSegment* codeSegment);
// Initiate an unwind.
void Unwind(void);
// Return the CodeSegment containing the given pc, if any exist in the process.
// This method does not take a lock.
const struct CodeSegment*
LookupCodeSegment(const void* pc);
// Trap initialization state.
struct TrapContext {
bool triedToInstallSignalHandlers;
bool haveSignalHandlers;
};
// This function performs the low-overhead signal handler initialization that we
// want to do eagerly to ensure a more-deterministic global process state. This
// is especially relevant for signal handlers since handler ordering depends on
// installation order: the wasm signal handler must run *before* the other crash
// handlers and since POSIX signal handlers work LIFO, this function needs to be
// called at the end of the startup process, after other handlers have been
// installed. This function can thus be called multiple times, having no effect
// after the first call.
bool
EnsureEagerSignalHandlers(void);
// Assuming EnsureEagerSignalHandlers() has already been called,
// this function performs the full installation of signal handlers which must
// be performed per-thread. This operation may incur some overhead and
// so should be done only when needed to use wasm.
bool
EnsureDarwinMachPorts(void);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // signal_handlers_h
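
`LookupCodeSegment`, `RecordTrap`, and `Unwind` are only declared here; the C++ handlers expect the embedding runtime to supply them. A hedged sketch of what Rust-side definitions could look like (the names match the header, but the bodies and the `CodeSegment` representation are assumptions, not the real `traphandlers.rs`):

```rust
// Hypothetical Rust implementations of the callbacks declared in SignalHandlers.h.
use std::os::raw::c_void;
use std::ptr;

/// Opaque in this sketch; the real type describes a region of JIT code.
#[repr(C)]
pub struct CodeSegment {
    _private: [u8; 0],
}

#[no_mangle]
pub extern "C" fn LookupCodeSegment(_pc: *const c_void) -> *const CodeSegment {
    // Would search the registered code regions for one containing `pc`.
    ptr::null()
}

#[no_mangle]
pub extern "C" fn RecordTrap(_pc: *const u8, _code_segment: *const CodeSegment) {
    // Would stash the trapping pc (e.g. in a thread-local) for later reporting.
}

#[no_mangle]
pub extern "C" fn Unwind() {
    // Would longjmp/unwind back to the trampoline that entered wasm code.
}
```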

46
lib/runtime/src/imports.rs Normal file

@@ -0,0 +1,46 @@
use cranelift_entity::{BoxedSlice, PrimaryMap};
use cranelift_wasm::{FuncIndex, GlobalIndex, MemoryIndex, TableIndex};
use vmcontext::{VMFunctionBody, VMGlobalImport, VMMemoryImport, VMTableImport};
/// Resolved import pointers.
#[derive(Debug)]
pub struct Imports {
/// Resolved addresses for imported functions.
pub functions: BoxedSlice<FuncIndex, *const VMFunctionBody>,
/// Resolved addresses for imported tables.
pub tables: BoxedSlice<TableIndex, VMTableImport>,
/// Resolved addresses for imported memories.
pub memories: BoxedSlice<MemoryIndex, VMMemoryImport>,
/// Resolved addresses for imported globals.
pub globals: BoxedSlice<GlobalIndex, VMGlobalImport>,
}
impl Imports {
/// Construct a new `Imports` instance.
pub fn new(
function_imports: PrimaryMap<FuncIndex, *const VMFunctionBody>,
table_imports: PrimaryMap<TableIndex, VMTableImport>,
memory_imports: PrimaryMap<MemoryIndex, VMMemoryImport>,
global_imports: PrimaryMap<GlobalIndex, VMGlobalImport>,
) -> Self {
Self {
functions: function_imports.into_boxed_slice(),
tables: table_imports.into_boxed_slice(),
memories: memory_imports.into_boxed_slice(),
globals: global_imports.into_boxed_slice(),
}
}
/// Construct a new `Imports` instance with no imports.
pub fn none() -> Self {
Self {
functions: PrimaryMap::new().into_boxed_slice(),
tables: PrimaryMap::new().into_boxed_slice(),
memories: PrimaryMap::new().into_boxed_slice(),
globals: PrimaryMap::new().into_boxed_slice(),
}
}
}
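
As a usage sketch, an embedder importing a single host function (and nothing else) might populate `Imports` like this; the host function and its cast are illustrative assumptions:

```rust
use cranelift_entity::PrimaryMap;
use cranelift_wasm::FuncIndex;
use wasmtime_runtime::{Imports, VMFunctionBody};

// Hypothetical host function provided to the module as a function import.
extern "C" fn host_hello() {}

fn resolve_imports() -> Imports {
    let mut functions: PrimaryMap<FuncIndex, *const VMFunctionBody> = PrimaryMap::new();
    functions.push(host_hello as *const VMFunctionBody);
    // No table, memory, or global imports in this sketch.
    Imports::new(functions, PrimaryMap::new(), PrimaryMap::new(), PrimaryMap::new())
}
```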

256
lib/runtime/src/instance.rs Normal file

@@ -0,0 +1,256 @@
//! An `Instance` contains all the runtime state used by execution of a wasm
//! module.
use cranelift_entity::EntityRef;
use cranelift_entity::{BoxedSlice, PrimaryMap};
use cranelift_wasm::{
DefinedFuncIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, FuncIndex,
};
use imports::Imports;
use memory::LinearMemory;
use sig_registry::SignatureRegistry;
use std::string::String;
use table::Table;
use vmcontext::{
VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMGlobalDefinition, VMMemoryDefinition,
VMTableDefinition,
};
use wasmtime_environ::{DataInitializer, Module};
/// An Instance of a WebAssembly module.
#[derive(Debug)]
pub struct Instance {
/// WebAssembly linear memory data.
memories: BoxedSlice<DefinedMemoryIndex, LinearMemory>,
/// WebAssembly table data.
tables: BoxedSlice<DefinedTableIndex, Table>,
/// Function Signature IDs.
/// FIXME: This should be shared across instances rather than per-Instance.
sig_registry: SignatureRegistry,
/// Resolved imports.
vmctx_imports: Imports,
/// Table storage base address vector pointed to by vmctx.
vmctx_tables: BoxedSlice<DefinedTableIndex, VMTableDefinition>,
/// Memory base address vector pointed to by vmctx.
vmctx_memories: BoxedSlice<DefinedMemoryIndex, VMMemoryDefinition>,
/// WebAssembly global variable data.
vmctx_globals: BoxedSlice<DefinedGlobalIndex, VMGlobalDefinition>,
/// Context pointer used by JIT code.
vmctx: VMContext,
}
impl Instance {
/// Create a new `Instance`.
pub fn new(
module: &Module,
finished_functions: &BoxedSlice<DefinedFuncIndex, *const VMFunctionBody>,
mut vmctx_imports: Imports,
data_initializers: &[DataInitializer],
) -> Result<Self, String> {
let mut sig_registry = instantiate_signatures(module);
let mut memories = instantiate_memories(module, data_initializers)?;
let mut tables = instantiate_tables(
module,
finished_functions,
&vmctx_imports.functions,
&mut sig_registry,
);
let mut vmctx_memories = memories
.values_mut()
.map(LinearMemory::vmmemory)
.collect::<PrimaryMap<DefinedMemoryIndex, _>>()
.into_boxed_slice();
let mut vmctx_globals = instantiate_globals(module);
let mut vmctx_tables = tables
.values_mut()
.map(Table::vmtable)
.collect::<PrimaryMap<DefinedTableIndex, _>>()
.into_boxed_slice();
let vmctx_imported_functions_ptr = vmctx_imports
.functions
.values_mut()
.into_slice()
.as_mut_ptr();
let vmctx_imported_tables_ptr = vmctx_imports.tables.values_mut().into_slice().as_mut_ptr();
let vmctx_imported_memories_ptr = vmctx_imports
.memories
.values_mut()
.into_slice()
.as_mut_ptr();
let vmctx_imported_globals_ptr =
vmctx_imports.globals.values_mut().into_slice().as_mut_ptr();
let vmctx_memories_ptr = vmctx_memories.values_mut().into_slice().as_mut_ptr();
let vmctx_globals_ptr = vmctx_globals.values_mut().into_slice().as_mut_ptr();
let vmctx_tables_ptr = vmctx_tables.values_mut().into_slice().as_mut_ptr();
let vmctx_shared_signatures_ptr = sig_registry.vmshared_signatures();
Ok(Self {
memories,
tables,
sig_registry,
vmctx_imports,
vmctx_memories,
vmctx_globals,
vmctx_tables,
vmctx: VMContext::new(
vmctx_imported_functions_ptr,
vmctx_imported_tables_ptr,
vmctx_imported_memories_ptr,
vmctx_imported_globals_ptr,
vmctx_tables_ptr,
vmctx_memories_ptr,
vmctx_globals_ptr,
vmctx_shared_signatures_ptr,
),
})
}
/// Return a reference to the vmctx used by JIT code.
pub fn vmctx(&self) -> &VMContext {
&self.vmctx
}
/// Return a mutable reference to the vmctx used by JIT code.
pub fn vmctx_mut(&mut self) -> &mut VMContext {
&mut self.vmctx
}
/// Return the offset from the vmctx pointer to its containing Instance.
pub(crate) fn vmctx_offset() -> isize {
offset_of!(Self, vmctx) as isize
}
/// Grow memory by the specified amount of pages.
///
/// Returns `None` if memory can't be grown by the specified amount
/// of pages.
pub fn memory_grow(&mut self, memory_index: DefinedMemoryIndex, delta: u32) -> Option<u32> {
let result = self
.memories
.get_mut(memory_index)
.unwrap_or_else(|| panic!("no memory for index {}", memory_index.index()))
.grow(delta);
// Keep current the VMContext pointers used by JIT code.
self.vmctx_memories[memory_index] = self.memories[memory_index].vmmemory();
result
}
/// Returns the number of allocated wasm pages.
pub fn memory_size(&mut self, memory_index: DefinedMemoryIndex) -> u32 {
self.memories
.get(memory_index)
.unwrap_or_else(|| panic!("no memory for index {}", memory_index.index()))
.size()
}
/// Test whether any of the objects inside this instance require signal
/// handlers to catch out of bounds accesses.
pub(crate) fn needs_signal_handlers(&self) -> bool {
self.memories
.values()
.any(|memory| memory.needs_signal_handlers)
}
/// Return the number of imported memories.
pub(crate) fn num_imported_memories(&self) -> usize {
self.vmctx_imports.memories.len()
}
}
fn instantiate_signatures(module: &Module) -> SignatureRegistry {
let mut sig_registry = SignatureRegistry::new();
for (sig_index, sig) in module.signatures.iter() {
sig_registry.register(sig_index, sig);
}
sig_registry
}
/// Allocate memory for just the tables of the current module.
fn instantiate_tables(
module: &Module,
finished_functions: &BoxedSlice<DefinedFuncIndex, *const VMFunctionBody>,
imported_functions: &BoxedSlice<FuncIndex, *const VMFunctionBody>,
sig_registry: &mut SignatureRegistry,
) -> BoxedSlice<DefinedTableIndex, Table> {
let num_imports = module.imported_tables.len();
let mut tables: PrimaryMap<DefinedTableIndex, _> =
PrimaryMap::with_capacity(module.table_plans.len() - num_imports);
for table in &module.table_plans.values().as_slice()[num_imports..] {
tables.push(Table::new(table));
}
for init in &module.table_elements {
debug_assert!(init.base.is_none(), "globalvar base not supported yet");
let defined_table_index = module
.defined_table_index(init.table_index)
.expect("Initializers for imported tables not supported yet");
let slice = tables[defined_table_index].as_mut();
let subslice = &mut slice[init.offset..init.offset + init.elements.len()];
for (i, func_idx) in init.elements.iter().enumerate() {
let callee_sig = module.functions[*func_idx];
let func_ptr = if let Some(index) = module.defined_func_index(*func_idx) {
finished_functions[index]
} else {
imported_functions[*func_idx]
};
let type_index = sig_registry.lookup(callee_sig);
subslice[i] = VMCallerCheckedAnyfunc {
func_ptr,
type_index,
};
}
}
tables.into_boxed_slice()
}
/// Allocate memory for just the memories of the current module.
fn instantiate_memories(
module: &Module,
data_initializers: &[DataInitializer],
) -> Result<BoxedSlice<DefinedMemoryIndex, LinearMemory>, String> {
let num_imports = module.imported_memories.len();
let mut memories: PrimaryMap<DefinedMemoryIndex, _> =
PrimaryMap::with_capacity(module.memory_plans.len() - num_imports);
for plan in &module.memory_plans.values().as_slice()[num_imports..] {
memories.push(LinearMemory::new(&plan)?);
}
for init in data_initializers {
debug_assert!(init.base.is_none(), "globalvar base not supported yet");
let defined_memory_index = module
.defined_memory_index(init.memory_index)
.expect("Initializers for imported memories not supported yet");
let mem_mut = memories[defined_memory_index].as_mut();
let to_init = &mut mem_mut[init.offset..init.offset + init.data.len()];
to_init.copy_from_slice(init.data);
}
Ok(memories.into_boxed_slice())
}
/// Allocate memory for just the globals of the current module,
/// without any initializers applied yet.
fn instantiate_globals(module: &Module) -> BoxedSlice<DefinedGlobalIndex, VMGlobalDefinition> {
let num_imports = module.imported_globals.len();
let mut vmctx_globals = PrimaryMap::with_capacity(module.globals.len() - num_imports);
for global in &module.globals.values().as_slice()[num_imports..] {
vmctx_globals.push(VMGlobalDefinition::new(global));
}
vmctx_globals.into_boxed_slice()
}
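
A small usage sketch for the memory methods above, assuming the instance has at least one locally-defined memory at index 0:

```rust
use cranelift_entity::EntityRef;
use cranelift_wasm::DefinedMemoryIndex;
use wasmtime_runtime::Instance;

fn grow_one_page(instance: &mut Instance) {
    let mem = DefinedMemoryIndex::new(0);
    // Like wasm's memory.grow, this returns None when the request can't be met.
    if instance.memory_grow(mem, 1).is_some() {
        let pages = instance.memory_size(mem);
        debug_assert!(pages >= 1);
    }
}
```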

70
lib/runtime/src/lib.rs Normal file

@@ -0,0 +1,70 @@
//! Runtime library support for Wasmtime.
#![deny(missing_docs, trivial_numeric_casts, unused_extern_crates)]
#![warn(unused_import_braces)]
#![cfg_attr(feature = "std", deny(unstable_features))]
#![cfg_attr(feature = "clippy", plugin(clippy(conf_file = "../../clippy.toml")))]
#![cfg_attr(
feature = "cargo-clippy",
allow(clippy::new_without_default, clippy::new_without_default_derive)
)]
#![cfg_attr(
feature = "cargo-clippy",
warn(
clippy::float_arithmetic,
clippy::mut_mut,
clippy::nonminimal_bool,
clippy::option_map_unwrap_or,
clippy::option_map_unwrap_or_else,
clippy::print_stdout,
clippy::unicode_not_nfc,
clippy::use_self
)
)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(feature = "std"), feature(alloc))]
extern crate cranelift_codegen;
extern crate cranelift_entity;
extern crate cranelift_wasm;
extern crate errno;
extern crate region;
extern crate wasmtime_environ;
#[cfg(not(feature = "std"))]
#[macro_use]
extern crate alloc;
#[macro_use]
extern crate lazy_static;
extern crate libc;
#[macro_use]
extern crate memoffset;
extern crate cast;
mod imports;
mod instance;
mod memory;
mod mmap;
mod sig_registry;
mod signalhandlers;
mod table;
mod traphandlers;
mod vmcontext;
pub mod libcalls;
pub use imports::Imports;
pub use instance::Instance;
pub use mmap::Mmap;
pub use signalhandlers::{wasmtime_init_eager, wasmtime_init_finish};
pub use traphandlers::wasmtime_call_trampoline;
pub use vmcontext::{
VMContext, VMFunctionBody, VMGlobalDefinition, VMGlobalImport, VMMemoryDefinition,
VMMemoryImport, VMTableDefinition, VMTableImport,
};
#[cfg(not(feature = "std"))]
mod std {
pub use alloc::{string, vec};
pub use core::*;
pub use core::{i32, str, u32};
}

157
lib/runtime/src/libcalls.rs Normal file

@@ -0,0 +1,157 @@
//! Runtime library calls. Note that the JIT may sometimes perform these inline
//! rather than calling them, particularly when CPUs have special instructions
//! which compute them directly.
use cranelift_wasm::{DefinedMemoryIndex, MemoryIndex};
use vmcontext::VMContext;
/// Implementation of f32.ceil
pub extern "C" fn wasmtime_f32_ceil(x: f32) -> f32 {
x.ceil()
}
/// Implementation of f32.floor
pub extern "C" fn wasmtime_f32_floor(x: f32) -> f32 {
x.floor()
}
/// Implementation of f32.trunc
pub extern "C" fn wasmtime_f32_trunc(x: f32) -> f32 {
x.trunc()
}
/// Implementation of f32.nearest
#[allow(clippy::float_arithmetic, clippy::float_cmp)]
pub extern "C" fn wasmtime_f32_nearest(x: f32) -> f32 {
// Rust doesn't have a nearest function, so do it manually.
if x == 0.0 {
// Preserve the sign of zero.
x
} else {
// Nearest is either ceil or floor, whichever is closer; ties round to even.
let u = x.ceil();
let d = x.floor();
let um = (x - u).abs();
let dm = (x - d).abs();
if um < dm
|| (um == dm && {
let h = u / 2.;
h.floor() == h
})
{
u
} else {
d
}
}
}
/// Implementation of f64.ceil
pub extern "C" fn wasmtime_f64_ceil(x: f64) -> f64 {
x.ceil()
}
/// Implementation of f64.floor
pub extern "C" fn wasmtime_f64_floor(x: f64) -> f64 {
x.floor()
}
/// Implementation of f64.trunc
pub extern "C" fn wasmtime_f64_trunc(x: f64) -> f64 {
x.trunc()
}
/// Implementation of f64.nearest
#[allow(clippy::float_arithmetic, clippy::float_cmp)]
pub extern "C" fn wasmtime_f64_nearest(x: f64) -> f64 {
// Rust doesn't have a nearest function, so do it manually.
if x == 0.0 {
// Preserve the sign of zero.
x
} else {
// Nearest is either ceil or floor, whichever is closer; ties round to even.
let u = x.ceil();
let d = x.floor();
let um = (x - u).abs();
let dm = (x - d).abs();
if um < dm
|| (um == dm && {
let h = u / 2.;
h.floor() == h
})
{
u
} else {
d
}
}
}
/// Implementation of memory.grow for locally-defined 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_memory32_grow(
delta: u32,
memory_index: u32,
vmctx: *mut VMContext,
) -> u32 {
let instance = (&mut *vmctx).instance();
let memory_index = DefinedMemoryIndex::from_u32(memory_index);
instance
.memory_grow(memory_index, delta)
.unwrap_or(u32::max_value())
}
/// Implementation of memory.grow for imported 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_imported_memory32_grow(
delta: u32,
memory_index: u32,
vmctx: *mut VMContext,
) -> u32 {
let instance = (&mut *vmctx).instance();
assert!(
(memory_index as usize) < instance.num_imported_memories(),
"imported memory index for memory.grow out of bounds"
);
let memory_index = MemoryIndex::from_u32(memory_index);
let import = instance.vmctx_mut().imported_memory_mut(memory_index);
let foreign_instance = (&mut *import.vmctx).instance();
let foreign_memory = &mut *import.from;
let foreign_index = foreign_instance.vmctx().memory_index(foreign_memory);
foreign_instance
.memory_grow(foreign_index, delta)
.unwrap_or(u32::max_value())
}
/// Implementation of memory.size for locally-defined 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_memory32_size(memory_index: u32, vmctx: *mut VMContext) -> u32 {
let instance = (&mut *vmctx).instance();
let memory_index = DefinedMemoryIndex::from_u32(memory_index);
instance.memory_size(memory_index)
}
/// Implementation of memory.size for imported 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_imported_memory32_size(
memory_index: u32,
vmctx: *mut VMContext,
) -> u32 {
let instance = (&mut *vmctx).instance();
assert!(
(memory_index as usize) < instance.num_imported_memories(),
"imported memory index for memory.grow out of bounds"
);
let memory_index = MemoryIndex::from_u32(memory_index);
let import = instance.vmctx_mut().imported_memory_mut(memory_index);
let foreign_instance = (&mut *import.vmctx).instance();
let foreign_memory = &mut *import.from;
let foreign_index = foreign_instance.vmctx().memory_index(foreign_memory);
foreign_instance.memory_size(foreign_index)
}

168
lib/runtime/src/memory.rs Normal file
View File

@@ -0,0 +1,168 @@
//! Memory management for linear memories.
//!
//! `LinearMemory` is to WebAssembly linear memories what `Table` is to WebAssembly tables.
use mmap::Mmap;
use region;
use std::string::String;
use vmcontext::VMMemoryDefinition;
use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};
/// A linear memory instance.
#[derive(Debug)]
pub struct LinearMemory {
// The underlying allocation.
mmap: Mmap,
// The current logical size in wasm pages of this linear memory.
current: u32,
// The optional maximum size in wasm pages of this linear memory.
maximum: Option<u32>,
// Size in bytes of extra guard pages after the end to optimize loads and stores with
// constant offsets.
offset_guard_size: usize,
// Records whether we're using a bounds-checking strategy which requires
// handlers to catch trapping accesses.
pub(crate) needs_signal_handlers: bool,
}
impl LinearMemory {
/// Create a new linear memory instance with specified minimum and maximum number of wasm pages.
pub fn new(plan: &MemoryPlan) -> Result<Self, String> {
// Neither the minimum nor the maximum (if any) can exceed `WASM_MAX_PAGES` (65536) pages.
assert!(plan.memory.minimum <= WASM_MAX_PAGES);
assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= WASM_MAX_PAGES);
let offset_guard_bytes = plan.offset_guard_size as usize;
// If we have an offset guard, or if we're doing the static memory
// allocation strategy, we need signal handlers to catch out of bounds
// accesses.
let needs_signal_handlers = offset_guard_bytes > 0
|| match plan.style {
MemoryStyle::Dynamic => false,
MemoryStyle::Static { .. } => true,
};
let minimum_pages = match plan.style {
MemoryStyle::Dynamic => plan.memory.minimum,
MemoryStyle::Static { bound } => {
assert!(bound >= plan.memory.minimum);
bound
}
} as usize;
let minimum_bytes = minimum_pages.checked_mul(WASM_PAGE_SIZE as usize).unwrap();
let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
let mapped_pages = plan.memory.minimum as usize;
let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
let unmapped_pages = minimum_pages - mapped_pages;
let unmapped_bytes = unmapped_pages * WASM_PAGE_SIZE as usize;
let inaccessible_bytes = unmapped_bytes + offset_guard_bytes;
let mmap = Mmap::with_size(request_bytes)?;
// Make the unmapped and offset-guard pages inaccessible.
unsafe {
region::protect(
mmap.as_ptr().add(mapped_bytes),
inaccessible_bytes,
region::Protection::None,
)
}
.expect("unable to make memory inaccessible");
Ok(Self {
mmap,
current: plan.memory.minimum,
maximum: plan.memory.maximum,
offset_guard_size: offset_guard_bytes,
needs_signal_handlers,
})
}
/// Returns the current logical size of this linear memory, in wasm pages.
pub fn size(&self) -> u32 {
self.current
}
/// Grow memory by the specified amount of wasm pages.
///
/// Returns `None` if memory can't be grown by the specified amount
/// of wasm pages.
pub fn grow(&mut self, delta: u32) -> Option<u32> {
let new_pages = match self.current.checked_add(delta) {
Some(new_pages) => new_pages,
// Linear memory size overflow.
None => return None,
};
let prev_pages = self.current;
if let Some(maximum) = self.maximum {
if new_pages > maximum {
// Linear memory size would exceed the declared maximum.
return None;
}
}
// Wasm linear memories are never allowed to grow beyond what is
// indexable. If the memory has no maximum, enforce the greatest
// limit here.
if new_pages >= WASM_MAX_PAGES {
// Linear memory size would exceed the index range.
return None;
}
let new_bytes = new_pages as usize * WASM_PAGE_SIZE as usize;
if new_bytes > self.mmap.len() - self.offset_guard_size {
// If we have no maximum, this is a "dynamic" heap, and it's allowed to move.
assert!(self.maximum.is_none());
let guard_bytes = self.offset_guard_size;
let request_bytes = new_bytes.checked_add(guard_bytes)?;
let mut new_mmap = Mmap::with_size(request_bytes).ok()?;
// Make the offset-guard pages inaccessible.
unsafe {
region::protect(
new_mmap.as_ptr().add(new_bytes),
guard_bytes,
region::Protection::None,
)
}
.expect("unable to make memory inaccessible");
let copy_len = self.mmap.len() - self.offset_guard_size;
new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.mmap.as_slice()[..copy_len]);
self.mmap = new_mmap;
}
self.current = new_pages;
Some(prev_pages)
}
/// Return a `VMMemoryDefinition` for exposing the memory to JIT code.
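///
/// Note that `grow` may reallocate (and thus move) a dynamic memory's
/// mapping, so any cached copy of this definition must be refreshed after
/// a successful grow.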
pub fn vmmemory(&mut self) -> VMMemoryDefinition {
VMMemoryDefinition {
base: self.mmap.as_mut_ptr(),
current_length: self.mmap.len(),
}
}
}
impl AsRef<[u8]> for LinearMemory {
fn as_ref(&self) -> &[u8] {
self.mmap.as_slice()
}
}
impl AsMut<[u8]> for LinearMemory {
fn as_mut(&mut self) -> &mut [u8] {
self.mmap.as_mut_slice()
}
}

144
lib/runtime/src/mmap.rs Normal file
View File

@@ -0,0 +1,144 @@
//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.
use errno;
use libc;
use region;
use std::ptr;
use std::slice;
use std::string::String;
/// Round `size` up to the nearest multiple of `page_size`.
fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
(size + (page_size - 1)) & !(page_size - 1)
}
/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
#[derive(Debug)]
pub struct Mmap {
ptr: *mut u8,
len: usize,
}
impl Mmap {
/// Construct a new empty instance of `Mmap`.
pub fn new() -> Self {
Self {
ptr: ptr::null_mut(),
len: 0,
}
}
/// Create a new `Mmap` pointing to at least `size` bytes of memory,
/// suitably sized and aligned for memory protection.
#[cfg(not(target_os = "windows"))]
pub fn with_size(size: usize) -> Result<Self, String> {
let page_size = region::page::size();
let alloc_size = round_up_to_page_size(size, page_size);
let ptr = unsafe {
libc::mmap(
ptr::null_mut(),
alloc_size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_PRIVATE | libc::MAP_ANON,
-1,
0,
)
};
if ptr as isize == -1isize {
Err(errno::errno().to_string())
} else {
Ok(Self {
ptr: ptr as *mut u8,
len: alloc_size,
})
}
}
/// Create a new `Mmap` pointing to at least `size` bytes of memory,
/// suitably sized and aligned for memory protection.
#[cfg(target_os = "windows")]
pub fn with_size(size: usize) -> Result<Self, String> {
use winapi::um::memoryapi::VirtualAlloc;
use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_READWRITE};
let page_size = region::page::size();
// VirtualAlloc always rounds up to the next multiple of the page size
let ptr = unsafe {
VirtualAlloc(
ptr::null_mut(),
size,
MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE,
)
};
if !ptr.is_null() {
Ok(Self {
ptr: ptr as *mut u8,
len: round_up_to_page_size(size, page_size),
})
} else {
Err(errno::errno().to_string())
}
}
/// Return the allocated memory as a slice of u8.
pub fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.ptr, self.len) }
}
/// Return the allocated memory as a mutable slice of u8.
pub fn as_mut_slice(&mut self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.ptr, self.len) }
}
/// Return the allocated memory as a pointer to u8.
pub fn as_ptr(&self) -> *const u8 {
self.ptr
}
/// Return the allocated memory as a mutable pointer to u8.
pub fn as_mut_ptr(&mut self) -> *mut u8 {
self.ptr
}
/// Return the length of the allocated memory in bytes.
pub fn len(&self) -> usize {
self.len
}
}
impl Drop for Mmap {
#[cfg(not(target_os = "windows"))]
fn drop(&mut self) {
if !self.ptr.is_null() {
let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.len) };
assert_eq!(r, 0, "munmap failed: {}", errno::errno());
}
}
#[cfg(target_os = "windows")]
fn drop(&mut self) {
if !self.ptr.is_null() {
use winapi::um::memoryapi::VirtualFree;
use winapi::um::winnt::MEM_RELEASE;
let r = unsafe { VirtualFree(self.ptr, self.len, MEM_RELEASE) };
assert_ne!(r, 0, "VirtualFree failed: {}", errno::errno());
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_round_up_to_page_size() {
assert_eq!(round_up_to_page_size(0, 4096), 0);
assert_eq!(round_up_to_page_size(1, 4096), 4096);
assert_eq!(round_up_to_page_size(4096, 4096), 4096);
assert_eq!(round_up_to_page_size(4097, 4096), 8192);
}
}
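// Sketch of typical in-crate use of `Mmap`: allocate zero-filled pages, write
// through the mutable slice, and read back. (Illustrative test only.)
#[cfg(test)]
mod test_mmap_usage {
    use super::Mmap;
    #[test]
    fn allocate_write_read() {
        let mut m = Mmap::with_size(8192).expect("failed to allocate pages");
        assert!(m.len() >= 8192);
        m.as_mut_slice()[0] = 42;
        assert_eq!(m.as_slice()[0], 42);
        // The rest of the mapping starts out zeroed.
        assert_eq!(m.as_slice()[1], 0);
    }
}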

55
lib/runtime/src/sig_registry.rs Normal file
View File

@@ -0,0 +1,55 @@
//! Implement a registry of function signatures, for fast indirect call
//! signature checking.
use cast;
use cranelift_codegen::ir;
use cranelift_entity::PrimaryMap;
use cranelift_wasm::SignatureIndex;
use std::collections::{hash_map, HashMap};
use vmcontext::VMSharedSignatureIndex;
#[derive(Debug)]
pub struct SignatureRegistry {
signature_hash: HashMap<ir::Signature, VMSharedSignatureIndex>,
shared_signatures: PrimaryMap<SignatureIndex, VMSharedSignatureIndex>,
}
impl SignatureRegistry {
pub fn new() -> Self {
Self {
signature_hash: HashMap::new(),
shared_signatures: PrimaryMap::new(),
}
}
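/// Return a pointer to the table of registered shared signature indices,
/// for exposing to JIT code (e.g. as a vmctx's signature-id array).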
pub fn vmshared_signatures(&mut self) -> *mut VMSharedSignatureIndex {
self.shared_signatures
.values_mut()
.into_slice()
.as_mut_ptr()
}
/// Register the given signature.
pub fn register(&mut self, sig_index: SignatureIndex, sig: &ir::Signature) {
// TODO: Refactor this interface so that we're not passing in redundant
// information.
debug_assert_eq!(sig_index.index(), self.shared_signatures.len());
use cranelift_entity::EntityRef;
let len = self.signature_hash.len();
let sig_id = match self.signature_hash.entry(sig.clone()) {
hash_map::Entry::Occupied(entry) => *entry.get(),
hash_map::Entry::Vacant(entry) => {
let sig_id = VMSharedSignatureIndex::new(cast::u32(len).unwrap());
entry.insert(sig_id);
sig_id
}
};
self.shared_signatures.push(sig_id);
}
/// Return the identifying runtime index for the given signature.
pub fn lookup(&mut self, sig_index: SignatureIndex) -> VMSharedSignatureIndex {
self.shared_signatures[sig_index]
}
}
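// Sketch (illustrative in-crate test): registering the same `ir::Signature`
// under two different module-level indices yields the same shared index,
// which is what reduces the caller-side `call_indirect` check to a single
// integer comparison. Assumes cranelift-codegen's `Signature`, `AbiParam`,
// and `CallConv` APIs.
#[cfg(test)]
mod test_signature_dedup {
    use super::SignatureRegistry;
    use cranelift_codegen::ir::{types, AbiParam, Signature};
    use cranelift_codegen::isa::CallConv;
    use cranelift_entity::EntityRef;
    use cranelift_wasm::SignatureIndex;
    #[test]
    fn identical_signatures_share_an_index() {
        let mut registry = SignatureRegistry::new();
        let mut sig = Signature::new(CallConv::SystemV);
        sig.params.push(AbiParam::new(types::I32));
        registry.register(SignatureIndex::new(0), &sig);
        registry.register(SignatureIndex::new(1), &sig);
        assert_eq!(
            registry.lookup(SignatureIndex::new(0)),
            registry.lookup(SignatureIndex::new(1))
        );
    }
}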

120
lib/runtime/src/signalhandlers.rs Normal file
View File

@@ -0,0 +1,120 @@
//! Interface to low-level signal-handling mechanisms.
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
use std::borrow::{Borrow, BorrowMut};
use std::cell::RefCell;
use std::sync::RwLock;
use vmcontext::VMContext;
include!(concat!(env!("OUT_DIR"), "/signalhandlers.rs"));
struct InstallState {
tried: bool,
success: bool,
}
impl InstallState {
fn new() -> Self {
Self {
tried: false,
success: false,
}
}
}
lazy_static! {
static ref EAGER_INSTALL_STATE: RwLock<InstallState> = RwLock::new(InstallState::new());
static ref LAZY_INSTALL_STATE: RwLock<InstallState> = RwLock::new(InstallState::new());
}
/// This function performs the low-overhead signal handler initialization that we
/// want to do eagerly to ensure a more-deterministic global process state. This
/// is especially relevant for signal handlers since handler ordering depends on
/// installation order: the wasm signal handler must run *before* the other crash
/// handlers and since POSIX signal handlers work LIFO, this function needs to be
/// called at the end of the startup process, after other handlers have been
/// installed. This function can thus be called multiple times, having no effect
/// after the first call.
#[no_mangle]
pub extern "C" fn wasmtime_init_eager() {
let mut locked = EAGER_INSTALL_STATE.write().unwrap();
let state = locked.borrow_mut();
if state.tried {
return;
}
state.tried = true;
assert!(!state.success);
if !unsafe { EnsureEagerSignalHandlers() } {
return;
}
state.success = true;
}
thread_local! {
static TRAP_CONTEXT: RefCell<TrapContext> = RefCell::new(TrapContext { triedToInstallSignalHandlers: false, haveSignalHandlers: false });
}
/// Assuming `wasmtime_init_eager` has already been called,
/// this function performs the full installation of signal handlers which must
/// be performed per-thread. This operation may incur some overhead and
/// so should be done only when needed to use wasm.
#[no_mangle]
pub extern "C" fn wasmtime_init_finish(vmctx: &mut VMContext) {
if !TRAP_CONTEXT.with(|cx| cx.borrow().triedToInstallSignalHandlers) {
TRAP_CONTEXT.with(|cx| {
cx.borrow_mut().triedToInstallSignalHandlers = true;
assert!(!cx.borrow().haveSignalHandlers);
});
{
let locked = EAGER_INSTALL_STATE.read().unwrap();
let state = locked.borrow();
assert!(
state.tried,
"call wasmtime_init_eager before calling wasmtime_init_finish"
);
if !state.success {
return;
}
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
ensure_darwin_mach_ports();
TRAP_CONTEXT.with(|cx| {
cx.borrow_mut().haveSignalHandlers = true;
})
}
let instance = unsafe { vmctx.instance() };
let have_signal_handlers = TRAP_CONTEXT.with(|cx| cx.borrow().haveSignalHandlers);
if !have_signal_handlers && instance.needs_signal_handlers() {
panic!("failed to install signal handlers");
}
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
fn ensure_darwin_mach_ports() {
let mut locked = LAZY_INSTALL_STATE.write().unwrap();
let state = locked.borrow_mut();
if state.tried {
return;
}
state.tried = true;
assert!(!state.success);
if !unsafe { EnsureDarwinMachPorts() } {
return;
}
state.success = true;
}
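// Sketch of the startup ordering described above (illustrative only;
// `install_embedder_crash_handlers` is a hypothetical placeholder for
// whatever handlers the embedding application installs).
#[allow(dead_code)]
fn example_startup_order(vmctx: &mut VMContext) {
    // 1. The embedder installs its own crash/diagnostic handlers first, so
    //    the wasm handler ends up ahead of them in the LIFO signal chain.
    // install_embedder_crash_handlers();

    // 2. Install the process-wide wasm signal handlers once, near the end of
    //    process startup; repeated calls are no-ops.
    wasmtime_init_eager();

    // 3. On each thread that will run wasm, with an instance's vmctx in
    //    hand, finish the per-thread setup (Mach ports on Darwin, etc.).
    wasmtime_init_finish(vmctx);
}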

61
lib/runtime/src/table.rs Normal file
View File

@@ -0,0 +1,61 @@
//! Memory management for tables.
//!
//! `Table` is to WebAssembly tables what `LinearMemory` is to WebAssembly linear memories.
use cranelift_wasm::TableElementType;
use vmcontext::{VMCallerCheckedAnyfunc, VMTableDefinition};
use wasmtime_environ::{TablePlan, TableStyle};
/// A table instance.
#[derive(Debug)]
pub struct Table {
vec: Vec<VMCallerCheckedAnyfunc>,
maximum: Option<u32>,
}
impl Table {
/// Create a new table instance with specified minimum and maximum number of elements.
pub fn new(plan: &TablePlan) -> Self {
match plan.table.ty {
TableElementType::Func => (),
TableElementType::Val(ty) => {
unimplemented!("tables of types other than anyfunc ({})", ty)
}
};
match plan.style {
TableStyle::CallerChecksSignature => {
let mut vec = Vec::new();
vec.resize(
plan.table.minimum as usize,
VMCallerCheckedAnyfunc::default(),
);
Self {
vec,
maximum: plan.table.maximum,
}
}
}
}
/// Return a `VMTableDefinition` for exposing the table to JIT code.
pub fn vmtable(&mut self) -> VMTableDefinition {
VMTableDefinition {
base: self.vec.as_mut_ptr() as *mut u8,
current_elements: self.vec.len(),
}
}
}
impl AsRef<[VMCallerCheckedAnyfunc]> for Table {
fn as_ref(&self) -> &[VMCallerCheckedAnyfunc] {
self.vec.as_slice()
}
}
impl AsMut<[VMCallerCheckedAnyfunc]> for Table {
fn as_mut(&mut self) -> &mut [VMCallerCheckedAnyfunc] {
self.vec.as_mut_slice()
}
}
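// Sketch (illustrative): with `TableStyle::CallerChecksSignature`, each table
// slot is a whole `VMCallerCheckedAnyfunc` rather than a bare code pointer,
// so indexing the table scales by that struct's size.
#[allow(dead_code)]
fn table_element_byte_offset(index: usize) -> usize {
    use std::mem::size_of;
    index * size_of::<VMCallerCheckedAnyfunc>()
}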

109
lib/runtime/src/traphandlers.rs Normal file
View File

@@ -0,0 +1,109 @@
//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.
use libc::c_int;
use signalhandlers::{jmp_buf, CodeSegment};
use std::cell::{Cell, RefCell};
use std::mem;
use std::ptr;
use std::string::String;
use vmcontext::{VMContext, VMFunctionBody};
// Currently we use setjmp/longjmp to unwind out of a signal handler
// and back to the point where WebAssembly was called (via `call_wasm`).
// This works because WebAssembly code currently does not use any EH
// or require any cleanups, and we never unwind through non-wasm frames.
// In the future, we'll likely replace this with fancier stack unwinding.
extern "C" {
fn setjmp(env: *mut jmp_buf) -> c_int;
fn longjmp(env: *const jmp_buf, val: c_int) -> !;
}
#[derive(Copy, Clone, Debug)]
struct TrapData {
pc: *const u8,
}
thread_local! {
static TRAP_DATA: Cell<TrapData> = Cell::new(TrapData { pc: ptr::null() });
static JMP_BUFS: RefCell<Vec<jmp_buf>> = RefCell::new(Vec::new());
}
/// Record trap information (currently just the faulting pc) in TLS for the unwind path to report
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn RecordTrap(pc: *const u8, _codeSegment: *const CodeSegment) {
// TODO: Look up the wasm bytecode offset and trap code and record them instead.
TRAP_DATA.with(|data| data.set(TrapData { pc }));
}
/// Initiate an unwind.
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn Unwind() {
JMP_BUFS.with(|bufs| {
let buf = bufs.borrow_mut().pop().unwrap();
unsafe { longjmp(&buf, 1) };
})
}
/// Return the CodeSegment containing the given pc, if any exists in the process.
/// This method does not take a lock.
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn LookupCodeSegment(_pc: *const ::std::os::raw::c_void) -> *const CodeSegment {
// TODO: Implement this.
-1isize as *const CodeSegment
}
/// A simple guard to ensure that `JMP_BUFS` is reset when we're done.
struct ScopeGuard {
orig_num_bufs: usize,
}
impl ScopeGuard {
fn new() -> Self {
Self {
orig_num_bufs: JMP_BUFS.with(|bufs| bufs.borrow().len()),
}
}
}
impl Drop for ScopeGuard {
fn drop(&mut self) {
let orig_num_bufs = self.orig_num_bufs;
JMP_BUFS.with(|bufs| {
bufs.borrow_mut()
.resize(orig_num_bufs, unsafe { mem::zeroed() })
});
}
}
/// Call the wasm function pointed to by `callee`. `values_vec` points to
/// a buffer which holds the incoming arguments, and to which the outgoing
/// return values will be written.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_call_trampoline(
callee: *const VMFunctionBody,
values_vec: *mut u8,
vmctx: *mut VMContext,
) -> Result<(), String> {
// In case wasm code calls Rust that panics and unwinds past this point,
// ensure that JMP_BUFS is unwound to its incoming state.
let _guard = ScopeGuard::new();
let func: fn(*mut u8, *mut VMContext) = mem::transmute(callee);
JMP_BUFS.with(|bufs| {
let mut buf = mem::uninitialized();
if setjmp(&mut buf) != 0 {
return TRAP_DATA.with(|data| Err(format!("wasm trap at {:?}", data.get().pc)));
}
bufs.borrow_mut().push(buf);
func(values_vec, vmctx);
Ok(())
})
}
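// Sketch of the full trap path the pieces above combine into (illustrative;
// the signal handler itself lives behind the bindings included from
// `signalhandlers.rs`):
//
//   1. A wasm access faults; the signal handler calls `RecordTrap` with the
//      faulting pc, which is stashed in `TRAP_DATA`.
//   2. The handler then calls `Unwind`, which pops the innermost `jmp_buf`
//      pushed by `wasmtime_call_trampoline` and `longjmp`s to it.
//   3. Back in the trampoline, `setjmp` returns non-zero and the trap is
//      surfaced to the caller as an `Err` containing the recorded pc.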

577
lib/runtime/src/vmcontext.rs Normal file
View File

@@ -0,0 +1,577 @@
//! This file declares `VMContext` and several related structs which contain
//! fields that JIT code accesses directly.
use cranelift_entity::EntityRef;
use cranelift_wasm::{
DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, FuncIndex, Global, GlobalIndex,
GlobalInit, MemoryIndex, TableIndex,
};
use instance::Instance;
use std::{mem, ptr, u32};
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
#[repr(C)]
pub struct VMFunctionBody(u8);
#[cfg(test)]
mod test_vmfunction_body {
use super::VMFunctionBody;
use std::mem::size_of;
#[test]
fn check_vmfunction_body_offsets() {
assert_eq!(size_of::<VMFunctionBody>(), 1);
}
}
/// The fields a JIT needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
/// A pointer to the imported table description.
pub from: *mut VMTableDefinition,
/// A pointer to the VMContext that owns the table description.
pub vmctx: *mut VMContext,
}
#[cfg(test)]
mod test_vmtable_import {
use super::VMTableImport;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmtable_import_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMTableImport>(),
usize::from(offsets.size_of_vmtable_import())
);
assert_eq!(
offset_of!(VMTableImport, from),
usize::from(offsets.vmtable_import_from())
);
assert_eq!(
offset_of!(VMTableImport, vmctx),
usize::from(offsets.vmtable_import_vmctx())
);
}
}
/// The fields a JIT needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
/// A pointer to the imported memory description.
pub from: *mut VMMemoryDefinition,
/// A pointer to the VMContext that owns the memory description.
pub vmctx: *mut VMContext,
}
#[cfg(test)]
mod test_vmmemory_import {
use super::VMMemoryImport;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmmemory_import_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMMemoryImport>(),
usize::from(offsets.size_of_vmmemory_import())
);
assert_eq!(
offset_of!(VMMemoryImport, from),
usize::from(offsets.vmmemory_import_from())
);
assert_eq!(
offset_of!(VMMemoryImport, vmctx),
usize::from(offsets.vmmemory_import_vmctx())
);
}
}
/// The fields a JIT needs to access to utilize a WebAssembly global
/// variable imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
/// A pointer to the imported global variable description.
pub from: *mut VMGlobalDefinition,
}
#[cfg(test)]
mod test_vmglobal_import {
use super::VMGlobalImport;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmglobal_import_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMGlobalImport>(),
usize::from(offsets.size_of_vmglobal_import())
);
assert_eq!(
offset_of!(VMGlobalImport, from),
usize::from(offsets.vmglobal_import_from())
);
}
}
/// The fields a JIT needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryDefinition {
/// The start address.
pub base: *mut u8,
/// The current logical size of this linear memory in bytes.
pub current_length: usize,
}
#[cfg(test)]
mod test_vmmemory_definition {
use super::VMMemoryDefinition;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmmemory_definition_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMMemoryDefinition>(),
usize::from(offsets.size_of_vmmemory_definition())
);
assert_eq!(
offset_of!(VMMemoryDefinition, base),
usize::from(offsets.vmmemory_definition_base())
);
assert_eq!(
offset_of!(VMMemoryDefinition, current_length),
usize::from(offsets.vmmemory_definition_current_length())
);
/* TODO: Assert that the size of `current_length` matches.
assert_eq!(
size_of::<VMMemoryDefinition::current_length>(),
usize::from(offsets.size_of_vmmemory_definition_current_length())
);
*/
}
}
/// The fields a JIT needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
/// Pointer to the table data.
pub base: *mut u8,
/// The current number of elements in the table.
pub current_elements: usize,
}
#[cfg(test)]
mod test_vmtable_definition {
use super::VMTableDefinition;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmtable_definition_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMTableDefinition>(),
usize::from(offsets.size_of_vmtable_definition())
);
assert_eq!(
offset_of!(VMTableDefinition, base),
usize::from(offsets.vmtable_definition_base())
);
assert_eq!(
offset_of!(VMTableDefinition, current_elements),
usize::from(offsets.vmtable_definition_current_elements())
);
}
}
/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug, Copy, Clone)]
#[repr(C, align(8))]
pub struct VMGlobalDefinition {
storage: [u8; 8],
// If more elements are added here, remember to add offset_of tests below!
}
#[cfg(test)]
mod test_vmglobal_definition {
use super::VMGlobalDefinition;
use std::mem::{align_of, size_of};
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmglobal_definition_alignment() {
assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
}
#[test]
fn check_vmglobal_definition_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMGlobalDefinition>(),
usize::from(offsets.size_of_vmglobal_definition())
);
}
}
impl VMGlobalDefinition {
/// Construct a `VMGlobalDefinition`.
pub fn new(global: &Global) -> Self {
let mut result = Self { storage: [0; 8] };
match global.initializer {
GlobalInit::I32Const(x) => *unsafe { result.as_i32_mut() } = x,
GlobalInit::I64Const(x) => *unsafe { result.as_i64_mut() } = x,
GlobalInit::F32Const(x) => *unsafe { result.as_f32_bits_mut() } = x,
GlobalInit::F64Const(x) => *unsafe { result.as_f64_bits_mut() } = x,
GlobalInit::GetGlobal(_x) => unimplemented!("globals init with get_global"),
GlobalInit::Import => panic!("attempting to initialize imported global"),
}
result
}
/// Return a reference to the value as an i32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i32(&self) -> &i32 {
&*(self.storage.as_ref().as_ptr() as *const u8 as *const i32)
}
/// Return a mutable reference to the value as an i32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut i32)
}
/// Return a reference to the value as an i64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i64(&self) -> &i64 {
&*(self.storage.as_ref().as_ptr() as *const u8 as *const i64)
}
/// Return a mutable reference to the value as an i64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut i64)
}
/// Return a reference to the value as an f32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32(&self) -> &f32 {
&*(self.storage.as_ref().as_ptr() as *const u8 as *const f32)
}
/// Return a mutable reference to the value as an f32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut f32)
}
/// Return a reference to the value as f32 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32_bits(&self) -> &u32 {
&*(self.storage.as_ref().as_ptr() as *const u8 as *const u32)
}
/// Return a mutable reference to the value as f32 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut u32)
}
/// Return a reference to the value as an f64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64(&self) -> &f64 {
&*(self.storage.as_ref().as_ptr() as *const u8 as *const f64)
}
/// Return a mutable reference to the value as an f64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut f64)
}
/// Return a reference to the value as f64 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64_bits(&self) -> &u64 {
&*(self.storage.as_ref().as_ptr() as *const u8 as *const u64)
}
/// Return a mutable reference to the value as f64 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u8 as *mut u64)
}
}
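// Sketch (illustrative test): all of the accessors above reinterpret the same
// 8-byte `storage`, so a value written through one view is visible through
// the others, e.g. as its IEEE bit pattern.
#[cfg(test)]
mod test_vmglobal_definition_storage {
    use super::VMGlobalDefinition;
    #[test]
    fn f32_value_and_bits_share_storage() {
        let mut g = VMGlobalDefinition { storage: [0; 8] };
        unsafe {
            *g.as_f32_mut() = 1.0;
            assert_eq!(*g.as_f32_bits(), 0x3f80_0000);
        }
    }
}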
/// An index into the shared signature registry, usable for checking signatures
/// at indirect calls.
#[repr(C)]
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
pub struct VMSharedSignatureIndex(u32);
#[cfg(test)]
mod test_vmshared_signature_index {
use super::VMSharedSignatureIndex;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmshared_signature_index() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMSharedSignatureIndex>(),
usize::from(offsets.size_of_vmshared_signature_index())
);
}
}
impl VMSharedSignatureIndex {
pub fn new(value: u32) -> Self {
VMSharedSignatureIndex(value)
}
}
/// The VM caller-checked "anyfunc" record, for caller-side signature checking.
/// It consists of the actual function pointer and a signature id to be checked
/// by the caller.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMCallerCheckedAnyfunc {
pub func_ptr: *const VMFunctionBody,
pub type_index: VMSharedSignatureIndex,
// If more elements are added here, remember to add offset_of tests below!
}
#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
use super::VMCallerCheckedAnyfunc;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmcaller_checked_anyfunc_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(
size_of::<VMCallerCheckedAnyfunc>(),
usize::from(offsets.size_of_vmcaller_checked_anyfunc())
);
assert_eq!(
offset_of!(VMCallerCheckedAnyfunc, func_ptr),
usize::from(offsets.vmcaller_checked_anyfunc_func_ptr())
);
assert_eq!(
offset_of!(VMCallerCheckedAnyfunc, type_index),
usize::from(offsets.vmcaller_checked_anyfunc_type_index())
);
}
}
impl Default for VMCallerCheckedAnyfunc {
fn default() -> Self {
Self {
func_ptr: ptr::null_mut(),
type_index: VMSharedSignatureIndex::new(u32::MAX),
}
}
}
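// Sketch (illustrative; not the code the JIT actually emits): the check that
// "caller checks signature" tables imply at a `call_indirect` site. A null
// `func_ptr` (uninitialized slot) or a signature mismatch is a wasm trap.
#[allow(dead_code)]
fn check_indirect_call(
    anyfunc: &VMCallerCheckedAnyfunc,
    expected: VMSharedSignatureIndex,
) -> *const VMFunctionBody {
    assert!(!anyfunc.func_ptr.is_null(), "uninitialized table element");
    assert_eq!(anyfunc.type_index, expected, "indirect call type mismatch");
    anyfunc.func_ptr
}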
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has pointers to the globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// TODO: The number of memories, globals, tables, and signature IDs does
/// not change dynamically, and pointer arrays are not indexed dynamically,
/// so these fields could all be contiguously allocated.
#[derive(Debug)]
#[repr(C)]
pub struct VMContext {
/// A pointer to an array of `*const VMFunctionBody` instances, indexed by `FuncIndex`.
imported_functions: *const *const VMFunctionBody,
/// A pointer to an array of `VMTableImport` instances, indexed by `TableIndex`.
imported_tables: *mut VMTableImport,
/// A pointer to an array of `VMMemoryImport` instances, indexed by `MemoryIndex`.
imported_memories: *mut VMMemoryImport,
/// A pointer to an array of `VMGlobalImport` instances, indexed by `GlobalIndex`.
imported_globals: *mut VMGlobalImport,
/// A pointer to an array of locally-defined `VMTableDefinition` instances,
/// indexed by `DefinedTableIndex`.
tables: *mut VMTableDefinition,
/// A pointer to an array of locally-defined `VMMemoryDefinition` instances,
/// indexed by `DefinedMemoryIndex`.
memories: *mut VMMemoryDefinition,
/// A pointer to an array of locally-defined `VMGlobalDefinition` instances,
/// indexed by `DefinedGlobalIndex`.
globals: *mut VMGlobalDefinition,
/// Signature identifiers for signature-checking indirect calls.
signature_ids: *mut VMSharedSignatureIndex,
// If more elements are added here, remember to add offset_of tests below!
}
#[cfg(test)]
mod test {
use super::VMContext;
use std::mem::size_of;
use wasmtime_environ::VMOffsets;
#[test]
fn check_vmctx_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(size_of::<VMContext>(), usize::from(offsets.size_of_vmctx()));
assert_eq!(
offset_of!(VMContext, memories),
usize::from(offsets.vmctx_memories())
);
assert_eq!(
offset_of!(VMContext, globals),
usize::from(offsets.vmctx_globals())
);
assert_eq!(
offset_of!(VMContext, tables),
usize::from(offsets.vmctx_tables())
);
assert_eq!(
offset_of!(VMContext, signature_ids),
usize::from(offsets.vmctx_signature_ids())
);
}
}
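// Sketch (illustrative): the two offsets JIT code combines to reach the
// `base` field of defined memory `index`, expressed with the same
// `VMOffsets` accessors the tests above exercise. Because `memories` is a
// pointer to a separately-allocated array (see the TODO on the struct),
// generated code first loads that pointer and then applies the second offset.
#[allow(dead_code)]
fn defined_memory_base_offsets(pointer_size: u8, index: u32) -> (u32, u32) {
    use wasmtime_environ::VMOffsets;
    let offsets = VMOffsets::new(pointer_size);
    // Offset of the `memories` array pointer within `VMContext`.
    let memories_ptr_offset = u32::from(offsets.vmctx_memories());
    // Offset of element `index`'s `base` field within the pointed-to array.
    let base_in_array = index * u32::from(offsets.size_of_vmmemory_definition())
        + u32::from(offsets.vmmemory_definition_base());
    (memories_ptr_offset, base_in_array)
}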
impl VMContext {
/// Create a new `VMContext` instance.
pub fn new(
imported_functions: *const *const VMFunctionBody,
imported_tables: *mut VMTableImport,
imported_memories: *mut VMMemoryImport,
imported_globals: *mut VMGlobalImport,
tables: *mut VMTableDefinition,
memories: *mut VMMemoryDefinition,
globals: *mut VMGlobalDefinition,
signature_ids: *mut VMSharedSignatureIndex,
) -> Self {
Self {
imported_functions,
imported_tables,
imported_memories,
imported_globals,
tables,
memories,
globals,
signature_ids,
}
}
/// Return a reference to imported function `index`.
pub unsafe fn imported_function(&self, index: FuncIndex) -> *const VMFunctionBody {
*self.imported_functions.add(index.index())
}
/// Return a reference to imported table `index`.
pub unsafe fn imported_table(&self, index: TableIndex) -> &VMTableImport {
&*self.imported_tables.add(index.index())
}
/// Return a mutable reference to imported table `index`.
pub unsafe fn imported_table_mut(&mut self, index: TableIndex) -> &mut VMTableImport {
&mut *self.imported_tables.add(index.index())
}
/// Return a reference to imported memory `index`.
pub unsafe fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
&*self.imported_memories.add(index.index())
}
/// Return a mutable reference to imported memory `index`.
pub unsafe fn imported_memory_mut(&mut self, index: MemoryIndex) -> &mut VMMemoryImport {
&mut *self.imported_memories.add(index.index())
}
/// Return a reference to imported global `index`.
pub unsafe fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
&*self.imported_globals.add(index.index())
}
/// Return a mutable reference to imported global `index`.
pub unsafe fn imported_global_mut(&mut self, index: GlobalIndex) -> &mut VMGlobalImport {
&mut *self.imported_globals.add(index.index())
}
/// Return a reference to locally-defined table `index`.
pub unsafe fn table(&self, index: DefinedTableIndex) -> &VMTableDefinition {
&*self.tables.add(index.index())
}
/// Return a mutable reference to locally-defined table `index`.
pub unsafe fn table_mut(&mut self, index: DefinedTableIndex) -> &mut VMTableDefinition {
&mut *self.tables.add(index.index())
}
/// Return a reference to locally-defined linear memory `index`.
pub unsafe fn memory(&self, index: DefinedMemoryIndex) -> &VMMemoryDefinition {
&*self.memories.add(index.index())
}
/// Return a mutable reference to locally-defined linear memory `index`.
pub unsafe fn memory_mut(&mut self, index: DefinedMemoryIndex) -> &mut VMMemoryDefinition {
&mut *self.memories.add(index.index())
}
/// Return a reference to locally-defined global variable `index`.
pub unsafe fn global(&self, index: DefinedGlobalIndex) -> &VMGlobalDefinition {
&*self.globals.add(index.index())
}
/// Return a mutable reference to locally-defined global variable `index`.
pub unsafe fn global_mut(&mut self, index: DefinedGlobalIndex) -> &mut VMGlobalDefinition {
&mut *self.globals.add(index.index())
}
/// Return a mutable reference to the associated `Instance`.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn instance(&mut self) -> &mut Instance {
&mut *((self as *mut Self as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance)
}
/// Return the memory index for the given `VMMemoryDefinition`.
pub fn memory_index(&self, memory: &mut VMMemoryDefinition) -> DefinedMemoryIndex {
// TODO: Use `offset_from` once it stabilizes.
let begin = self.memories;
let end: *mut VMMemoryDefinition = memory;
DefinedMemoryIndex::new(
(end as usize - begin as usize) / mem::size_of::<VMMemoryDefinition>(),
)
}
}