Initial reorg.

This is largely the same as #305, but updated for the current tree.
Dan Gohman
2019-11-07 17:11:06 -08:00
parent 2c69546a24
commit 22641de629
351 changed files with 52 additions and 52 deletions

crates/runtime/Cargo.toml (new file)

@@ -0,0 +1,40 @@
[package]
name = "wasmtime-runtime"
version = "0.2.0"
authors = ["The Wasmtime Project Developers"]
description = "Runtime library support for Wasmtime"
categories = ["wasm"]
keywords = ["webassembly", "wasm"]
repository = "https://github.com/CraneStation/wasmtime"
license = "Apache-2.0 WITH LLVM-exception"
readme = "README.md"
edition = "2018"
[dependencies]
cranelift-codegen = { version = "0.49", features = ["enable-serde"] }
cranelift-entity = { version = "0.49", features = ["enable-serde"] }
cranelift-wasm = { version = "0.49", features = ["enable-serde"] }
wasmtime-environ = { path = "../environ", default-features = false }
region = "2.0.0"
lazy_static = "1.2.0"
libc = { version = "0.2.60", default-features = false }
memoffset = "0.5.1"
indexmap = "1.0.2"
hashbrown = { version = "0.6.0", optional = true }
spin = { version = "0.5.2", optional = true }
thiserror = "1.0.4"
[target.'cfg(target_os = "windows")'.dependencies]
winapi = { version = "0.3.7", features = ["winbase", "memoryapi"] }
[build-dependencies]
cc = "1.0"
[features]
default = ["std"]
std = ["cranelift-codegen/std", "cranelift-wasm/std", "wasmtime-environ/std"]
core = ["hashbrown/nightly", "cranelift-codegen/core", "cranelift-wasm/core", "wasmtime-environ/core", "spin"]
[badges]
maintenance = { status = "experimental" }
travis-ci = { repository = "CraneStation/wasmtime" }

crates/runtime/LICENSE (new file)

@@ -0,0 +1,220 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--- LLVM Exceptions to the Apache 2.0 License ----
As an exception, if, as a result of your compiling your source code, portions
of this Software are embedded into an Object form of such source code, you
may redistribute such embedded portions in such Object form without complying
with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
In addition, if you combine or link compiled forms of this Software with
software that is licensed under the GPLv2 ("Combined Software") and if a
court of competent jurisdiction determines that the patent provision (Section
3), the indemnity provision (Section 9) or other Section of the License
conflicts with the conditions of the GPLv2, you may retroactively and
prospectively choose to deem waived or otherwise exclude such Section(s) of
the License, but only in their entirety and only with respect to the Combined
Software.

crates/runtime/README.md (new file)

@@ -0,0 +1,10 @@
This is the `wasmtime-runtime` crate, which contains wasm runtime library
support for the wasm ABI used by [`wasmtime-environ`],
[`wasmtime-jit`], and [`wasmtime-obj`].
This crate does not make a host vs. target distinction; it is meant to be
compiled for the target.
[`wasmtime-environ`]: https://crates.io/crates/wasmtime-environ
[`wasmtime-jit`]: https://crates.io/crates/wasmtime-jit
[`wasmtime-obj`]: https://crates.io/crates/wasmtime-obj

crates/runtime/build.rs (new file)

@@ -0,0 +1,20 @@
fn main() {
println!("cargo:rerun-if-changed=signalhandlers/SignalHandlers.cpp");
println!("cargo:rerun-if-changed=signalhandlers/SignalHandlers.hpp");
println!("cargo:rerun-if-changed=signalhandlers/Trampolines.cpp");
let target = std::env::var("TARGET").unwrap();
let mut build = cc::Build::new();
build
.cpp(true)
.warnings(false)
.file("signalhandlers/SignalHandlers.cpp")
.file("signalhandlers/Trampolines.cpp");
if !target.contains("windows") {
build
.flag("-std=c++11")
.flag("-fno-exceptions")
.flag("-fno-rtti");
}
build.compile("signalhandlers");
}

crates/runtime/signalhandlers/SignalHandlers.cpp (new file)

@@ -0,0 +1,827 @@
//! This file is largely derived from the code in WasmSignalHandlers.cpp in SpiderMonkey:
//!
//! https://dxr.mozilla.org/mozilla-central/source/js/src/wasm/WasmSignalHandlers.cpp
//!
//! Use of Mach ports on Darwin platforms (the USE_APPLE_MACH_PORTS code below) is
//! currently disabled.
#include "SignalHandlers.hpp"
#include <stdint.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#if defined(_WIN32)
# include <windows.h>
# include <winternl.h>
#elif defined(USE_APPLE_MACH_PORTS)
# include <mach/exc.h>
# include <mach/mach.h>
# include <pthread.h>
#else
# include <signal.h>
#endif
// =============================================================================
// This following pile of macros and includes defines the ToRegisterState() and
// the ContextToPC() functions from the (highly) platform-specific CONTEXT
// struct which is provided to the signal handler.
// =============================================================================
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# include <sys/ucontext.h> // for ucontext_t, mcontext_t
#endif
#if defined(__x86_64__)
# if defined(__DragonFly__)
# include <machine/npx.h> // for union savefpu
# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
defined(__NetBSD__) || defined(__OpenBSD__)
# include <machine/fpu.h> // for struct savefpu/fxsave64
# endif
#endif
#if defined(_WIN32)
# define EIP_sig(p) ((p)->Eip)
# define EBP_sig(p) ((p)->Ebp)
# define ESP_sig(p) ((p)->Esp)
# define RIP_sig(p) ((p)->Rip)
# define RSP_sig(p) ((p)->Rsp)
# define RBP_sig(p) ((p)->Rbp)
# define R11_sig(p) ((p)->R11)
# define R13_sig(p) ((p)->R13)
# define R14_sig(p) ((p)->R14)
# define R15_sig(p) ((p)->R15)
# define EPC_sig(p) ((p)->Pc)
# define RFP_sig(p) ((p)->Fp)
# define R31_sig(p) ((p)->Sp)
# define RLR_sig(p) ((p)->Lr)
#elif defined(__OpenBSD__)
# define EIP_sig(p) ((p)->sc_eip)
# define EBP_sig(p) ((p)->sc_ebp)
# define ESP_sig(p) ((p)->sc_esp)
# define RIP_sig(p) ((p)->sc_rip)
# define RSP_sig(p) ((p)->sc_rsp)
# define RBP_sig(p) ((p)->sc_rbp)
# define R11_sig(p) ((p)->sc_r11)
# if defined(__arm__)
# define R13_sig(p) ((p)->sc_usr_sp)
# define R14_sig(p) ((p)->sc_usr_lr)
# define R15_sig(p) ((p)->sc_pc)
# else
# define R13_sig(p) ((p)->sc_r13)
# define R14_sig(p) ((p)->sc_r14)
# define R15_sig(p) ((p)->sc_r15)
# endif
# if defined(__aarch64__)
# define EPC_sig(p) ((p)->sc_elr)
# define RFP_sig(p) ((p)->sc_x[29])
# define RLR_sig(p) ((p)->sc_lr)
# define R31_sig(p) ((p)->sc_sp)
# endif
# if defined(__mips__)
# define EPC_sig(p) ((p)->sc_pc)
# define RFP_sig(p) ((p)->sc_regs[30])
# endif
#elif defined(__linux__) || defined(__sun)
# if defined(__linux__)
# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP])
# define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
# define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
# else
# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_PC])
# define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
# define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
# endif
# define RIP_sig(p) ((p)->uc_mcontext.gregs[REG_RIP])
# define RSP_sig(p) ((p)->uc_mcontext.gregs[REG_RSP])
# define RBP_sig(p) ((p)->uc_mcontext.gregs[REG_RBP])
# if defined(__linux__) && defined(__arm__)
# define R11_sig(p) ((p)->uc_mcontext.arm_fp)
# define R13_sig(p) ((p)->uc_mcontext.arm_sp)
# define R14_sig(p) ((p)->uc_mcontext.arm_lr)
# define R15_sig(p) ((p)->uc_mcontext.arm_pc)
# else
# define R11_sig(p) ((p)->uc_mcontext.gregs[REG_R11])
# define R13_sig(p) ((p)->uc_mcontext.gregs[REG_R13])
# define R14_sig(p) ((p)->uc_mcontext.gregs[REG_R14])
# define R15_sig(p) ((p)->uc_mcontext.gregs[REG_R15])
# endif
# if defined(__linux__) && defined(__aarch64__)
# define EPC_sig(p) ((p)->uc_mcontext.pc)
# define RFP_sig(p) ((p)->uc_mcontext.regs[29])
# define RLR_sig(p) ((p)->uc_mcontext.regs[30])
# define R31_sig(p) ((p)->uc_mcontext.regs[31])
# endif
# if defined(__linux__) && defined(__mips__)
# define EPC_sig(p) ((p)->uc_mcontext.pc)
# define RFP_sig(p) ((p)->uc_mcontext.gregs[30])
# define RSP_sig(p) ((p)->uc_mcontext.gregs[29])
# define R31_sig(p) ((p)->uc_mcontext.gregs[31])
# endif
# if defined(__linux__) && (defined(__sparc__) && defined(__arch64__))
# define PC_sig(p) ((p)->uc_mcontext.mc_gregs[MC_PC])
# define FP_sig(p) ((p)->uc_mcontext.mc_fp)
# define SP_sig(p) ((p)->uc_mcontext.mc_i7)
# endif
# if defined(__linux__) && \
(defined(__ppc64__) || defined (__PPC64__) || defined(__ppc64le__) || defined (__PPC64LE__))
# define R01_sig(p) ((p)->uc_mcontext.gp_regs[1])
# define R32_sig(p) ((p)->uc_mcontext.gp_regs[32])
# endif
#elif defined(__NetBSD__)
# define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP])
# define EBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EBP])
# define ESP_sig(p) ((p)->uc_mcontext.__gregs[_REG_ESP])
# define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP])
# define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP])
# define RBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBP])
# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
# if defined(__aarch64__)
# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC])
# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_X29])
# define RLR_sig(p) ((p)->uc_mcontext.__gregs[_REG_X30])
# define R31_sig(p) ((p)->uc_mcontext.__gregs[_REG_SP])
# endif
# if defined(__mips__)
# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_EPC])
# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_S8])
# endif
#elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# define EIP_sig(p) ((p)->uc_mcontext.mc_eip)
# define EBP_sig(p) ((p)->uc_mcontext.mc_ebp)
# define ESP_sig(p) ((p)->uc_mcontext.mc_esp)
# define RIP_sig(p) ((p)->uc_mcontext.mc_rip)
# define RSP_sig(p) ((p)->uc_mcontext.mc_rsp)
# define RBP_sig(p) ((p)->uc_mcontext.mc_rbp)
# if defined(__FreeBSD__) && defined(__arm__)
# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
# else
# define R11_sig(p) ((p)->uc_mcontext.mc_r11)
# define R13_sig(p) ((p)->uc_mcontext.mc_r13)
# define R14_sig(p) ((p)->uc_mcontext.mc_r14)
# define R15_sig(p) ((p)->uc_mcontext.mc_r15)
# endif
# if defined(__FreeBSD__) && defined(__aarch64__)
# define EPC_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_elr)
# define RFP_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_x[29])
# define RLR_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_lr)
# define R31_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_sp)
# endif
# if defined(__FreeBSD__) && defined(__mips__)
# define EPC_sig(p) ((p)->uc_mcontext.mc_pc)
# define RFP_sig(p) ((p)->uc_mcontext.mc_regs[30])
# endif
#elif defined(USE_APPLE_MACH_PORTS)
# define EIP_sig(p) ((p)->thread.uts.ts32.__eip)
# define EBP_sig(p) ((p)->thread.uts.ts32.__ebp)
# define ESP_sig(p) ((p)->thread.uts.ts32.__esp)
# define RIP_sig(p) ((p)->thread.__rip)
# define RBP_sig(p) ((p)->thread.__rbp)
# define RSP_sig(p) ((p)->thread.__rsp)
# define R11_sig(p) ((p)->thread.__r[11])
# define R13_sig(p) ((p)->thread.__sp)
# define R14_sig(p) ((p)->thread.__lr)
# define R15_sig(p) ((p)->thread.__pc)
#elif defined(__APPLE__)
# define EIP_sig(p) ((p)->uc_mcontext->__ss.__eip)
# define EBP_sig(p) ((p)->uc_mcontext->__ss.__ebp)
# define ESP_sig(p) ((p)->uc_mcontext->__ss.__esp)
# define RIP_sig(p) ((p)->uc_mcontext->__ss.__rip)
# define RBP_sig(p) ((p)->uc_mcontext->__ss.__rbp)
# define RSP_sig(p) ((p)->uc_mcontext->__ss.__rsp)
# define R11_sig(p) ((p)->uc_mcontext->__ss.__r11)
# define R13_sig(p) ((p)->uc_mcontext->__ss.__sp)
# define R14_sig(p) ((p)->uc_mcontext->__ss.__lr)
# define R15_sig(p) ((p)->uc_mcontext->__ss.__pc)
#else
# error "Don't know how to read/write to the thread state via the mcontext_t."
#endif
#if defined(ANDROID)
// Not all versions of the Android NDK define ucontext_t or mcontext_t.
// Detect this and provide custom but compatible definitions. Note that these
// follow the GLibc naming convention to access register values from
// mcontext_t.
//
// See: https://chromiumcodereview.appspot.com/10829122/
// See: http://code.google.com/p/android/issues/detail?id=34784
# if !defined(__BIONIC_HAVE_UCONTEXT_T)
# if defined(__arm__)
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
# if !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
# include <asm/sigcontext.h>
# endif
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used so don't define them here.
} ucontext_t;
# elif defined(__mips__)
typedef struct {
uint32_t regmask;
uint32_t status;
uint64_t pc;
uint64_t gregs[32];
uint64_t fpregs[32];
uint32_t acx;
uint32_t fpc_csr;
uint32_t fpc_eir;
uint32_t used_math;
uint32_t dsp;
uint64_t mdhi;
uint64_t mdlo;
uint32_t hi1;
uint32_t lo1;
uint32_t hi2;
uint32_t lo2;
uint32_t hi3;
uint32_t lo3;
} mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used so don't define them here.
} ucontext_t;
# elif defined(__i386__)
// x86 version for Android.
typedef struct {
uint32_t gregs[19];
void* fpregs;
uint32_t oldmask;
uint32_t cr2;
} mcontext_t;
typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EIP = 14 };
# endif // defined(__i386__)
# endif // !defined(__BIONIC_HAVE_UCONTEXT_T)
#endif // defined(ANDROID)
#if defined(USE_APPLE_MACH_PORTS)
# if defined(__x86_64__)
struct macos_x64_context {
x86_thread_state64_t thread;
x86_float_state64_t float_;
};
# define CONTEXT macos_x64_context
# elif defined(__i386__)
struct macos_x86_context {
x86_thread_state_t thread;
x86_float_state_t float_;
};
# define CONTEXT macos_x86_context
# elif defined(__arm__)
struct macos_arm_context {
arm_thread_state_t thread;
arm_neon_state_t float_;
};
# define CONTEXT macos_arm_context
# else
# error Unsupported architecture
# endif
#elif !defined(_WIN32)
# define CONTEXT ucontext_t
#endif
#if defined(_M_X64) || defined(__x86_64__)
# define PC_sig(p) RIP_sig(p)
# define FP_sig(p) RBP_sig(p)
# define SP_sig(p) RSP_sig(p)
#elif defined(_M_IX86) || defined(__i386__)
# define PC_sig(p) EIP_sig(p)
# define FP_sig(p) EBP_sig(p)
# define SP_sig(p) ESP_sig(p)
#elif defined(__arm__)
# define FP_sig(p) R11_sig(p)
# define SP_sig(p) R13_sig(p)
# define LR_sig(p) R14_sig(p)
# define PC_sig(p) R15_sig(p)
#elif defined(_M_ARM64) || defined(__aarch64__)
# define PC_sig(p) EPC_sig(p)
# define FP_sig(p) RFP_sig(p)
# define SP_sig(p) R31_sig(p)
# define LR_sig(p) RLR_sig(p)
#elif defined(__mips__)
# define PC_sig(p) EPC_sig(p)
# define FP_sig(p) RFP_sig(p)
# define SP_sig(p) RSP_sig(p)
# define LR_sig(p) R31_sig(p)
#elif defined(__ppc64__) || defined (__PPC64__) || defined(__ppc64le__) || defined (__PPC64LE__)
# define PC_sig(p) R32_sig(p)
# define SP_sig(p) R01_sig(p)
# define FP_sig(p) R01_sig(p)
#endif
static void
SetContextPC(CONTEXT* context, const uint8_t* pc)
{
#ifdef PC_sig
PC_sig(context) = reinterpret_cast<uintptr_t>(pc);
#else
abort();
#endif
}
static const uint8_t*
ContextToPC(CONTEXT* context)
{
#ifdef PC_sig
return reinterpret_cast<const uint8_t*>(static_cast<uintptr_t>(PC_sig(context)));
#else
abort();
#endif
}
// =============================================================================
// All signals/exceptions funnel down to this one trap-handling function which
// tests whether the pc is in a wasm module and, if so, whether there is
// actually a trap expected at this pc. These tests both avoid real bugs being
// silently converted to wasm traps and provide the trapping wasm bytecode
// offset we need to report in the error.
//
// Crashing inside wasm trap handling (due to a bug in trap handling or exposed
// during trap handling) must be reported like a normal crash, not cause the
// crash report to be lost. On Windows and non-Mach Unix, a crash during the
// handler reenters the handler, possibly repeatedly until exhausting the stack,
// and so we prevent recursion with the thread-local sAlreadyHandlingTrap. On
// Mach, the wasm exception handler has its own thread and is installed only on
// the thread-level debugging ports of our threads, so a crash on
// the exception handler thread will not recurse; it will bubble up to the
// process-level debugging ports (where Breakpad is installed).
// =============================================================================
static thread_local bool sAlreadyHandlingTrap;
namespace {
struct AutoHandlingTrap
{
AutoHandlingTrap() {
assert(!sAlreadyHandlingTrap);
sAlreadyHandlingTrap = true;
}
~AutoHandlingTrap() {
assert(sAlreadyHandlingTrap);
sAlreadyHandlingTrap = false;
}
};
}
static
#if defined(__GNUC__) || defined(__clang__)
__attribute__ ((warn_unused_result))
#endif
bool
HandleTrap(CONTEXT* context, bool reset_guard_page)
{
assert(sAlreadyHandlingTrap);
if (!CheckIfTrapAtAddress(ContextToPC(context))) {
return false;
}
RecordTrap(ContextToPC(context), reset_guard_page);
// Unwind calls longjmp, which bypasses the automatic sAlreadyHandlingTrap
// cleanup in AutoHandlingTrap's destructor, so reset the flag manually
// before doing the longjmp.
sAlreadyHandlingTrap = false;
#if defined(USE_APPLE_MACH_PORTS)
// Reroute the PC to run the Unwind function on the main stack after the
// handler exits. This doesn't yet work for stack overflow traps, because
// in that case the main thread doesn't have any space left to run.
SetContextPC(context, reinterpret_cast<const uint8_t*>(&Unwind));
#else
// For now, just call Unwind directly, rather than redirecting the PC there,
// so that it runs on the alternate signal handler stack. To run on the main
// stack, reroute the context PC like this:
Unwind();
#endif
return true;
}
// =============================================================================
// The following platform-specific handlers funnel all signals/exceptions into
// the shared HandleTrap() above.
// =============================================================================
#if defined(_WIN32)
// Obtained empirically from thread_local codegen on x86/x64/arm64.
// Compiled in all user binaries, so should be stable over time.
static const unsigned sThreadLocalArrayPointerIndex = 11;
static LONG WINAPI
WasmTrapHandler(LPEXCEPTION_POINTERS exception)
{
// Make sure TLS is initialized before reading sAlreadyHandlingTrap.
if (!NtCurrentTeb()->Reserved1[sThreadLocalArrayPointerIndex]) {
return EXCEPTION_CONTINUE_SEARCH;
}
if (sAlreadyHandlingTrap) {
return EXCEPTION_CONTINUE_SEARCH;
}
AutoHandlingTrap aht;
EXCEPTION_RECORD* record = exception->ExceptionRecord;
if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION &&
record->ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION &&
record->ExceptionCode != EXCEPTION_STACK_OVERFLOW &&
record->ExceptionCode != EXCEPTION_INT_DIVIDE_BY_ZERO &&
record->ExceptionCode != EXCEPTION_INT_OVERFLOW)
{
return EXCEPTION_CONTINUE_SEARCH;
}
if (!HandleTrap(exception->ContextRecord,
record->ExceptionCode == EXCEPTION_STACK_OVERFLOW)) {
return EXCEPTION_CONTINUE_SEARCH;
}
return EXCEPTION_CONTINUE_EXECUTION;
}
#elif defined(USE_APPLE_MACH_PORTS)
// On OSX we are forced to use the lower-level Mach exception mechanism instead
// of Unix signals because breakpad uses Mach exceptions and would otherwise
// report a crash before wasm gets a chance to handle the exception.
// This definition was generated by mig (the Mach Interface Generator) for the
// routine 'exception_raise' (exc.defs).
#pragma pack(4)
typedef struct {
mach_msg_header_t Head;
/* start of the kernel processed data */
mach_msg_body_t msgh_body;
mach_msg_port_descriptor_t thread;
mach_msg_port_descriptor_t task;
/* end of the kernel processed data */
NDR_record_t NDR;
exception_type_t exception;
mach_msg_type_number_t codeCnt;
int64_t code[2];
} Request__mach_exception_raise_t;
#pragma pack()
// The full Mach message also includes a trailer.
struct ExceptionRequest
{
Request__mach_exception_raise_t body;
mach_msg_trailer_t trailer;
};
static bool
HandleMachException(const ExceptionRequest& request)
{
// Get the port of the thread from the message.
mach_port_t cxThread = request.body.thread.name;
// Read out the thread's register state.
CONTEXT context;
# if defined(__x86_64__)
unsigned int thread_state_count = x86_THREAD_STATE64_COUNT;
unsigned int float_state_count = x86_FLOAT_STATE64_COUNT;
int thread_state = x86_THREAD_STATE64;
int float_state = x86_FLOAT_STATE64;
# elif defined(__i386__)
unsigned int thread_state_count = x86_THREAD_STATE_COUNT;
unsigned int float_state_count = x86_FLOAT_STATE_COUNT;
int thread_state = x86_THREAD_STATE;
int float_state = x86_FLOAT_STATE;
# elif defined(__arm__)
unsigned int thread_state_count = ARM_THREAD_STATE_COUNT;
unsigned int float_state_count = ARM_NEON_STATE_COUNT;
int thread_state = ARM_THREAD_STATE;
int float_state = ARM_NEON_STATE;
# else
# error Unsupported architecture
# endif
kern_return_t kret;
kret = thread_get_state(cxThread, thread_state,
(thread_state_t)&context.thread, &thread_state_count);
if (kret != KERN_SUCCESS) {
return false;
}
kret = thread_get_state(cxThread, float_state,
(thread_state_t)&context.float_, &float_state_count);
if (kret != KERN_SUCCESS) {
return false;
}
if (request.body.exception != EXC_BAD_ACCESS &&
request.body.exception != EXC_BAD_INSTRUCTION)
{
return false;
}
{
AutoHandlingTrap aht;
if (!HandleTrap(&context, false)) {
return false;
}
}
// Update the thread state with the new pc and register values.
kret = thread_set_state(cxThread, float_state, (thread_state_t)&context.float_, float_state_count);
if (kret != KERN_SUCCESS) {
return false;
}
kret = thread_set_state(cxThread, thread_state, (thread_state_t)&context.thread, thread_state_count);
if (kret != KERN_SUCCESS) {
return false;
}
return true;
}
static mach_port_t sMachDebugPort = MACH_PORT_NULL;
static void*
MachExceptionHandlerThread(void* arg)
{
// Taken from mach_exc in /usr/include/mach/mach_exc.defs.
static const unsigned EXCEPTION_MSG_ID = 2405;
while (true) {
ExceptionRequest request;
kern_return_t kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
sMachDebugPort, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
// If we fail even receiving the message, we can't even send a reply!
// Rather than hanging the faulting thread (hanging the browser), crash.
if (kret != KERN_SUCCESS) {
fprintf(stderr, "MachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret);
abort();
}
if (request.body.Head.msgh_id != EXCEPTION_MSG_ID) {
fprintf(stderr, "Unexpected msg header id %d\n", (int)request.body.Head.msgh_bits);
abort();
}
// Some thread just committed an EXC_BAD_ACCESS and has been suspended by
// the kernel. The kernel is waiting for us to reply with instructions.
// Our default is the "not handled" reply (by setting the RetCode field
// of the reply to KERN_FAILURE) which tells the kernel to continue
// searching at the process and system level. If this is an
// expected exception, we handle it and return KERN_SUCCESS.
bool handled = HandleMachException(request);
kern_return_t replyCode = handled ? KERN_SUCCESS : KERN_FAILURE;
// This magic incantation to send a reply back to the kernel was
// derived from the exc_server generated by
// 'mig -v /usr/include/mach/mach_exc.defs'.
__Reply__exception_raise_t reply;
reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
reply.Head.msgh_size = sizeof(reply);
reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
reply.Head.msgh_local_port = MACH_PORT_NULL;
reply.Head.msgh_id = request.body.Head.msgh_id + 100;
reply.NDR = NDR_record;
reply.RetCode = replyCode;
mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
return nullptr;
}
#else // If not Windows or Mac, assume Unix
static struct sigaction sPrevSIGSEGVHandler;
static struct sigaction sPrevSIGBUSHandler;
static struct sigaction sPrevSIGILLHandler;
static struct sigaction sPrevSIGFPEHandler;
static void
WasmTrapHandler(int signum, siginfo_t* info, void* context)
{
if (!sAlreadyHandlingTrap) {
AutoHandlingTrap aht;
assert(signum == SIGSEGV || signum == SIGBUS || signum == SIGFPE || signum == SIGILL);
if (HandleTrap(static_cast<CONTEXT*>(context), false)) {
return;
}
}
struct sigaction* previousSignal = nullptr;
switch (signum) {
case SIGSEGV: previousSignal = &sPrevSIGSEGVHandler; break;
case SIGBUS: previousSignal = &sPrevSIGBUSHandler; break;
case SIGFPE: previousSignal = &sPrevSIGFPEHandler; break;
case SIGILL: previousSignal = &sPrevSIGILLHandler; break;
}
assert(previousSignal);
// This signal is not for any compiled wasm code we expect, so we need to
// forward the signal to the next handler. If there is no next handler (SIG_IGN
// or SIG_DFL), then it's time to crash. To do this, we set the signal back to
// its original disposition and return. This will cause the faulting op to
// be re-executed, which will crash in the normal way. The advantage of
// doing this over calling _exit() is that we remove ourselves from the crash
// stack, which improves crash reports. If there is a next handler, call it.
// It will either crash synchronously, fix up the instruction so that
// execution can continue and return, or trigger a crash by returning the
// signal to its original disposition and returning.
//
// Note: the order of these tests matters.
if (previousSignal->sa_flags & SA_SIGINFO) {
previousSignal->sa_sigaction(signum, info, context);
} else if (previousSignal->sa_handler == SIG_DFL || previousSignal->sa_handler == SIG_IGN) {
sigaction(signum, previousSignal, nullptr);
} else {
previousSignal->sa_handler(signum);
}
}
# endif // _WIN32 || __APPLE__ || assume unix
#if defined(ANDROID) && defined(MOZ_LINKER)
extern "C" MFBT_API bool IsSignalHandlingBroken();
#endif
int
EnsureEagerSignalHandlers()
{
#if defined(ANDROID) && defined(MOZ_LINKER)
// Signal handling is broken on some android systems.
if (IsSignalHandlingBroken()) {
return false;
}
#endif
sAlreadyHandlingTrap = false;
// Install whatever exception/signal handler is appropriate for the OS.
#if defined(_WIN32)
# if defined(MOZ_ASAN)
// Under ASan we need to let the ASan runtime's ShadowExceptionHandler stay
// in the first handler position. This requires some coordination with
// MemoryProtectionExceptionHandler::isDisabled().
const bool firstHandler = false;
# else
// Otherwise, WasmTrapHandler needs to go first, so that we can recover
// from wasm faults and continue execution without triggering handlers
// such as MemoryProtectionExceptionHandler that assume we are crashing.
const bool firstHandler = true;
# endif
if (!AddVectoredExceptionHandler(firstHandler, WasmTrapHandler)) {
// Windows has all sorts of random security knobs for disabling things
// so make this a dynamic failure that disables wasm, not an abort().
return false;
}
#elif defined(USE_APPLE_MACH_PORTS)
// All the Mach setup is done in EnsureDarwinMachPorts.
#else
// SA_ONSTACK allows us to handle signals on an alternate stack, so that
// the handler can run in response to running out of stack space on the
// main stack. Rust installs an alternate stack with sigaltstack, so we
// rely on that.
// SA_NODEFER allows us to reenter the signal handler if we crash while
// handling the signal, and fall through to the Breakpad handler by testing
// handlingSegFault.
// Allow handling OOB with signals on all architectures
struct sigaction faultHandler;
faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
faultHandler.sa_sigaction = WasmTrapHandler;
sigemptyset(&faultHandler.sa_mask);
if (sigaction(SIGSEGV, &faultHandler, &sPrevSIGSEGVHandler)) {
perror("unable to install SIGSEGV handler");
abort();
}
# if defined(__arm__) || defined(__APPLE__)
// On ARM, handle Unaligned Accesses.
// On Darwin, guard page accesses are raised as SIGBUS.
struct sigaction busHandler;
busHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
busHandler.sa_sigaction = WasmTrapHandler;
sigemptyset(&busHandler.sa_mask);
if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler)) {
perror("unable to install SIGBUS handler");
abort();
}
# endif
# if !defined(__mips__)
// Wasm traps for MIPS currently only raise integer overflow fp exception.
struct sigaction illHandler;
illHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
illHandler.sa_sigaction = WasmTrapHandler;
sigemptyset(&illHandler.sa_mask);
if (sigaction(SIGILL, &illHandler, &sPrevSIGILLHandler)) {
perror("unable to install wasm SIGILL handler");
abort();
}
# endif
# if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
// x86 uses SIGFPE to report division by zero, and wasm traps for MIPS
// currently raise integer overflow fp exception.
struct sigaction fpeHandler;
fpeHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
fpeHandler.sa_sigaction = WasmTrapHandler;
sigemptyset(&fpeHandler.sa_mask);
if (sigaction(SIGFPE, &fpeHandler, &sPrevSIGFPEHandler)) {
perror("unable to install wasm SIGFPE handler");
abort();
}
# endif
#endif
return true;
}
int
EnsureDarwinMachPorts()
{
#ifdef USE_APPLE_MACH_PORTS
pthread_attr_t handlerThreadAttr;
int r = pthread_attr_init(&handlerThreadAttr);
if (r != 0) {
return false;
}
// Create the port that all of our threads will redirect their traps to.
kern_return_t kret;
kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &sMachDebugPort);
if (kret != KERN_SUCCESS) {
return false;
}
kret = mach_port_insert_right(mach_task_self(), sMachDebugPort, sMachDebugPort,
MACH_MSG_TYPE_MAKE_SEND);
if (kret != KERN_SUCCESS) {
return false;
}
// Create the thread that will wait on and service sMachDebugPort.
// It's not useful to destroy this thread on process shutdown so
// immediately detach on successful start.
pthread_t handlerThread;
r = pthread_create(&handlerThread, &handlerThreadAttr, MachExceptionHandlerThread, nullptr);
if (r != 0) {
return false;
}
r = pthread_detach(handlerThread);
assert(r == 0);
// In addition to the process-wide signal handler setup, OSX needs each
// thread configured to send its exceptions to sMachDebugPort. While there
// are also task-level (i.e. process-level) exception ports, those are
// "claimed" by breakpad and chaining Mach exceptions is dark magic that we
// avoid by instead intercepting exceptions at the thread level before they
// propagate to the process-level. This works because there are no other
// uses of thread-level exception ports.
assert(sMachDebugPort != MACH_PORT_NULL);
thread_port_t thisThread = mach_thread_self();
kret = thread_set_exception_ports(thisThread,
EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
sMachDebugPort,
EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
THREAD_STATE_NONE);
mach_port_deallocate(mach_task_self(), thisThread);
if (kret != KERN_SUCCESS) {
return false;
}
#endif
return true;
}

crates/runtime/signalhandlers/SignalHandlers.hpp (new file)

@@ -0,0 +1,45 @@
#ifndef signal_handlers_h
#define signal_handlers_h
#include <stdint.h>
#include <setjmp.h>
#ifndef __cplusplus
#include <stdbool.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
int8_t CheckIfTrapAtAddress(const uint8_t* pc);
// Record the Trap code and wasm bytecode offset in TLS somewhere
void RecordTrap(const uint8_t* pc, bool reset_guard_page);
void* EnterScope(void*);
void LeaveScope(void*);
void* GetScope(void);
void Unwind(void);
// This function performs the low-overhead signal handler initialization that we
// want to do eagerly to ensure a more-deterministic global process state. This
// is especially relevant for signal handlers since handler ordering depends on
// installation order: the wasm signal handler must run *before* the other crash
// handlers and since POSIX signal handlers work LIFO, this function needs to be
// called at the end of the startup process, after other handlers have been
// installed. This function can thus be called multiple times, having no effect
// after the first call.
int
EnsureEagerSignalHandlers(void);
// Assuming EnsureEagerSignalHandlers() has already been called,
// this function performs the full installation of signal handlers which must
// be performed per-thread. This operation may incur some overhead and
// so should be done only when needed to use wasm.
int
EnsureDarwinMachPorts(void);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // signal_handlers_h
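
The declarations above describe when and how often the eager installation should happen, but the Rust-side call pattern isn't shown in this header. As a minimal sketch (an assumption for illustration, not code from this commit; the wrapper name is made up and std is assumed), the caller could funnel initialization through std::sync::Once so repeated calls stay harmless and the installation-order reasoning is simple:

use std::os::raw::c_int;
use std::sync::Once;

extern "C" {
    // Matches the declaration above: returns nonzero on success.
    fn EnsureEagerSignalHandlers() -> c_int;
}

static EAGER_INIT: Once = Once::new();

/// Install the eager wasm signal handlers at most once per process.
pub fn init_eager_handlers() -> bool {
    let mut ok = true;
    EAGER_INIT.call_once(|| {
        // The C side is also safe to call repeatedly, but Once keeps the
        // ordering argument from the comment above easy to audit.
        ok = unsafe { EnsureEagerSignalHandlers() } != 0;
    });
    ok
}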

crates/runtime/signalhandlers/Trampolines.cpp (new file)

@@ -0,0 +1,37 @@
#include <setjmp.h>
#include "SignalHandlers.hpp"
extern "C"
int WasmtimeCallTrampoline(void *vmctx, void (*body)(void*, void*), void *args) {
jmp_buf buf;
void *volatile prev;
if (setjmp(buf) != 0) {
LeaveScope(prev);
return 0;
}
prev = EnterScope(&buf);
body(vmctx, args);
LeaveScope(prev);
return 1;
}
extern "C"
int WasmtimeCall(void *vmctx, void (*body)(void*)) {
jmp_buf buf;
void *volatile prev;
if (setjmp(buf) != 0) {
LeaveScope(prev);
return 0;
}
prev = EnterScope(&buf);
body(vmctx);
LeaveScope(prev);
return 1;
}
extern "C"
void Unwind() {
jmp_buf *buf = (jmp_buf*) GetScope();
longjmp(*buf, 1);
}
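
These trampolines are the C++ half of the trap/unwind mechanism; the Rust side (the traphandlers module, re-exported from lib.rs as wasmtime_call and wasmtime_call_trampoline) drives them through FFI. A hedged sketch of what those bindings might look like, with the signatures inferred from the definitions above rather than taken from this diff:

use std::os::raw::{c_int, c_void};

extern "C" {
    // Returns 1 if `body` returned normally, 0 if it trapped and Unwind
    // longjmp'd back to the setjmp point inside the trampoline.
    fn WasmtimeCall(vmctx: *mut c_void, body: unsafe extern "C" fn(*mut c_void)) -> c_int;
    fn WasmtimeCallTrampoline(
        vmctx: *mut c_void,
        body: unsafe extern "C" fn(*mut c_void, *mut c_void),
        args: *mut c_void,
    ) -> c_int;
}

/// Call a wasm function body, mapping a trap onto `Err(())`.
pub unsafe fn call_body(
    vmctx: *mut c_void,
    body: unsafe extern "C" fn(*mut c_void),
) -> Result<(), ()> {
    if WasmtimeCall(vmctx, body) != 0 {
        Ok(())
    } else {
        Err(())
    }
}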

crates/runtime/src/export.rs (new file)

@@ -0,0 +1,104 @@
use crate::vmcontext::{
VMContext, VMFunctionBody, VMGlobalDefinition, VMMemoryDefinition, VMTableDefinition,
};
use cranelift_codegen::ir;
use cranelift_wasm::Global;
use wasmtime_environ::{MemoryPlan, TablePlan};
/// The value of an export passed from one instance to another.
#[derive(Debug, Clone)]
pub enum Export {
/// A function export value.
Function {
/// The address of the native-code function.
address: *const VMFunctionBody,
/// Pointer to the containing `VMContext`.
vmctx: *mut VMContext,
/// The function signature declaration, used for compatibility checking.
signature: ir::Signature,
},
/// A table export value.
Table {
/// The address of the table descriptor.
definition: *mut VMTableDefinition,
/// Pointer to the containing `VMContext`.
vmctx: *mut VMContext,
/// The table declaration, used for compatibility checking.
table: TablePlan,
},
/// A memory export value.
Memory {
/// The address of the memory descriptor.
definition: *mut VMMemoryDefinition,
/// Pointer to the containing `VMContext`.
vmctx: *mut VMContext,
/// The memory declaration, used for compatibility checking.
memory: MemoryPlan,
},
/// A global export value.
Global {
/// The address of the global storage.
definition: *mut VMGlobalDefinition,
/// Pointer to the containing `VMContext`.
vmctx: *mut VMContext,
/// The global declaration, used for compatibility checking.
global: Global,
},
}
impl Export {
/// Construct a function export value.
pub fn function(
address: *const VMFunctionBody,
vmctx: *mut VMContext,
signature: ir::Signature,
) -> Self {
Self::Function {
address,
vmctx,
signature,
}
}
/// Construct a table export value.
pub fn table(
definition: *mut VMTableDefinition,
vmctx: *mut VMContext,
table: TablePlan,
) -> Self {
Self::Table {
definition,
vmctx,
table,
}
}
/// Construct a memory export value.
pub fn memory(
definition: *mut VMMemoryDefinition,
vmctx: *mut VMContext,
memory: MemoryPlan,
) -> Self {
Self::Memory {
definition,
vmctx,
memory,
}
}
/// Construct a global export value.
pub fn global(
definition: *mut VMGlobalDefinition,
vmctx: *mut VMContext,
global: Global,
) -> Self {
Self::Global {
definition,
vmctx,
global,
}
}
}

crates/runtime/src/imports.rs (new file)

@@ -0,0 +1,54 @@
use crate::instance::InstanceHandle;
use crate::vmcontext::{VMFunctionImport, VMGlobalImport, VMMemoryImport, VMTableImport};
use crate::HashSet;
use cranelift_entity::{BoxedSlice, PrimaryMap};
use cranelift_wasm::{FuncIndex, GlobalIndex, MemoryIndex, TableIndex};
/// Resolved import pointers.
#[derive(Clone)]
pub struct Imports {
/// The set of instances that the imports depend on.
pub dependencies: HashSet<InstanceHandle>,
/// Resolved addresses for imported functions.
pub functions: BoxedSlice<FuncIndex, VMFunctionImport>,
/// Resolved addresses for imported tables.
pub tables: BoxedSlice<TableIndex, VMTableImport>,
/// Resolved addresses for imported memories.
pub memories: BoxedSlice<MemoryIndex, VMMemoryImport>,
/// Resolved addresses for imported globals.
pub globals: BoxedSlice<GlobalIndex, VMGlobalImport>,
}
impl Imports {
/// Construct a new `Imports` instance.
pub fn new(
dependencies: HashSet<InstanceHandle>,
function_imports: PrimaryMap<FuncIndex, VMFunctionImport>,
table_imports: PrimaryMap<TableIndex, VMTableImport>,
memory_imports: PrimaryMap<MemoryIndex, VMMemoryImport>,
global_imports: PrimaryMap<GlobalIndex, VMGlobalImport>,
) -> Self {
Self {
dependencies,
functions: function_imports.into_boxed_slice(),
tables: table_imports.into_boxed_slice(),
memories: memory_imports.into_boxed_slice(),
globals: global_imports.into_boxed_slice(),
}
}
/// Construct a new `Imports` instance with no imports.
pub fn none() -> Self {
Self {
dependencies: HashSet::new(),
functions: PrimaryMap::new().into_boxed_slice(),
tables: PrimaryMap::new().into_boxed_slice(),
memories: PrimaryMap::new().into_boxed_slice(),
globals: PrimaryMap::new().into_boxed_slice(),
}
}
}

(File diff suppressed because it is too large.)

crates/runtime/src/jit_int.rs (new file)

@@ -0,0 +1,120 @@
//! GDB's JIT compilation interface. This low-level module exposes
//! __jit_debug_register_code() and __jit_debug_descriptor so that generated
//! object images can be registered with, and unregistered from, debuggers.
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::ptr;
#[repr(C)]
struct JITCodeEntry {
next_entry: *mut JITCodeEntry,
prev_entry: *mut JITCodeEntry,
symfile_addr: *const u8,
symfile_size: u64,
}
const JIT_NOACTION: u32 = 0;
const JIT_REGISTER_FN: u32 = 1;
const JIT_UNREGISTER_FN: u32 = 2;
#[repr(C)]
struct JITDescriptor {
version: u32,
action_flag: u32,
relevant_entry: *mut JITCodeEntry,
first_entry: *mut JITCodeEntry,
}
#[no_mangle]
#[used]
static mut __jit_debug_descriptor: JITDescriptor = JITDescriptor {
version: 1,
action_flag: JIT_NOACTION,
relevant_entry: ptr::null_mut(),
first_entry: ptr::null_mut(),
};
#[no_mangle]
#[inline(never)]
extern "C" fn __jit_debug_register_code() {
// Hack to prevent inlining even when Rust wants to do it in release mode.
let x = 3;
unsafe {
core::ptr::read_volatile(&x);
}
}
/// Registration of a JIT image with the GDB JIT interface.
pub struct GdbJitImageRegistration {
entry: *mut JITCodeEntry,
file: Vec<u8>,
}
impl GdbJitImageRegistration {
/// Registers JIT image using __jit_debug_register_code
pub fn register(file: Vec<u8>) -> Self {
Self {
entry: unsafe { register_gdb_jit_image(&file) },
file,
}
}
/// JIT image used in registration
pub fn file(&self) -> &[u8] {
&self.file
}
}
impl Drop for GdbJitImageRegistration {
fn drop(&mut self) {
unsafe {
unregister_gdb_jit_image(self.entry);
}
}
}
unsafe fn register_gdb_jit_image(file: &[u8]) -> *mut JITCodeEntry {
// Create a code entry for the file, which gives the start and size of the symbol file.
let entry = Box::into_raw(Box::new(JITCodeEntry {
next_entry: __jit_debug_descriptor.first_entry,
prev_entry: ptr::null_mut(),
symfile_addr: file.as_ptr(),
symfile_size: file.len() as u64,
}));
// Add it to the linked list in the JIT descriptor.
if !__jit_debug_descriptor.first_entry.is_null() {
(*__jit_debug_descriptor.first_entry).prev_entry = entry;
}
__jit_debug_descriptor.first_entry = entry;
// Point the relevant_entry field of the descriptor at the entry.
__jit_debug_descriptor.relevant_entry = entry;
// Set action_flag to JIT_REGISTER and call __jit_debug_register_code.
__jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
__jit_debug_register_code();
__jit_debug_descriptor.action_flag = JIT_NOACTION;
__jit_debug_descriptor.relevant_entry = ptr::null_mut();
entry
}
unsafe fn unregister_gdb_jit_image(entry: *mut JITCodeEntry) {
// Remove the code entry corresponding to the code from the linked list.
if !(*entry).prev_entry.is_null() {
(*(*entry).prev_entry).next_entry = (*entry).next_entry;
} else {
__jit_debug_descriptor.first_entry = (*entry).next_entry;
}
if !(*entry).next_entry.is_null() {
(*(*entry).next_entry).prev_entry = (*entry).prev_entry;
}
// Point the relevant_entry field of the descriptor at the code entry.
__jit_debug_descriptor.relevant_entry = entry;
// Set action_flag to JIT_UNREGISTER and call __jit_debug_register_code.
__jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
__jit_debug_register_code();
__jit_debug_descriptor.action_flag = JIT_NOACTION;
__jit_debug_descriptor.relevant_entry = ptr::null_mut();
let _box = Box::from_raw(entry);
}
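
As a usage sketch (illustrative only; the image bytes and caller below are assumptions, not part of this diff): once a JIT has produced an in-memory object image carrying debug info, handing the bytes to GdbJitImageRegistration::register makes the symbols visible to an attached debugger, and dropping the returned guard unregisters them again.

// Hypothetical caller; `debug_image` stands in for a real object image.
fn attach_debug_info(debug_image: Vec<u8>) -> GdbJitImageRegistration {
    // The registration guard owns the image bytes; gdb can see the symbols
    // for as long as this value stays alive.
    let registration = GdbJitImageRegistration::register(debug_image);
    debug_assert!(!registration.file().is_empty());
    registration
    // Dropping the guard later runs unregister_gdb_jit_image via Drop.
}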

crates/runtime/src/lib.rs (new file)

@@ -0,0 +1,73 @@
//! Runtime library support for Wasmtime.
#![deny(missing_docs, trivial_numeric_casts, unused_extern_crates)]
#![warn(unused_import_braces)]
#![cfg_attr(feature = "std", deny(unstable_features))]
#![cfg_attr(feature = "clippy", plugin(clippy(conf_file = "../../clippy.toml")))]
#![cfg_attr(
feature = "cargo-clippy",
allow(clippy::new_without_default, clippy::new_without_default_derive)
)]
#![cfg_attr(
feature = "cargo-clippy",
warn(
clippy::float_arithmetic,
clippy::mut_mut,
clippy::nonminimal_bool,
clippy::option_map_unwrap_or,
clippy::option_map_unwrap_or_else,
clippy::print_stdout,
clippy::unicode_not_nfc,
clippy::use_self
)
)]
#![cfg_attr(not(feature = "std"), no_std)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate memoffset;
extern crate alloc;
mod export;
mod imports;
mod instance;
mod jit_int;
mod memory;
mod mmap;
mod sig_registry;
mod signalhandlers;
mod table;
mod trap_registry;
mod traphandlers;
mod vmcontext;
pub mod libcalls;
pub use crate::export::Export;
pub use crate::imports::Imports;
pub use crate::instance::{InstanceHandle, InstantiationError, LinkError};
pub use crate::jit_int::GdbJitImageRegistration;
pub use crate::mmap::Mmap;
pub use crate::sig_registry::SignatureRegistry;
pub use crate::signalhandlers::{wasmtime_init_eager, wasmtime_init_finish};
pub use crate::trap_registry::{get_mut_trap_registry, get_trap_registry, TrapRegistrationGuard};
pub use crate::traphandlers::{wasmtime_call, wasmtime_call_trampoline};
pub use crate::vmcontext::{
VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport, VMGlobalDefinition,
VMGlobalImport, VMInvokeArgument, VMMemoryDefinition, VMMemoryImport, VMSharedSignatureIndex,
VMTableDefinition, VMTableImport,
};
#[cfg(not(feature = "std"))]
use hashbrown::{hash_map, HashMap, HashSet};
#[cfg(feature = "std")]
use std::collections::{hash_map, HashMap, HashSet};
#[cfg(not(feature = "std"))]
use spin::{RwLock, RwLockReadGuard, RwLockWriteGuard};
#[cfg(feature = "std")]
use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
/// Version number of this crate.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

crates/runtime/src/libcalls.rs (new file)

@@ -0,0 +1,139 @@
//! Runtime library calls. Note that wasm compilers may sometimes perform these
//! inline rather than calling them, particularly when CPUs have special
//! instructions which compute them directly.
use crate::vmcontext::VMContext;
use cranelift_wasm::{DefinedMemoryIndex, MemoryIndex};
/// Implementation of f32.ceil
pub extern "C" fn wasmtime_f32_ceil(x: f32) -> f32 {
x.ceil()
}
/// Implementation of f32.floor
pub extern "C" fn wasmtime_f32_floor(x: f32) -> f32 {
x.floor()
}
/// Implementation of f32.trunc
pub extern "C" fn wasmtime_f32_trunc(x: f32) -> f32 {
x.trunc()
}
/// Implementation of f32.nearest
#[allow(clippy::float_arithmetic, clippy::float_cmp)]
pub extern "C" fn wasmtime_f32_nearest(x: f32) -> f32 {
// Rust doesn't have a nearest function, so do it manually.
if x == 0.0 {
// Preserve the sign of zero.
x
} else {
// Nearest is either ceil or floor depending on which is nearest or even.
let u = x.ceil();
let d = x.floor();
let um = (x - u).abs();
let dm = (x - d).abs();
if um < dm
|| (um == dm && {
let h = u / 2.;
h.floor() == h
})
{
u
} else {
d
}
}
}
/// Implementation of f64.ceil
pub extern "C" fn wasmtime_f64_ceil(x: f64) -> f64 {
x.ceil()
}
/// Implementation of f64.floor
pub extern "C" fn wasmtime_f64_floor(x: f64) -> f64 {
x.floor()
}
/// Implementation of f64.trunc
pub extern "C" fn wasmtime_f64_trunc(x: f64) -> f64 {
x.trunc()
}
/// Implementation of f64.nearest
#[allow(clippy::float_arithmetic, clippy::float_cmp)]
pub extern "C" fn wasmtime_f64_nearest(x: f64) -> f64 {
// Rust doesn't have a nearest function, so do it manually.
if x == 0.0 {
// Preserve the sign of zero.
x
} else {
// Nearest is either ceil or floor depending on which is nearest or even.
let u = x.ceil();
let d = x.floor();
let um = (x - u).abs();
let dm = (x - d).abs();
if um < dm
|| (um == dm && {
let h = u / 2.;
h.floor() == h
})
{
u
} else {
d
}
}
}
/// Implementation of memory.grow for locally-defined 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_memory32_grow(
vmctx: *mut VMContext,
delta: u32,
memory_index: u32,
) -> u32 {
let instance = (&mut *vmctx).instance();
let memory_index = DefinedMemoryIndex::from_u32(memory_index);
instance
.memory_grow(memory_index, delta)
.unwrap_or(u32::max_value())
}
/// Implementation of memory.grow for imported 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_imported_memory32_grow(
vmctx: *mut VMContext,
delta: u32,
memory_index: u32,
) -> u32 {
let instance = (&mut *vmctx).instance();
let memory_index = MemoryIndex::from_u32(memory_index);
instance
.imported_memory_grow(memory_index, delta)
.unwrap_or(u32::max_value())
}
/// Implementation of memory.size for locally-defined 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_memory32_size(vmctx: *mut VMContext, memory_index: u32) -> u32 {
let instance = (&mut *vmctx).instance();
let memory_index = DefinedMemoryIndex::from_u32(memory_index);
instance.memory_size(memory_index)
}
/// Implementation of memory.size for imported 32-bit memories.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_imported_memory32_size(
vmctx: *mut VMContext,
memory_index: u32,
) -> u32 {
let instance = (&mut *vmctx).instance();
let memory_index = MemoryIndex::from_u32(memory_index);
instance.imported_memory_size(memory_index)
}

crates/runtime/src/memory.rs (new file)

@@ -0,0 +1,143 @@
//! Memory management for linear memories.
//!
//! `LinearMemory` is to WebAssembly linear memories what `Table` is to WebAssembly tables.
use crate::mmap::Mmap;
use crate::vmcontext::VMMemoryDefinition;
use alloc::string::String;
use core::convert::TryFrom;
use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};
/// A linear memory instance.
#[derive(Debug)]
pub struct LinearMemory {
// The underlying allocation.
mmap: Mmap,
// The current logical size in wasm pages of this linear memory.
current: u32,
// The optional maximum size in wasm pages of this linear memory.
maximum: Option<u32>,
// Size in bytes of extra guard pages after the end to optimize loads and stores with
// constant offsets.
offset_guard_size: usize,
// Records whether we're using a bounds-checking strategy which requires
// handlers to catch trapping accesses.
pub(crate) needs_signal_handlers: bool,
}
impl LinearMemory {
/// Create a new linear memory instance with specified minimum and maximum number of wasm pages.
pub fn new(plan: &MemoryPlan) -> Result<Self, String> {
        // Neither the minimum nor the maximum may exceed the wasm limit of 65536 pages (`WASM_MAX_PAGES`).
assert!(plan.memory.minimum <= WASM_MAX_PAGES);
assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= WASM_MAX_PAGES);
let offset_guard_bytes = plan.offset_guard_size as usize;
// If we have an offset guard, or if we're doing the static memory
// allocation strategy, we need signal handlers to catch out of bounds
        // accesses.
let needs_signal_handlers = offset_guard_bytes > 0
|| match plan.style {
MemoryStyle::Dynamic => false,
MemoryStyle::Static { .. } => true,
};
let minimum_pages = match plan.style {
MemoryStyle::Dynamic => plan.memory.minimum,
MemoryStyle::Static { bound } => {
assert!(bound >= plan.memory.minimum);
bound
}
} as usize;
let minimum_bytes = minimum_pages.checked_mul(WASM_PAGE_SIZE as usize).unwrap();
let request_bytes = minimum_bytes.checked_add(offset_guard_bytes).unwrap();
let mapped_pages = plan.memory.minimum as usize;
let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
let mmap = Mmap::accessible_reserved(mapped_bytes, request_bytes)?;
Ok(Self {
mmap,
current: plan.memory.minimum,
maximum: plan.memory.maximum,
offset_guard_size: offset_guard_bytes,
needs_signal_handlers,
})
}
/// Returns the number of allocated wasm pages.
pub fn size(&self) -> u32 {
self.current
}
/// Grow memory by the specified amount of wasm pages.
///
/// Returns `None` if memory can't be grown by the specified amount
/// of wasm pages.
pub fn grow(&mut self, delta: u32) -> Option<u32> {
// Optimization of memory.grow 0 calls.
if delta == 0 {
return Some(self.current);
}
let new_pages = match self.current.checked_add(delta) {
Some(new_pages) => new_pages,
// Linear memory size overflow.
None => return None,
};
let prev_pages = self.current;
if let Some(maximum) = self.maximum {
if new_pages > maximum {
// Linear memory size would exceed the declared maximum.
return None;
}
}
// Wasm linear memories are never allowed to grow beyond what is
// indexable. If the memory has no maximum, enforce the greatest
// limit here.
if new_pages >= WASM_MAX_PAGES {
// Linear memory size would exceed the index range.
return None;
}
let delta_bytes = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
let prev_bytes = usize::try_from(prev_pages).unwrap() * WASM_PAGE_SIZE as usize;
let new_bytes = usize::try_from(new_pages).unwrap() * WASM_PAGE_SIZE as usize;
if new_bytes > self.mmap.len() - self.offset_guard_size {
// If the new size is within the declared maximum, but needs more memory than we
// have on hand, it's a dynamic heap and it can move.
let guard_bytes = self.offset_guard_size;
let request_bytes = new_bytes.checked_add(guard_bytes)?;
let mut new_mmap = Mmap::accessible_reserved(new_bytes, request_bytes).ok()?;
let copy_len = self.mmap.len() - self.offset_guard_size;
new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.mmap.as_slice()[..copy_len]);
self.mmap = new_mmap;
} else if delta_bytes > 0 {
// Make the newly allocated pages accessible.
self.mmap.make_accessible(prev_bytes, delta_bytes).ok()?;
}
self.current = new_pages;
Some(prev_pages)
}
/// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
pub fn vmmemory(&mut self) -> VMMemoryDefinition {
VMMemoryDefinition {
base: self.mmap.as_mut_ptr(),
current_length: self.current as usize * WASM_PAGE_SIZE as usize,
}
}
}
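// A minimal usage sketch, assuming a `MemoryPlan` produced by wasmtime-environ:
//
//     fn demo(plan: &MemoryPlan) -> Result<(), String> {
//         let mut mem = LinearMemory::new(plan)?;
//         let before = mem.size();
//         // `grow` returns the *previous* size in pages, or `None` on failure.
//         if let Some(prev) = mem.grow(1) {
//             assert_eq!(prev, before);
//             assert_eq!(mem.size(), before + 1);
//         }
//         // Expose the memory to compiled code via its vmctx definition.
//         let def: VMMemoryDefinition = mem.vmmemory();
//         assert_eq!(def.current_length, mem.size() as usize * WASM_PAGE_SIZE as usize);
//         Ok(())
//     }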

275
crates/runtime/src/mmap.rs Normal file

@@ -0,0 +1,275 @@
//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use core::ptr;
use core::slice;
#[cfg(not(target_os = "windows"))]
use libc;
use region;
use std::io;
/// Round `size` up to the nearest multiple of `page_size`.
fn round_up_to_page_size(size: usize, page_size: usize) -> usize {
(size + (page_size - 1)) & !(page_size - 1)
}
/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
#[derive(Debug)]
pub struct Mmap {
ptr: *mut u8,
len: usize,
}
impl Mmap {
/// Construct a new empty instance of `Mmap`.
pub fn new() -> Self {
// Rust's slices require non-null pointers, even when empty. `Vec`
// contains code to create a non-null dangling pointer value when
// constructed empty, so we reuse that here.
Self {
ptr: Vec::new().as_mut_ptr(),
len: 0,
}
}
/// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
pub fn with_at_least(size: usize) -> Result<Self, String> {
let page_size = region::page::size();
let rounded_size = round_up_to_page_size(size, page_size);
Self::accessible_reserved(rounded_size, rounded_size)
}
/// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
/// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
/// must be native page-size multiples.
#[cfg(not(target_os = "windows"))]
pub fn accessible_reserved(
accessible_size: usize,
mapping_size: usize,
) -> Result<Self, String> {
let page_size = region::page::size();
assert!(accessible_size <= mapping_size);
assert_eq!(mapping_size & (page_size - 1), 0);
assert_eq!(accessible_size & (page_size - 1), 0);
// Mmap may return EINVAL if the size is zero, so just
// special-case that.
if mapping_size == 0 {
return Ok(Self::new());
}
Ok(if accessible_size == mapping_size {
// Allocate a single read-write region at once.
let ptr = unsafe {
libc::mmap(
ptr::null_mut(),
mapping_size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_PRIVATE | libc::MAP_ANON,
-1,
0,
)
};
if ptr as isize == -1_isize {
return Err(io::Error::last_os_error().to_string());
}
Self {
ptr: ptr as *mut u8,
len: mapping_size,
}
} else {
// Reserve the mapping size.
let ptr = unsafe {
libc::mmap(
ptr::null_mut(),
mapping_size,
libc::PROT_NONE,
libc::MAP_PRIVATE | libc::MAP_ANON,
-1,
0,
)
};
if ptr as isize == -1_isize {
return Err(io::Error::last_os_error().to_string());
}
let mut result = Self {
ptr: ptr as *mut u8,
len: mapping_size,
};
if accessible_size != 0 {
// Commit the accessible size.
result.make_accessible(0, accessible_size)?;
}
result
})
}
/// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
/// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
/// must be native page-size multiples.
#[cfg(target_os = "windows")]
pub fn accessible_reserved(
accessible_size: usize,
mapping_size: usize,
) -> Result<Self, String> {
use winapi::um::memoryapi::VirtualAlloc;
use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE};
let page_size = region::page::size();
assert!(accessible_size <= mapping_size);
assert_eq!(mapping_size & (page_size - 1), 0);
assert_eq!(accessible_size & (page_size - 1), 0);
Ok(if accessible_size == mapping_size {
// Allocate a single read-write region at once.
let ptr = unsafe {
VirtualAlloc(
ptr::null_mut(),
mapping_size,
MEM_RESERVE | MEM_COMMIT,
PAGE_READWRITE,
)
};
if ptr.is_null() {
return Err(io::Error::last_os_error().to_string());
}
Self {
ptr: ptr as *mut u8,
len: mapping_size,
}
} else {
// Reserve the mapping size.
let ptr =
unsafe { VirtualAlloc(ptr::null_mut(), mapping_size, MEM_RESERVE, PAGE_NOACCESS) };
if ptr.is_null() {
return Err(io::Error::last_os_error().to_string());
}
let mut result = Self {
ptr: ptr as *mut u8,
len: mapping_size,
};
if accessible_size != 0 {
// Commit the accessible size.
result.make_accessible(0, accessible_size)?;
}
result
})
}
/// Make the memory starting at `start` and extending for `len` bytes accessible.
/// `start` and `len` must be native page-size multiples and describe a range within
/// `self`'s reserved memory.
#[cfg(not(target_os = "windows"))]
pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
let page_size = region::page::size();
assert_eq!(start & (page_size - 1), 0);
assert_eq!(len & (page_size - 1), 0);
        assert!(len <= self.len);
        assert!(start <= self.len - len);
// Commit the accessible size.
unsafe { region::protect(self.ptr.add(start), len, region::Protection::ReadWrite) }
.map_err(|e| e.to_string())
}
/// Make the memory starting at `start` and extending for `len` bytes accessible.
/// `start` and `len` must be native page-size multiples and describe a range within
/// `self`'s reserved memory.
#[cfg(target_os = "windows")]
pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
use winapi::ctypes::c_void;
use winapi::um::memoryapi::VirtualAlloc;
use winapi::um::winnt::{MEM_COMMIT, PAGE_READWRITE};
let page_size = region::page::size();
assert_eq!(start & (page_size - 1), 0);
assert_eq!(len & (page_size - 1), 0);
        assert!(len <= self.len);
        assert!(start <= self.len - len);
// Commit the accessible size.
if unsafe {
VirtualAlloc(
self.ptr.add(start) as *mut c_void,
len,
MEM_COMMIT,
PAGE_READWRITE,
)
}
.is_null()
{
return Err(io::Error::last_os_error().to_string());
}
Ok(())
}
/// Return the allocated memory as a slice of u8.
pub fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.ptr, self.len) }
}
/// Return the allocated memory as a mutable slice of u8.
pub fn as_mut_slice(&mut self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.ptr, self.len) }
}
/// Return the allocated memory as a pointer to u8.
pub fn as_ptr(&self) -> *const u8 {
self.ptr
}
/// Return the allocated memory as a mutable pointer to u8.
pub fn as_mut_ptr(&mut self) -> *mut u8 {
self.ptr
}
/// Return the length of the allocated memory.
pub fn len(&self) -> usize {
self.len
}
}
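// A minimal sketch of the reserve-then-commit pattern that `LinearMemory`
// relies on: reserve a large mapping up front with only part of it accessible,
// then commit further pages on demand with `make_accessible`.
//
//     fn demo() -> Result<(), String> {
//         let page = region::page::size();
//         // Reserve 16 pages but make only the first 4 accessible.
//         let mut map = Mmap::accessible_reserved(4 * page, 16 * page)?;
//         map.as_mut_slice()[0] = 1; // within the accessible region
//         // Later, commit the next 4 pages.
//         map.make_accessible(4 * page, 4 * page)?;
//         map.as_mut_slice()[4 * page] = 1; // now accessible too
//         Ok(())
//     }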
impl Drop for Mmap {
#[cfg(not(target_os = "windows"))]
fn drop(&mut self) {
if self.len != 0 {
let r = unsafe { libc::munmap(self.ptr as *mut libc::c_void, self.len) };
assert_eq!(r, 0, "munmap failed: {}", io::Error::last_os_error());
}
}
#[cfg(target_os = "windows")]
fn drop(&mut self) {
if self.len != 0 {
use winapi::ctypes::c_void;
use winapi::um::memoryapi::VirtualFree;
use winapi::um::winnt::MEM_RELEASE;
let r = unsafe { VirtualFree(self.ptr as *mut c_void, self.len, MEM_RELEASE) };
            assert_ne!(r, 0, "VirtualFree failed: {}", io::Error::last_os_error());
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_round_up_to_page_size() {
assert_eq!(round_up_to_page_size(0, 4096), 0);
assert_eq!(round_up_to_page_size(1, 4096), 4096);
assert_eq!(round_up_to_page_size(4096, 4096), 4096);
assert_eq!(round_up_to_page_size(4097, 4096), 8192);
}
}


@@ -0,0 +1,44 @@
//! Implement a registry of function signatures, for fast indirect call
//! signature checking.
use crate::vmcontext::VMSharedSignatureIndex;
use crate::{hash_map, HashMap};
use core::convert::TryFrom;
use cranelift_codegen::ir;
/// WebAssembly requires that the caller and callee signatures in an indirect
/// call must match. To implement this efficiently, keep a registry of all
/// signatures, shared by all instances, so that call sites can just do an
/// index comparison.
#[derive(Debug)]
pub struct SignatureRegistry {
signature_hash: HashMap<ir::Signature, VMSharedSignatureIndex>,
}
impl SignatureRegistry {
/// Create a new `SignatureRegistry`.
pub fn new() -> Self {
Self {
signature_hash: HashMap::new(),
}
}
/// Register a signature and return its unique index.
pub fn register(&mut self, sig: &ir::Signature) -> VMSharedSignatureIndex {
let len = self.signature_hash.len();
match self.signature_hash.entry(sig.clone()) {
hash_map::Entry::Occupied(entry) => *entry.get(),
hash_map::Entry::Vacant(entry) => {
// Keep `signature_hash` len under 2**32 -- VMSharedSignatureIndex::new(core::u32::MAX)
// is reserved for VMSharedSignatureIndex::default().
debug_assert!(
len < core::u32::MAX as usize,
"Invariant check: signature_hash.len() < core::u32::MAX"
);
let sig_id = VMSharedSignatureIndex::new(u32::try_from(len).unwrap());
entry.insert(sig_id);
sig_id
}
}
}
}
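// A minimal usage sketch: registering the same signature twice yields the same
// shared index, which is what lets indirect-call checks be a single comparison.
//
//     use cranelift_codegen::{ir, isa};
//     let mut registry = SignatureRegistry::new();
//     let mut sig = ir::Signature::new(isa::CallConv::SystemV);
//     sig.params.push(ir::AbiParam::new(ir::types::I32));
//     sig.returns.push(ir::AbiParam::new(ir::types::I32));
//     let a = registry.register(&sig);
//     let b = registry.register(&sig);
//     assert_eq!(a, b);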


@@ -0,0 +1,130 @@
//! Interface to low-level signal-handling mechanisms.
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
use crate::vmcontext::VMContext;
use crate::RwLock;
use core::borrow::{Borrow, BorrowMut};
use core::cell::Cell;
#[derive(Default)]
struct TrapContext {
tried_to_install_signal_handlers: Cell<bool>,
have_signal_handlers: Cell<bool>,
}
extern "C" {
fn EnsureEagerSignalHandlers() -> libc::c_int;
#[cfg(any(target_os = "macos", target_os = "ios"))]
fn EnsureDarwinMachPorts() -> libc::c_int;
}
struct InstallState {
tried: bool,
success: bool,
}
impl InstallState {
fn new() -> Self {
Self {
tried: false,
success: false,
}
}
}
lazy_static! {
static ref EAGER_INSTALL_STATE: RwLock<InstallState> = RwLock::new(InstallState::new());
static ref LAZY_INSTALL_STATE: RwLock<InstallState> = RwLock::new(InstallState::new());
}
/// This function performs the low-overhead signal handler initialization that we
/// want to do eagerly to ensure a more-deterministic global process state. This
/// is especially relevant for signal handlers since handler ordering depends on
/// installation order: the wasm signal handler must run *before* the other crash
/// handlers and since POSIX signal handlers work LIFO, this function needs to be
/// called at the end of the startup process, after other handlers have been
/// installed. This function can thus be called multiple times, having no effect
/// after the first call.
#[no_mangle]
pub extern "C" fn wasmtime_init_eager() {
let mut locked = EAGER_INSTALL_STATE.write().unwrap();
let state = locked.borrow_mut();
if state.tried {
return;
}
state.tried = true;
assert!(!state.success);
if unsafe { EnsureEagerSignalHandlers() == 0 } {
return;
}
state.success = true;
}
thread_local! {
static TRAP_CONTEXT: TrapContext = TrapContext::default();
}
/// Assuming `wasmtime_init_eager` has already been called,
/// this function performs the full installation of signal handlers which must
/// be performed per-thread. This operation may incur some overhead and
/// so should be done only when needed to use wasm.
#[no_mangle]
pub extern "C" fn wasmtime_init_finish(vmctx: &mut VMContext) {
if !TRAP_CONTEXT.with(|cx| cx.tried_to_install_signal_handlers.get()) {
TRAP_CONTEXT.with(|cx| {
cx.tried_to_install_signal_handlers.set(true);
assert!(!cx.have_signal_handlers.get());
});
{
let locked = EAGER_INSTALL_STATE.read().unwrap();
let state = locked.borrow();
assert!(
state.tried,
"call wasmtime_init_eager before calling wasmtime_init_finish"
);
if !state.success {
return;
}
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
ensure_darwin_mach_ports();
TRAP_CONTEXT.with(|cx| {
cx.have_signal_handlers.set(true);
})
}
let instance = unsafe { vmctx.instance() };
let have_signal_handlers = TRAP_CONTEXT.with(|cx| cx.have_signal_handlers.get());
if !have_signal_handlers && instance.needs_signal_handlers() {
panic!("failed to install signal handlers");
}
}
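// The expected call order for an embedder, in sketch form: install the eager,
// process-wide handlers once late in startup, then finish the per-thread
// installation before running wasm on that thread.
//
//     wasmtime_init_eager();        // once per process, after other handlers
//     wasmtime_init_finish(vmctx);  // per thread, with a &mut VMContext from the instance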
#[cfg(any(target_os = "macos", target_os = "ios"))]
fn ensure_darwin_mach_ports() {
let mut locked = LAZY_INSTALL_STATE.write().unwrap();
let state = locked.borrow_mut();
if state.tried {
return;
}
state.tried = true;
assert!(!state.success);
if unsafe { EnsureDarwinMachPorts() != 0 } {
return;
}
state.success = true;
}

101
crates/runtime/src/table.rs Normal file

@@ -0,0 +1,101 @@
//! Memory management for tables.
//!
//! `Table` is to WebAssembly tables what `LinearMemory` is to WebAssembly linear memories.
use crate::vmcontext::{VMCallerCheckedAnyfunc, VMTableDefinition};
use alloc::vec::Vec;
use core::convert::{TryFrom, TryInto};
use cranelift_wasm::TableElementType;
use wasmtime_environ::{TablePlan, TableStyle};
/// A table instance.
#[derive(Debug)]
pub struct Table {
vec: Vec<VMCallerCheckedAnyfunc>,
maximum: Option<u32>,
}
impl Table {
/// Create a new table instance with specified minimum and maximum number of elements.
pub fn new(plan: &TablePlan) -> Self {
match plan.table.ty {
TableElementType::Func => (),
TableElementType::Val(ty) => {
unimplemented!("tables of types other than anyfunc ({})", ty)
}
};
match plan.style {
TableStyle::CallerChecksSignature => Self {
vec: vec![
VMCallerCheckedAnyfunc::default();
usize::try_from(plan.table.minimum).unwrap()
],
maximum: plan.table.maximum,
},
}
}
/// Returns the number of allocated elements.
pub fn size(&self) -> u32 {
self.vec.len().try_into().unwrap()
}
/// Grow table by the specified amount of elements.
///
/// Returns `None` if table can't be grown by the specified amount
/// of elements.
pub fn grow(&mut self, delta: u32) -> Option<u32> {
let new_len = match self.size().checked_add(delta) {
Some(len) => {
if let Some(max) = self.maximum {
if len > max {
return None;
}
}
len
}
None => {
return None;
}
};
self.vec.resize(
usize::try_from(new_len).unwrap(),
VMCallerCheckedAnyfunc::default(),
);
Some(new_len)
}
/// Get reference to the specified element.
///
/// Returns `None` if the index is out of bounds.
pub fn get(&self, index: u32) -> Option<&VMCallerCheckedAnyfunc> {
self.vec.get(index as usize)
}
/// Get mutable reference to the specified element.
///
/// Returns `None` if the index is out of bounds.
pub fn get_mut(&mut self, index: u32) -> Option<&mut VMCallerCheckedAnyfunc> {
self.vec.get_mut(index as usize)
}
/// Return a `VMTableDefinition` for exposing the table to compiled wasm code.
pub fn vmtable(&mut self) -> VMTableDefinition {
VMTableDefinition {
base: self.vec.as_mut_ptr() as *mut u8,
current_elements: self.vec.len().try_into().unwrap(),
}
}
}
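// A minimal usage sketch, assuming a `TablePlan` produced by wasmtime-environ:
//
//     fn demo(plan: &TablePlan) {
//         let mut table = Table::new(plan);
//         let before = table.size();
//         // `grow` returns the *new* size in elements, or `None` on failure.
//         if let Some(new_len) = table.grow(1) {
//             assert_eq!(new_len, before + 1);
//             // Newly added slots start out as default (null) anyfuncs.
//             assert!(table.get(before).is_some());
//         }
//     }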
impl AsRef<[VMCallerCheckedAnyfunc]> for Table {
fn as_ref(&self) -> &[VMCallerCheckedAnyfunc] {
self.vec.as_slice()
}
}
impl AsMut<[VMCallerCheckedAnyfunc]> for Table {
fn as_mut(&mut self) -> &mut [VMCallerCheckedAnyfunc] {
self.vec.as_mut_slice()
}
}


@@ -0,0 +1,71 @@
use crate::HashMap;
use crate::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use cranelift_codegen::ir;
use lazy_static::lazy_static;
lazy_static! {
static ref REGISTRY: RwLock<TrapRegistry> = RwLock::new(TrapRegistry::default());
}
/// The registry maintains descriptions of traps in currently allocated functions.
#[derive(Default)]
pub struct TrapRegistry {
traps: HashMap<usize, TrapDescription>,
}
/// Description of a trap.
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct TrapDescription {
/// Location of the trap in source binary module.
pub source_loc: ir::SourceLoc,
/// Code of the trap.
pub trap_code: ir::TrapCode,
}
/// RAII guard for deregistering traps
pub struct TrapRegistrationGuard(usize);
impl TrapRegistry {
/// Registers a new trap.
/// Returns a RAII guard that deregisters the trap when dropped.
pub fn register_trap(
&mut self,
address: usize,
source_loc: ir::SourceLoc,
trap_code: ir::TrapCode,
) -> TrapRegistrationGuard {
let entry = TrapDescription {
source_loc,
trap_code,
};
let previous_trap = self.traps.insert(address, entry);
assert!(previous_trap.is_none());
TrapRegistrationGuard(address)
}
fn deregister_trap(&mut self, address: usize) {
assert!(self.traps.remove(&address).is_some());
}
    /// Gets the trap description at the given address, if any.
pub fn get_trap(&self, address: usize) -> Option<TrapDescription> {
self.traps.get(&address).copied()
}
}
impl Drop for TrapRegistrationGuard {
fn drop(&mut self) {
let mut registry = get_mut_trap_registry();
registry.deregister_trap(self.0);
}
}
/// Gets a guarded writable reference to the trap registry.
pub fn get_mut_trap_registry() -> RwLockWriteGuard<'static, TrapRegistry> {
REGISTRY.write().expect("trap registry lock got poisoned")
}
/// Gets a guarded readable reference to the trap registry.
pub fn get_trap_registry() -> RwLockReadGuard<'static, TrapRegistry> {
REGISTRY.read().expect("trap registry lock got poisoned")
}
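// A minimal usage sketch: record a trap site for a code address while the
// compiled function is alive, then look it up by faulting PC; dropping the
// guard deregisters it. The address below is illustrative.
//
//     use cranelift_codegen::ir;
//     let address = 0x1234_usize;
//     let guard = get_mut_trap_registry().register_trap(
//         address,
//         ir::SourceLoc::new(0),
//         ir::TrapCode::HeapOutOfBounds,
//     );
//     assert!(get_trap_registry().get_trap(address).is_some());
//     drop(guard); // deregisters the trap
//     assert!(get_trap_registry().get_trap(address).is_none());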


@@ -0,0 +1,170 @@
//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.
use crate::trap_registry::get_trap_registry;
use crate::trap_registry::TrapDescription;
use crate::vmcontext::{VMContext, VMFunctionBody};
use alloc::string::{String, ToString};
use core::cell::Cell;
use core::ptr;
use cranelift_codegen::ir;
extern "C" {
fn WasmtimeCallTrampoline(
vmctx: *mut u8,
callee: *const VMFunctionBody,
values_vec: *mut u8,
) -> i32;
fn WasmtimeCall(vmctx: *mut u8, callee: *const VMFunctionBody) -> i32;
}
thread_local! {
static RECORDED_TRAP: Cell<Option<TrapDescription>> = Cell::new(None);
static JMP_BUF: Cell<*const u8> = Cell::new(ptr::null());
static RESET_GUARD_PAGE: Cell<bool> = Cell::new(false);
}
/// Check if there is a trap at given PC
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn CheckIfTrapAtAddress(_pc: *const u8) -> i8 {
    // TODO: A stack overflow can happen at essentially any time (e.g. in malloc()
    // called from memory.grow), and it's hard to determine whether the cause was a
    // stack overflow and whether it happened in a WebAssembly module.
    // So, assume that any untrusted code called from WebAssembly doesn't trap;
    // then, if we have called into WebAssembly code, the trap is a stack overflow.
JMP_BUF.with(|ptr| !ptr.get().is_null()) as i8
}
/// Record the Trap code and wasm bytecode offset in TLS somewhere
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn RecordTrap(pc: *const u8, reset_guard_page: bool) {
// TODO: please see explanation in CheckIfTrapAtAddress.
let registry = get_trap_registry();
let trap_desc = registry
.get_trap(pc as usize)
.unwrap_or_else(|| TrapDescription {
source_loc: ir::SourceLoc::default(),
trap_code: ir::TrapCode::StackOverflow,
});
if reset_guard_page {
RESET_GUARD_PAGE.with(|v| v.set(true));
}
RECORDED_TRAP.with(|data| {
assert_eq!(
data.get(),
None,
"Only one trap per thread can be recorded at a moment!"
);
data.set(Some(trap_desc))
});
}
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn EnterScope(ptr: *const u8) -> *const u8 {
JMP_BUF.with(|buf| buf.replace(ptr))
}
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn GetScope() -> *const u8 {
JMP_BUF.with(|buf| buf.get())
}
#[doc(hidden)]
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn LeaveScope(ptr: *const u8) {
RESET_GUARD_PAGE.with(|v| {
if v.get() {
reset_guard_page();
v.set(false);
}
});
JMP_BUF.with(|buf| buf.set(ptr))
}
#[cfg(target_os = "windows")]
fn reset_guard_page() {
extern "C" {
fn _resetstkoflw() -> winapi::ctypes::c_int;
}
// We need to restore guard page under stack to handle future stack overflows properly.
// https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/resetstkoflw?view=vs-2019
if unsafe { _resetstkoflw() } == 0 {
panic!("failed to restore stack guard page");
}
}
#[cfg(not(target_os = "windows"))]
fn reset_guard_page() {}
fn trap_message() -> String {
let trap_desc = RECORDED_TRAP
.with(|data| data.replace(None))
.expect("trap_message must be called after trap occurred");
format!(
"wasm trap: {}, source location: {}",
trap_code_to_expected_string(trap_desc.trap_code),
trap_desc.source_loc,
)
}
fn trap_code_to_expected_string(trap_code: ir::TrapCode) -> String {
use ir::TrapCode::*;
match trap_code {
StackOverflow => "call stack exhausted".to_string(),
HeapOutOfBounds => "out of bounds memory access".to_string(),
TableOutOfBounds => "undefined element".to_string(),
OutOfBounds => "out of bounds".to_string(), // Note: not covered by the test suite
IndirectCallToNull => "uninitialized element".to_string(),
BadSignature => "indirect call type mismatch".to_string(),
IntegerOverflow => "integer overflow".to_string(),
IntegerDivisionByZero => "integer divide by zero".to_string(),
BadConversionToInteger => "invalid conversion to integer".to_string(),
UnreachableCodeReached => "unreachable".to_string(),
Interrupt => "interrupt".to_string(), // Note: not covered by the test suite
User(x) => format!("user trap {}", x), // Note: not covered by the test suite
}
}
/// Call the wasm function pointed to by `callee`. `values_vec` points to
/// a buffer which holds the incoming arguments, and to which the outgoing
/// return values will be written.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_call_trampoline(
vmctx: *mut VMContext,
callee: *const VMFunctionBody,
values_vec: *mut u8,
) -> Result<(), String> {
if WasmtimeCallTrampoline(vmctx as *mut u8, callee, values_vec) == 0 {
Err(trap_message())
} else {
Ok(())
}
}
/// Call the wasm function pointed to by `callee`, which has no arguments or
/// return values.
#[no_mangle]
pub unsafe extern "C" fn wasmtime_call(
vmctx: *mut VMContext,
callee: *const VMFunctionBody,
) -> Result<(), String> {
if WasmtimeCall(vmctx as *mut u8, callee) == 0 {
Err(trap_message())
} else {
Ok(())
}
}


@@ -0,0 +1,615 @@
//! This file declares `VMContext` and several related structs which contain
//! fields that compiled wasm code accesses directly.
use crate::instance::Instance;
use core::any::Any;
use core::{ptr, u32};
use wasmtime_environ::BuiltinFunctionIndex;
/// An imported function.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
/// A pointer to the imported function body.
pub body: *const VMFunctionBody,
/// A pointer to the `VMContext` that owns the function.
pub vmctx: *mut VMContext,
}
#[cfg(test)]
mod test_vmfunction_import {
use super::VMFunctionImport;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmfunction_import_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMFunctionImport>(),
usize::from(offsets.size_of_vmfunction_import())
);
assert_eq!(
offset_of!(VMFunctionImport, body),
usize::from(offsets.vmfunction_import_body())
);
assert_eq!(
offset_of!(VMFunctionImport, vmctx),
usize::from(offsets.vmfunction_import_vmctx())
);
}
}
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
#[repr(C)]
pub struct VMFunctionBody(u8);
#[cfg(test)]
mod test_vmfunction_body {
use super::VMFunctionBody;
use core::mem::size_of;
#[test]
fn check_vmfunction_body_offsets() {
assert_eq!(size_of::<VMFunctionBody>(), 1);
}
}
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
/// A pointer to the imported table description.
pub from: *mut VMTableDefinition,
/// A pointer to the `VMContext` that owns the table description.
pub vmctx: *mut VMContext,
}
#[cfg(test)]
mod test_vmtable_import {
use super::VMTableImport;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmtable_import_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMTableImport>(),
usize::from(offsets.size_of_vmtable_import())
);
assert_eq!(
offset_of!(VMTableImport, from),
usize::from(offsets.vmtable_import_from())
);
assert_eq!(
offset_of!(VMTableImport, vmctx),
usize::from(offsets.vmtable_import_vmctx())
);
}
}
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
/// A pointer to the imported memory description.
pub from: *mut VMMemoryDefinition,
/// A pointer to the `VMContext` that owns the memory description.
pub vmctx: *mut VMContext,
}
#[cfg(test)]
mod test_vmmemory_import {
use super::VMMemoryImport;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmmemory_import_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMMemoryImport>(),
usize::from(offsets.size_of_vmmemory_import())
);
assert_eq!(
offset_of!(VMMemoryImport, from),
usize::from(offsets.vmmemory_import_from())
);
assert_eq!(
offset_of!(VMMemoryImport, vmctx),
usize::from(offsets.vmmemory_import_vmctx())
);
}
}
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
/// A pointer to the imported global variable description.
pub from: *mut VMGlobalDefinition,
}
#[cfg(test)]
mod test_vmglobal_import {
use super::VMGlobalImport;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmglobal_import_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMGlobalImport>(),
usize::from(offsets.size_of_vmglobal_import())
);
assert_eq!(
offset_of!(VMGlobalImport, from),
usize::from(offsets.vmglobal_import_from())
);
}
}
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryDefinition {
/// The start address.
pub base: *mut u8,
/// The current logical size of this linear memory in bytes.
pub current_length: usize,
}
#[cfg(test)]
mod test_vmmemory_definition {
use super::VMMemoryDefinition;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmmemory_definition_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMMemoryDefinition>(),
usize::from(offsets.size_of_vmmemory_definition())
);
assert_eq!(
offset_of!(VMMemoryDefinition, base),
usize::from(offsets.vmmemory_definition_base())
);
assert_eq!(
offset_of!(VMMemoryDefinition, current_length),
usize::from(offsets.vmmemory_definition_current_length())
);
/* TODO: Assert that the size of `current_length` matches.
assert_eq!(
size_of::<VMMemoryDefinition::current_length>(),
usize::from(offsets.size_of_vmmemory_definition_current_length())
);
*/
}
}
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
/// Pointer to the table data.
pub base: *mut u8,
/// The current number of elements in the table.
pub current_elements: u32,
}
#[cfg(test)]
mod test_vmtable_definition {
use super::VMTableDefinition;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmtable_definition_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMTableDefinition>(),
usize::from(offsets.size_of_vmtable_definition())
);
assert_eq!(
offset_of!(VMTableDefinition, base),
usize::from(offsets.vmtable_definition_base())
);
assert_eq!(
offset_of!(VMTableDefinition, current_elements),
usize::from(offsets.vmtable_definition_current_elements())
);
}
}
/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug, Copy, Clone)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
storage: [u8; 16],
// If more elements are added here, remember to add offset_of tests below!
}
#[cfg(test)]
mod test_vmglobal_definition {
use super::VMGlobalDefinition;
use core::mem::{align_of, size_of};
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmglobal_definition_alignment() {
assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
assert!(align_of::<VMGlobalDefinition>() >= align_of::<[u8; 16]>());
}
#[test]
fn check_vmglobal_definition_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMGlobalDefinition>(),
usize::from(offsets.size_of_vmglobal_definition())
);
}
#[test]
fn check_vmglobal_begins_aligned() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
}
}
impl VMGlobalDefinition {
/// Construct a `VMGlobalDefinition`.
pub fn new() -> Self {
Self { storage: [0; 16] }
}
/// Return a reference to the value as an i32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i32(&self) -> &i32 {
&*(self.storage.as_ref().as_ptr() as *const i32)
}
/// Return a mutable reference to the value as an i32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut i32)
}
/// Return a reference to the value as a u32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u32(&self) -> &u32 {
&*(self.storage.as_ref().as_ptr() as *const u32)
}
    /// Return a mutable reference to the value as a u32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u32)
}
/// Return a reference to the value as an i64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i64(&self) -> &i64 {
&*(self.storage.as_ref().as_ptr() as *const i64)
}
/// Return a mutable reference to the value as an i64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut i64)
}
    /// Return a reference to the value as a u64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u64(&self) -> &u64 {
&*(self.storage.as_ref().as_ptr() as *const u64)
}
    /// Return a mutable reference to the value as a u64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u64)
}
/// Return a reference to the value as an f32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32(&self) -> &f32 {
&*(self.storage.as_ref().as_ptr() as *const f32)
}
/// Return a mutable reference to the value as an f32.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut f32)
}
/// Return a reference to the value as f32 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32_bits(&self) -> &u32 {
&*(self.storage.as_ref().as_ptr() as *const u32)
}
/// Return a mutable reference to the value as f32 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u32)
}
/// Return a reference to the value as an f64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64(&self) -> &f64 {
&*(self.storage.as_ref().as_ptr() as *const f64)
}
/// Return a mutable reference to the value as an f64.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut f64)
}
/// Return a reference to the value as f64 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64_bits(&self) -> &u64 {
&*(self.storage.as_ref().as_ptr() as *const u64)
}
/// Return a mutable reference to the value as f64 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u64)
}
    /// Return a reference to the value as a u128.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u128(&self) -> &u128 {
&*(self.storage.as_ref().as_ptr() as *const u128)
}
    /// Return a mutable reference to the value as a u128.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u128_mut(&mut self) -> &mut u128 {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut u128)
}
/// Return a reference to the value as u128 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
&*(self.storage.as_ref().as_ptr() as *const [u8; 16])
}
/// Return a mutable reference to the value as u128 bits.
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
&mut *(self.storage.as_mut().as_mut_ptr() as *mut [u8; 16])
}
}
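// A minimal sketch of the typed accessors: the 16-byte storage is reinterpreted
// according to the global's wasm type, so reads must use the same view (or the
// matching bits view) as the preceding write.
//
//     let mut global = VMGlobalDefinition::new();
//     unsafe {
//         *global.as_i32_mut() = -7;
//         assert_eq!(*global.as_i32(), -7);
//         *global.as_f64_mut() = 3.5;
//         assert_eq!(*global.as_f64_bits(), 3.5_f64.to_bits());
//     }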
/// An index into the shared signature registry, usable for checking signatures
/// at indirect calls.
#[repr(C)]
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)]
pub struct VMSharedSignatureIndex(u32);
#[cfg(test)]
mod test_vmshared_signature_index {
use super::VMSharedSignatureIndex;
use core::mem::size_of;
use wasmtime_environ::{Module, TargetSharedSignatureIndex, VMOffsets};
#[test]
fn check_vmshared_signature_index() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMSharedSignatureIndex>(),
usize::from(offsets.size_of_vmshared_signature_index())
);
}
#[test]
fn check_target_shared_signature_index() {
assert_eq!(
size_of::<VMSharedSignatureIndex>(),
size_of::<TargetSharedSignatureIndex>()
);
}
}
impl VMSharedSignatureIndex {
/// Create a new `VMSharedSignatureIndex`.
pub fn new(value: u32) -> Self {
Self(value)
}
}
impl Default for VMSharedSignatureIndex {
fn default() -> Self {
Self::new(u32::MAX)
}
}
/// The VM caller-checked "anyfunc" record, for caller-side signature checking.
/// It consists of the actual function pointer and a signature id to be checked
/// by the caller.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMCallerCheckedAnyfunc {
/// Function body.
pub func_ptr: *const VMFunctionBody,
/// Function signature id.
pub type_index: VMSharedSignatureIndex,
/// Function `VMContext`.
pub vmctx: *mut VMContext,
// If more elements are added here, remember to add offset_of tests below!
}
#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
use super::VMCallerCheckedAnyfunc;
use core::mem::size_of;
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vmcaller_checked_anyfunc_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMCallerCheckedAnyfunc>(),
usize::from(offsets.size_of_vmcaller_checked_anyfunc())
);
assert_eq!(
offset_of!(VMCallerCheckedAnyfunc, func_ptr),
usize::from(offsets.vmcaller_checked_anyfunc_func_ptr())
);
assert_eq!(
offset_of!(VMCallerCheckedAnyfunc, type_index),
usize::from(offsets.vmcaller_checked_anyfunc_type_index())
);
assert_eq!(
offset_of!(VMCallerCheckedAnyfunc, vmctx),
usize::from(offsets.vmcaller_checked_anyfunc_vmctx())
);
}
}
impl Default for VMCallerCheckedAnyfunc {
fn default() -> Self {
Self {
func_ptr: ptr::null_mut(),
type_index: Default::default(),
vmctx: ptr::null_mut(),
}
}
}
/// An array that stores addresses of builtin functions. We translate code
/// to use indirect calls. This way, we don't have to patch the code.
#[repr(C)]
pub struct VMBuiltinFunctionsArray {
ptrs: [usize; Self::len()],
}
impl VMBuiltinFunctionsArray {
pub const fn len() -> usize {
BuiltinFunctionIndex::builtin_functions_total_number() as usize
}
pub fn initialized() -> Self {
use crate::libcalls::*;
let mut ptrs = [0; Self::len()];
ptrs[BuiltinFunctionIndex::get_memory32_grow_index().index() as usize] =
wasmtime_memory32_grow as usize;
ptrs[BuiltinFunctionIndex::get_imported_memory32_grow_index().index() as usize] =
wasmtime_imported_memory32_grow as usize;
ptrs[BuiltinFunctionIndex::get_memory32_size_index().index() as usize] =
wasmtime_memory32_size as usize;
ptrs[BuiltinFunctionIndex::get_imported_memory32_size_index().index() as usize] =
wasmtime_imported_memory32_size as usize;
Self { ptrs }
}
}
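// A sketch of what the indirection buys: generated code locates this array
// through the vmctx, loads the entry for a builtin's index, and performs an
// indirect call using that builtin's signature. In Rust terms (illustrative):
//
//     let builtins = VMBuiltinFunctionsArray::initialized();
//     let idx = BuiltinFunctionIndex::get_memory32_grow_index().index() as usize;
//     let grow: unsafe extern "C" fn(*mut VMContext, u32, u32) -> u32 =
//         unsafe { core::mem::transmute(builtins.ptrs[idx]) };
//     // `grow(vmctx, delta, memory_index)` then dispatches to wasmtime_memory32_grow.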
/// The storage for a WebAssembly invocation argument
///
/// TODO: These could be packed more densely, rather than using the same size for every type.
#[derive(Debug, Copy, Clone)]
#[repr(C, align(16))]
pub struct VMInvokeArgument([u8; 16]);
#[cfg(test)]
mod test_vm_invoke_argument {
use super::VMInvokeArgument;
use core::mem::{align_of, size_of};
use wasmtime_environ::{Module, VMOffsets};
#[test]
fn check_vm_invoke_argument_alignment() {
assert_eq!(align_of::<VMInvokeArgument>(), 16);
}
#[test]
    fn check_vm_invoke_argument_size() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMInvokeArgument>(),
usize::from(offsets.size_of_vmglobal_definition())
);
}
}
impl VMInvokeArgument {
/// Create a new invocation argument filled with zeroes
pub fn new() -> Self {
Self([0; 16])
}
}
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
///
/// TODO: We could move the globals into the `vmctx` allocation too.
#[derive(Debug)]
#[repr(C)]
pub struct VMContext {}
impl VMContext {
/// Return a mutable reference to the associated `Instance`.
///
/// This is unsafe because it doesn't work on just any `VMContext`, it must
/// be a `VMContext` allocated as part of an `Instance`.
#[allow(clippy::cast_ptr_alignment)]
pub(crate) unsafe fn instance(&mut self) -> &mut Instance {
&mut *((self as *mut Self as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance)
}
/// Return a mutable reference to the host state associated with this `Instance`.
///
/// This is unsafe because it doesn't work on just any `VMContext`, it must
/// be a `VMContext` allocated as part of an `Instance`.
pub unsafe fn host_state(&mut self) -> &mut dyn Any {
self.instance().host_state()
}
/// Lookup an export in the global exports namespace.
pub unsafe fn lookup_global_export(&mut self, field: &str) -> Option<crate::export::Export> {
self.instance().lookup_global_export(field)
}
}