diff --git a/RELEASES.md b/RELEASES.md index cc094771e8..2030be863b 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -2,6 +2,24 @@ -------------------------------------------------------------------------------- +## 0.36.0 + +Unreleased. + +### Added + +* Support for epoch-based interruption has been added to the C API. + [#3925](https://github.com/bytecodealliance/wasmtime/pull/3925) + +### Removed + +* Support for `Config::interruptable` and `InterruptHandle` has been removed + from the `wasmtime` crate. Users should migrate to using epoch-based + interruption instead. + [#3925](https://github.com/bytecodealliance/wasmtime/pull/3925) + +-------------------------------------------------------------------------------- + ## 0.35.1 Released 2022-03-09. diff --git a/crates/c-api/include/wasmtime.h b/crates/c-api/include/wasmtime.h index 9d1f7219eb..cc18ef1bff 100644 --- a/crates/c-api/include/wasmtime.h +++ b/crates/c-api/include/wasmtime.h @@ -167,6 +167,7 @@ #include #include #include +#include #include #include #include diff --git a/crates/c-api/include/wasmtime/config.h b/crates/c-api/include/wasmtime/config.h index 9f90059b09..ae40d8394f 100644 --- a/crates/c-api/include/wasmtime/config.h +++ b/crates/c-api/include/wasmtime/config.h @@ -96,15 +96,6 @@ enum wasmtime_profiling_strategy_enum { // ProfilingStrategy */ WASMTIME_CONFIG_PROP(void, debug_info, bool) -/** - * \brief Enables WebAssembly code to be interrupted. - * - * This setting is `false` by default. When enabled it will enable getting an - * interrupt handle via #wasmtime_interrupt_handle_new which can be used to - * interrupt currently-executing WebAssembly code. - */ -WASMTIME_CONFIG_PROP(void, interruptable, bool) - /** * \brief Whether or not fuel is enabled for generated code. * @@ -114,6 +105,22 @@ WASMTIME_CONFIG_PROP(void, interruptable, bool) */ WASMTIME_CONFIG_PROP(void, consume_fuel, bool) +/** + * \brief Whether or not epoch-based interruption is enabled for generated code. + * + * This setting is `false` by default. When enabled wasm code will check the + * current epoch periodically and abort if the current epoch is beyond a + * store-configured limit. + * + * Note that when this setting is enabled all stores will immediately trap and + * need to have their epoch deadline otherwise configured with + * #wasmtime_context_set_epoch_deadline. + * + * Note that the current epoch is engine-local and can be incremented with + * #wasmtime_engine_increment_epoch. + */ +WASMTIME_CONFIG_PROP(void, epoch_interruption, bool) + /** * \brief Configures the maximum stack size, in bytes, that JIT code can use. * diff --git a/crates/c-api/include/wasmtime/engine.h b/crates/c-api/include/wasmtime/engine.h new file mode 100644 index 0000000000..1b8336f460 --- /dev/null +++ b/crates/c-api/include/wasmtime/engine.h @@ -0,0 +1,36 @@ +/** + * \file wasmtime/engine.h + * + * Wasmtime-specific extensions to #wasm_engine_t. + */ + +#ifndef WASMTIME_ENGINE_H +#define WASMTIME_ENGINE_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief Increments the engine-local epoch variable. + * + * This function will increment the engine's current epoch which can be used to + * force WebAssembly code to trap if the current epoch goes beyond the + * #wasmtime_store_t configured epoch deadline. + * + * This function is safe to call from any thread, and it is also + * async-signal-safe. + * + * See also #wasmtime_config_epoch_interruption_set. 
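+ *
+ * A minimal usage sketch (illustrative only; a complete program is in
+ * examples/interrupt.c, which this patch also updates):
+ *
+ *     wasm_config_t *config = wasm_config_new();
+ *     wasmtime_config_epoch_interruption_set(config, true);
+ *     wasm_engine_t *engine = wasm_engine_new_with_config(config);
+ *     wasmtime_store_t *store = wasmtime_store_new(engine, NULL, NULL);
+ *     // Trap wasm once the engine epoch advances past the current value.
+ *     wasmtime_context_set_epoch_deadline(wasmtime_store_context(store), 1);
+ *     // Later, from any thread (e.g. a watchdog), push wasm past its deadline:
+ *     wasmtime_engine_increment_epoch(engine);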
+ */ +WASM_API_EXTERN void wasmtime_engine_increment_epoch(wasm_engine_t *engine); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WASMTIME_ENGINE_H + + diff --git a/crates/c-api/include/wasmtime/store.h b/crates/c-api/include/wasmtime/store.h index f8298c10fa..55c9f680bd 100644 --- a/crates/c-api/include/wasmtime/store.h +++ b/crates/c-api/include/wasmtime/store.h @@ -154,7 +154,7 @@ WASM_API_EXTERN bool wasmtime_context_fuel_consumed(const wasmtime_context_t *co WASM_API_EXTERN wasmtime_error_t *wasmtime_context_consume_fuel(wasmtime_context_t *context, uint64_t fuel, uint64_t *remaining); /** - * \brief Configres WASI state within the specified store. + * \brief Configures WASI state within the specified store. * * This function is required if #wasmtime_linker_define_wasi is called. This * will configure the WASI state for instances defined within this store to the @@ -167,47 +167,15 @@ WASM_API_EXTERN wasmtime_error_t *wasmtime_context_consume_fuel(wasmtime_context WASM_API_EXTERN wasmtime_error_t *wasmtime_context_set_wasi(wasmtime_context_t *context, wasi_config_t *wasi); /** - * \typedef wasmtime_interrupt_handle_t - * \brief Convenience alias for #wasmtime_interrupt_handle_t + * \brief Configures the relative deadline at which point WebAssembly code will + * trap. * - * \struct wasmtime_interrupt_handle_t - * \brief A handle used to interrupt executing WebAssembly code. + * This function configures the store-local epoch deadline after which point + * WebAssembly code will trap. * - * This structure is an opaque handle that represents a handle to a store. This - * handle can be used to remotely (from another thread) interrupt currently - * executing WebAssembly code. - * - * This structure is safe to share from multiple threads. + * See also #wasmtime_config_epoch_interruption_set. */ -typedef struct wasmtime_interrupt_handle wasmtime_interrupt_handle_t; - -/** - * \brief Creates a new interrupt handle to interrupt executing WebAssembly from - * the provided store. - * - * There are a number of caveats about how interrupt is handled in Wasmtime. For - * more information see the [Rust - * documentation](https://bytecodealliance.github.io/wasmtime/api/wasmtime/struct.Store.html#method.interrupt_handle). - * - * This function returns `NULL` if the store's configuration does not have - * interrupts enabled. See #wasmtime_config_interruptable_set. - */ -WASM_API_EXTERN wasmtime_interrupt_handle_t *wasmtime_interrupt_handle_new(wasmtime_context_t *context); - -/** - * \brief Requests that WebAssembly code running in the store attached to this - * interrupt handle is interrupted. - * - * For more information about interrupts see #wasmtime_interrupt_handle_new. - * - * Note that this is safe to call from any thread. - */ -WASM_API_EXTERN void wasmtime_interrupt_handle_interrupt(wasmtime_interrupt_handle_t *handle); - -/** - * \brief Deletes an interrupt handle. 
- */ -WASM_API_EXTERN void wasmtime_interrupt_handle_delete(wasmtime_interrupt_handle_t *handle); +WASM_API_EXTERN void wasmtime_context_set_epoch_deadline(wasmtime_context_t *context, uint64_t ticks_beyond_current); #ifdef __cplusplus } // extern "C" diff --git a/crates/c-api/src/config.rs b/crates/c-api/src/config.rs index 64c3a88131..1326f87bc1 100644 --- a/crates/c-api/src/config.rs +++ b/crates/c-api/src/config.rs @@ -50,13 +50,13 @@ pub extern "C" fn wasmtime_config_debug_info_set(c: &mut wasm_config_t, enable: } #[no_mangle] -pub extern "C" fn wasmtime_config_interruptable_set(c: &mut wasm_config_t, enable: bool) { - c.config.interruptable(enable); +pub extern "C" fn wasmtime_config_consume_fuel_set(c: &mut wasm_config_t, enable: bool) { + c.config.consume_fuel(enable); } #[no_mangle] -pub extern "C" fn wasmtime_config_consume_fuel_set(c: &mut wasm_config_t, enable: bool) { - c.config.consume_fuel(enable); +pub extern "C" fn wasmtime_config_epoch_interruption_set(c: &mut wasm_config_t, enable: bool) { + c.config.epoch_interruption(enable); } #[no_mangle] diff --git a/crates/c-api/src/engine.rs b/crates/c-api/src/engine.rs index f130dfb047..67785fc03f 100644 --- a/crates/c-api/src/engine.rs +++ b/crates/c-api/src/engine.rs @@ -33,3 +33,8 @@ pub extern "C" fn wasm_engine_new_with_config(c: Box) -> Box, -) -> Option> { - Some(Box::new(wasmtime_interrupt_handle_t { - handle: store.interrupt_handle().ok()?, - })) +pub extern "C" fn wasmtime_context_set_epoch_deadline( + mut store: CStoreContextMut<'_>, + ticks_beyond_current: u64, +) { + store.set_epoch_deadline(ticks_beyond_current); } - -#[no_mangle] -pub extern "C" fn wasmtime_interrupt_handle_interrupt(handle: &wasmtime_interrupt_handle_t) { - handle.handle.interrupt(); -} - -#[no_mangle] -pub extern "C" fn wasmtime_interrupt_handle_delete(_: Box) {} diff --git a/crates/cranelift/src/compiler.rs b/crates/cranelift/src/compiler.rs index a67790bb33..f7f0e2d2b2 100644 --- a/crates/cranelift/src/compiler.rs +++ b/crates/cranelift/src/compiler.rs @@ -129,7 +129,7 @@ impl wasmtime_environ::Compiler for Compiler { // needed by `ir::Function`. // // Otherwise our stack limit is specially calculated from the vmctx - // argument, where we need to load the `*const VMInterrupts` + // argument, where we need to load the `*const VMRuntimeLimits` // pointer, and then from that pointer we need to load the stack // limit itself. Note that manual register allocation is needed here // too due to how late in the process this codegen happens. 
@@ -141,7 +141,7 @@ impl wasmtime_environ::Compiler for Compiler { .create_global_value(ir::GlobalValueData::VMContext); let interrupts_ptr = context.func.create_global_value(ir::GlobalValueData::Load { base: vmctx, - offset: i32::try_from(func_env.offsets.vmctx_interrupts()) + offset: i32::try_from(func_env.offsets.vmctx_runtime_limits()) .unwrap() .into(), global_type: isa.pointer_type(), @@ -149,7 +149,7 @@ impl wasmtime_environ::Compiler for Compiler { }); let stack_limit = context.func.create_global_value(ir::GlobalValueData::Load { base: interrupts_ptr, - offset: i32::try_from(func_env.offsets.vminterrupts_stack_limit()) + offset: i32::try_from(func_env.offsets.vmruntime_limits_stack_limit()) .unwrap() .into(), global_type: isa.pointer_type(), diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs index b6565bdef4..ff6e80c781 100644 --- a/crates/cranelift/src/func_environ.rs +++ b/crates/cranelift/src/func_environ.rs @@ -17,7 +17,7 @@ use std::mem; use wasmparser::Operator; use wasmtime_environ::{ BuiltinFunctionIndex, MemoryPlan, MemoryStyle, Module, ModuleTranslation, TableStyle, Tunables, - TypeTables, VMOffsets, INTERRUPTED, WASM_PAGE_SIZE, + TypeTables, VMOffsets, WASM_PAGE_SIZE, }; use wasmtime_environ::{FUNCREF_INIT_BIT, FUNCREF_MASK}; @@ -129,17 +129,17 @@ pub struct FuncEnvironment<'module_environment> { /// A function-local variable which stores the cached value of the amount of /// fuel remaining to execute. If used this is modified frequently so it's /// stored locally as a variable instead of always referenced from the field - /// in `*const VMInterrupts` + /// in `*const VMRuntimeLimits` fuel_var: cranelift_frontend::Variable, /// A function-local variable which caches the value of `*const - /// VMInterrupts` for this function's vmctx argument. This pointer is stored + /// VMRuntimeLimits` for this function's vmctx argument. This pointer is stored /// in the vmctx itself, but never changes for the lifetime of the function, /// so if we load it up front we can continue to use it throughout. - vminterrupts_ptr: cranelift_frontend::Variable, + vmruntime_limits_ptr: cranelift_frontend::Variable, /// A cached epoch deadline value, when performing epoch-based - /// interruption. Loaded from `VMInterrupts` and reloaded after + /// interruption. Loaded from `VMRuntimeLimits` and reloaded after /// any yield. epoch_deadline_var: cranelift_frontend::Variable, @@ -182,7 +182,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> { fuel_var: Variable::new(0), epoch_deadline_var: Variable::new(0), epoch_ptr_var: Variable::new(0), - vminterrupts_ptr: Variable::new(0), + vmruntime_limits_ptr: Variable::new(0), // Start with at least one fuel being consumed because even empty // functions should consume at least some fuel. @@ -344,27 +344,27 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } } - fn declare_vminterrupts_ptr(&mut self, builder: &mut FunctionBuilder<'_>) { - // We load the `*const VMInterrupts` value stored within vmctx at the + fn declare_vmruntime_limits_ptr(&mut self, builder: &mut FunctionBuilder<'_>) { + // We load the `*const VMRuntimeLimits` value stored within vmctx at the // head of the function and reuse the same value across the entire // function. This is possible since we know that the pointer never // changes for the lifetime of the function. 
let pointer_type = self.pointer_type(); - builder.declare_var(self.vminterrupts_ptr, pointer_type); + builder.declare_var(self.vmruntime_limits_ptr, pointer_type); let vmctx = self.vmctx(builder.func); let base = builder.ins().global_value(pointer_type, vmctx); - let offset = i32::try_from(self.offsets.vmctx_interrupts()).unwrap(); + let offset = i32::try_from(self.offsets.vmctx_runtime_limits()).unwrap(); let interrupt_ptr = builder .ins() .load(pointer_type, ir::MemFlags::trusted(), base, offset); - builder.def_var(self.vminterrupts_ptr, interrupt_ptr); + builder.def_var(self.vmruntime_limits_ptr, interrupt_ptr); } fn fuel_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) { // On function entry we load the amount of fuel into a function-local // `self.fuel_var` to make fuel modifications fast locally. This cache // is then periodically flushed to the Store-defined location in - // `VMInterrupts` later. + // `VMRuntimeLimits` later. builder.declare_var(self.fuel_var, ir::types::I64); self.fuel_load_into_var(builder); self.fuel_check(builder); @@ -412,13 +412,13 @@ impl<'module_environment> FuncEnvironment<'module_environment> { match op { // Exiting a function (via a return or unreachable) or otherwise // entering a different function (via a call) means that we need to - // update the fuel consumption in `VMInterrupts` because we're + // update the fuel consumption in `VMRuntimeLimits` because we're // about to move control out of this function itself and the fuel // may need to be read. // // Before this we need to update the fuel counter from our own cost // leading up to this function call, and then we can store - // `self.fuel_var` into `VMInterrupts`. + // `self.fuel_var` into `VMRuntimeLimits`. Operator::Unreachable | Operator::Return | Operator::CallIndirect { .. } @@ -502,7 +502,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> { builder.def_var(self.fuel_var, fuel); } - /// Loads the fuel consumption value from `VMInterrupts` into `self.fuel_var` + /// Loads the fuel consumption value from `VMRuntimeLimits` into `self.fuel_var` fn fuel_load_into_var(&mut self, builder: &mut FunctionBuilder<'_>) { let (addr, offset) = self.fuel_addr_offset(builder); let fuel = builder @@ -512,7 +512,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } /// Stores the fuel consumption value from `self.fuel_var` into - /// `VMInterrupts`. + /// `VMRuntimeLimits`. fn fuel_save_from_var(&mut self, builder: &mut FunctionBuilder<'_>) { let (addr, offset) = self.fuel_addr_offset(builder); let fuel_consumed = builder.use_var(self.fuel_var); @@ -522,14 +522,14 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } /// Returns the `(address, offset)` of the fuel consumption within - /// `VMInterrupts`, used to perform loads/stores later. + /// `VMRuntimeLimits`, used to perform loads/stores later. 
fn fuel_addr_offset( &mut self, builder: &mut FunctionBuilder<'_>, ) -> (ir::Value, ir::immediates::Offset32) { ( - builder.use_var(self.vminterrupts_ptr), - i32::from(self.offsets.vminterrupts_fuel_consumed()).into(), + builder.use_var(self.vmruntime_limits_ptr), + i32::from(self.offsets.vmruntime_limits_fuel_consumed()).into(), ) } @@ -628,12 +628,12 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } fn epoch_load_deadline_into_var(&mut self, builder: &mut FunctionBuilder<'_>) { - let interrupts = builder.use_var(self.vminterrupts_ptr); + let interrupts = builder.use_var(self.vmruntime_limits_ptr); let deadline = builder.ins().load( ir::types::I64, ir::MemFlags::trusted(), interrupts, - ir::immediates::Offset32::new(self.offsets.vminterupts_epoch_deadline() as i32), + ir::immediates::Offset32::new(self.offsets.vmruntime_limits_epoch_deadline() as i32), ); builder.def_var(self.epoch_deadline_var, deadline); } @@ -824,7 +824,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m } fn after_locals(&mut self, num_locals: usize) { - self.vminterrupts_ptr = Variable::new(num_locals); + self.vmruntime_limits_ptr = Variable::new(num_locals); self.fuel_var = Variable::new(num_locals + 1); self.epoch_deadline_var = Variable::new(num_locals + 2); self.epoch_ptr_var = Variable::new(num_locals + 3); @@ -1976,31 +1976,6 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m } fn translate_loop_header(&mut self, builder: &mut FunctionBuilder) -> WasmResult<()> { - // If enabled check the interrupt flag to prevent long or infinite - // loops. - // - // For more information about this see comments in - // `crates/environ/src/cranelift.rs` - if self.tunables.interruptable { - let pointer_type = self.pointer_type(); - let interrupt_ptr = builder.use_var(self.vminterrupts_ptr); - let interrupt = builder.ins().load( - pointer_type, - ir::MemFlags::trusted(), - interrupt_ptr, - i32::from(self.offsets.vminterrupts_stack_limit()), - ); - // Note that the cast to `isize` happens first to allow sign-extension, - // if necessary, to `i64`. - let interrupted_sentinel = builder - .ins() - .iconst(pointer_type, INTERRUPTED as isize as i64); - let cmp = builder - .ins() - .icmp(IntCC::Equal, interrupt, interrupted_sentinel); - builder.ins().trapnz(cmp, ir::TrapCode::Interrupt); - } - // Additionally if enabled check how much fuel we have remaining to see // if we've run out by this point. if self.tunables.consume_fuel { @@ -2045,13 +2020,10 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m builder: &mut FunctionBuilder, _state: &FuncTranslationState, ) -> WasmResult<()> { - // If the `vminterrupts_ptr` variable will get used then we initialize + // If the `vmruntime_limits_ptr` variable will get used then we initialize // it here. - if self.tunables.consume_fuel - || self.tunables.interruptable - || self.tunables.epoch_interruption - { - self.declare_vminterrupts_ptr(builder); + if self.tunables.consume_fuel || self.tunables.epoch_interruption { + self.declare_vmruntime_limits_ptr(builder); } // Additionally we initialize `fuel_var` if it will get used. if self.tunables.consume_fuel { diff --git a/crates/cranelift/src/lib.rs b/crates/cranelift/src/lib.rs index 693014bf2c..c8b970f935 100644 --- a/crates/cranelift/src/lib.rs +++ b/crates/cranelift/src/lib.rs @@ -5,20 +5,20 @@ // # How does Wasmtime prevent stack overflow? 
// -// A few locations throughout the codebase link to this file to explain -// interrupts and stack overflow. To start off, let's take a look at stack -// overflow. Wasm code is well-defined to have stack overflow being recoverable -// and raising a trap, so we need to handle this somehow! There's also an added -// constraint where as an embedder you frequently are running host-provided -// code called from wasm. WebAssembly and native code currently share the same -// call stack, so you want to make sure that your host-provided code will have -// enough call-stack available to it. +// A few locations throughout the codebase link to this file to explain stack +// overflow. To start off, let's take a look at stack overflow. Wasm code is +// well-defined to have stack overflow being recoverable and raising a trap, so +// we need to handle this somehow! There's also an added constraint where as an +// embedder you frequently are running host-provided code called from wasm. +// WebAssembly and native code currently share the same call stack, so you want +// to make sure that your host-provided code will have enough call-stack +// available to it. // // Given all that, the way that stack overflow is handled is by adding a // prologue check to all JIT functions for how much native stack is remaining. // The `VMContext` pointer is the first argument to all functions, and the first -// field of this structure is `*const VMInterrupts` and the first field of that -// is the stack limit. Note that the stack limit in this case means "if the +// field of this structure is `*const VMRuntimeLimits` and the first field of +// that is the stack limit. Note that the stack limit in this case means "if the // stack pointer goes below this, trap". Each JIT function which consumes stack // space or isn't a leaf function starts off by loading the stack limit, // checking it against the stack pointer, and optionally traps. @@ -43,50 +43,6 @@ // For more information about the tricky bits of managing the reserved stack // size of wasm, see the implementation in `traphandlers.rs` in the // `update_stack_limit` function. -// -// # How is Wasmtime interrupted? -// -// Ok so given all that background of stack checks, the next thing we want to -// build on top of this is the ability to *interrupt* executing wasm code. This -// is useful to ensure that wasm always executes within a particular time slice -// or otherwise doesn't consume all CPU resources on a system. There are two -// major ways that interrupts are required: -// -// * Loops - likely immediately apparent but it's easy to write an infinite -// loop in wasm, so we need the ability to interrupt loops. -// * Function entries - somewhat more subtle, but imagine a module where each -// function calls the next function twice. This creates 2^n calls pretty -// quickly, so a pretty small module can export a function with no loops -// that takes an extremely long time to call. -// -// In many cases if an interrupt comes in you want to interrupt host code as -// well, but we're explicitly not considering that here. We're hoping that -// interrupting host code is largely left to the embedder (e.g. figuring out -// how to interrupt blocking syscalls) and they can figure that out. The purpose -// of this feature is to basically only give the ability to interrupt -// currently-executing wasm code (or triggering an interrupt as soon as wasm -// reenters itself). 
-// -// To implement interruption of loops we insert code at the head of all loops -// which checks the stack limit counter. If the counter matches a magical -// sentinel value that's impossible to be the real stack limit, then we -// interrupt the loop and trap. To implement interrupts of functions, we -// actually do the same thing where the magical sentinel value we use here is -// automatically considered as considering all stack pointer values as "you ran -// over your stack". This means that with a write of a magical value to one -// location we can interrupt both loops and function bodies. -// -// The "magical value" here is `usize::max_value() - N`. We reserve -// `usize::max_value()` for "the stack limit isn't set yet" and so -N is -// then used for "you got interrupted". We do a bit of patching afterwards to -// translate a stack overflow into an interrupt trap if we see that an -// interrupt happened. Note that `N` here is a medium-size-ish nonzero value -// chosen in coordination with the cranelift backend. Currently it's 32k. The -// value of N is basically a threshold in the backend for "anything less than -// this requires only one branch in the prologue, any stack size bigger requires -// two branches". Naturally we want most functions to have one branch, but we -// also need to actually catch stack overflow, so for now 32k is chosen and it's -// assume no valid stack pointer will ever be `usize::max_value() - 32k`. use cranelift_codegen::binemit; use cranelift_codegen::ir; diff --git a/crates/environ/src/tunables.rs b/crates/environ/src/tunables.rs index dac014d8db..01d333810a 100644 --- a/crates/environ/src/tunables.rs +++ b/crates/environ/src/tunables.rs @@ -24,14 +24,6 @@ pub struct Tunables { /// Whether or not to retain DWARF sections in compiled modules. pub parse_wasm_debuginfo: bool, - /// Whether or not to enable the ability to interrupt wasm code dynamically. - /// - /// More info can be found about the implementation in - /// crates/environ/src/cranelift.rs. Note that you can't interrupt host - /// calls and interrupts are implemented through the `VMInterrupts` - /// structure, or `InterruptHandle` in the `wasmtime` crate. - pub interruptable: bool, - /// Whether or not fuel is enabled for generated code, meaning that fuel /// will be consumed every time a wasm instruction is executed. pub consume_fuel: bool, @@ -89,7 +81,6 @@ impl Default for Tunables { generate_native_debuginfo: false, parse_wasm_debuginfo: true, - interruptable: false, consume_fuel: false, epoch_interruption: false, static_memory_bound_is_maximum: false, diff --git a/crates/environ/src/vmoffsets.rs b/crates/environ/src/vmoffsets.rs index 54a66af4eb..305d69c0e6 100644 --- a/crates/environ/src/vmoffsets.rs +++ b/crates/environ/src/vmoffsets.rs @@ -4,7 +4,7 @@ // Currently the `VMContext` allocation by field looks like this: // // struct VMContext { -// interrupts: *const VMInterrupts, +// runtime_limits: *const VMRuntimeLimits, // externref_activations_table: *mut VMExternRefActivationsTable, // store: *mut dyn Store, // builtins: *mut VMBuiltinFunctionsArray, @@ -74,7 +74,7 @@ pub struct VMOffsets
<P>
{ pub num_escaped_funcs: u32, // precalculated offsets of various member fields - interrupts: u32, + runtime_limits: u32, epoch_ptr: u32, externref_activations_table: u32, store: u32, @@ -221,7 +221,7 @@ impl VMOffsets
<P>
{ store: "jit store state", externref_activations_table: "jit host externref state", epoch_ptr: "jit current epoch state", - interrupts: "jit interrupt state", + runtime_limits: "jit runtime limits state", } } } @@ -239,7 +239,7 @@ impl From> for VMOffsets
<P>
{ num_defined_memories: fields.num_defined_memories, num_defined_globals: fields.num_defined_globals, num_escaped_funcs: fields.num_escaped_funcs, - interrupts: 0, + runtime_limits: 0, epoch_ptr: 0, externref_activations_table: 0, store: 0, @@ -286,7 +286,7 @@ impl From> for VMOffsets
<P>
{ } fields! { - size(interrupts) = ret.ptr.size(), + size(runtime_limits) = ret.ptr.size(), size(epoch_ptr) = ret.ptr.size(), size(externref_activations_table) = ret.ptr.size(), size(store) = ret.ptr.size() * 2, @@ -483,23 +483,23 @@ impl VMOffsets
<P>
{ } } -/// Offsets for `VMInterrupts`. +/// Offsets for `VMRuntimeLimits`. impl VMOffsets
<P>
{ - /// Return the offset of the `stack_limit` field of `VMInterrupts` + /// Return the offset of the `stack_limit` field of `VMRuntimeLimits` #[inline] - pub fn vminterrupts_stack_limit(&self) -> u8 { + pub fn vmruntime_limits_stack_limit(&self) -> u8 { 0 } - /// Return the offset of the `fuel_consumed` field of `VMInterrupts` + /// Return the offset of the `fuel_consumed` field of `VMRuntimeLimits` #[inline] - pub fn vminterrupts_fuel_consumed(&self) -> u8 { + pub fn vmruntime_limits_fuel_consumed(&self) -> u8 { self.pointer_size() } - /// Return the offset of the `epoch_deadline` field of `VMInterrupts` + /// Return the offset of the `epoch_deadline` field of `VMRuntimeLimits` #[inline] - pub fn vminterupts_epoch_deadline(&self) -> u8 { + pub fn vmruntime_limits_epoch_deadline(&self) -> u8 { self.pointer_size() + 8 // `stack_limit` is a pointer; `fuel_consumed` is an `i64` } } @@ -535,10 +535,10 @@ impl VMOffsets
<P>
{ /// Offsets for `VMContext`. impl VMOffsets
<P>
{ - /// Return the offset to the `VMInterrupts` structure + /// Return the offset to the `VMRuntimeLimits` structure #[inline] - pub fn vmctx_interrupts(&self) -> u32 { - self.interrupts + pub fn vmctx_runtime_limits(&self) -> u32 { + self.runtime_limits } /// Return the offset to the `*const AtomicU64` epoch-counter diff --git a/crates/fuzzing/src/generators.rs b/crates/fuzzing/src/generators.rs index e4cd7ee93f..601bf6f3cc 100644 --- a/crates/fuzzing/src/generators.rs +++ b/crates/fuzzing/src/generators.rs @@ -389,7 +389,6 @@ impl Config { .wasm_memory64(self.module_config.config.memory64_enabled) .cranelift_nan_canonicalization(self.wasmtime.canonicalize_nans) .cranelift_opt_level(self.wasmtime.opt_level.to_wasmtime()) - .interruptable(self.wasmtime.interruptable) .consume_fuel(self.wasmtime.consume_fuel) .epoch_interruption(self.wasmtime.epoch_interruption) .memory_init_cow(self.wasmtime.memory_init_cow) @@ -490,23 +489,16 @@ impl Config { pub fn generate_timeout(&mut self, u: &mut Unstructured<'_>) -> arbitrary::Result { let time_duration = Duration::from_secs(20); let timeout = u - .choose(&[ - Timeout::Time(time_duration), - Timeout::Fuel(100_000), - Timeout::Epoch(time_duration), - ])? + .choose(&[Timeout::Fuel(100_000), Timeout::Epoch(time_duration)])? .clone(); match &timeout { - &Timeout::Time(..) => { - self.wasmtime.interruptable = true; - } - &Timeout::Fuel(..) => { + Timeout::Fuel(..) => { self.wasmtime.consume_fuel = true; } - &Timeout::Epoch(..) => { + Timeout::Epoch(..) => { self.wasmtime.epoch_interruption = true; } - &Timeout::None => unreachable!("Not an option given to choose()"), + Timeout::None => unreachable!("Not an option given to choose()"), } Ok(timeout) } diff --git a/crates/fuzzing/src/oracles.rs b/crates/fuzzing/src/oracles.rs index d5bc0ce18d..38f355c36f 100644 --- a/crates/fuzzing/src/oracles.rs +++ b/crates/fuzzing/src/oracles.rs @@ -107,9 +107,6 @@ pub enum Timeout { /// No timeout is used, it should be guaranteed via some other means that /// the input does not infinite loop. None, - /// A time-based timeout is used with a sleeping thread sending a signal - /// after the specified duration. - Time(Duration), /// Fuel-based timeouts are used where the specified fuel is all that the /// provided wasm module is allowed to consume. Fuel(u64), @@ -143,12 +140,6 @@ pub fn instantiate(wasm: &[u8], known_valid: bool, config: &generators::Config, // This prevents us from creating a huge number of sleeping threads if // this function is executed in a loop, like it does on nightly fuzzing // infrastructure. - Timeout::Time(timeout) => { - let handle = store.interrupt_handle().unwrap(); - timeout_state.spawn_timeout(timeout, move || handle.interrupt()); - } - // Similar to above, but we bump the epoch rather than set the - // interrupt flag. 
Timeout::Epoch(timeout) => { let engine = store.engine().clone(); timeout_state.spawn_timeout(timeout, move || engine.increment_epoch()); diff --git a/crates/runtime/src/instance.rs b/crates/runtime/src/instance.rs index 59c1f00349..edc7cce455 100644 --- a/crates/runtime/src/instance.rs +++ b/crates/runtime/src/instance.rs @@ -9,7 +9,7 @@ use crate::table::{Table, TableElement, TableElementType}; use crate::traphandlers::Trap; use crate::vmcontext::{ VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionImport, - VMGlobalDefinition, VMGlobalImport, VMInterrupts, VMMemoryDefinition, VMMemoryImport, + VMGlobalDefinition, VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMRuntimeLimits, VMTableDefinition, VMTableImport, }; use crate::{ @@ -240,8 +240,8 @@ impl Instance { } /// Return a pointer to the interrupts structure - pub fn interrupts(&self) -> *mut *const VMInterrupts { - unsafe { self.vmctx_plus_offset(self.offsets.vmctx_interrupts()) } + pub fn runtime_limits(&self) -> *mut *const VMRuntimeLimits { + unsafe { self.vmctx_plus_offset(self.offsets.vmctx_runtime_limits()) } } /// Return a pointer to the global epoch counter used by this instance. @@ -888,7 +888,7 @@ impl Instance { assert!(std::ptr::eq(module, self.module().as_ref())); if let Some(store) = store.as_raw() { - *self.interrupts() = (*store).vminterrupts(); + *self.runtime_limits() = (*store).vmruntime_limits(); *self.epoch_ptr() = (*store).epoch_ptr(); *self.externref_activations_table() = (*store).externref_activations_table().0; self.set_store(store); diff --git a/crates/runtime/src/instance/allocator/pooling/uffd.rs b/crates/runtime/src/instance/allocator/pooling/uffd.rs index 21b52c6298..dcfdb4dfb6 100644 --- a/crates/runtime/src/instance/allocator/pooling/uffd.rs +++ b/crates/runtime/src/instance/allocator/pooling/uffd.rs @@ -515,7 +515,7 @@ mod test { info: MockModuleInfo, } unsafe impl Store for MockStore { - fn vminterrupts(&self) -> *mut crate::VMInterrupts { + fn vmruntime_limits(&self) -> *mut crate::VMRuntimeLimits { std::ptr::null_mut() } fn externref_activations_table( diff --git a/crates/runtime/src/lib.rs b/crates/runtime/src/lib.rs index ea5b8093f8..5e64f65480 100644 --- a/crates/runtime/src/lib.rs +++ b/crates/runtime/src/lib.rs @@ -65,7 +65,7 @@ pub use crate::traphandlers::{ }; pub use crate::vmcontext::{ VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport, VMGlobalDefinition, - VMGlobalImport, VMInterrupts, VMInvokeArgument, VMMemoryDefinition, VMMemoryImport, + VMGlobalImport, VMInvokeArgument, VMMemoryDefinition, VMMemoryImport, VMRuntimeLimits, VMSharedSignatureIndex, VMTableDefinition, VMTableImport, VMTrampoline, ValRaw, }; @@ -99,11 +99,11 @@ pub const VERSION: &str = env!("CARGO_PKG_VERSION"); /// is that `wasmtime::Store` handles all this correctly. pub unsafe trait Store { /// Returns the raw pointer in memory where this store's shared - /// `VMInterrupts` structure is located. + /// `VMRuntimeLimits` structure is located. /// /// Used to configure `VMContext` initialization and store the right pointer /// in the `VMContext`. - fn vminterrupts(&self) -> *mut VMInterrupts; + fn vmruntime_limits(&self) -> *mut VMRuntimeLimits; /// Returns a pointer to the global epoch counter. /// diff --git a/crates/runtime/src/traphandlers.rs b/crates/runtime/src/traphandlers.rs index 882bc982e8..240a03b512 100644 --- a/crates/runtime/src/traphandlers.rs +++ b/crates/runtime/src/traphandlers.rs @@ -1,14 +1,13 @@ //! 
WebAssembly trap handling, which is built on top of the lower-level //! signalhandling mechanisms. -use crate::{VMContext, VMInterrupts}; +use crate::VMContext; use anyhow::Error; use backtrace::Backtrace; use std::any::Any; use std::cell::{Cell, UnsafeCell}; use std::mem::MaybeUninit; use std::ptr; -use std::sync::atomic::Ordering::SeqCst; use std::sync::Once; use wasmtime_environ::TrapCode; @@ -122,10 +121,6 @@ pub enum Trap { pc: usize, /// Native stack backtrace at the time the trap occurred backtrace: Backtrace, - /// An indicator for whether this may have been a trap generated from an - /// interrupt, used for switching what would otherwise be a stack - /// overflow trap to be an interrupt trap. - maybe_interrupted: bool, }, /// A trap raised from a wasm libcall @@ -169,7 +164,6 @@ impl Trap { /// /// Highly unsafe since `closure` won't have any dtors run. pub unsafe fn catch_traps<'a, F>( - vminterrupts: *mut VMInterrupts, signal_handler: Option<*const SignalHandler<'static>>, callee: *mut VMContext, mut closure: F, @@ -177,7 +171,7 @@ pub unsafe fn catch_traps<'a, F>( where F: FnMut(*mut VMContext), { - return CallThreadState::new(signal_handler).with(vminterrupts, |cx| { + return CallThreadState::new(signal_handler).with(|cx| { wasmtime_setjmp( cx.jmp_buf.as_ptr(), call_closure::, @@ -223,33 +217,21 @@ impl CallThreadState { } } - fn with( - self, - interrupts: *mut VMInterrupts, - closure: impl FnOnce(&CallThreadState) -> i32, - ) -> Result<(), Box> { + fn with(self, closure: impl FnOnce(&CallThreadState) -> i32) -> Result<(), Box> { let ret = tls::set(&self, || closure(&self))?; if ret != 0 { Ok(()) } else { - Err(unsafe { self.read_trap(interrupts) }) + Err(unsafe { self.read_trap() }) } } #[cold] - unsafe fn read_trap(&self, interrupts: *mut VMInterrupts) -> Box { + unsafe fn read_trap(&self) -> Box { Box::new(match (*self.unwind.get()).as_ptr().read() { UnwindReason::UserTrap(data) => Trap::User(data), UnwindReason::LibTrap(trap) => trap, - UnwindReason::JitTrap { backtrace, pc } => { - let maybe_interrupted = - (*interrupts).stack_limit.load(SeqCst) == wasmtime_environ::INTERRUPTED; - Trap::Jit { - pc, - backtrace, - maybe_interrupted, - } - } + UnwindReason::JitTrap { backtrace, pc } => Trap::Jit { pc, backtrace }, UnwindReason::Panic(panic) => std::panic::resume_unwind(panic), }) } diff --git a/crates/runtime/src/vmcontext.rs b/crates/runtime/src/vmcontext.rs index f60ce4723c..819c592669 100644 --- a/crates/runtime/src/vmcontext.rs +++ b/crates/runtime/src/vmcontext.rs @@ -7,7 +7,6 @@ use std::any::Any; use std::cell::UnsafeCell; use std::marker; use std::ptr::NonNull; -use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; use std::u32; /// An imported function. @@ -669,12 +668,11 @@ impl VMInvokeArgument { /// Structure used to control interrupting wasm code. #[derive(Debug)] #[repr(C)] -pub struct VMInterrupts { +pub struct VMRuntimeLimits { /// Current stack limit of the wasm module. /// - /// This is used to control both stack overflow as well as interrupting wasm - /// modules. For more information see `crates/environ/src/cranelift.rs`. - pub stack_limit: AtomicUsize, + /// For more information see `crates/cranelift/src/lib.rs`. + pub stack_limit: UnsafeCell, /// Indicator of how much fuel has been consumed and is remaining to /// WebAssembly. 
@@ -691,28 +689,17 @@ pub struct VMInterrupts { pub epoch_deadline: UnsafeCell, } -// The `VMInterrupts` type is a pod-type with no destructor, and we -// only access `stack_limit` from other threads, so add in these trait -// impls which are otherwise not available due to the `fuel_consumed` -// and `epoch_deadline` variables in `VMInterrupts`. -// -// Note that users of `fuel_consumed` understand that the unsafety encompasses -// ensuring that it's only mutated/accessed from one thread dynamically. -unsafe impl Send for VMInterrupts {} -unsafe impl Sync for VMInterrupts {} +// The `VMRuntimeLimits` type is a pod-type with no destructor, and we don't +// access any fields from other threads, so add in these trait impls which are +// otherwise not available due to the `fuel_consumed` and `epoch_deadline` +// variables in `VMRuntimeLimits`. +unsafe impl Send for VMRuntimeLimits {} +unsafe impl Sync for VMRuntimeLimits {} -impl VMInterrupts { - /// Flag that an interrupt should occur - pub fn interrupt(&self) { - self.stack_limit - .store(wasmtime_environ::INTERRUPTED, SeqCst); - } -} - -impl Default for VMInterrupts { - fn default() -> VMInterrupts { - VMInterrupts { - stack_limit: AtomicUsize::new(usize::max_value()), +impl Default for VMRuntimeLimits { + fn default() -> VMRuntimeLimits { + VMRuntimeLimits { + stack_limit: UnsafeCell::new(usize::max_value()), fuel_consumed: UnsafeCell::new(0), epoch_deadline: UnsafeCell::new(0), } @@ -720,19 +707,27 @@ impl Default for VMInterrupts { } #[cfg(test)] -mod test_vminterrupts { - use super::VMInterrupts; +mod test_vmruntime_limits { + use super::VMRuntimeLimits; use memoffset::offset_of; use std::mem::size_of; use wasmtime_environ::{Module, VMOffsets}; #[test] - fn check_vminterrupts_interrupted_offset() { + fn field_offsets() { let module = Module::new(); let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module); assert_eq!( - offset_of!(VMInterrupts, stack_limit), - usize::from(offsets.vminterrupts_stack_limit()) + offset_of!(VMRuntimeLimits, stack_limit), + usize::from(offsets.vmruntime_limits_stack_limit()) + ); + assert_eq!( + offset_of!(VMRuntimeLimits, fuel_consumed), + usize::from(offsets.vmruntime_limits_fuel_consumed()) + ); + assert_eq!( + offset_of!(VMRuntimeLimits, epoch_deadline), + usize::from(offsets.vmruntime_limits_epoch_deadline()) ); } } diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs index 4acfaac4cd..6de73cf919 100644 --- a/crates/wasmtime/src/config.rs +++ b/crates/wasmtime/src/config.rs @@ -257,16 +257,6 @@ impl Config { /// and the periodic yields with epochs should ensure that when the /// timeout is reached it's appropriately recognized. /// - /// * Finally you can spawn futures into a thread pool. By doing this in a - /// thread pool you are relaxing the requirement that `Future::poll` must - /// be fast because your future is executing on a separate thread. This - /// strategy, however, would likely still require some form of - /// cancellation via [`Config::epoch_interruption`] or - /// [`crate::Store::interrupt_handle`] to ensure wasm doesn't take *too* - /// long to execute. This solution is generally not recommended for its - /// complexity and instead one of the previous solutions should likely be - /// used. - /// /// In all cases special care needs to be taken when integrating /// asynchronous wasm into your application. 
You should carefully plan where /// WebAssembly will execute and what compute resources will be allotted to @@ -314,27 +304,13 @@ impl Config { self } - /// Configures whether functions and loops will be interruptable via the - /// [`Store::interrupt_handle`](crate::Store::interrupt_handle) method. - /// - /// For more information see the documentation on - /// [`Store::interrupt_handle`](crate::Store::interrupt_handle). - /// - /// By default this option is `false`. - pub fn interruptable(&mut self, enable: bool) -> &mut Self { - self.tunables.interruptable = enable; - self - } - /// Configures whether execution of WebAssembly will "consume fuel" to /// either halt or yield execution as desired. /// - /// This option is similar in purpose to [`Config::interruptable`] where - /// you can prevent infinitely-executing WebAssembly code. The difference - /// is that this option allows deterministic execution of WebAssembly code - /// by instrumenting generated code consume fuel as it executes. When fuel - /// runs out the behavior is defined by configuration within a [`Store`], - /// and by default a trap is raised. + /// This can be used to deterministically prevent infinitely-executing + /// WebAssembly code by instrumenting generated code to consume fuel as it + /// executes. When fuel runs out the behavior is defined by configuration + /// within a [`Store`], and by default a trap is raised. /// /// Note that a [`Store`] starts with no fuel, so if you enable this option /// you'll have to be sure to pour some fuel into [`Store`] before diff --git a/crates/wasmtime/src/func.rs b/crates/wasmtime/src/func.rs index 12a85107a2..f832bbb117 100644 --- a/crates/wasmtime/src/func.rs +++ b/crates/wasmtime/src/func.rs @@ -1,7 +1,7 @@ use crate::store::{StoreData, StoreOpaque, Stored}; use crate::{ - AsContext, AsContextMut, CallHook, Engine, Extern, FuncType, Instance, InterruptHandle, - StoreContext, StoreContextMut, Trap, Val, ValRaw, ValType, + AsContext, AsContextMut, CallHook, Engine, Extern, FuncType, Instance, StoreContext, + StoreContextMut, Trap, Val, ValRaw, ValType, }; use anyhow::{bail, Context as _, Result}; use std::future::Future; @@ -9,7 +9,6 @@ use std::mem; use std::panic::{self, AssertUnwindSafe}; use std::pin::Pin; use std::ptr::NonNull; -use std::sync::atomic::Ordering::Relaxed; use std::sync::Arc; use wasmtime_environ::{EntityIndex, FuncIndex}; use wasmtime_runtime::{ @@ -1198,14 +1197,13 @@ pub(crate) fn invoke_wasm_and_catch_traps( closure: impl FnMut(*mut VMContext), ) -> Result<(), Trap> { unsafe { - let exit = enter_wasm(store)?; + let exit = enter_wasm(store); if let Err(trap) = store.0.call_hook(CallHook::CallingWasm) { exit_wasm(store, exit); return Err(trap); } let result = wasmtime_runtime::catch_traps( - store.0.vminterrupts(), store.0.signal_handler(), store.0.default_callee(), closure, @@ -1232,7 +1230,7 @@ pub(crate) fn invoke_wasm_and_catch_traps( /// /// This function may fail if the the stack limit can't be set because an /// interrupt already happened. -fn enter_wasm(store: &mut StoreContextMut<'_, T>) -> Result, Trap> { +fn enter_wasm(store: &mut StoreContextMut<'_, T>) -> Option { // If this is a recursive call, e.g. our stack canary is already set, then // we may be able to skip this function. 
// @@ -1252,7 +1250,7 @@ fn enter_wasm(store: &mut StoreContextMut<'_, T>) -> Result, Tr .is_some() && !store.0.async_support() { - return Ok(None); + return None; } let stack_pointer = psm::stack_pointer() as usize; @@ -1270,34 +1268,13 @@ fn enter_wasm(store: &mut StoreContextMut<'_, T>) -> Result, Tr // (a million bytes) the slop shouldn't matter too much. // // After we've got the stack limit then we store it into the `stack_limit` - // variable. Note that the store is an atomic swap to ensure that we can - // consume any previously-sent interrupt requests. If we found that wasm was - // previously interrupted then we immediately return a trap (after resetting - // the stack limit). Otherwise we're good to keep on going. - // - // Note the usage of `Relaxed` memory orderings here. This is specifically - // an optimization in the `Drop` below where a `Relaxed` store is speedier - // than a `SeqCst` store. The rationale for `Relaxed` here is that the - // atomic orderings here aren't actually protecting any memory, we're just - // trying to be atomic with respect to this one location in memory (for when - // `InterruptHandle` sends us a signal). Due to the lack of needing to - // synchronize with any other memory it's hoped that the choice of `Relaxed` - // here should be correct for our use case. + // variable. let wasm_stack_limit = stack_pointer - store.engine().config().max_wasm_stack; - let interrupts = store.0.interrupts(); - let prev_stack = match interrupts.stack_limit.swap(wasm_stack_limit, Relaxed) { - wasmtime_environ::INTERRUPTED => { - // This means that an interrupt happened before we actually - // called this function, which means that we're now - // considered interrupted. - interrupts.stack_limit.store(usize::max_value(), Relaxed); - return Err(Trap::new_wasm( - None, - wasmtime_environ::TrapCode::Interrupt, - backtrace::Backtrace::new_unresolved(), - )); - } - n => n, + let prev_stack = unsafe { + mem::replace( + &mut *store.0.runtime_limits().stack_limit.get(), + wasm_stack_limit, + ) }; // The `usize::max_value()` sentinel is present on recursive calls to @@ -1315,7 +1292,7 @@ fn enter_wasm(store: &mut StoreContextMut<'_, T>) -> Result, Tr .set_stack_canary(Some(stack_pointer)); } - Ok(Some(prev_stack)) + Some(prev_stack) } fn exit_wasm(store: &mut StoreContextMut<'_, T>, prev_stack: Option) { @@ -1333,8 +1310,9 @@ fn exit_wasm(store: &mut StoreContextMut<'_, T>, prev_stack: Option) { store.0.externref_activations_table().set_stack_canary(None); } - // see docs above for why this uses `Relaxed` - store.0.interrupts().stack_limit.store(prev_stack, Relaxed); + unsafe { + *store.0.runtime_limits().stack_limit.get() = prev_stack; + } } /// A trait implemented for types which can be returned from closures passed to @@ -1746,14 +1724,6 @@ impl Caller<'_, T> { self.store.engine() } - /// Returns an [`InterruptHandle`] to interrupt wasm execution. - /// - /// See [`Store::interrupt_handle`](crate::Store::interrupt_handle) for more - /// information. - pub fn interrupt_handle(&self) -> Result { - self.store.interrupt_handle() - } - /// Perform garbage collection of `ExternRef`s. /// /// Same as [`Store::gc`](crate::Store::gc). 
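
For embedders updating off the removed `InterruptHandle` API, the epoch-based replacement wired up in this patch looks roughly as follows. This is a sketch only, restricted to APIs that appear elsewhere in the patch (`Config::epoch_interruption`, `Store::set_epoch_deadline`, `Engine::increment_epoch`); the `setup` function name and the one-second watchdog are illustrative, and error handling plus the actual wasm call are elided.

```rust
use std::{thread, time::Duration};
use wasmtime::{Config, Engine, Store};

fn setup() -> anyhow::Result<()> {
    // Old: `Config::interruptable(true)` plus `Store::interrupt_handle()`.
    // New: enable epoch interruption and give the store a deadline.
    let engine = Engine::new(Config::new().epoch_interruption(true))?;
    let mut store = Store::new(&engine, ());
    store.set_epoch_deadline(1);

    // Any thread holding a clone of the `Engine` can force wasm running in
    // this store to trap by bumping the epoch past that deadline.
    let watchdog = engine.clone();
    thread::spawn(move || {
        thread::sleep(Duration::from_secs(1));
        watchdog.increment_epoch();
    });

    // ... instantiate and call wasm with `store`; once the epoch is bumped
    // the guest traps with an "epoch deadline reached" error.
    Ok(())
}
```
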
diff --git a/crates/wasmtime/src/lib.rs b/crates/wasmtime/src/lib.rs index 9fb6233741..26bb8a140e 100644 --- a/crates/wasmtime/src/lib.rs +++ b/crates/wasmtime/src/lib.rs @@ -415,9 +415,7 @@ pub use crate::linker::*; pub use crate::memory::*; pub use crate::module::{FrameInfo, FrameSymbol, Module}; pub use crate::r#ref::ExternRef; -pub use crate::store::{ - AsContext, AsContextMut, CallHook, InterruptHandle, Store, StoreContext, StoreContextMut, -}; +pub use crate::store::{AsContext, AsContextMut, CallHook, Store, StoreContext, StoreContextMut}; pub use crate::trap::*; pub use crate::types::*; pub use crate::values::*; @@ -439,7 +437,6 @@ fn _assert_send_sync() { fn _assert_send(_t: T) {} _assert::(); _assert::(); - _assert::(); _assert::<(Func, TypedFunc<(), ()>, Global, Table, Memory)>(); _assert::(); _assert::(); diff --git a/crates/wasmtime/src/module/serialization.rs b/crates/wasmtime/src/module/serialization.rs index 5d34b784c3..83dca9a026 100644 --- a/crates/wasmtime/src/module/serialization.rs +++ b/crates/wasmtime/src/module/serialization.rs @@ -562,7 +562,6 @@ impl<'a> SerializedModule<'a> { dynamic_memory_offset_guard_size, generate_native_debuginfo, parse_wasm_debuginfo, - interruptable, consume_fuel, epoch_interruption, static_memory_bound_is_maximum, @@ -603,7 +602,6 @@ impl<'a> SerializedModule<'a> { other.parse_wasm_debuginfo, "WebAssembly backtrace support", )?; - Self::check_bool(interruptable, other.interruptable, "interruption support")?; Self::check_bool(consume_fuel, other.consume_fuel, "fuel support")?; Self::check_bool( epoch_interruption, @@ -823,36 +821,36 @@ Caused by: #[test] fn test_tunables_bool_mismatch() -> Result<()> { let mut config = Config::new(); - config.interruptable(true); + config.epoch_interruption(true); let engine = Engine::new(&config)?; let module = Module::new(&engine, "(module)")?; let mut serialized = SerializedModule::new(&module); - serialized.metadata.tunables.interruptable = false; + serialized.metadata.tunables.epoch_interruption = false; match serialized.into_module(&engine) { Ok(_) => unreachable!(), Err(e) => assert_eq!( e.to_string(), - "Module was compiled without interruption support but it is enabled for the host" + "Module was compiled without epoch interruption but it is enabled for the host" ), } let mut config = Config::new(); - config.interruptable(false); + config.epoch_interruption(false); let engine = Engine::new(&config)?; let module = Module::new(&engine, "(module)")?; let mut serialized = SerializedModule::new(&module); - serialized.metadata.tunables.interruptable = true; + serialized.metadata.tunables.epoch_interruption = true; match serialized.into_module(&engine) { Ok(_) => unreachable!(), Err(e) => assert_eq!( e.to_string(), - "Module was compiled with interruption support but it is not enabled for the host" + "Module was compiled with epoch interruption but it is not enabled for the host" ), } diff --git a/crates/wasmtime/src/store.rs b/crates/wasmtime/src/store.rs index 133d297917..c6c5d3ceae 100644 --- a/crates/wasmtime/src/store.rs +++ b/crates/wasmtime/src/store.rs @@ -95,7 +95,8 @@ use std::task::{Context, Poll}; use wasmtime_runtime::{ InstanceAllocationRequest, InstanceAllocator, InstanceHandle, ModuleInfo, OnDemandInstanceAllocator, SignalHandler, StorePtr, VMCallerCheckedAnyfunc, VMContext, - VMExternRef, VMExternRefActivationsTable, VMInterrupts, VMSharedSignatureIndex, VMTrampoline, + VMExternRef, VMExternRefActivationsTable, VMRuntimeLimits, VMSharedSignatureIndex, + VMTrampoline, }; mod context; @@ 
-255,7 +256,7 @@ pub struct StoreOpaque { _marker: marker::PhantomPinned, engine: Engine, - interrupts: Arc, + runtime_limits: VMRuntimeLimits, instances: Vec, signal_handler: Option>>, externref_activations_table: VMExternRefActivationsTable, @@ -273,7 +274,7 @@ pub struct StoreOpaque { memory_limit: usize, table_count: usize, table_limit: usize, - /// An adjustment to add to the fuel consumed value in `interrupts` above + /// An adjustment to add to the fuel consumed value in `runtime_limits` above /// to get the true amount of fuel consumed. fuel_adj: i64, #[cfg(feature = "async")] @@ -434,7 +435,7 @@ impl Store { inner: StoreOpaque { _marker: marker::PhantomPinned, engine: engine.clone(), - interrupts: Default::default(), + runtime_limits: Default::default(), instances: Vec::new(), signal_handler: None, externref_activations_table: VMExternRefActivationsTable::new(), @@ -628,89 +629,6 @@ impl Store { self.inner.engine() } - /// Creates an [`InterruptHandle`] which can be used to interrupt the - /// execution of instances within this `Store`. - /// - /// An [`InterruptHandle`] handle is a mechanism of ensuring that guest code - /// doesn't execute for too long. For example it's used to prevent wasm - /// programs for executing infinitely in infinite loops or recursive call - /// chains. - /// - /// The [`InterruptHandle`] type is sendable to other threads so you can - /// interact with it even while the thread with this `Store` is executing - /// wasm code. - /// - /// There's one method on an interrupt handle: - /// [`InterruptHandle::interrupt`]. This method is used to generate an - /// interrupt and cause wasm code to exit "soon". - /// - /// ## When are interrupts delivered? - /// - /// The term "interrupt" here refers to one of two different behaviors that - /// are interrupted in wasm: - /// - /// * The head of every loop in wasm has a check to see if it's interrupted. - /// * The prologue of every function has a check to see if it's interrupted. - /// - /// This interrupt mechanism makes no attempt to signal interrupts to - /// native code. For example if a host function is blocked, then sending - /// an interrupt will not interrupt that operation. - /// - /// Interrupts are consumed as soon as possible when wasm itself starts - /// executing. This means that if you interrupt wasm code then it basically - /// guarantees that the next time wasm is executing on the target thread it - /// will return quickly (either normally if it were already in the process - /// of returning or with a trap from the interrupt). Once an interrupt - /// trap is generated then an interrupt is consumed, and further execution - /// will not be interrupted (unless another interrupt is set). - /// - /// When implementing interrupts you'll want to ensure that the delivery of - /// interrupts into wasm code is also handled in your host imports and - /// functionality. Host functions need to either execute for bounded amounts - /// of time or you'll need to arrange for them to be interrupted as well. - /// - /// ## Return Value - /// - /// This function returns a `Result` since interrupts are not always - /// enabled. Interrupts are enabled via the - /// [`Config::interruptable`](crate::Config::interruptable) method, and if - /// this store's [`Config`](crate::Config) hasn't been configured to enable - /// interrupts then an error is returned. 
- /// - /// ## Examples - /// - /// ``` - /// # use anyhow::Result; - /// # use wasmtime::*; - /// # fn main() -> Result<()> { - /// // Enable interruptable code via `Config` and then create an interrupt - /// // handle which we'll use later to interrupt running code. - /// let engine = Engine::new(Config::new().interruptable(true))?; - /// let mut store = Store::new(&engine, ()); - /// let interrupt_handle = store.interrupt_handle()?; - /// - /// // Compile and instantiate a small example with an infinite loop. - /// let module = Module::new(&engine, r#" - /// (func (export "run") (loop br 0)) - /// "#)?; - /// let instance = Instance::new(&mut store, &module, &[])?; - /// let run = instance.get_typed_func::<(), (), _>(&mut store, "run")?; - /// - /// // Spin up a thread to send us an interrupt in a second - /// std::thread::spawn(move || { - /// std::thread::sleep(std::time::Duration::from_secs(1)); - /// interrupt_handle.interrupt(); - /// }); - /// - /// let trap = run.call(&mut store, ()).unwrap_err(); - /// assert!(trap.to_string().contains("wasm trap: interrupt")); - /// # Ok(()) - /// # } - /// ``` - pub fn interrupt_handle(&self) -> Result { - self.inner.interrupt_handle() - } - /// Perform garbage collection of `ExternRef`s. /// /// Note that it is not required to actively call this function. GC will @@ -921,13 +839,6 @@ impl<'a, T> StoreContext<'a, T> { self.0.engine() } - /// Returns an [`InterruptHandle`] to interrupt wasm execution. - /// - /// See [`Store::interrupt_handle`] for more information. - pub fn interrupt_handle(&self) -> Result { - self.0.interrupt_handle() - } - /// Access the underlying data owned by this `Store`. /// /// Same as [`Store::data`]. @@ -963,13 +874,6 @@ impl<'a, T> StoreContextMut<'a, T> { self.0.engine() } - /// Returns an [`InterruptHandle`] to interrupt wasm execution. - /// - /// See [`Store::interrupt_handle`] for more information. - pub fn interrupt_handle(&self) -> Result { - self.0.interrupt_handle() - } - /// Perform garbage collection of `ExternRef`s. /// /// Same as [`Store::gc`]. @@ -1111,16 +1015,6 @@ impl StoreOpaque { &mut self.store_data } - pub fn interrupt_handle(&self) -> Result { - if self.engine.config().tunables.interruptable { - Ok(InterruptHandle { - interrupts: self.interrupts.clone(), - }) - } else { - bail!("interrupts aren't enabled for this `Store`") - } - } - #[inline] pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry { &mut self.modules @@ -1148,8 +1042,8 @@ impl StoreOpaque { } #[inline] - pub fn interrupts(&self) -> &VMInterrupts { - &self.interrupts + pub fn runtime_limits(&self) -> &VMRuntimeLimits { + &self.runtime_limits } #[inline] @@ -1263,7 +1157,7 @@ impl StoreOpaque { if !self.engine.config().tunables.consume_fuel { return None; } - let consumed = unsafe { *self.interrupts.fuel_consumed.get() }; + let consumed = unsafe { *self.runtime_limits.fuel_consumed.get() }; Some(u64::try_from(self.fuel_adj + consumed).unwrap()) } @@ -1335,7 +1229,7 @@ impl StoreOpaque { // reasonable amount of time anyway. 
let fuel = i64::try_from(fuel).unwrap_or(i64::max_value()); let adj = self.fuel_adj; - let consumed_ptr = unsafe { &mut *self.interrupts.fuel_consumed.get() }; + let consumed_ptr = unsafe { &mut *self.runtime_limits.fuel_consumed.get() }; match (consumed_ptr.checked_sub(fuel), adj.checked_add(fuel)) { // If we succesfully did arithmetic without overflowing then we can @@ -1358,7 +1252,7 @@ impl StoreOpaque { } fn consume_fuel(&mut self, fuel: u64) -> Result { - let consumed_ptr = unsafe { &mut *self.interrupts.fuel_consumed.get() }; + let consumed_ptr = unsafe { &mut *self.runtime_limits.fuel_consumed.get() }; match i64::try_from(fuel) .ok() .and_then(|fuel| consumed_ptr.checked_add(fuel)) @@ -1394,8 +1288,8 @@ impl StoreOpaque { } #[inline] - pub fn vminterrupts(&self) -> *mut VMInterrupts { - &*self.interrupts as *const VMInterrupts as *mut VMInterrupts + pub fn vmruntime_limits(&self) -> *mut VMRuntimeLimits { + &self.runtime_limits as *const VMRuntimeLimits as *mut VMRuntimeLimits } pub unsafe fn insert_vmexternref_without_gc(&mut self, r: VMExternRef) { @@ -1731,8 +1625,8 @@ impl AsyncCx { } unsafe impl wasmtime_runtime::Store for StoreInner { - fn vminterrupts(&self) -> *mut VMInterrupts { - ::vminterrupts(self) + fn vmruntime_limits(&self) -> *mut VMRuntimeLimits { + ::vmruntime_limits(self) } fn epoch_ptr(&self) -> *const AtomicU64 { @@ -1912,13 +1806,13 @@ impl StoreInner { // Set a new deadline based on the "epoch deadline delta". // // Safety: this is safe because the epoch deadline in the - // `VMInterrupts` is accessed only here and by Wasm guest code + // `VMRuntimeLimits` is accessed only here and by Wasm guest code // running in this store, and we have a `&mut self` here. // // Also, note that when this update is performed while Wasm is // on the stack, the Wasm will reload the new value once we // return into it. - let epoch_deadline = unsafe { (*self.vminterrupts()).epoch_deadline.get_mut() }; + let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() }; *epoch_deadline = self.engine().current_epoch() + delta; } @@ -1926,7 +1820,7 @@ impl StoreInner { // Safety: this is safe because, as above, it is only invoked // from within `new_epoch` which is called from guest Wasm // code, which will have an exclusive borrow on the Store. - let epoch_deadline = unsafe { (*self.vminterrupts()).epoch_deadline.get_mut() }; + let epoch_deadline = unsafe { (*self.vmruntime_limits()).epoch_deadline.get_mut() }; *epoch_deadline } } @@ -1983,28 +1877,6 @@ impl wasmtime_runtime::ModuleInfoLookup for ModuleRegistry { } } -/// A threadsafe handle used to interrupt instances executing within a -/// particular `Store`. -/// -/// This structure is created by the [`Store::interrupt_handle`] method. -#[derive(Debug)] -pub struct InterruptHandle { - interrupts: Arc, -} - -impl InterruptHandle { - /// Flags that execution within this handle's original [`Store`] should be - /// interrupted. - /// - /// This will not immediately interrupt execution of wasm modules, but - /// rather it will interrupt wasm execution of loop headers and wasm - /// execution of function entries. For more information see - /// [`Store::interrupt_handle`]. 
diff --git a/crates/wasmtime/src/trap.rs b/crates/wasmtime/src/trap.rs
index 70ae11bda9..4fa0a19813 100644
--- a/crates/wasmtime/src/trap.rs
+++ b/crates/wasmtime/src/trap.rs
@@ -171,19 +171,12 @@ impl Trap {
     pub(crate) fn from_runtime(runtime_trap: wasmtime_runtime::Trap) -> Self {
         match runtime_trap {
             wasmtime_runtime::Trap::User(error) => Trap::from(error),
-            wasmtime_runtime::Trap::Jit {
-                pc,
-                backtrace,
-                maybe_interrupted,
-            } => {
-                let mut code = GlobalModuleRegistry::with(|modules| {
+            wasmtime_runtime::Trap::Jit { pc, backtrace } => {
+                let code = GlobalModuleRegistry::with(|modules| {
                     modules
                         .lookup_trap_code(pc)
                         .unwrap_or(EnvTrapCode::StackOverflow)
                 });
-                if maybe_interrupted && code == EnvTrapCode::StackOverflow {
-                    code = EnvTrapCode::Interrupt;
-                }
                 Trap::new_wasm(Some(pc), code, backtrace)
             }
             wasmtime_runtime::Trap::Wasm {
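Taken together, the API changes above and the updated examples below amount to the following old-to-new mapping. The snippet is a sketch of the migrated flow in one place, not additional patch content:

    // Config::interruptable(true)   ->  Config::epoch_interruption(true)
    // Store::interrupt_handle()?    ->  Store::set_epoch_deadline(ticks)
    // InterruptHandle::interrupt()  ->  Engine::increment_epoch()
    // "wasm trap: interrupt"        ->  "epoch deadline reached during execution"
    use anyhow::Result;
    use wasmtime::*;

    fn migrate() -> Result<()> {
        let engine = Engine::new(Config::new().epoch_interruption(true))?;
        let mut store = Store::new(&engine, ());
        store.set_epoch_deadline(1);
        let interrupter = engine.clone(); // was: store.interrupt_handle()?
        std::thread::spawn(move || interrupter.increment_epoch()); // was: handle.interrupt()
        Ok(())
    }
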
printf("Entering infinite loop...\n"); @@ -117,13 +114,6 @@ int main() { assert(trap != NULL); printf("Got a trap!...\n"); - // `trap` can be inspected here to see the trap message has an interrupt in it - wasmtime_trap_code_t code; - ok = wasmtime_trap_code(trap, &code); - assert(ok); - assert(code == WASMTIME_TRAP_CODE_INTERRUPT); - wasm_trap_delete(trap); - wasmtime_store_delete(store); wasm_engine_delete(engine); return 0; diff --git a/examples/interrupt.rs b/examples/interrupt.rs index 95179adbd3..61a2cdab75 100644 --- a/examples/interrupt.rs +++ b/examples/interrupt.rs @@ -7,11 +7,11 @@ use anyhow::Result; use wasmtime::*; fn main() -> Result<()> { - // Enable interruptable code via `Config` and then create an interrupt - // handle which we'll use later to interrupt running code. - let engine = Engine::new(Config::new().interruptable(true))?; + // Enable epoch interruption code via `Config` which means that code will + // get interrupted when `Engine::increment_epoch` happens. + let engine = Engine::new(Config::new().epoch_interruption(true))?; let mut store = Store::new(&engine, ()); - let interrupt_handle = store.interrupt_handle()?; + store.set_epoch_deadline(1); // Compile and instantiate a small example with an infinite loop. let module = Module::from_file(&engine, "examples/interrupt.wat")?; @@ -22,14 +22,14 @@ fn main() -> Result<()> { std::thread::spawn(move || { std::thread::sleep(std::time::Duration::from_secs(1)); println!("Interrupting!"); - interrupt_handle.interrupt(); + engine.increment_epoch(); }); println!("Entering infinite loop ..."); let trap = run.call(&mut store, ()).unwrap_err(); println!("trap received..."); - assert!(trap.to_string().contains("wasm trap: interrupt")); + assert!(trap.to_string().contains("epoch deadline reached")); Ok(()) } diff --git a/src/commands/compile.rs b/src/commands/compile.rs index 23088d68e1..0249012fe4 100644 --- a/src/commands/compile.rs +++ b/src/commands/compile.rs @@ -45,10 +45,6 @@ pub struct CompileCommand { #[structopt(flatten)] common: CommonOptions, - /// Enable support for interrupting WebAssembly code. 
diff --git a/src/commands/compile.rs b/src/commands/compile.rs
index 23088d68e1..0249012fe4 100644
--- a/src/commands/compile.rs
+++ b/src/commands/compile.rs
@@ -45,10 +45,6 @@ pub struct CompileCommand {
     #[structopt(flatten)]
     common: CommonOptions,
 
-    /// Enable support for interrupting WebAssembly code.
-    #[structopt(long)]
-    interruptable: bool,
-
     /// The target triple; default is the host triple
     #[structopt(long, value_name = "TARGET")]
     target: Option<String>,
@@ -72,8 +68,7 @@ impl CompileCommand {
             .take()
             .unwrap_or_else(|| Triple::host().to_string());
 
-        let mut config = self.common.config(Some(&target))?;
-        config.interruptable(self.interruptable);
+        let config = self.common.config(Some(&target))?;
 
         let engine = Engine::new(&config)?;
 
diff --git a/src/commands/run.rs b/src/commands/run.rs
index abd1e59a1d..b6da5b6556 100644
--- a/src/commands/run.rs
+++ b/src/commands/run.rs
@@ -159,7 +159,7 @@ impl RunCommand {
 
         let mut config = self.common.config(None)?;
         if self.wasm_timeout.is_some() {
-            config.interruptable(true);
+            config.epoch_interruption(true);
         }
         let engine = Engine::new(&config)?;
         let mut store = Store::new(&engine, Host::default());
@@ -305,10 +305,11 @@ impl RunCommand {
 
     fn load_main_module(&self, store: &mut Store<Host>, linker: &mut Linker<Host>) -> Result<()> {
         if let Some(timeout) = self.wasm_timeout {
-            let handle = store.interrupt_handle()?;
+            store.set_epoch_deadline(1);
+            let engine = store.engine().clone();
             thread::spawn(move || {
                 thread::sleep(timeout);
-                handle.interrupt();
+                engine.increment_epoch();
             });
         }
 
diff --git a/tests/all/cli_tests.rs b/tests/all/cli_tests.rs
index 42520a5238..18875be061 100644
--- a/tests/all/cli_tests.rs
+++ b/tests/all/cli_tests.rs
@@ -175,7 +175,7 @@ fn timeout_in_start() -> Result<()> {
     assert_eq!(output.stdout, b"");
     let stderr = String::from_utf8_lossy(&output.stderr);
     assert!(
-        stderr.contains("wasm trap: interrupt"),
+        stderr.contains("epoch deadline reached during execution"),
         "bad stderr: {}",
         stderr
     );
@@ -196,7 +196,7 @@ fn timeout_in_invoke() -> Result<()> {
     assert_eq!(output.stdout, b"");
     let stderr = String::from_utf8_lossy(&output.stderr);
     assert!(
-        stderr.contains("wasm trap: interrupt"),
+        stderr.contains("epoch deadline reached during execution"),
         "bad stderr: {}",
         stderr
    );
diff --git a/tests/all/gc.rs b/tests/all/gc.rs
index 27c9341ea0..6de09f1c53 100644
--- a/tests/all/gc.rs
+++ b/tests/all/gc.rs
@@ -337,7 +337,7 @@ fn table_drops_externref() -> anyhow::Result<()> {
 fn gee_i_sure_hope_refcounting_is_atomic() -> anyhow::Result<()> {
     let mut config = Config::new();
     config.wasm_reference_types(true);
-    config.interruptable(true);
+    config.epoch_interruption(true);
     let engine = Engine::new(&config)?;
     let mut store = Store::new(&engine, ());
     let module = Module::new(
@@ -380,14 +380,13 @@ fn gee_i_sure_hope_refcounting_is_atomic() -> anyhow::Result<()> {
     let flag = Arc::new(AtomicBool::new(false));
     let externref = ExternRef::new(SetFlagOnDrop(flag.clone()));
     let externref2 = externref.clone();
-    let handle = store.interrupt_handle()?;
     let child = std::thread::spawn(move || run.call(&mut store, Some(externref2)));
 
     for _ in 0..10000 {
         drop(externref.clone());
     }
 
-    handle.interrupt();
+    engine.increment_epoch();
     assert!(child.join().unwrap().is_err());
 
     assert!(!flag.load(SeqCst));
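The `run.rs` hunk above implements `--wasm-timeout` with a one-tick deadline and a single sleep. A finer-grained variant, sketched below and not what the CLI does, expresses the deadline in ticks and bumps the epoch periodically, which keeps the worst-case overrun to roughly one tick:

    use std::time::Duration;
    use wasmtime::Store;

    // Illustrative helper: bound wasm running in `store` to roughly `timeout`.
    fn apply_timeout<T>(store: &mut Store<T>, timeout: Duration) {
        let tick = Duration::from_millis(10);
        let ticks = (timeout.as_millis() / tick.as_millis()).max(1) as u64;
        store.set_epoch_deadline(ticks);
        let engine = store.engine().clone();
        std::thread::spawn(move || loop {
            std::thread::sleep(tick);
            engine.increment_epoch();
        });
        // Shutting the ticker down cleanly is omitted here; the tests below use an
        // atomic flag for that, and the sketch after them wraps the same idea in a
        // reusable type.
    }
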
diff --git a/tests/all/iloop.rs b/tests/all/iloop.rs
index 550810d4e9..9f1b6bbe91 100644
--- a/tests/all/iloop.rs
+++ b/tests/all/iloop.rs
@@ -1,9 +1,11 @@
-use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst};
 use wasmtime::*;
 
 fn interruptable_store() -> Store<()> {
-    let engine = Engine::new(Config::new().interruptable(true)).unwrap();
-    Store::new(&engine, ())
+    let engine = Engine::new(Config::new().epoch_interruption(true)).unwrap();
+    let mut store = Store::new(&engine, ());
+    store.set_epoch_deadline(1);
+    store
 }
 
 fn hugely_recursive_module(engine: &Engine) -> anyhow::Result<Module> {
@@ -28,9 +30,13 @@ fn loops_interruptable() -> anyhow::Result<()> {
     let module = Module::new(store.engine(), r#"(func (export "loop") (loop br 0))"#)?;
     let instance = Instance::new(&mut store, &module, &[])?;
     let iloop = instance.get_typed_func::<(), (), _>(&mut store, "loop")?;
-    store.interrupt_handle()?.interrupt();
+    store.engine().increment_epoch();
     let trap = iloop.call(&mut store, ()).unwrap_err();
-    assert!(trap.to_string().contains("wasm trap: interrupt"));
+    assert!(
+        trap.to_string().contains("epoch deadline reached"),
+        "bad message: {}",
+        trap
+    );
     Ok(())
 }
 
@@ -41,22 +47,25 @@ fn functions_interruptable() -> anyhow::Result<()> {
     let func = Func::wrap(&mut store, || {});
     let instance = Instance::new(&mut store, &module, &[func.into()])?;
     let iloop = instance.get_typed_func::<(), (), _>(&mut store, "loop")?;
-    store.interrupt_handle()?.interrupt();
+    store.engine().increment_epoch();
     let trap = iloop.call(&mut store, ()).unwrap_err();
     assert!(
-        trap.to_string().contains("wasm trap: interrupt"),
+        trap.to_string().contains("epoch deadline reached"),
         "{}",
         trap.to_string()
     );
     Ok(())
 }
 
+const NUM_HITS: usize = 100_000;
+
 #[test]
 fn loop_interrupt_from_afar() -> anyhow::Result<()> {
     // Create an instance which calls an imported function on each iteration of
     // the loop so we can count the number of loop iterations we've executed so
     // far.
     static HITS: AtomicUsize = AtomicUsize::new(0);
+    static STOP: AtomicBool = AtomicBool::new(false);
     let mut store = interruptable_store();
     let module = Module::new(
         store.engine(),
@@ -75,24 +84,26 @@ fn loop_interrupt_from_afar() -> anyhow::Result<()> {
     });
     let instance = Instance::new(&mut store, &module, &[func.into()])?;
 
-    // Use the instance's interrupt handle to wait for it to enter the loop long
-    // enough and then we signal an interrupt happens.
-    let handle = store.interrupt_handle()?;
+    // Use the engine to wait for it to enter the loop long enough and then we
+    // signal an interrupt happens.
+    let engine = store.engine().clone();
     let thread = std::thread::spawn(move || {
-        while HITS.load(SeqCst) <= 100_000 {
+        while HITS.load(SeqCst) <= NUM_HITS && !STOP.load(SeqCst) {
             // continue ...
         }
         println!("interrupting");
-        handle.interrupt();
+        engine.increment_epoch();
     });
 
     // Enter the infinitely looping function and assert that our interrupt
     // handle does indeed actually interrupt the function.
     let iloop = instance.get_typed_func::<(), (), _>(&mut store, "loop")?;
     let trap = iloop.call(&mut store, ()).unwrap_err();
+    STOP.store(true, SeqCst);
     thread.join().unwrap();
+    assert!(HITS.load(SeqCst) > NUM_HITS);
     assert!(
-        trap.to_string().contains("wasm trap: interrupt"),
+        trap.to_string().contains("epoch deadline reached"),
         "bad message: {}",
         trap.to_string()
     );
@@ -105,6 +116,8 @@ fn function_interrupt_from_afar() -> anyhow::Result<()> {
     // the loop so we can count the number of loop iterations we've executed so
     // far.
     static HITS: AtomicUsize = AtomicUsize::new(0);
+    static STOP: AtomicBool = AtomicBool::new(false);
+
     let mut store = interruptable_store();
     let module = hugely_recursive_module(store.engine())?;
     let func = Func::wrap(&mut store, || {
@@ -114,21 +127,23 @@ fn function_interrupt_from_afar() -> anyhow::Result<()> {
 
     // Use the instance's interrupt handle to wait for it to enter the loop long
     // enough and then we signal an interrupt happens.
-    let handle = store.interrupt_handle()?;
+    let engine = store.engine().clone();
     let thread = std::thread::spawn(move || {
-        while HITS.load(SeqCst) <= 100_000 {
+        while HITS.load(SeqCst) <= NUM_HITS && !STOP.load(SeqCst) {
             // continue ...
         }
-        handle.interrupt();
+        engine.increment_epoch();
     });
 
     // Enter the infinitely looping function and assert that our interrupt
     // handle does indeed actually interrupt the function.
     let iloop = instance.get_typed_func::<(), (), _>(&mut store, "loop")?;
     let trap = iloop.call(&mut store, ()).unwrap_err();
+    STOP.store(true, SeqCst);
     thread.join().unwrap();
+    assert!(HITS.load(SeqCst) > NUM_HITS);
     assert!(
-        trap.to_string().contains("wasm trap: interrupt"),
+        trap.to_string().contains("epoch deadline reached"),
         "bad message: {}",
         trap.to_string()
     );
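The tests above stop their epoch-bumping helper thread through a `STOP` flag so it cannot spin forever once the guest has trapped. The same shutdown pattern can be packaged up roughly as follows; the type and field names are illustrative and not part of the patch:

    use std::sync::{
        atomic::{AtomicBool, Ordering::SeqCst},
        Arc,
    };
    use std::time::Duration;
    use wasmtime::Engine;

    // Illustrative: periodically bump the engine's epoch until dropped.
    struct EpochTicker {
        stop: Arc<AtomicBool>,
        thread: Option<std::thread::JoinHandle<()>>,
    }

    impl EpochTicker {
        fn spawn(engine: Engine, period: Duration) -> Self {
            let stop = Arc::new(AtomicBool::new(false));
            let flag = stop.clone();
            let thread = Some(std::thread::spawn(move || {
                while !flag.load(SeqCst) {
                    std::thread::sleep(period);
                    engine.increment_epoch();
                }
            }));
            EpochTicker { stop, thread }
        }
    }

    impl Drop for EpochTicker {
        fn drop(&mut self) {
            self.stop.store(true, SeqCst);
            if let Some(thread) = self.thread.take() {
                let _ = thread.join();
            }
        }
    }
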