Port fix for CVE-2022-23636 to main. (#3818)
* Port fix for `CVE-2022-23636` to `main`.

  This commit ports the fix for `CVE-2022-23636` to `main`, but performs a
  refactoring that makes it unnecessary for the instance itself to track
  whether it has been initialized; such a change was not targeted enough for
  a security patch. The pooling allocator now initializes an instance only if
  all of its associated resource creation succeeds. If resource creation
  fails, no instance is dropped, as none was initialized.

  Also updates `RELEASES.md` to include the related patch releases.

* Add `Instance::new_at` to fully initialize an instance.

  Added `Instance::new_at` to fully initialize an instance at a given
  address. This should prevent the possibility that an `Instance` structure
  is dropped without an initialized `VMContext`.
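For context, the core idea behind `Instance::new_at` is placement initialization: the allocator reserves a single block large enough for the fixed-size instance header plus its trailing variable-size context area, then writes a fully formed value into it before anything can observe or drop it. Below is a minimal, self-contained sketch of that pattern, not wasmtime's actual code; the `Header` type, its fields, and `ctx_len` are hypothetical stand-ins invented for illustration.

    use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
    use std::ptr;

    // Stand-in for a fixed-size header that is followed in the same
    // allocation by a variable-size context area (like Instance + VMContext).
    struct Header {
        name: String, // owns heap data, so running Drop matters
        ctx_len: usize,
    }

    impl Header {
        // Layout of the header plus `ctx_len` trailing bytes, mirroring how
        // an `alloc_layout`-style helper sizes header and context together.
        fn layout(ctx_len: usize) -> Layout {
            let size = std::mem::size_of::<Header>().checked_add(ctx_len).unwrap();
            Layout::from_size_align(size, std::mem::align_of::<Header>()).unwrap()
        }

        // Fully initialize a `Header` at `ptr`, `new_at`-style: the struct is
        // written first, then the trailing context bytes, so the allocation
        // is never left in a partially initialized state.
        unsafe fn new_at(ptr: *mut Header, name: String, ctx_len: usize) {
            ptr::write(ptr, Header { name, ctx_len });
            // Initialize the trailing context area that follows the header.
            let ctx = ptr.add(1) as *mut u8;
            ptr::write_bytes(ctx, 0, ctx_len);
        }
    }

    fn main() {
        let ctx_len = 64;
        let layout = Header::layout(ctx_len);
        unsafe {
            let p = alloc(layout) as *mut Header;
            if p.is_null() {
                handle_alloc_error(layout);
            }
            Header::new_at(p, "instance-0".to_string(), ctx_len);

            println!("{} has {} context bytes", (*p).name, (*p).ctx_len);

            // Teardown mirrors a `deallocate` hook: run destructors in
            // place, then free the raw allocation.
            ptr::drop_in_place(p);
            dealloc(p as *mut u8, layout);
        }
    }

Keeping allocation and initialization in one step means a failure path never sees a half-built value, which is exactly the property the pooling-allocator fix below relies on.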
@@ -8,10 +8,13 @@ use crate::memory::{Memory, RuntimeMemoryCreator};
 use crate::table::{Table, TableElement, TableElementType};
 use crate::traphandlers::Trap;
 use crate::vmcontext::{
-    VMCallerCheckedAnyfunc, VMContext, VMFunctionImport, VMGlobalDefinition, VMGlobalImport,
-    VMInterrupts, VMMemoryDefinition, VMMemoryImport, VMTableDefinition, VMTableImport,
+    VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionImport,
+    VMGlobalDefinition, VMGlobalImport, VMInterrupts, VMMemoryDefinition, VMMemoryImport,
+    VMTableDefinition, VMTableImport,
 };
-use crate::{ExportFunction, ExportGlobal, ExportMemory, ExportTable, ModuleRuntimeInfo, Store};
+use crate::{
+    ExportFunction, ExportGlobal, ExportMemory, ExportTable, Imports, ModuleRuntimeInfo, Store,
+};
 use anyhow::Error;
 use memoffset::offset_of;
 use more_asserts::assert_lt;
@@ -24,12 +27,12 @@ use std::ptr::NonNull;
 use std::sync::atomic::AtomicU64;
 use std::sync::Arc;
 use std::{mem, ptr, slice};
-use wasmtime_environ::TableInitialization;
 use wasmtime_environ::{
     packed_option::ReservedValue, DataIndex, DefinedGlobalIndex, DefinedMemoryIndex,
     DefinedTableIndex, ElemIndex, EntityIndex, EntityRef, EntitySet, FuncIndex, GlobalIndex,
     HostPtr, MemoryIndex, Module, PrimaryMap, TableIndex, TrapCode, VMOffsets, WasmType,
 };
+use wasmtime_environ::{GlobalInit, TableInitialization};

 mod allocator;
@@ -96,29 +99,42 @@ pub(crate) struct Instance {

 #[allow(clippy::cast_ptr_alignment)]
 impl Instance {
-    /// Helper for allocators; not a public API.
-    pub(crate) fn create_raw(
-        runtime_info: Arc<dyn ModuleRuntimeInfo>,
+    /// Create an instance at the given memory address.
+    ///
+    /// It is assumed the memory was properly aligned and the
+    /// allocation was `alloc_size` in bytes.
+    unsafe fn new_at(
+        ptr: *mut Instance,
+        alloc_size: usize,
+        offsets: VMOffsets<HostPtr>,
+        req: InstanceAllocationRequest,
         memories: PrimaryMap<DefinedMemoryIndex, Memory>,
         tables: PrimaryMap<DefinedTableIndex, Table>,
-        host_state: Box<dyn Any + Send + Sync>,
-    ) -> Instance {
-        let module = runtime_info.module();
-        let offsets = VMOffsets::new(HostPtr, &module);
+    ) {
+        // The allocation must be *at least* the size required of `Instance`.
+        assert!(alloc_size >= Self::alloc_layout(&offsets).size());
+
+        let module = req.runtime_info.module();
         let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
         let dropped_data = EntitySet::with_capacity(module.passive_data_map.len());
-        Instance {
-            runtime_info,
-            offsets,
-            memories,
-            tables,
-            dropped_elements,
-            dropped_data,
-            host_state,
-            vmctx: VMContext {
-                _marker: std::marker::PhantomPinned,
-            },
-        }
+
+        ptr::write(
+            ptr,
+            Instance {
+                runtime_info: req.runtime_info.clone(),
+                offsets,
+                memories,
+                tables,
+                dropped_elements,
+                dropped_data,
+                host_state: req.host_state,
+                vmctx: VMContext {
+                    _marker: std::marker::PhantomPinned,
+                },
+            },
+        );
+
+        (*ptr).initialize_vmctx(module, req.store, req.imports);
     }

     /// Helper function to access various locations offset from our `*mut
@@ -450,11 +466,11 @@ impl Instance {
         result
     }

-    fn alloc_layout(&self) -> Layout {
-        let size = mem::size_of_val(self)
-            .checked_add(usize::try_from(self.offsets.size_of_vmctx()).unwrap())
+    fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
+        let size = mem::size_of::<Self>()
+            .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
             .unwrap();
-        let align = mem::align_of_val(self);
+        let align = mem::align_of::<Self>();
         Layout::from_size_align(size, align).unwrap()
     }

@@ -855,19 +871,121 @@ impl Instance {
         }
     }

-    fn drop_globals(&mut self) {
-        for (idx, global) in self.module().globals.iter() {
-            let idx = match self.module().defined_global_index(idx) {
-                Some(idx) => idx,
-                None => continue,
-            };
-            match global.wasm_ty {
-                // For now only externref gloabls need to get destroyed
-                WasmType::ExternRef => {}
-                _ => continue,
-            }
-            unsafe {
-                drop((*self.global_ptr(idx)).as_externref_mut().take());
-            }
-        }
-    }
+    /// Initialize the VMContext data associated with this Instance.
+    ///
+    /// The `VMContext` memory is assumed to be uninitialized; any field
+    /// that we need in a certain state will be explicitly written by this
+    /// function.
+    unsafe fn initialize_vmctx(&mut self, module: &Module, store: StorePtr, imports: Imports) {
+        assert!(std::ptr::eq(module, self.module().as_ref()));
+
+        if let Some(store) = store.as_raw() {
+            *self.interrupts() = (*store).vminterrupts();
+            *self.epoch_ptr() = (*store).epoch_ptr();
+            *self.externref_activations_table() = (*store).externref_activations_table().0;
+            self.set_store(store);
+        }
+
+        // Initialize shared signatures
+        let signatures = self.runtime_info.signature_ids();
+        *self.vmctx_plus_offset(self.offsets.vmctx_signature_ids_array()) = signatures.as_ptr();
+
+        // Initialize the built-in functions
+        *self.vmctx_plus_offset(self.offsets.vmctx_builtin_functions()) =
+            &VMBuiltinFunctionsArray::INIT;
+
+        // Initialize the imports
+        debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
+        ptr::copy_nonoverlapping(
+            imports.functions.as_ptr(),
+            self.vmctx_plus_offset(self.offsets.vmctx_imported_functions_begin()),
+            imports.functions.len(),
+        );
+        debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
+        ptr::copy_nonoverlapping(
+            imports.tables.as_ptr(),
+            self.vmctx_plus_offset(self.offsets.vmctx_imported_tables_begin()),
+            imports.tables.len(),
+        );
+        debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
+        ptr::copy_nonoverlapping(
+            imports.memories.as_ptr(),
+            self.vmctx_plus_offset(self.offsets.vmctx_imported_memories_begin()),
+            imports.memories.len(),
+        );
+        debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
+        ptr::copy_nonoverlapping(
+            imports.globals.as_ptr(),
+            self.vmctx_plus_offset(self.offsets.vmctx_imported_globals_begin()),
+            imports.globals.len(),
+        );
+
+        // N.B.: there is no need to initialize the anyfuncs array because
+        // we eagerly construct each element in it whenever asked for a
+        // reference to that element. In other words, there is no state
+        // needed to track the lazy-init, so we don't need to initialize
+        // any state now.
+
+        // Initialize the defined tables
+        let mut ptr = self.vmctx_plus_offset(self.offsets.vmctx_tables_begin());
+        for i in 0..module.table_plans.len() - module.num_imported_tables {
+            ptr::write(ptr, self.tables[DefinedTableIndex::new(i)].vmtable());
+            ptr = ptr.add(1);
+        }
+
+        // Initialize the defined memories
+        let mut ptr = self.vmctx_plus_offset(self.offsets.vmctx_memories_begin());
+        for i in 0..module.memory_plans.len() - module.num_imported_memories {
+            ptr::write(ptr, self.memories[DefinedMemoryIndex::new(i)].vmmemory());
+            ptr = ptr.add(1);
+        }
+
+        // Initialize the defined globals
+        self.initialize_vmctx_globals(module);
+    }
+
+    unsafe fn initialize_vmctx_globals(&mut self, module: &Module) {
+        let num_imports = module.num_imported_globals;
+        for (index, global) in module.globals.iter().skip(num_imports) {
+            let def_index = module.defined_global_index(index).unwrap();
+            let to = self.global_ptr(def_index);
+
+            // Initialize the global before writing to it
+            ptr::write(to, VMGlobalDefinition::new());
+
+            match global.initializer {
+                GlobalInit::I32Const(x) => *(*to).as_i32_mut() = x,
+                GlobalInit::I64Const(x) => *(*to).as_i64_mut() = x,
+                GlobalInit::F32Const(x) => *(*to).as_f32_bits_mut() = x,
+                GlobalInit::F64Const(x) => *(*to).as_f64_bits_mut() = x,
+                GlobalInit::V128Const(x) => *(*to).as_u128_mut() = x,
+                GlobalInit::GetGlobal(x) => {
+                    let from = if let Some(def_x) = module.defined_global_index(x) {
+                        self.global(def_x)
+                    } else {
+                        &*self.imported_global(x).from
+                    };
+                    // Globals of type `externref` need to manage the reference
+                    // count as values move between globals, everything else is just
+                    // copy-able bits.
+                    match global.wasm_ty {
+                        WasmType::ExternRef => {
+                            *(*to).as_externref_mut() = from.as_externref().clone()
+                        }
+                        _ => ptr::copy_nonoverlapping(from, to, 1),
+                    }
+                }
+                GlobalInit::RefFunc(f) => {
+                    *(*to).as_anyfunc_mut() = self.get_caller_checked_anyfunc(f).unwrap()
+                        as *const VMCallerCheckedAnyfunc;
+                }
+                GlobalInit::RefNullConst => match global.wasm_ty {
+                    // `VMGlobalDefinition::new()` already zeroed out the bits
+                    WasmType::FuncRef => {}
+                    WasmType::ExternRef => {}
+                    ty => panic!("unsupported reference type for global: {:?}", ty),
+                },
+                GlobalInit::Import => panic!("locally-defined global initialized as import"),
+            }
+        }
+    }
@@ -875,7 +993,21 @@ impl Instance {

 impl Drop for Instance {
     fn drop(&mut self) {
-        self.drop_globals();
+        // Drop any defined globals
+        for (idx, global) in self.module().globals.iter() {
+            let idx = match self.module().defined_global_index(idx) {
+                Some(idx) => idx,
+                None => continue,
+            };
+            match global.wasm_ty {
+                // For now only externref globals need to get destroyed
+                WasmType::ExternRef => {}
+                _ => continue,
+            }
+            unsafe {
+                drop((*self.global_ptr(idx)).as_externref_mut().take());
+            }
+        }
     }
 }

@@ -3,7 +3,6 @@ use crate::instance::{Instance, InstanceHandle, RuntimeMemoryCreator};
 use crate::memory::{DefaultMemoryCreator, Memory};
 use crate::table::Table;
 use crate::traphandlers::Trap;
-use crate::vmcontext::{VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMGlobalDefinition};
 use crate::ModuleRuntimeInfo;
 use crate::Store;
 use anyhow::Result;
@@ -15,9 +14,9 @@ use std::slice;
 use std::sync::Arc;
 use thiserror::Error;
 use wasmtime_environ::{
-    DefinedMemoryIndex, DefinedTableIndex, EntityRef, GlobalInit, InitMemory, MemoryInitialization,
+    DefinedMemoryIndex, DefinedTableIndex, HostPtr, InitMemory, MemoryInitialization,
     MemoryInitializer, Module, PrimaryMap, TableInitialization, TableInitializer, TrapCode,
-    WasmType, WASM_PAGE_SIZE,
+    VMOffsets, WasmType, WASM_PAGE_SIZE,
 };

 #[cfg(feature = "pooling-allocator")]
@@ -427,129 +426,6 @@ fn initialize_instance(
     Ok(())
 }

-/// Initialize the VMContext data associated with an Instance.
-///
-/// The `VMContext` memory is assumed to be uninitialized; any field
-/// that we need in a certain state will be explicitly written by this
-/// function.
-unsafe fn initialize_vmcontext(instance: &mut Instance, req: InstanceAllocationRequest) {
-    if let Some(store) = req.store.as_raw() {
-        *instance.interrupts() = (*store).vminterrupts();
-        *instance.epoch_ptr() = (*store).epoch_ptr();
-        *instance.externref_activations_table() = (*store).externref_activations_table().0;
-        instance.set_store(store);
-    }
-
-    let module = req.runtime_info.module();
-
-    // Initialize shared signatures
-    let signatures = req.runtime_info.signature_ids();
-    *instance.vmctx_plus_offset(instance.offsets.vmctx_signature_ids_array()) = signatures.as_ptr();
-
-    // Initialize the built-in functions
-    *instance.vmctx_plus_offset(instance.offsets.vmctx_builtin_functions()) =
-        &VMBuiltinFunctionsArray::INIT;
-
-    // Initialize the imports
-    debug_assert_eq!(req.imports.functions.len(), module.num_imported_funcs);
-    ptr::copy_nonoverlapping(
-        req.imports.functions.as_ptr(),
-        instance.vmctx_plus_offset(instance.offsets.vmctx_imported_functions_begin()),
-        req.imports.functions.len(),
-    );
-    debug_assert_eq!(req.imports.tables.len(), module.num_imported_tables);
-    ptr::copy_nonoverlapping(
-        req.imports.tables.as_ptr(),
-        instance.vmctx_plus_offset(instance.offsets.vmctx_imported_tables_begin()),
-        req.imports.tables.len(),
-    );
-    debug_assert_eq!(req.imports.memories.len(), module.num_imported_memories);
-    ptr::copy_nonoverlapping(
-        req.imports.memories.as_ptr(),
-        instance.vmctx_plus_offset(instance.offsets.vmctx_imported_memories_begin()),
-        req.imports.memories.len(),
-    );
-    debug_assert_eq!(req.imports.globals.len(), module.num_imported_globals);
-    ptr::copy_nonoverlapping(
-        req.imports.globals.as_ptr(),
-        instance.vmctx_plus_offset(instance.offsets.vmctx_imported_globals_begin()),
-        req.imports.globals.len(),
-    );
-
-    // N.B.: there is no need to initialize the anyfuncs array because
-    // we eagerly construct each element in it whenever asked for a
-    // reference to that element. In other words, there is no state
-    // needed to track the lazy-init, so we don't need to initialize
-    // any state now.
-
-    // Initialize the defined tables
-    let mut ptr = instance.vmctx_plus_offset(instance.offsets.vmctx_tables_begin());
-    for i in 0..module.table_plans.len() - module.num_imported_tables {
-        ptr::write(ptr, instance.tables[DefinedTableIndex::new(i)].vmtable());
-        ptr = ptr.add(1);
-    }
-
-    // Initialize the defined memories
-    let mut ptr = instance.vmctx_plus_offset(instance.offsets.vmctx_memories_begin());
-    for i in 0..module.memory_plans.len() - module.num_imported_memories {
-        ptr::write(
-            ptr,
-            instance.memories[DefinedMemoryIndex::new(i)].vmmemory(),
-        );
-        ptr = ptr.add(1);
-    }
-
-    // Initialize the defined globals
-    initialize_vmcontext_globals(instance, module);
-}
-
-unsafe fn initialize_vmcontext_globals(
-    instance: &mut Instance,
-    module: &Arc<wasmtime_environ::Module>,
-) {
-    let num_imports = module.num_imported_globals;
-    for (index, global) in module.globals.iter().skip(num_imports) {
-        let def_index = module.defined_global_index(index).unwrap();
-        let to = instance.global_ptr(def_index);
-
-        // Initialize the global before writing to it
-        ptr::write(to, VMGlobalDefinition::new());
-
-        match global.initializer {
-            GlobalInit::I32Const(x) => *(*to).as_i32_mut() = x,
-            GlobalInit::I64Const(x) => *(*to).as_i64_mut() = x,
-            GlobalInit::F32Const(x) => *(*to).as_f32_bits_mut() = x,
-            GlobalInit::F64Const(x) => *(*to).as_f64_bits_mut() = x,
-            GlobalInit::V128Const(x) => *(*to).as_u128_mut() = x,
-            GlobalInit::GetGlobal(x) => {
-                let from = if let Some(def_x) = module.defined_global_index(x) {
-                    instance.global(def_x)
-                } else {
-                    &*instance.imported_global(x).from
-                };
-                // Globals of type `externref` need to manage the reference
-                // count as values move between globals, everything else is just
-                // copy-able bits.
-                match global.wasm_ty {
-                    WasmType::ExternRef => *(*to).as_externref_mut() = from.as_externref().clone(),
-                    _ => ptr::copy_nonoverlapping(from, to, 1),
-                }
-            }
-            GlobalInit::RefFunc(f) => {
-                *(*to).as_anyfunc_mut() = instance.get_caller_checked_anyfunc(f).unwrap()
-                    as *const VMCallerCheckedAnyfunc;
-            }
-            GlobalInit::RefNullConst => match global.wasm_ty {
-                // `VMGlobalDefinition::new()` already zeroed out the bits
-                WasmType::FuncRef => {}
-                WasmType::ExternRef => {}
-                ty => panic!("unsupported reference type for global: {:?}", ty),
-            },
-            GlobalInit::Import => panic!("locally-defined global initialized as import"),
-        }
-    }
-}
-
 /// Represents the on-demand instance allocator.
 #[derive(Clone)]
 pub struct OnDemandInstanceAllocator {
@@ -647,26 +523,16 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
     ) -> Result<InstanceHandle, InstantiationError> {
         let memories = self.create_memories(&mut req.store, &req.runtime_info)?;
         let tables = Self::create_tables(&mut req.store, &req.runtime_info)?;
-
-        let host_state = std::mem::replace(&mut req.host_state, Box::new(()));
-
-        let mut handle = {
-            let instance =
-                Instance::create_raw(req.runtime_info.clone(), memories, tables, host_state);
-            let layout = instance.alloc_layout();
-            let instance_ptr = alloc::alloc(layout) as *mut Instance;
-            if instance_ptr.is_null() {
-                alloc::handle_alloc_error(layout);
-            }
-            ptr::write(instance_ptr, instance);
-            InstanceHandle {
-                instance: instance_ptr,
-            }
-        };
-
-        initialize_vmcontext(handle.instance_mut(), req);
-
-        Ok(handle)
+        let module = req.runtime_info.module();
+        let offsets = VMOffsets::new(HostPtr, module);
+        let layout = Instance::alloc_layout(&offsets);
+        let instance_ptr = alloc::alloc(layout) as *mut Instance;
+        if instance_ptr.is_null() {
+            alloc::handle_alloc_error(layout);
+        }
+
+        Instance::new_at(instance_ptr, layout.size(), offsets, req, memories, tables);
+
+        Ok(InstanceHandle {
+            instance: instance_ptr,
+        })
     }

     unsafe fn initialize(
@@ -679,7 +545,7 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
     }

     unsafe fn deallocate(&self, handle: &InstanceHandle) {
-        let layout = handle.instance().alloc_layout();
+        let layout = Instance::alloc_layout(&handle.instance().offsets);
         ptr::drop_in_place(handle.instance);
         alloc::dealloc(handle.instance.cast(), layout);
     }
@@ -8,20 +8,19 @@
 //! when modules can be constrained based on configurable limits.

 use super::{
-    initialize_instance, initialize_vmcontext, InstanceAllocationRequest, InstanceAllocator,
-    InstanceHandle, InstantiationError,
+    initialize_instance, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
+    InstantiationError,
 };
 use crate::{instance::Instance, Memory, Mmap, Table};
-use crate::{MemFdSlot, ModuleRuntimeInfo};
+use crate::{MemFdSlot, ModuleRuntimeInfo, Store};
 use anyhow::{anyhow, bail, Context, Result};
 use libc::c_void;
 use std::convert::TryFrom;
 use std::mem;
 use std::sync::Arc;
 use std::sync::Mutex;
 use wasmtime_environ::{
-    DefinedMemoryIndex, HostPtr, MemoryStyle, Module, PrimaryMap, Tunables, VMOffsets,
-    VMOffsetsFields, WASM_PAGE_SIZE,
+    DefinedMemoryIndex, DefinedTableIndex, HostPtr, MemoryStyle, Module, PrimaryMap, Tunables,
+    VMOffsets, VMOffsetsFields, WASM_PAGE_SIZE,
 };

 mod index_allocator;
@@ -342,51 +341,45 @@ impl InstancePool {
         &mut *(self.mapping.as_mut_ptr().add(index * self.instance_size) as *mut Instance)
     }

-    unsafe fn setup_instance(
+    unsafe fn initialize_instance(
         &self,
-        index: usize,
-        mut req: InstanceAllocationRequest,
+        instance_index: usize,
+        req: InstanceAllocationRequest,
     ) -> Result<InstanceHandle, InstantiationError> {
-        let host_state = std::mem::replace(&mut req.host_state, Box::new(()));
-        let instance_data = Instance::create_raw(
-            req.runtime_info.clone(),
-            PrimaryMap::default(),
-            PrimaryMap::default(),
-            host_state,
-        );
-
-        let instance = self.instance(index);
+        let module = req.runtime_info.module();

-        // Instances are uninitialized memory at first; we need to
-        // write an empty but initialized `Instance` struct into the
-        // chosen slot before we do anything else with it. (This is
-        // paired with a `drop_in_place` in deallocate below.)
-        std::ptr::write(instance as _, instance_data);
+        let mut memories =
+            PrimaryMap::with_capacity(module.memory_plans.len() - module.num_imported_memories);
+        let mut tables =
+            PrimaryMap::with_capacity(module.table_plans.len() - module.num_imported_tables);

-        // set_instance_memories and _tables will need the store before we can completely
-        // initialize the vmcontext.
-        if let Some(store) = req.store.as_raw() {
-            instance.set_store(store);
+        // If we fail to allocate the instance's resources, deallocate
+        // what was successfully allocated and return before initializing the instance
+        if let Err(e) = self.allocate_instance_resources(
+            instance_index,
+            req.runtime_info.as_ref(),
+            req.store.as_raw(),
+            &mut memories,
+            &mut tables,
+        ) {
+            self.deallocate_memories(instance_index, &mut memories);
+            self.deallocate_tables(instance_index, &mut tables);
+            return Err(e);
         }

-        Self::set_instance_memories(
-            index,
-            instance,
-            &self.memories,
-            self.memories.max_wasm_pages,
-            &req.runtime_info,
-        )?;
+        let instance_ptr = self.instance(instance_index) as _;

-        Self::set_instance_tables(
-            instance,
-            self.tables.get(index).map(|x| x as *mut usize),
-            self.tables.max_elements,
-        )?;
-
-        initialize_vmcontext(instance, req);
+        Instance::new_at(
+            instance_ptr,
+            self.instance_size,
+            VMOffsets::new(HostPtr, module),
+            req,
+            memories,
+            tables,
+        );

         Ok(InstanceHandle {
-            instance: instance as _,
+            instance: instance_ptr,
         })
     }
@@ -402,15 +395,15 @@ impl InstancePool {
             alloc.alloc(req.runtime_info.unique_id()).index()
         };

-        unsafe {
-            self.setup_instance(index, req).or_else(|e| {
-                // Deallocate the allocated instance on error
-                let instance = self.instance(index);
-                self.deallocate(&InstanceHandle {
-                    instance: instance as _,
-                });
+        match unsafe { self.initialize_instance(index, req) } {
+            Ok(handle) => Ok(handle),
+            Err(e) => {
+                // If we failed to initialize the instance, there's no need to drop
+                // it as it was never "allocated", but we still need to free the
+                // instance's slot.
+                self.index_allocator.lock().unwrap().free(SlotId(index));
                 Err(e)
-            })
+            }
         }
     }
@@ -426,9 +419,112 @@ impl InstancePool {

         let instance = unsafe { &mut *handle.instance };

+        // Deallocate any resources used by the instance
+        self.deallocate_memories(index, &mut instance.memories);
+        self.deallocate_tables(index, &mut instance.tables);
+
+        // We've now done all of the pooling-allocator-specific
+        // teardown, so we can drop the Instance and let destructors
+        // take care of any other fields (host state, globals, etc.).
+        unsafe {
+            std::ptr::drop_in_place(instance as *mut _);
+        }
+        // The instance is now uninitialized memory and cannot be
+        // touched again until we write a fresh Instance in-place with
+        // std::ptr::write in allocate() above.
+
+        self.index_allocator.lock().unwrap().free(SlotId(index));
+    }
+
+    fn allocate_instance_resources(
+        &self,
+        instance_index: usize,
+        runtime_info: &dyn ModuleRuntimeInfo,
+        store: Option<*mut dyn Store>,
+        memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
+        tables: &mut PrimaryMap<DefinedTableIndex, Table>,
+    ) -> Result<(), InstantiationError> {
+        self.allocate_memories(instance_index, runtime_info, store, memories)?;
+        self.allocate_tables(instance_index, runtime_info, store, tables)?;
+
+        Ok(())
+    }
+
+    fn allocate_memories(
+        &self,
+        instance_index: usize,
+        runtime_info: &dyn ModuleRuntimeInfo,
+        store: Option<*mut dyn Store>,
+        memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
+    ) -> Result<(), InstantiationError> {
+        let module = runtime_info.module();
+
+        for (memory_index, plan) in module
+            .memory_plans
+            .iter()
+            .skip(module.num_imported_memories)
+        {
+            let defined_index = module
+                .defined_memory_index(memory_index)
+                .expect("should be a defined memory since we skipped imported ones");
+
+            let memory = unsafe {
+                std::slice::from_raw_parts_mut(
+                    self.memories.get_base(instance_index, defined_index),
+                    (self.memories.max_wasm_pages as usize) * (WASM_PAGE_SIZE as usize),
+                )
+            };
+
+            if let Some(image) = runtime_info
+                .memfd_image(defined_index)
+                .map_err(|err| InstantiationError::Resource(err.into()))?
+            {
+                let mut slot = self.memories.take_memfd_slot(instance_index, defined_index);
+                let initial_size = plan.memory.minimum * WASM_PAGE_SIZE as u64;
+
+                // If instantiation fails, we can propagate the error
+                // upward and drop the slot. This will cause the Drop
+                // handler to attempt to map the range with PROT_NONE
+                // memory, to reserve the space while releasing any
+                // stale mappings. The next use of this slot will then
+                // create a new MemFdSlot that will try to map over
+                // this, returning errors as well if the mapping
+                // errors persist. The unmap-on-drop is best effort;
+                // if it fails, then we can still soundly continue
+                // using the rest of the pool and allowing the rest of
+                // the process to continue, because we never perform a
+                // mmap that would leave an open space for someone
+                // else to come in and map something.
+                slot.instantiate(initial_size as usize, Some(image))
+                    .map_err(|e| InstantiationError::Resource(e.into()))?;
+
+                memories.push(
+                    Memory::new_static(plan, memory, None, Some(slot), unsafe {
+                        &mut *store.unwrap()
+                    })
+                    .map_err(InstantiationError::Resource)?,
+                );
+            } else {
+                memories.push(
+                    Memory::new_static(plan, memory, Some(commit_memory_pages), None, unsafe {
+                        &mut *store.unwrap()
+                    })
+                    .map_err(InstantiationError::Resource)?,
+                );
+            }
+        }
+
+        Ok(())
+    }
+
+    fn deallocate_memories(
+        &self,
+        instance_index: usize,
+        memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
+    ) {
         // Decommit any linear memories that were used
         for ((def_mem_idx, memory), base) in
-            instance.memories.iter_mut().zip(self.memories.get(index))
+            memories.iter_mut().zip(self.memories.get(instance_index))
         {
             let mut memory = mem::take(memory);
             assert!(memory.is_static());
@@ -444,7 +540,7 @@ impl InstancePool {
             // address space reservation.
             if memfd_slot.clear_and_remain_ready().is_ok() {
                 self.memories
-                    .return_memfd_slot(index, def_mem_idx, memfd_slot);
+                    .return_memfd_slot(instance_index, def_mem_idx, memfd_slot);
             }
         }

@@ -466,12 +562,48 @@ impl InstancePool {
                 }
             }
         }
     }

-        instance.memories.clear();
-        instance.dropped_data.clear();
-
+    fn allocate_tables(
+        &self,
+        instance_index: usize,
+        runtime_info: &dyn ModuleRuntimeInfo,
+        store: Option<*mut dyn Store>,
+        tables: &mut PrimaryMap<DefinedTableIndex, Table>,
+    ) -> Result<(), InstantiationError> {
+        let module = runtime_info.module();
+        let mut bases = self.tables.get(instance_index);
+        for (_, plan) in module.table_plans.iter().skip(module.num_imported_tables) {
+            let base = bases.next().unwrap() as _;
+
+            commit_table_pages(
+                base as *mut u8,
+                self.tables.max_elements as usize * mem::size_of::<*mut u8>(),
+            )
+            .map_err(InstantiationError::Resource)?;
+
+            tables.push(
+                Table::new_static(
+                    plan,
+                    unsafe {
+                        std::slice::from_raw_parts_mut(base, self.tables.max_elements as usize)
+                    },
+                    unsafe { &mut *store.unwrap() },
+                )
+                .map_err(InstantiationError::Resource)?,
+            );
+        }
+
+        Ok(())
+    }
+
+    fn deallocate_tables(
+        &self,
+        instance_index: usize,
+        tables: &mut PrimaryMap<DefinedTableIndex, Table>,
+    ) {
         // Decommit any tables that were used
-        for (table, base) in instance.tables.values_mut().zip(self.tables.get(index)) {
+        for (table, base) in tables.values_mut().zip(self.tables.get(instance_index)) {
             let table = mem::take(table);
             assert!(table.is_static());
@@ -483,122 +615,6 @@ impl InstancePool {
             drop(table);
             decommit_table_pages(base, size).expect("failed to decommit table pages");
         }
-
-        // We've now done all of the pooling-allocator-specific
-        // teardown, so we can drop the Instance and let destructors
-        // take care of any other fields (host state, globals, etc.).
-        unsafe {
-            std::ptr::drop_in_place(instance as *mut _);
-        }
-        // The instance is now uninitialized memory and cannot be
-        // touched again until we write a fresh Instance in-place with
-        // std::ptr::write in allocate() above.
-
-        self.index_allocator.lock().unwrap().free(SlotId(index));
-    }
-
-    fn set_instance_memories(
-        instance_idx: usize,
-        instance: &mut Instance,
-        memories: &MemoryPool,
-        max_pages: u64,
-        runtime_info: &Arc<dyn ModuleRuntimeInfo>,
-    ) -> Result<(), InstantiationError> {
-        let module = instance.runtime_info.module();
-
-        assert!(instance.memories.is_empty());
-
-        for (memory_index, plan) in module
-            .memory_plans
-            .iter()
-            .skip(module.num_imported_memories)
-        {
-            let defined_index = module
-                .defined_memory_index(memory_index)
-                .expect("should be a defined memory since we skipped imported ones");
-
-            let memory = unsafe {
-                std::slice::from_raw_parts_mut(
-                    memories.get_base(instance_idx, defined_index),
-                    (max_pages as usize) * (WASM_PAGE_SIZE as usize),
-                )
-            };
-
-            if let Some(image) = runtime_info
-                .memfd_image(defined_index)
-                .map_err(|err| InstantiationError::Resource(err.into()))?
-            {
-                let mut slot = memories.take_memfd_slot(instance_idx, defined_index);
-                let initial_size = plan.memory.minimum * WASM_PAGE_SIZE as u64;
-
-                // If instantiation fails, we can propagate the error
-                // upward and drop the slot. This will cause the Drop
-                // handler to attempt to map the range with PROT_NONE
-                // memory, to reserve the space while releasing any
-                // stale mappings. The next use of this slot will then
-                // create a new MemFdSlot that will try to map over
-                // this, returning errors as well if the mapping
-                // errors persist. The unmap-on-drop is best effort;
-                // if it fails, then we can still soundly continue
-                // using the rest of the pool and allowing the rest of
-                // the process to continue, because we never perform a
-                // mmap that would leave an open space for someone
-                // else to come in and map something.
-                slot.instantiate(initial_size as usize, Some(image))
-                    .map_err(|e| InstantiationError::Resource(e.into()))?;
-
-                instance.memories.push(
-                    Memory::new_static(plan, memory, None, Some(slot), unsafe {
-                        &mut *instance.store()
-                    })
-                    .map_err(InstantiationError::Resource)?,
-                );
-            } else {
-                instance.memories.push(
-                    Memory::new_static(plan, memory, Some(commit_memory_pages), None, unsafe {
-                        &mut *instance.store()
-                    })
-                    .map_err(InstantiationError::Resource)?,
-                );
-            }
-        }
-
-        assert!(instance.dropped_data.is_empty());
-
-        Ok(())
-    }
-
-    fn set_instance_tables(
-        instance: &mut Instance,
-        mut tables: impl Iterator<Item = *mut usize>,
-        max_elements: u32,
-    ) -> Result<(), InstantiationError> {
-        let module = instance.runtime_info.module();
-
-        assert!(instance.tables.is_empty());
-
-        for (_, plan) in module.table_plans.iter().skip(module.num_imported_tables) {
-            let base = tables.next().unwrap();
-
-            commit_table_pages(
-                base as *mut u8,
-                max_elements as usize * mem::size_of::<*mut u8>(),
-            )
-            .map_err(InstantiationError::Resource)?;
-
-            let table = unsafe { std::slice::from_raw_parts_mut(base, max_elements as usize) };
-            instance.tables.push(
-                Table::new_static(plan, table, unsafe { &mut *instance.store() })
-                    .map_err(InstantiationError::Resource)?,
-            );
-        }
-
-        assert!(instance.dropped_elements.is_empty());
-        instance
-            .dropped_elements
-            .resize(module.passive_elements.len());
-
-        Ok(())
-    }
 }

@@ -1130,6 +1146,7 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
 mod test {
     use super::*;
+    use crate::{CompiledModuleId, Imports, MemoryMemFd, StorePtr, VMSharedSignatureIndex};
     use std::sync::Arc;
     use wasmtime_environ::{
         DefinedFuncIndex, DefinedMemoryIndex, EntityRef, FunctionInfo, Global, GlobalInit, Memory,
         MemoryPlan, ModuleType, SignatureIndex, Table, TablePlan, TableStyle, WasmType,