More PR feedback changes.
* More use of `anyhow`.
* Change `make_accessible` into `protect_linear_memory` to better demonstrate what it is used for; this will make the uffd implementation make a little more sense.
* Remove `create_memory_map` in favor of just creating the `Mmap` instances in the pooling allocator. This also removes the need for `MAP_NORESERVE` in the uffd implementation.
* Moar comments.
* Remove `BasePointerIterator` in favor of `impl Iterator`.
* The uffd implementation now only monitors linear memory pages and will only receive faults on pages that could potentially be accessible and never on a statically known guard page.
* Stop allocating memory or table pools if the maximum limit of the memory or table is 0.
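The `impl Iterator` change called out above replaces a hand-rolled iterator struct with a closure-based one. As a rough, self-contained sketch of that pattern (the function name and the buffer in `main` are illustrative, not taken from the diff):

    // Sketch of the pattern: yield the base pointer of each fixed-size slot
    // lazily, instead of tracking `current`/`num`/`size` in a dedicated struct.
    fn base_pointers(base: *mut u8, num: usize, size: usize) -> impl Iterator<Item = *mut u8> {
        // `move` copies `base` and `size` into the closure; `Range` supplies an
        // exact `size_hint` for free, so no hand-written implementation is needed.
        (0..num).map(move |i| unsafe { base.add(i * size) })
    }

    fn main() {
        let mut slots = [0u8; 15]; // five 3-byte slots backing the pointers
        let bases: Vec<*mut u8> = base_pointers(slots.as_mut_ptr(), 5, 3).collect();
        assert_eq!(bases.len(), 5);
        assert_eq!(bases[4] as usize - bases[0] as usize, 12);
    }

One nicety of this approach is that the old `test_base_pointer_iterator` unit test (deleted below) becomes unnecessary: the iteration logic is now just `Range::map`.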
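Similarly, the commit/decommit page-management functions that replace `make_accessible`/`decommit` pair an `mprotect`-style permission change with `madvise(MADV_DONTNEED)`. A minimal, Linux-only sketch assuming the `region`, `libc`, and `anyhow` crates already used by the diff (the names `commit_pages`/`decommit_pages` are hypothetical stand-ins for the per-resource variants below):

    use anyhow::{bail, Context, Result};

    /// Make `len` bytes at `addr` usable by upgrading them to READ|WRITE.
    fn commit_pages(addr: *mut u8, len: usize) -> Result<()> {
        if len == 0 {
            return Ok(());
        }
        unsafe {
            region::protect(addr, len, region::Protection::READ_WRITE)
                .context("failed to make pages read/write")
        }
    }

    /// Return the pages to the kernel; anonymous mappings read back as zero
    /// on the next access, which is what instance reuse needs.
    fn decommit_pages(addr: *mut u8, len: usize) -> Result<()> {
        if len == 0 {
            return Ok(());
        }
        if unsafe { libc::madvise(addr as _, len, libc::MADV_DONTNEED) } != 0 {
            bail!("madvise failed to decommit: {}", std::io::Error::last_os_error());
        }
        Ok(())
    }

Table and stack pages stay READ|WRITE for their whole lifetime, which is why the corresponding `commit_table_pages`/`commit_stack_pages` functions in the diff are no-ops.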
@@ -25,7 +25,7 @@ struct CodeMemoryEntry {
 
 impl CodeMemoryEntry {
     fn with_capacity(cap: usize) -> Result<Self, String> {
-        let mmap = ManuallyDrop::new(Mmap::with_at_least(cap)?);
+        let mmap = ManuallyDrop::new(Mmap::with_at_least(cap).map_err(|e| e.to_string())?);
         let registry = ManuallyDrop::new(UnwindRegistry::new(mmap.as_ptr() as usize));
         Ok(Self {
             mmap,
@@ -67,10 +67,10 @@ pub(crate) struct Instance {
     /// Hosts can store arbitrary per-instance information here.
     host_state: Box<dyn Any>,
 
-    /// Stores guard page faults in memory relating to the instance.
-    /// This is used for the pooling allocator with uffd enabled on Linux.
+    /// Stores linear memory guard page faults for the pooling allocator with uffd enabled.
+    /// These pages need to be reset after the signal handler generates the out-of-bounds trap.
     #[cfg(all(feature = "uffd", target_os = "linux"))]
-    guard_page_faults: RefCell<Vec<(*mut u8, usize, unsafe fn(*mut u8, usize) -> bool)>>,
+    guard_page_faults: RefCell<Vec<(*mut u8, usize, fn(*mut u8, usize) -> anyhow::Result<()>)>>,
 
     /// Additional context used by compiled wasm code. This field is last, and
     /// represents a dynamically-sized array that extends beyond the nominal
@@ -821,7 +821,7 @@ impl Instance {
         &self,
         page_addr: *mut u8,
         size: usize,
-        reset: unsafe fn(*mut u8, usize) -> bool,
+        reset: fn(*mut u8, usize) -> anyhow::Result<()>,
     ) {
         self.guard_page_faults
             .borrow_mut()
@@ -837,11 +837,7 @@ impl Instance {
     pub(crate) fn reset_guard_pages(&self) -> anyhow::Result<()> {
         let mut faults = self.guard_page_faults.borrow_mut();
         for (addr, len, reset) in faults.drain(..) {
-            unsafe {
-                if !reset(addr, len) {
-                    anyhow::bail!("failed to reset previously faulted memory guard page");
-                }
-            }
+            reset(addr, len)?;
         }
 
         Ok(())
@@ -549,8 +549,10 @@ impl OnDemandInstanceAllocator {
         let mut memories: PrimaryMap<DefinedMemoryIndex, _> =
             PrimaryMap::with_capacity(module.memory_plans.len() - num_imports);
         for plan in &module.memory_plans.values().as_slice()[num_imports..] {
-            memories
-                .push(Memory::new_dynamic(plan, creator).map_err(InstantiationError::Resource)?);
+            memories.push(
+                Memory::new_dynamic(plan, creator)
+                    .map_err(|e| InstantiationError::Resource(e.to_string()))?,
+            );
         }
         Ok(memories)
     }
@@ -31,9 +31,7 @@ cfg_if::cfg_if! {
     } else if #[cfg(all(feature = "uffd", target_os = "linux"))] {
         mod uffd;
         use uffd as imp;
-        use imp::PageFaultHandler;
-        use super::{check_init_bounds, initialize_tables};
-        use wasmtime_environ::MemoryInitialization;
+        use imp::initialize_memory_pool;
     } else if #[cfg(target_os = "linux")] {
         mod linux;
         use linux as imp;
@@ -43,7 +41,10 @@ cfg_if::cfg_if! {
     }
 }
 
-use imp::{create_memory_map, decommit, make_accessible};
+use imp::{
+    commit_memory_pages, commit_stack_pages, commit_table_pages, decommit_memory_pages,
+    decommit_stack_pages, decommit_table_pages,
+};
 
 fn round_up_to_pow2(n: usize, to: usize) -> usize {
     debug_assert!(to > 0);
@@ -272,9 +273,9 @@ impl Default for InstanceLimits {
         Self {
             count: 1000,
             #[cfg(target_pointer_width = "32")]
-            memory_reservation_size: 0xA00000,
+            memory_reservation_size: 10 * (1 << 20), // 10 MiB,
             #[cfg(target_pointer_width = "64")]
-            memory_reservation_size: 0x180000000,
+            memory_reservation_size: 6 * (1 << 30), // 6 GiB,
         }
     }
 }
@@ -305,45 +306,6 @@ impl Default for PoolingAllocationStrategy {
     }
 }
 
-// Used to iterate the base address of instance memories and tables.
-struct BasePointerIterator {
-    base: *mut u8,
-    current: usize,
-    num: usize,
-    size: usize,
-}
-
-impl BasePointerIterator {
-    fn new(base: *mut u8, num: usize, size: usize) -> Self {
-        Self {
-            base,
-            current: 0,
-            num,
-            size,
-        }
-    }
-}
-
-impl Iterator for BasePointerIterator {
-    type Item = *mut u8;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        let current = self.current;
-        if current == self.num {
-            return None;
-        }
-
-        self.current += 1;
-
-        Some(unsafe { self.base.add(current * self.size) })
-    }
-
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let remaining = self.num - self.current;
-        (remaining, Some(remaining))
-    }
-}
-
 /// Represents a pool of maximal `Instance` structures.
 ///
 /// Each index in the pool provides enough space for a maximal `Instance`
@@ -395,8 +357,11 @@ impl InstancePool {
             .checked_mul(max_instances)
             .ok_or_else(|| anyhow!("total size of instance data exceeds addressable memory"))?;
 
+        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
+            .context("failed to create instance pool mapping")?;
+
         let pool = Self {
-            mapping: create_memory_map(allocation_size, allocation_size)?,
+            mapping,
             offsets,
             instance_size,
             max_instances,
@@ -414,15 +379,18 @@ impl InstancePool {
         Ok(pool)
     }
 
-    fn initialize(&self, index: usize, module: &Arc<Module>) {
+    unsafe fn instance(&self, index: usize) -> &mut Instance {
         debug_assert!(index < self.max_instances);
+        &mut *(self.mapping.as_mut_ptr().add(index * self.instance_size) as *mut Instance)
+    }
+
+    fn initialize(&self, index: usize, module: &Arc<Module>) {
         unsafe {
-            let instance_ptr = self.mapping.as_mut_ptr().add(index * self.instance_size);
+            let instance = self.instance(index);
 
             // Write a default instance with preallocated memory/table map storage to the ptr
             std::ptr::write(
-                instance_ptr as _,
+                instance as _,
                 Instance {
                     module: module.clone(),
                     offsets: self.offsets,
@@ -456,9 +424,7 @@ impl InstancePool {
         let host_state = std::mem::replace(&mut req.host_state, Box::new(()));
 
         unsafe {
-            debug_assert!(index < self.max_instances);
-            let instance =
-                &mut *(self.mapping.as_mut_ptr().add(index * self.instance_size) as *mut Instance);
+            let instance = self.instance(index);
 
             instance.module = req.module.clone();
             instance.offsets = VMOffsets::new(
@@ -490,47 +456,40 @@ impl InstancePool {
         let index = (addr - base) / self.instance_size;
         debug_assert!(index < self.max_instances);
 
-        unsafe {
-            // Decommit any linear memories that were used
-            for (mem, base) in (*handle.instance)
-                .memories
-                .values()
-                .zip(self.memories.get(index))
-            {
-                let size = (mem.size() * WASM_PAGE_SIZE) as usize;
-                if size > 0 {
-                    decommit(base, size);
-                }
-            }
+        let instance = unsafe { &mut *handle.instance };
 
-            // Decommit any tables that were used
-            let table_element_size = max_table_element_size();
-            for (table, base) in (*handle.instance)
-                .tables
-                .values()
-                .zip(self.tables.get(index))
-            {
-                let size = round_up_to_pow2(
-                    table.size() as usize * table_element_size,
-                    self.tables.page_size,
-                );
-                if size > 0 {
-                    decommit(base, size);
-                }
-            }
+        // Decommit any linear memories that were used
+        for (mem, base) in instance.memories.values().zip(self.memories.get(index)) {
+            let size = (mem.size() * WASM_PAGE_SIZE) as usize;
+            decommit_memory_pages(base, size).unwrap();
+        }
 
-            // Drop the host state
-            (*handle.instance).host_state = Box::new(());
-        }
+        instance.memories.clear();
+        instance.dropped_data.borrow_mut().clear();
 
-        {
-            self.free_list.lock().unwrap().push(index);
-        }
+        // Decommit any tables that were used
+        let table_element_size = max_table_element_size();
+        for (table, base) in instance.tables.values().zip(self.tables.get(index)) {
+            let size = round_up_to_pow2(
+                table.size() as usize * table_element_size,
+                self.tables.page_size,
+            );
+
+            decommit_table_pages(base, size).unwrap();
+        }
+
+        instance.tables.clear();
+        instance.dropped_elements.borrow_mut().clear();
+
+        // Drop any host state
+        instance.host_state = Box::new(());
+
+        self.free_list.lock().unwrap().push(index);
     }
 
     fn set_instance_memories(
         instance: &mut Instance,
-        mut memories: BasePointerIterator,
+        mut memories: impl Iterator<Item = *mut u8>,
         max_pages: u32,
     ) -> Result<(), InstantiationError> {
         let module = instance.module.as_ref();
@@ -541,19 +500,24 @@ impl InstancePool {
             .reset_guard_pages()
             .map_err(|e| InstantiationError::Resource(e.to_string()))?;
 
-        instance.memories.clear();
+        debug_assert!(instance.memories.is_empty());
 
         for plan in
             (&module.memory_plans.values().as_slice()[module.num_imported_memories..]).iter()
         {
             instance.memories.push(
-                Memory::new_static(plan, memories.next().unwrap(), max_pages, make_accessible)
-                    .map_err(InstantiationError::Resource)?,
+                Memory::new_static(
+                    plan,
+                    memories.next().unwrap(),
+                    max_pages,
+                    commit_memory_pages,
+                )
+                .map_err(|e| InstantiationError::Resource(e.to_string()))?,
             );
         }
 
         let mut dropped_data = instance.dropped_data.borrow_mut();
-        dropped_data.clear();
+        debug_assert!(dropped_data.is_empty());
         dropped_data.resize(module.passive_data.len());
 
         Ok(())
@@ -561,22 +525,18 @@ impl InstancePool {
 
     fn set_instance_tables(
         instance: &mut Instance,
-        mut tables: BasePointerIterator,
+        mut tables: impl Iterator<Item = *mut u8>,
         max_elements: u32,
     ) -> Result<(), InstantiationError> {
         let module = instance.module.as_ref();
 
-        instance.tables.clear();
+        debug_assert!(instance.tables.is_empty());
 
         for plan in (&module.table_plans.values().as_slice()[module.num_imported_tables..]).iter() {
             let base = tables.next().unwrap();
 
-            // Make the table data accessible
-            if unsafe { !make_accessible(base, max_elements as usize * max_table_element_size()) } {
-                return Err(InstantiationError::Resource(
-                    "failed to make instance memory accessible".into(),
-                ));
-            }
+            commit_table_pages(base, max_elements as usize * max_table_element_size())
+                .map_err(|e| InstantiationError::Resource(e.to_string()))?;
 
             instance
                 .tables
@@ -584,7 +544,7 @@ impl InstancePool {
         }
 
         let mut dropped_elements = instance.dropped_elements.borrow_mut();
-        dropped_elements.clear();
+        debug_assert!(dropped_elements.is_empty());
         dropped_elements.resize(module.passive_elements.len());
 
         Ok(())
@@ -623,8 +583,31 @@ struct MemoryPool {
 
 impl MemoryPool {
     fn new(module_limits: &ModuleLimits, instance_limits: &InstanceLimits) -> Result<Self> {
-        let memory_size = usize::try_from(instance_limits.memory_reservation_size)
-            .map_err(|_| anyhow!("memory reservation size exceeds addressable memory"))?;
+        // The maximum module memory page count cannot exceed 65536 pages
+        if module_limits.memory_pages > 0x10000 {
+            bail!(
+                "module memory page limit of {} exceeds the maximum of 65536",
+                module_limits.memory_pages
+            );
+        }
+
+        // The maximum module memory page count cannot exceed the memory reservation size
+        if (module_limits.memory_pages * WASM_PAGE_SIZE) as u64
+            > instance_limits.memory_reservation_size
+        {
+            bail!(
+                "module memory page limit of {} pages exceeds the memory reservation size limit of {} bytes",
+                module_limits.memory_pages,
+                instance_limits.memory_reservation_size
+            );
+        }
+
+        let memory_size = if module_limits.memory_pages > 0 {
+            usize::try_from(instance_limits.memory_reservation_size)
+                .map_err(|_| anyhow!("memory reservation size exceeds addressable memory"))?
+        } else {
+            0
+        };
 
         debug_assert!(
             memory_size % region::page::size() == 0,
@@ -642,25 +625,36 @@ impl MemoryPool {
                 anyhow!("total size of memory reservation exceeds addressable memory")
             })?;
 
-        Ok(Self {
-            mapping: create_memory_map(0, allocation_size)?,
+        // Create a completely inaccessible region to start
+        let mapping = Mmap::accessible_reserved(0, allocation_size)
+            .context("failed to create memory pool mapping")?;
+
+        let pool = Self {
+            mapping,
             memory_size,
             max_memories,
             max_instances,
             max_wasm_pages: module_limits.memory_pages,
-        })
+        };
+
+        // uffd support requires some special setup for the memory pool
+        #[cfg(all(feature = "uffd", target_os = "linux"))]
+        initialize_memory_pool(&pool)?;
+
+        Ok(pool)
     }
 
-    fn get(&self, instance_index: usize) -> BasePointerIterator {
+    fn get(&self, instance_index: usize) -> impl Iterator<Item = *mut u8> {
         debug_assert!(instance_index < self.max_instances);
 
-        let base = unsafe {
+        let base: *mut u8 = unsafe {
             self.mapping
                 .as_mut_ptr()
                 .add(instance_index * self.memory_size * self.max_memories) as _
         };
 
-        BasePointerIterator::new(base, self.max_memories, self.memory_size)
+        let size = self.memory_size;
+        (0..self.max_memories).map(move |i| unsafe { base.add(i * size) })
     }
 }
@@ -668,9 +662,6 @@ impl MemoryPool {
 ///
 /// Each instance index into the pool returns an iterator over the base addresses
 /// of the instance's tables.
-///
-/// The userfault handler relies on how tables are stored in the mapping,
-/// so make sure the uffd implementation is kept up-to-date.
 #[derive(Debug)]
 struct TablePool {
     mapping: Mmap,
@@ -685,12 +676,16 @@ impl TablePool {
     fn new(module_limits: &ModuleLimits, instance_limits: &InstanceLimits) -> Result<Self> {
         let page_size = region::page::size();
 
-        let table_size = round_up_to_pow2(
-            max_table_element_size()
-                .checked_mul(module_limits.table_elements as usize)
-                .ok_or_else(|| anyhow!("table size exceeds addressable memory"))?,
-            page_size,
-        );
+        let table_size = if module_limits.table_elements > 0 {
+            round_up_to_pow2(
+                max_table_element_size()
+                    .checked_mul(module_limits.table_elements as usize)
+                    .ok_or_else(|| anyhow!("table size exceeds addressable memory"))?,
+                page_size,
+            )
+        } else {
+            0
+        };
 
         let max_instances = instance_limits.count as usize;
         let max_tables = module_limits.tables as usize;
@@ -700,26 +695,30 @@ impl TablePool {
             .and_then(|c| c.checked_mul(max_instances))
             .ok_or_else(|| anyhow!("total size of instance tables exceeds addressable memory"))?;
 
+        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
+            .context("failed to create table pool mapping")?;
+
         Ok(Self {
-            mapping: create_memory_map(0, allocation_size)?,
+            mapping,
             table_size,
             max_tables,
             max_instances,
-            page_size: region::page::size(),
+            page_size,
            max_elements: module_limits.table_elements,
        })
    }
 
-    fn get(&self, instance_index: usize) -> BasePointerIterator {
+    fn get(&self, instance_index: usize) -> impl Iterator<Item = *mut u8> {
         debug_assert!(instance_index < self.max_instances);
 
-        let base = unsafe {
+        let base: *mut u8 = unsafe {
             self.mapping
                 .as_mut_ptr()
                 .add(instance_index * self.table_size * self.max_tables) as _
         };
 
-        BasePointerIterator::new(base, self.max_tables, self.table_size)
+        let size = self.table_size;
+        (0..self.max_tables).map(move |i| unsafe { base.add(i * size) })
     }
 }
@@ -733,9 +732,6 @@ impl TablePool {
 ///
 /// The top of the stack (starting stack pointer) is returned when a stack is allocated
 /// from the pool.
-///
-/// The userfault handler relies on how stacks are stored in the mapping,
-/// so make sure the uffd implementation is kept up-to-date.
 #[derive(Debug)]
 struct StackPool {
     mapping: Mmap,
@@ -765,15 +761,18 @@ impl StackPool {
             .checked_mul(max_instances)
             .ok_or_else(|| anyhow!("total size of execution stacks exceeds addressable memory"))?;
 
-        let mapping = create_memory_map(allocation_size, allocation_size)?;
+        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
+            .context("failed to create stack pool mapping")?;
 
         // Set up the stack guard pages
-        unsafe {
-            for i in 0..max_instances {
-                // Make the stack guard page inaccessible
-                let bottom_of_stack = mapping.as_mut_ptr().add(i * stack_size);
-                region::protect(bottom_of_stack, page_size, region::Protection::NONE)
-                    .context("failed to protect stack guard page")?;
+        if allocation_size > 0 {
+            unsafe {
+                for i in 0..max_instances {
+                    // Make the stack guard page inaccessible
+                    let bottom_of_stack = mapping.as_mut_ptr().add(i * stack_size);
+                    region::protect(bottom_of_stack, page_size, region::Protection::NONE)
+                        .context("failed to protect stack guard page")?;
+                }
             }
         }
@@ -804,8 +803,19 @@ impl StackPool {
         debug_assert!(index < self.max_instances);
 
         unsafe {
-            // The top (end) of the stack should be returned
-            Ok(self.mapping.as_mut_ptr().add((index + 1) * self.stack_size))
+            // Remove the guard page from the size
+            let size_without_guard = self.stack_size - self.page_size;
+
+            let bottom_of_stack = self
+                .mapping
+                .as_mut_ptr()
+                .add((index * self.stack_size) + self.page_size);
+
+            commit_stack_pages(bottom_of_stack, size_without_guard)
+                .map_err(|e| FiberStackError::Resource(e.to_string()))?;
+
+            // The top of the stack should be returned
+            Ok(bottom_of_stack.add(size_without_guard))
         }
     }
@@ -826,18 +836,16 @@ impl StackPool {
             let index = (start_of_stack - base) / self.stack_size;
             debug_assert!(index < self.max_instances);
 
-            decommit(bottom_of_stack, stack_size);
+            decommit_stack_pages(bottom_of_stack, stack_size).unwrap();
 
-            {
-                self.free_list.lock().unwrap().push(index);
-            }
+            self.free_list.lock().unwrap().push(index);
         }
     }
 }
 
 /// Implements the pooling instance allocator.
 ///
-/// This allocator interinally maintains pools of instances, memories, tables, and stacks.
+/// This allocator internally maintains pools of instances, memories, tables, and stacks.
 ///
 /// Note: the resource pools are manually dropped so that the fault handler terminates correctly.
 #[derive(Debug)]
@@ -845,10 +853,11 @@ pub struct PoolingInstanceAllocator {
     strategy: PoolingAllocationStrategy,
     module_limits: ModuleLimits,
     instance_limits: InstanceLimits,
+    // This is manually drop so that the pools unmap their memory before the page fault handler drops.
     instances: mem::ManuallyDrop<InstancePool>,
-    stacks: mem::ManuallyDrop<StackPool>,
+    stacks: StackPool,
     #[cfg(all(feature = "uffd", target_os = "linux"))]
-    _fault_handler: PageFaultHandler,
+    _fault_handler: imp::PageFaultHandler,
 }
 
 impl PoolingInstanceAllocator {
@@ -874,37 +883,18 @@ impl PoolingInstanceAllocator {
         instance_limits.memory_reservation_size =
             min(instance_limits.memory_reservation_size, 0x200000000);
 
-        // The maximum module memory page count cannot exceed 65536 pages
-        if module_limits.memory_pages > 0x10000 {
-            bail!(
-                "module memory page limit of {} exceeds the maximum of 65536",
-                module_limits.memory_pages
-            );
-        }
-
-        // The maximum module memory page count cannot exceed the memory reservation size
-        if (module_limits.memory_pages * WASM_PAGE_SIZE) as u64
-            > instance_limits.memory_reservation_size
-        {
-            bail!(
-                "module memory page limit of {} pages exeeds the memory reservation size limit of {} bytes",
-                module_limits.memory_pages,
-                instance_limits.memory_reservation_size
-            );
-        }
-
         let instances = InstancePool::new(&module_limits, &instance_limits)?;
         let stacks = StackPool::new(&instance_limits, stack_size)?;
 
         #[cfg(all(feature = "uffd", target_os = "linux"))]
-        let _fault_handler = PageFaultHandler::new(&instances)?;
+        let _fault_handler = imp::PageFaultHandler::new(&instances)?;
 
         Ok(Self {
             strategy,
             module_limits,
             instance_limits,
             instances: mem::ManuallyDrop::new(instances),
-            stacks: mem::ManuallyDrop::new(stacks),
+            stacks,
             #[cfg(all(feature = "uffd", target_os = "linux"))]
             _fault_handler,
         })
@@ -917,7 +907,6 @@ impl Drop for PoolingInstanceAllocator {
         // This ensures that any fault handler thread monitoring the pool memory terminates
         unsafe {
             mem::ManuallyDrop::drop(&mut self.instances);
-            mem::ManuallyDrop::drop(&mut self.stacks);
         }
     }
 }
@@ -963,13 +952,13 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
         cfg_if::cfg_if! {
             if #[cfg(all(feature = "uffd", target_os = "linux"))] {
                 match &instance.module.memory_initialization {
-                    MemoryInitialization::Paged{ out_of_bounds, .. } => {
+                    wasmtime_environ::MemoryInitialization::Paged{ out_of_bounds, .. } => {
                         if !is_bulk_memory {
-                            check_init_bounds(instance)?;
+                            super::check_init_bounds(instance)?;
                         }
 
                         // Initialize the tables
-                        initialize_tables(instance)?;
+                        super::initialize_tables(instance)?;
 
                         // Don't initialize the memory; the fault handler will back the pages when accessed
@@ -1312,21 +1301,6 @@ mod test {
         assert_eq!(strat.next(1), 0);
     }
 
-    #[test]
-    fn test_base_pointer_iterator() {
-        let mut iter = BasePointerIterator::new(std::ptr::null_mut(), 5, 3);
-
-        assert_eq!(iter.next(), Some(0usize as _));
-        assert_eq!(iter.next(), Some(3usize as _));
-        assert_eq!(iter.next(), Some(6usize as _));
-        assert_eq!(iter.next(), Some(9usize as _));
-        assert_eq!(iter.next(), Some(12usize as _));
-        assert_eq!(iter.next(), None);
-
-        let mut iter = BasePointerIterator::new(std::ptr::null_mut(), 0, 10);
-        assert_eq!(iter.next(), None);
-    }
-
     #[cfg(target_pointer_width = "64")]
     #[test]
     fn test_instance_pool() -> Result<()> {
@@ -1341,11 +1315,11 @@ mod test {
             memories: 1,
             globals: 0,
             table_elements: 10,
-            memory_pages: 10,
+            memory_pages: 1,
         };
         let instance_limits = InstanceLimits {
             count: 3,
-            memory_reservation_size: 4096,
+            memory_reservation_size: WASM_PAGE_SIZE as u64,
         };
 
         let instances = InstancePool::new(&module_limits, &instance_limits)?;
@@ -1366,7 +1340,7 @@ mod test {
         assert_eq!(instances.instance_size, 4096);
         assert_eq!(instances.max_instances, 3);
 
-        assert_eq!(&*instances.free_list.lock().unwrap(), &[0, 1, 2],);
+        assert_eq!(&*instances.free_list.lock().unwrap(), &[0, 1, 2]);
 
         let mut handles = Vec::new();
         let module = Arc::new(Module::default());
@@ -1397,7 +1371,7 @@ mod test {
            );
        }
 
-        assert_eq!(&*instances.free_list.lock().unwrap(), &[],);
+        assert_eq!(&*instances.free_list.lock().unwrap(), &[]);
 
         match instances.allocate(
             PoolingAllocationStrategy::NextAvailable,
@@ -1425,7 +1399,7 @@ mod test {
             instances.deallocate(&handle);
         }
 
-        assert_eq!(&*instances.free_list.lock().unwrap(), &[2, 1, 0],);
+        assert_eq!(&*instances.free_list.lock().unwrap(), &[2, 1, 0]);
 
         Ok(())
     }
@@ -1445,7 +1419,7 @@ mod test {
                 memories: 3,
                 globals: 0,
                 table_elements: 0,
-                memory_pages: 10,
+                memory_pages: 1,
            },
            &InstanceLimits {
                count: 5,
@@ -1456,7 +1430,7 @@ mod test {
         assert_eq!(pool.memory_size, WASM_PAGE_SIZE as usize);
         assert_eq!(pool.max_memories, 3);
         assert_eq!(pool.max_instances, 5);
-        assert_eq!(pool.max_wasm_pages, 10);
+        assert_eq!(pool.max_wasm_pages, 1);
 
         let base = pool.mapping.as_ptr() as usize;
 
@@ -1554,7 +1528,7 @@ mod test {
             stacks.push(stack);
         }
 
-        assert_eq!(&*pool.free_list.lock().unwrap(), &[],);
+        assert_eq!(&*pool.free_list.lock().unwrap(), &[]);
 
         match pool
             .allocate(PoolingAllocationStrategy::NextAvailable)
@@ -1632,7 +1606,7 @@ mod test {
             )
             .map_err(|e| e.to_string())
            .expect_err("expected a failure constructing instance allocator"),
-            "module memory page limit of 2 pages exeeds the memory reservation size limit of 65536 bytes"
+            "module memory page limit of 2 pages exceeds the memory reservation size limit of 65536 bytes"
        );
    }
 
@@ -1,23 +1,58 @@
-use crate::Mmap;
-use anyhow::{anyhow, Result};
+use anyhow::{bail, Context, Result};
 
-pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
-    region::protect(addr, len, region::Protection::READ_WRITE).is_ok()
+fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }
+
+    unsafe {
+        if protect {
+            region::protect(addr, len, region::Protection::NONE)
+                .context("failed to protect memory pages")?;
+        }
+
+        // On Linux, this is enough to cause the kernel to initialize the pages to 0 on next access
+        if libc::madvise(addr as _, len, libc::MADV_DONTNEED) != 0 {
+            bail!(
+                "madvise failed to decommit: {}",
+                std::io::Error::last_os_error()
+            );
+        }
+    }
+
+    Ok(())
 }
 
-pub unsafe fn decommit(addr: *mut u8, len: usize) {
-    region::protect(addr, len, region::Protection::NONE).unwrap();
+pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }
 
-    // On Linux, this is enough to cause the kernel to initialize the pages to 0 on next access
-    assert_eq!(
-        libc::madvise(addr as _, len, libc::MADV_DONTNEED),
-        0,
-        "madvise failed to mark pages as missing: {}",
-        std::io::Error::last_os_error()
-    );
+    // Just change the protection level to READ|WRITE
+    unsafe {
+        region::protect(addr, len, region::Protection::READ_WRITE)
+            .context("failed to make linear memory pages read/write")
+    }
 }
 
-pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap> {
-    Mmap::accessible_reserved(accessible_size, mapping_size)
-        .map_err(|e| anyhow!("failed to allocate pool memory: {}", e))
+pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, true)
+}
+
+pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as table pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, false)
+}
+
+pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as stack pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, false)
 }
@@ -16,11 +16,10 @@
 //! 1. A user fault file descriptor is created to monitor specific areas of the address space.
 //! 2. A thread is spawned to continually read events from the user fault file descriptor.
 //! 3. When a page fault event is received, the handler thread calculates where the fault occurred:
-//!    a) If the fault occurs on a table page, it is handled by zeroing the page.
-//!    b) If the fault occurs on a linear memory page, it is handled by either copying the page from
+//!    a) If the fault occurs on a linear memory page, it is handled by either copying the page from
 //!       initialization data or zeroing it.
-//!    c) If the fault occurs on a guard page, the protection level of the guard page is changed to
-//!       force the kernel to signal SIGSEV on the next retry. The faulting page is recorded so the
+//!    b) If the fault occurs on a guard page, the protection level of the guard page is changed to
+//!       force the kernel to signal SIGBUS on the next retry. The faulting page is recorded so the
 //!       protection level can be reset in the future.
 //! 4. Faults to address space relating to an instance may occur from both Wasmtime (e.g. instance
 //!    initialization) or from WebAssembly code (e.g. reading from or writing to linear memory),
@@ -31,80 +30,103 @@
 //!
 //! This feature requires a Linux kernel 4.11 or newer to use.
 
-use super::InstancePool;
-use crate::{instance::Instance, Mmap};
+use super::{InstancePool, MemoryPool};
+use crate::instance::Instance;
 use anyhow::{bail, Context, Result};
-use std::ptr;
 use std::thread;
 use userfaultfd::{Event, FeatureFlags, IoctlFlags, Uffd, UffdBuilder};
 use wasmtime_environ::{entity::EntityRef, wasm::DefinedMemoryIndex, MemoryInitialization};
 
 const WASM_PAGE_SIZE: usize = wasmtime_environ::WASM_PAGE_SIZE as usize;
 
-pub unsafe fn make_accessible(_addr: *mut u8, _len: usize) -> bool {
-    // A no-op when userfaultfd is used
-    true
-}
-
-pub unsafe fn reset_guard_page(addr: *mut u8, len: usize) -> bool {
-    // Guard pages are READ_WRITE with uffd until faulted
-    region::protect(addr, len, region::Protection::READ_WRITE).is_ok()
-}
-
-pub unsafe fn decommit(addr: *mut u8, len: usize) {
-    // Use MADV_DONTNEED to mark the pages as missing
-    // This will cause a missing page fault for next access on any page in the given range
-    assert_eq!(
-        libc::madvise(addr as _, len, libc::MADV_DONTNEED),
-        0,
-        "madvise failed to mark pages as missing: {}",
-        std::io::Error::last_os_error()
-    );
-}
-
-pub fn create_memory_map(_accessible_size: usize, mapping_size: usize) -> Result<Mmap> {
-    // Allocate a single read-write region at once
-    // As writable pages need to count towards commit charge, use MAP_NORESERVE to override.
-    // This implies that the kernel is configured to allow overcommit or else this allocation
-    // will almost certainly fail without a plethora of physical memory to back the allocation.
-    // The consequence of not reserving is that our process may segfault on any write to a memory
-    // page that cannot be backed (i.e. out of memory conditions).
-
-    if mapping_size == 0 {
-        return Ok(Mmap::new());
+fn decommit(addr: *mut u8, len: usize) -> Result<()> {
+    if len == 0 {
+        return Ok(());
     }
 
     unsafe {
-        let ptr = libc::mmap(
-            ptr::null_mut(),
-            mapping_size,
-            libc::PROT_READ | libc::PROT_WRITE,
-            libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_NORESERVE,
-            -1,
-            0,
-        );
-
-        if ptr as isize == -1_isize {
+        // On Linux, this is enough to cause the kernel to initialize the pages to 0 on next access
+        if libc::madvise(addr as _, len, libc::MADV_DONTNEED) != 0 {
             bail!(
-                "failed to allocate pool memory: mmap failed with {}",
+                "madvise failed to decommit: {}",
                 std::io::Error::last_os_error()
             );
         }
+    }
 
-        Ok(Mmap::from_raw(ptr as usize, mapping_size))
+    Ok(())
+}
+
+pub fn commit_memory_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as memory pages remain READ|WRITE with uffd
+    Ok(())
+}
+
+pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
+}
+
+pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as table pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
+}
+
+pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as stack pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
+}
+
+/// This is used to initialize the memory pool when uffd is enabled.
+///
+/// Without uffd, all of the memory pool's pages are initially protected with `NONE` to treat the entire
+/// range as guard pages. When an instance is created, the initial pages of the memory are
+/// changed to `READ_WRITE`.
+///
+/// With uffd, however, the potentially accessible pages of the each linear memory are made `READ_WRITE` and
+/// the page fault handler will detect an out of bounds access and treat the page, temporarily,
+/// as a guard page.
+///
+/// This me
+pub(super) fn initialize_memory_pool(pool: &MemoryPool) -> Result<()> {
+    if pool.memory_size == 0 || pool.max_wasm_pages == 0 {
+        return Ok(());
+    }
+
+    for i in 0..pool.max_instances {
+        for base in pool.get(i) {
+            unsafe {
+                region::protect(
+                    base as _,
+                    pool.max_wasm_pages as usize * WASM_PAGE_SIZE,
+                    region::Protection::READ_WRITE,
+                )
+                .context("failed to initialize memory pool for uffd")?;
+            }
+        }
+    }
+
+    Ok(())
+}
+
+/// This is used to reset a linear memory's guard page back to read-write as the page might be accessible
+/// again in the future depending on how the linear memory grows.
+fn reset_guard_page(addr: *mut u8, len: usize) -> Result<()> {
+    unsafe {
+        region::protect(addr, len, region::Protection::READ_WRITE)
+            .context("failed to reset guard page")
     }
 }
 
 /// Represents a location of a page fault within monitored regions of memory.
-enum AddressLocation<'a> {
-    /// The address location is in a WebAssembly table page.
-    /// The fault handler will zero the page as tables are initialized at instantiation-time.
-    TablePage {
-        /// The address of the page being accessed.
-        page_addr: *mut u8,
-        /// The length of the page being accessed.
-        len: usize,
-    },
+enum FaultLocation<'a> {
     /// The address location is in a WebAssembly linear memory page.
     /// The fault handler will copy the pages from initialization data if necessary.
     MemoryPage {
@@ -121,12 +143,12 @@ enum AddressLocation<'a> {
     },
 }
 
-/// Used to resolve fault addresses to address locations.
+/// Used to resolve fault addresses to a location.
 ///
-/// This implementation relies heavily on how the various resource pools utilize their memory.
+/// This implementation relies heavily on how the linear memory pool organizes its memory.
 ///
 /// `usize` is used here instead of pointers to keep this `Send` as it gets sent to the handler thread.
-struct AddressLocator {
+struct FaultLocator {
     instances_start: usize,
     instance_size: usize,
     max_instances: usize,
@@ -134,19 +156,13 @@ struct AddressLocator {
     memories_end: usize,
     memory_size: usize,
     max_memories: usize,
-    tables_start: usize,
-    tables_end: usize,
-    table_size: usize,
-    page_size: usize,
 }
 
-impl AddressLocator {
+impl FaultLocator {
     fn new(instances: &InstancePool) -> Self {
         let instances_start = instances.mapping.as_ptr() as usize;
         let memories_start = instances.memories.mapping.as_ptr() as usize;
         let memories_end = memories_start + instances.memories.mapping.len();
-        let tables_start = instances.tables.mapping.as_ptr() as usize;
-        let tables_end = tables_start + instances.tables.mapping.len();
 
         // Should always have instances
         debug_assert!(instances_start != 0);
@@ -159,10 +175,6 @@ impl AddressLocator {
             memories_end,
             memory_size: instances.memories.memory_size,
             max_memories: instances.memories.max_memories,
-            tables_start,
-            tables_end,
-            table_size: instances.tables.table_size,
-            page_size: instances.tables.page_size,
         }
     }
 
@@ -174,7 +186,7 @@ impl AddressLocator {
     ///
     /// Of course a stray faulting memory access from a thread that does not own
     /// the instance might introduce a race, but this implementation considers
-    /// such to be a serious bug.
+    /// such to be a serious soundness bug not originating in this code.
     ///
     /// If the assumption holds true, accessing the instance data from the handler thread
     /// should, in theory, be safe.
@@ -183,8 +195,8 @@ impl AddressLocator {
         &*((self.instances_start + (index * self.instance_size)) as *const Instance)
     }
 
-    unsafe fn get_location(&self, addr: usize) -> Option<AddressLocation> {
-        // Check for a memory location
+    unsafe fn locate(&self, addr: usize) -> Option<FaultLocation> {
+        // Check for a linear memory location
         if addr >= self.memories_start && addr < self.memories_end {
             let index = (addr - self.memories_start) / self.memory_size;
             let memory_index = DefinedMemoryIndex::new(index % self.max_memories);
@@ -200,7 +212,7 @@ impl AddressLocator {
                }
            });
 
-            return Some(AddressLocation::MemoryPage {
+            return Some(FaultLocation::MemoryPage {
                 page_addr: (memory_start + page_index * WASM_PAGE_SIZE) as _,
                 len: WASM_PAGE_SIZE,
                 instance,
@@ -209,19 +221,6 @@ impl AddressLocator {
            });
        }
 
-        // Check for a table location
-        if addr >= self.tables_start && addr < self.tables_end {
-            let index = (addr - self.tables_start) / self.table_size;
-            let table_start = self.tables_start + (index * self.table_size);
-            let table_offset = addr - table_start;
-            let page_index = table_offset / self.page_size;
-
-            return Some(AddressLocation::TablePage {
-                page_addr: (table_start + (page_index * self.page_size)) as _,
-                len: self.page_size,
-            });
-        }
-
         None
     }
 }
@@ -231,9 +230,9 @@ impl AddressLocator {
 /// Because the region being monitored is protected read-write, this needs to set the
 /// protection level to `NONE` before waking the page.
 ///
-/// This will cause the kernel to raise a SIGSEGV when retrying the fault.
+/// This will cause the kernel to raise a SIGBUS when retrying the fault.
 unsafe fn wake_guard_page_access(uffd: &Uffd, page_addr: *const u8, len: usize) -> Result<()> {
-    // Set the page to NONE to induce a SIGSEGV for the access on the next retry
+    // Set the page to NONE to induce a SIGBUS for the access on the next retry
     region::protect(page_addr, len, region::Protection::NONE)
         .context("failed to change guard page protection")?;
 
@@ -288,22 +287,11 @@ unsafe fn initialize_wasm_page(
 
 unsafe fn handle_page_fault(
     uffd: &Uffd,
-    locator: &AddressLocator,
+    locator: &FaultLocator,
     addr: *mut std::ffi::c_void,
 ) -> Result<()> {
-    match locator.get_location(addr as usize) {
-        Some(AddressLocation::TablePage { page_addr, len }) => {
-            log::trace!(
-                "handling fault in table at address {:p} on page {:p}",
-                addr,
-                page_addr,
-            );
-
-            // Tables are always initialized upon instantiation, so zero the page
-            uffd.zeropage(page_addr as _, len, true)
-                .context("failed to zero table page")?;
-        }
-        Some(AddressLocation::MemoryPage {
+    match locator.locate(addr as usize) {
+        Some(FaultLocation::MemoryPage {
             page_addr,
             len,
             instance,
@@ -340,7 +328,7 @@ unsafe fn handle_page_fault(
     Ok(())
 }
 
-fn handler_thread(uffd: Uffd, locator: AddressLocator, mut registrations: usize) -> Result<()> {
+fn fault_handler_thread(uffd: Uffd, locator: FaultLocator) -> Result<()> {
     loop {
         match uffd.read_event().expect("failed to read event") {
             Some(Event::Unmap { start, end }) => {
@@ -348,13 +336,8 @@ fn handler_thread(uffd: Uffd, locator: AddressLocator, mut registrations: usize)
 
                 let (start, end) = (start as usize, end as usize);
 
-                if (start == locator.memories_start && end == locator.memories_end)
-                    || (start == locator.tables_start && end == locator.tables_end)
-                {
-                    registrations -= 1;
-                    if registrations == 0 {
-                        break;
-                    }
+                if start == locator.memories_start && end == locator.memories_end {
+                    break;
                 } else {
                     panic!("unexpected memory region unmapped");
                 }
@@ -385,53 +368,39 @@ impl PageFaultHandler {
|
|||||||
.create()
|
.create()
|
||||||
.context("failed to create user fault descriptor")?;
|
.context("failed to create user fault descriptor")?;
|
||||||
|
|
||||||
// Register the ranges with the userfault fd
|
// Register the linear memory pool with the userfault fd
|
||||||
let mut registrations = 0;
|
let start = instances.memories.mapping.as_ptr();
|
||||||
for (start, len) in &[
|
let len = instances.memories.mapping.len();
|
||||||
(
|
|
||||||
instances.memories.mapping.as_ptr() as usize,
|
|
||||||
instances.memories.mapping.len(),
|
|
||||||
),
|
|
||||||
(
|
|
||||||
instances.tables.mapping.as_ptr() as usize,
|
|
||||||
instances.tables.mapping.len(),
|
|
||||||
),
|
|
||||||
] {
|
|
||||||
if *start == 0 || *len == 0 {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
let thread = if !start.is_null() && len > 0 {
|
||||||
let ioctls = uffd
|
let ioctls = uffd
|
||||||
.register(*start as _, *len)
|
.register(start as _, len)
|
||||||
.context("failed to register user fault range")?;
|
.context("failed to register user fault range")?;
|
||||||
|
|
||||||
if !ioctls.contains(IoctlFlags::WAKE | IoctlFlags::COPY | IoctlFlags::ZEROPAGE) {
|
if !ioctls.contains(IoctlFlags::WAKE | IoctlFlags::COPY | IoctlFlags::ZEROPAGE) {
|
||||||
bail!(
|
bail!(
|
||||||
"required user fault ioctls not supported; found: {:?}",
|
"required user fault ioctls not supported by the kernel; found: {:?}",
|
||||||
ioctls,
|
ioctls,
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
registrations += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
let thread = if registrations == 0 {
|
|
||||||
log::trace!("user fault handling disabled as there are no regions to monitor");
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
log::trace!(
|
log::trace!(
|
||||||
"user fault handling enabled on {} memory regions",
|
"user fault handling enabled on linear memory pool at {:p} with size {}",
|
||||||
registrations
|
start,
|
||||||
|
len
|
||||||
);
|
);
|
||||||
|
|
||||||
let locator = AddressLocator::new(&instances);
|
let locator = FaultLocator::new(&instances);
|
||||||
|
|
||||||
Some(
|
Some(
|
||||||
thread::Builder::new()
|
thread::Builder::new()
|
||||||
.name("page fault handler".into())
|
.name("page fault handler".into())
|
||||||
.spawn(move || handler_thread(uffd, locator, registrations))
|
.spawn(move || fault_handler_thread(uffd, locator))
|
||||||
.context("failed to spawn page fault handler thread")?,
|
.context("failed to spawn page fault handler thread")?,
|
||||||
)
|
)
|
||||||
|
} else {
|
||||||
|
log::trace!("user fault handling disabled as there is no linear memory pool");
|
||||||
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Self { thread })
|
Ok(Self { thread })
|
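The `register` call reports which ioctls the kernel will honor on the range; the handler later issues wake, copy, and zeropage operations, so the capability check fails fast at setup rather than erroring mid-fault. A standalone restatement of that check (the helper name is hypothetical):

    use anyhow::{bail, Result};
    use userfaultfd::IoctlFlags;

    fn check_ioctls(ioctls: IoctlFlags) -> Result<()> {
        // All three operations are required later, so refuse to start if any
        // is missing from what register() returned.
        let required = IoctlFlags::WAKE | IoctlFlags::COPY | IoctlFlags::ZEROPAGE;
        if !ioctls.contains(required) {
            bail!(
                "required user fault ioctls not supported by the kernel; found: {:?}",
                ioctls
            );
        }
        Ok(())
    }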
@@ -442,7 +411,7 @@ impl Drop for PageFaultHandler {
     fn drop(&mut self) {
         // The handler thread should terminate once all monitored regions of memory are unmapped.
         // The pooling instance allocator ensures that the regions are unmapped prior to dropping
-        // the user fault handler.
+        // the page fault handler.
         if let Some(thread) = self.thread.take() {
             thread
                 .join()
@@ -456,15 +425,12 @@ impl Drop for PageFaultHandler {
 mod test {
     use super::*;
     use crate::{
-        table::max_table_element_size, Imports, InstanceAllocationRequest, InstanceLimits,
-        ModuleLimits, PoolingAllocationStrategy, VMSharedSignatureIndex,
+        Imports, InstanceAllocationRequest, InstanceLimits, ModuleLimits,
+        PoolingAllocationStrategy, VMSharedSignatureIndex,
     };
+    use std::ptr;
     use std::sync::Arc;
-    use wasmtime_environ::{
-        entity::PrimaryMap,
-        wasm::{Memory, Table, TableElementType, WasmType},
-        MemoryPlan, MemoryStyle, Module, TablePlan, TableStyle,
-    };
+    use wasmtime_environ::{entity::PrimaryMap, wasm::Memory, MemoryPlan, MemoryStyle, Module};

     #[cfg(target_pointer_width = "64")]
     #[test]
@@ -476,10 +442,10 @@ mod test {
             imported_globals: 0,
             types: 0,
             functions: 0,
-            tables: 3,
+            tables: 0,
             memories: 2,
             globals: 0,
-            table_elements: 1000,
+            table_elements: 0,
             memory_pages: 2,
         };
         let instance_limits = InstanceLimits {
@@ -490,7 +456,7 @@ mod test {
         let instances =
             InstancePool::new(&module_limits, &instance_limits).expect("should allocate");

-        let locator = AddressLocator::new(&instances);
+        let locator = FaultLocator::new(&instances);

         assert_eq!(locator.instances_start, instances.mapping.as_ptr() as usize);
         assert_eq!(locator.instance_size, 4096);
@@ -505,21 +471,10 @@ mod test {
         );
         assert_eq!(locator.memory_size, WASM_PAGE_SIZE * 10);
         assert_eq!(locator.max_memories, 2);
-        assert_eq!(
-            locator.tables_start,
-            instances.tables.mapping.as_ptr() as usize
-        );
-        assert_eq!(
-            locator.tables_end,
-            locator.tables_start + instances.tables.mapping.len()
-        );
-        assert_eq!(locator.table_size, 8192);

         unsafe {
-            assert!(locator.get_location(0).is_none());
-            assert!(locator
-                .get_location(std::cmp::max(locator.memories_end, locator.tables_end))
-                .is_none());
+            assert!(locator.locate(0).is_none());
+            assert!(locator.locate(locator.memories_end).is_none());

             let mut module = Module::new();

@@ -535,25 +490,13 @@ mod test {
             });
         }

-        for _ in 0..module_limits.tables {
-            module.table_plans.push(TablePlan {
-                table: Table {
-                    wasm_ty: WasmType::FuncRef,
-                    ty: TableElementType::Func,
-                    minimum: 800,
-                    maximum: Some(900),
-                },
-                style: TableStyle::CallerChecksSignature,
-            });
-        }
-
         module_limits.validate(&module).expect("should validate");

         let mut handles = Vec::new();
         let module = Arc::new(module);
         let finished_functions = &PrimaryMap::new();

-        // Allocate the maximum number of instances with the maxmimum number of memories and tables
+        // Allocate the maximum number of instances with the maximum number of memories
         for _ in 0..instances.max_instances {
             handles.push(
                 instances
@@ -570,9 +513,9 @@ mod test {
                         },
                         lookup_shared_signature: &|_| VMSharedSignatureIndex::default(),
                         host_state: Box::new(()),
-                        interrupts: std::ptr::null(),
-                        externref_activations_table: std::ptr::null_mut(),
-                        stack_map_registry: std::ptr::null_mut(),
+                        interrupts: ptr::null(),
+                        externref_activations_table: ptr::null_mut(),
+                        stack_map_registry: ptr::null_mut(),
                     },
                 )
                 .expect("instance should allocate"),
@@ -587,8 +530,8 @@ mod test {
                     + (memory_index * locator.memory_size);

                 // Test for access to first page
-                match locator.get_location(memory_start + 10000) {
-                    Some(AddressLocation::MemoryPage {
+                match locator.locate(memory_start + 10000) {
+                    Some(FaultLocation::MemoryPage {
                         page_addr,
                         len,
                         instance: _,
@@ -604,8 +547,8 @@ mod test {
                 }

                 // Test for access to second page
-                match locator.get_location(memory_start + 1024 + WASM_PAGE_SIZE) {
-                    Some(AddressLocation::MemoryPage {
+                match locator.locate(memory_start + 1024 + WASM_PAGE_SIZE) {
+                    Some(FaultLocation::MemoryPage {
                         page_addr,
                         len,
                         instance: _,
@@ -621,8 +564,8 @@ mod test {
                 }

                 // Test for guard page
-                match locator.get_location(memory_start + 10 + 9 * WASM_PAGE_SIZE) {
-                    Some(AddressLocation::MemoryPage {
+                match locator.locate(memory_start + 10 + 9 * WASM_PAGE_SIZE) {
+                    Some(FaultLocation::MemoryPage {
                         page_addr,
                         len,
                         instance: _,
@@ -639,33 +582,6 @@ mod test {
                 }
             }

-            // Validate table locations
-            for instance_index in 0..instances.max_instances {
-                for table_index in 0..instances.tables.max_tables {
-                    let table_start = locator.tables_start
-                        + (instance_index * locator.table_size * instances.tables.max_tables)
-                        + (table_index * locator.table_size);
-
-                    // Check for an access of index 107 (first page)
-                    match locator.get_location(table_start + (107 * max_table_element_size())) {
-                        Some(AddressLocation::TablePage { page_addr, len }) => {
-                            assert_eq!(page_addr, table_start as _);
-                            assert_eq!(len, locator.page_size);
-                        }
-                        _ => panic!("expected a table page location"),
-                    }
-
-                    // Check for an access of index 799 (second page)
-                    match locator.get_location(table_start + (799 * max_table_element_size())) {
-                        Some(AddressLocation::TablePage { page_addr, len }) => {
-                            assert_eq!(page_addr, (table_start + locator.page_size) as _);
-                            assert_eq!(len, locator.page_size);
-                        }
-                        _ => panic!("expected a table page location"),
-                    }
-                }
-            }
-
             for handle in handles.drain(..) {
                 instances.deallocate(&handle);
             }
@@ -1,27 +1,64 @@
-use crate::Mmap;
-use anyhow::{anyhow, Result};
+use anyhow::{bail, Context, Result};

-pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
-    region::protect(addr, len, region::Protection::READ_WRITE).is_ok()
-}
+fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }

-pub unsafe fn decommit(addr: *mut u8, len: usize) {
-    assert_eq!(
+    if unsafe {
         libc::mmap(
             addr as _,
             len,
-            libc::PROT_NONE,
+            if protect {
+                libc::PROT_NONE
+            } else {
+                libc::PROT_READ | libc::PROT_WRITE
+            },
             libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_FIXED,
             -1,
             0,
-        ) as *mut u8,
-        addr,
-        "mmap failed to remap pages: {}",
-        std::io::Error::last_os_error()
-    );
+        ) as *mut u8
+    } != addr
+    {
+        bail!(
+            "mmap failed to remap pages: {}",
+            std::io::Error::last_os_error()
+        );
+    }
+
+    Ok(())
 }

-pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap> {
-    Mmap::accessible_reserved(accessible_size, mapping_size)
-        .map_err(|e| anyhow!("failed to allocate pool memory: {}", e))
+pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }
+
+    // Just change the protection level to READ|WRITE
+    unsafe {
+        region::protect(addr, len, region::Protection::READ_WRITE)
+            .context("failed to make linear memory pages read/write")
+    }
+}
+
+pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, true)
+}
+
+pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as table pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, false)
+}
+
+pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
+    // A no-op as stack pages remain READ|WRITE
+    Ok(())
+}
+
+pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len, false)
 }
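The shape of `decommit` above is the interesting part: a `MAP_FIXED` anonymous mapping laid over the existing range atomically replaces its contents with zero pages at the requested protection, so freeing the old pages and reprotecting them is a single syscall. The `len == 0` early returns matter too, since `mmap(2)` rejects zero-length requests and the surrounding code now tolerates empty pools. A hypothetical round trip showing how the helpers compose on Linux (for a page-aligned `len`; `round_trip` is illustrative only):

    fn round_trip(len: usize) -> anyhow::Result<()> {
        // Reserve inaccessible address space, as the pooling allocator does.
        let addr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                len,
                libc::PROT_NONE,
                libc::MAP_PRIVATE | libc::MAP_ANON,
                -1,
                0,
            )
        };
        if addr == libc::MAP_FAILED {
            anyhow::bail!("reservation failed: {}", std::io::Error::last_os_error());
        }
        let addr = addr as *mut u8;
        commit_memory_pages(addr, len)?; // pages become READ|WRITE
        unsafe { addr.write(42) }; // now safe to touch
        decommit_memory_pages(addr, len)?; // zeroed and inaccessible again
        unsafe { libc::munmap(addr as _, len) };
        Ok(())
    }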
@@ -1,22 +1,55 @@
-use crate::Mmap;
-use anyhow::{anyhow, Result};
+use anyhow::{bail, Result};
 use winapi::um::memoryapi::{VirtualAlloc, VirtualFree};
 use winapi::um::winnt::{MEM_COMMIT, MEM_DECOMMIT, PAGE_READWRITE};

-pub unsafe fn make_accessible(addr: *mut u8, len: usize) -> bool {
-    // This doesn't use the `region` crate because the memory needs to be committed
-    !VirtualAlloc(addr as _, len, MEM_COMMIT, PAGE_READWRITE).is_null()
+pub fn commit(addr: *mut u8, len: usize) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }
+
+    // Memory needs to be committed, so don't use the `region` crate
+    if unsafe { VirtualAlloc(addr as _, len, MEM_COMMIT, PAGE_READWRITE).is_null() } {
+        bail!("failed to commit memory as read/write");
+    }
+
+    Ok(())
 }

-pub unsafe fn decommit(addr: *mut u8, len: usize) {
-    assert!(
-        VirtualFree(addr as _, len, MEM_DECOMMIT) != 0,
-        "failed to decommit memory pages: {}",
-        std::io::Error::last_os_error()
-    );
+pub fn decommit(addr: *mut u8, len: usize) -> Result<()> {
+    if len == 0 {
+        return Ok(());
+    }
+
+    if unsafe { VirtualFree(addr as _, len, MEM_DECOMMIT) } == 0 {
+        bail!(
+            "failed to decommit memory pages: {}",
+            std::io::Error::last_os_error()
+        );
+    }
+
+    Ok(())
 }

-pub fn create_memory_map(accessible_size: usize, mapping_size: usize) -> Result<Mmap> {
-    Mmap::accessible_reserved(accessible_size, mapping_size)
-        .map_err(|e| anyhow!("failed to allocate pool memory: {}", e))
+pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    commit(addr, len)
+}
+
+pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
+}
+
+pub fn commit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
+    commit(addr, len)
+}
+
+pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
+}
+
+pub fn commit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
+    commit(addr, len)
+}
+
+pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
+    decommit(addr, len)
 }
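On Windows the reserve/commit distinction is real, so `commit_table_pages` and `commit_stack_pages` must actually `VirtualAlloc` with `MEM_COMMIT`, whereas the Unix versions above are no-ops; call sites stay identical across platforms. A small illustrative caller (the name is hypothetical):

    use anyhow::Result;

    // The same call site works against either platform module; only the
    // cost differs (no-op on Unix, MEM_COMMIT on Windows).
    fn prepare_table(addr: *mut u8, len: usize) -> Result<()> {
        commit_table_pages(addr, len)
    }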
@@ -4,6 +4,7 @@

 use crate::mmap::Mmap;
 use crate::vmcontext::VMMemoryDefinition;
+use anyhow::Result;
 use more_asserts::{assert_ge, assert_le};
 use std::cell::{Cell, RefCell};
 use std::cmp::min;
@@ -13,7 +14,7 @@ use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM_MAX_PAGES, WASM_PAGE_SIZE};
 /// A memory allocator
 pub trait RuntimeMemoryCreator: Send + Sync {
     /// Create new RuntimeLinearMemory
-    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>, String>;
+    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>>;
 }

 /// A default memory allocator used by Wasmtime
@@ -21,8 +22,8 @@ pub struct DefaultMemoryCreator;

 impl RuntimeMemoryCreator for DefaultMemoryCreator {
     /// Create new MmapMemory
-    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>, String> {
-        Ok(Box::new(MmapMemory::new(plan)?) as Box<dyn RuntimeLinearMemory>)
+    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>> {
+        Ok(Box::new(MmapMemory::new(plan)?) as _)
     }
 }

@@ -65,7 +66,7 @@ struct WasmMmap {

 impl MmapMemory {
     /// Create a new linear memory instance with specified minimum and maximum number of wasm pages.
-    pub fn new(plan: &MemoryPlan) -> Result<Self, String> {
+    pub fn new(plan: &MemoryPlan) -> Result<Self> {
         // `maximum` cannot be set to more than `65536` pages.
         assert_le!(plan.memory.minimum, WASM_MAX_PAGES);
         assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= WASM_MAX_PAGES);
@@ -177,7 +178,7 @@ enum MemoryStorage {
         base: *mut u8,
         size: Cell<u32>,
         maximum: u32,
-        make_accessible: unsafe fn(*mut u8, usize) -> bool,
+        make_accessible: fn(*mut u8, usize) -> Result<()>,
     },
     Dynamic(Box<dyn RuntimeLinearMemory>),
 }
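Switching the stored callback from `unsafe fn(*mut u8, usize) -> bool` to a safe `fn(*mut u8, usize) -> Result<()>` lets allocation failures carry the underlying OS error out of the static memory path instead of collapsing to `false`. A minimal standalone illustration of the pattern (names are hypothetical):

    use anyhow::Result;

    /// A struct holding a plain (safe) function pointer that reports failure
    /// through Result rather than a bare bool.
    struct Pages {
        base: *mut u8,
        make_accessible: fn(*mut u8, usize) -> Result<()>,
    }

    impl Pages {
        fn commit_prefix(&self, len: usize) -> Result<()> {
            // Call the stored pointer like any function; `?` forwards the
            // underlying error with its context intact.
            (self.make_accessible)(self.base, len)
        }
    }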
@@ -189,10 +190,7 @@ pub struct Memory {

 impl Memory {
     /// Create a new dynamic (movable) memory instance for the specified plan.
-    pub fn new_dynamic(
-        plan: &MemoryPlan,
-        creator: &dyn RuntimeMemoryCreator,
-    ) -> Result<Self, String> {
+    pub fn new_dynamic(plan: &MemoryPlan, creator: &dyn RuntimeMemoryCreator) -> Result<Self> {
         Ok(Self {
             storage: MemoryStorage::Dynamic(creator.new_memory(plan)?),
         })
@@ -203,14 +201,10 @@ impl Memory {
         plan: &MemoryPlan,
         base: *mut u8,
         maximum: u32,
-        make_accessible: unsafe fn(*mut u8, usize) -> bool,
-    ) -> Result<Self, String> {
+        make_accessible: fn(*mut u8, usize) -> Result<()>,
+    ) -> Result<Self> {
         if plan.memory.minimum > 0 {
-            if unsafe {
-                !make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize)
-            } {
-                return Err("memory cannot be made accessible".into());
-            }
+            make_accessible(base, plan.memory.minimum as usize * WASM_PAGE_SIZE as usize)?;
         }

         Ok(Self {
@@ -258,9 +252,7 @@ impl Memory {
                 let start = usize::try_from(old_size).unwrap() * WASM_PAGE_SIZE as usize;
                 let len = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;

-                if unsafe { !make_accessible(base.add(start), len) } {
-                    return None;
-                }
+                make_accessible(unsafe { base.add(start) }, len).ok()?;

                 size.set(new_size);

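The grow path still returns `Option<u32>`, so the new `Result`-returning callback is bridged with `.ok()?`: an `Err` becomes `None` at this boundary (the error detail is dropped) while `Ok(())` falls through. The same bridge in isolation (the function is hypothetical):

    fn grow_sketch(
        make_accessible: fn(*mut u8, usize) -> anyhow::Result<()>,
        base: *mut u8,
        len: usize,
        new_size: u32,
    ) -> Option<u32> {
        // Result::ok() converts Err(_) to None so `?` can propagate it in an
        // Option-returning function.
        make_accessible(base, len).ok()?;
        Some(new_size)
    }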
@@ -1,6 +1,7 @@
 //! Low-level abstraction for allocating and managing zero-filled pages
 //! of memory.

+use anyhow::{bail, Result};
 use more_asserts::assert_le;
 use more_asserts::assert_lt;
 use std::io;
@@ -38,7 +39,7 @@ impl Mmap {
     }

     /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
-    pub fn with_at_least(size: usize) -> Result<Self, String> {
+    pub fn with_at_least(size: usize) -> Result<Self> {
         let page_size = region::page::size();
         let rounded_size = round_up_to_page_size(size, page_size);
         Self::accessible_reserved(rounded_size, rounded_size)
@@ -48,10 +49,7 @@ impl Mmap {
     /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
     /// must be native page-size multiples.
     #[cfg(not(target_os = "windows"))]
-    pub fn accessible_reserved(
-        accessible_size: usize,
-        mapping_size: usize,
-    ) -> Result<Self, String> {
+    pub fn accessible_reserved(accessible_size: usize, mapping_size: usize) -> Result<Self> {
         let page_size = region::page::size();
         assert_le!(accessible_size, mapping_size);
         assert_eq!(mapping_size & (page_size - 1), 0);
@@ -76,7 +74,7 @@ impl Mmap {
             )
         };
         if ptr as isize == -1_isize {
-            return Err(io::Error::last_os_error().to_string());
+            bail!("mmap failed: {}", io::Error::last_os_error());
         }

         Self {
@@ -96,7 +94,7 @@ impl Mmap {
             )
         };
         if ptr as isize == -1_isize {
-            return Err(io::Error::last_os_error().to_string());
+            bail!("mmap failed: {}", io::Error::last_os_error());
         }

         let mut result = Self {
@@ -117,10 +115,7 @@ impl Mmap {
     /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
     /// must be native page-size multiples.
     #[cfg(target_os = "windows")]
-    pub fn accessible_reserved(
-        accessible_size: usize,
-        mapping_size: usize,
-    ) -> Result<Self, String> {
+    pub fn accessible_reserved(accessible_size: usize, mapping_size: usize) -> Result<Self> {
         use winapi::um::memoryapi::VirtualAlloc;
         use winapi::um::winnt::{MEM_COMMIT, MEM_RESERVE, PAGE_NOACCESS, PAGE_READWRITE};

@@ -144,7 +139,7 @@ impl Mmap {
             )
         };
         if ptr.is_null() {
-            return Err(io::Error::last_os_error().to_string());
+            bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
         }

         Self {
@@ -156,7 +151,7 @@ impl Mmap {
         let ptr =
             unsafe { VirtualAlloc(ptr::null_mut(), mapping_size, MEM_RESERVE, PAGE_NOACCESS) };
         if ptr.is_null() {
-            return Err(io::Error::last_os_error().to_string());
+            bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
         }

         let mut result = Self {
@@ -177,7 +172,7 @@ impl Mmap {
     /// `start` and `len` must be native page-size multiples and describe a range within
     /// `self`'s reserved memory.
     #[cfg(not(target_os = "windows"))]
-    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
+    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<()> {
         let page_size = region::page::size();
         assert_eq!(start & (page_size - 1), 0);
         assert_eq!(len & (page_size - 1), 0);
@@ -186,15 +181,18 @@ impl Mmap {

         // Commit the accessible size.
         let ptr = self.ptr as *const u8;
-        unsafe { region::protect(ptr.add(start), len, region::Protection::READ_WRITE) }
-            .map_err(|e| e.to_string())
+        unsafe {
+            region::protect(ptr.add(start), len, region::Protection::READ_WRITE)?;
+        }
+
+        Ok(())
     }

     /// Make the memory starting at `start` and extending for `len` bytes accessible.
     /// `start` and `len` must be native page-size multiples and describe a range within
     /// `self`'s reserved memory.
     #[cfg(target_os = "windows")]
-    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<(), String> {
+    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<()> {
         use winapi::ctypes::c_void;
         use winapi::um::memoryapi::VirtualAlloc;
         use winapi::um::winnt::{MEM_COMMIT, PAGE_READWRITE};
@@ -216,7 +214,7 @@ impl Mmap {
         }
         .is_null()
         {
-            return Err(io::Error::last_os_error().to_string());
+            bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
         }

         Ok(())
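`accessible_reserved` is the reserve-then-commit primitive the pooling allocator builds on: the whole `mapping_size` is claimed inaccessible up front (`PROT_NONE` or `MEM_RESERVE`), only `accessible_size` bytes are made usable, and `make_accessible` commits more later without moving the base. A usage sketch against the API above (sizes must be page multiples, as the assertions require):

    fn reserve_then_grow() -> anyhow::Result<()> {
        let page = region::page::size();
        // One page accessible now, sixteen pages reserved in total.
        let mut map = Mmap::accessible_reserved(page, 16 * page)?;
        // Later: commit the second page in place, keeping the base address.
        map.make_accessible(page, page)?;
        Ok(())
    }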
@@ -3,7 +3,7 @@ use crate::memory::{LinearMemory, MemoryCreator};
 use crate::trampoline::StoreInstanceHandle;
 use crate::Store;
 use crate::{Limits, MemoryType};
-use anyhow::Result;
+use anyhow::{anyhow, Result};
 use wasmtime_environ::entity::PrimaryMap;
 use wasmtime_environ::{wasm, MemoryPlan, MemoryStyle, Module, WASM_PAGE_SIZE};
 use wasmtime_runtime::{RuntimeLinearMemory, RuntimeMemoryCreator, VMMemoryDefinition};
@@ -57,7 +57,7 @@ impl RuntimeLinearMemory for LinearMemoryProxy {
 pub(crate) struct MemoryCreatorProxy(pub Arc<dyn MemoryCreator>);

 impl RuntimeMemoryCreator for MemoryCreatorProxy {
-    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>, String> {
+    fn new_memory(&self, plan: &MemoryPlan) -> Result<Box<dyn RuntimeLinearMemory>> {
         let ty = MemoryType::new(Limits::new(plan.memory.minimum, plan.memory.maximum));
         let reserved_size_in_bytes = match plan.style {
             MemoryStyle::Static { bound } => Some(bound as u64 * WASM_PAGE_SIZE as u64),
@@ -66,5 +66,6 @@ impl RuntimeMemoryCreator for MemoryCreatorProxy {
         self.0
             .new_memory(ty, reserved_size_in_bytes, plan.offset_guard_size)
             .map(|mem| Box::new(LinearMemoryProxy { mem }) as Box<dyn RuntimeLinearMemory>)
+            .map_err(|e| anyhow!(e))
     }
 }
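The embedder-facing `MemoryCreator` trait still reports errors as `String`, which does not implement `std::error::Error`, so plain `?` cannot convert it; `anyhow!(e)` wraps the message at the proxy boundary. The same idiom in isolation (`fallible` is a stand-in for the trait method):

    use anyhow::{anyhow, Result};

    fn fallible(ok: bool) -> std::result::Result<u32, String> {
        if ok {
            Ok(1)
        } else {
            Err("it broke".to_string())
        }
    }

    fn adapted(ok: bool) -> Result<u32> {
        // anyhow!(e) turns the String into an anyhow::Error so `?` and
        // context chaining work downstream.
        fallible(ok).map_err(|e| anyhow!(e))
    }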