Implement on-demand memory initialization for the uffd feature.
This commit implements copying paged initialization data when a linear memory page faults. If the initialization data is "paged", the appropriate initialization pages are copied into the faulting Wasm page (or the page is zeroed if no data is present for it). If the initialization data is not "paged", the Wasm page is zeroed so that module instantiation can initialize the pages.
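As a rough, self-contained sketch of that decision (the `Vec`-based stand-in for the paged initialization map and all names here are illustrative, not the runtime's actual types):

/// What the fault handler should do for one faulting Wasm page.
#[derive(Debug, PartialEq)]
enum PageAction {
    /// Copy this initialization data into the page.
    Copy(Vec<u8>),
    /// No data for this page: hand out a zero page.
    Zero,
}

/// Decide how to populate a faulting Wasm page. `paged_init` stands in for the
/// module's paged initialization map: `Some(pages)` where `pages[i]` holds the
/// data for page `i`, or `None` when the module's data is not in paged form.
fn resolve_fault(paged_init: Option<&[Option<Vec<u8>>]>, page_index: usize) -> PageAction {
    match paged_init {
        // Paged data: copy the page's chunk if present, otherwise zero it.
        Some(pages) => match pages.get(page_index) {
            Some(Some(data)) => PageAction::Copy(data.clone()),
            _ => PageAction::Zero,
        },
        // Not paged: zero the page and let instantiation's segmented
        // initializers write into it afterwards.
        None => PageAction::Zero,
    }
}

fn main() {
    let pages = vec![Some(vec![1u8, 2, 3]), None];
    assert_eq!(resolve_fault(Some(pages.as_slice()), 0), PageAction::Copy(vec![1, 2, 3]));
    assert_eq!(resolve_fault(Some(pages.as_slice()), 1), PageAction::Zero);
    assert_eq!(resolve_fault(None, 0), PageAction::Zero);
}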
@@ -23,7 +23,8 @@ use wasmtime_environ::wasm::{
    TableElementType, WasmType,
};
use wasmtime_environ::{
    ir, Module, ModuleTranslation, ModuleType, OwnedDataInitializer, TableElements, VMOffsets,
    ir, MemoryInitialization, MemoryInitializer, Module, ModuleTranslation, ModuleType,
    TableInitializer, VMOffsets,
};

mod pooling;
@@ -139,7 +140,6 @@ pub unsafe trait InstanceAllocator: Send + Sync {
        &self,
        handle: &InstanceHandle,
        is_bulk_memory: bool,
        data_initializers: &Arc<[OwnedDataInitializer]>,
    ) -> Result<(), InstantiationError>;

    /// Deallocates a previously allocated instance.
@@ -169,6 +169,228 @@ pub unsafe trait InstanceAllocator: Send + Sync {
    unsafe fn deallocate_fiber_stack(&self, stack: *mut u8);
}

fn get_table_init_start(init: &TableInitializer, instance: &Instance) -> usize {
    let mut start = init.offset;

    if let Some(base) = init.base {
        let val = unsafe {
            if let Some(def_index) = instance.module.defined_global_index(base) {
                *instance.global(def_index).as_u32()
            } else {
                *(*instance.imported_global(base).from).as_u32()
            }
        };
        start += usize::try_from(val).unwrap();
    }

    start
}

fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
    for init in &instance.module.table_initializers {
        let start = get_table_init_start(init, instance);
        let table = instance.get_table(init.table_index);

        let size = usize::try_from(table.size()).unwrap();
        if size < start + init.elements.len() {
            return Err(InstantiationError::Link(LinkError(
                "table out of bounds: elements segment does not fit".to_owned(),
            )));
        }
    }

    Ok(())
}

fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
    for init in &instance.module.table_initializers {
        let start = get_table_init_start(init, instance);
        let table = instance.get_table(init.table_index);

        if start
            .checked_add(init.elements.len())
            .map_or(true, |end| end > table.size() as usize)
        {
            return Err(InstantiationError::Trap(Trap::wasm(
                ir::TrapCode::TableOutOfBounds,
            )));
        }

        for (i, func_idx) in init.elements.iter().enumerate() {
            let item = match table.element_type() {
                TableElementType::Func => instance
                    .get_caller_checked_anyfunc(*func_idx)
                    .map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| {
                        f as *const VMCallerCheckedAnyfunc as *mut VMCallerCheckedAnyfunc
                    })
                    .into(),
                TableElementType::Val(_) => {
                    assert!(*func_idx == FuncIndex::reserved_value());
                    TableElement::ExternRef(None)
                }
            };
            table.set(u32::try_from(start + i).unwrap(), item).unwrap();
        }
    }

    Ok(())
}

fn get_memory_init_start(init: &MemoryInitializer, instance: &Instance) -> usize {
    let mut start = init.offset;

    if let Some(base) = init.base {
        let val = unsafe {
            if let Some(def_index) = instance.module.defined_global_index(base) {
                *instance.global(def_index).as_u32()
            } else {
                *(*instance.imported_global(base).from).as_u32()
            }
        };
        start += usize::try_from(val).unwrap();
    }

    start
}

unsafe fn get_memory_slice<'instance>(
    init: &MemoryInitializer,
    instance: &'instance Instance,
) -> &'instance mut [u8] {
    let memory = if let Some(defined_memory_index) =
        instance.module.defined_memory_index(init.memory_index)
    {
        instance.memory(defined_memory_index)
    } else {
        let import = instance.imported_memory(init.memory_index);
        let foreign_instance = (&mut *(import).vmctx).instance();
        let foreign_memory = &mut *(import).from;
        let foreign_index = foreign_instance.memory_index(foreign_memory);
        foreign_instance.memory(foreign_index)
    };
    slice::from_raw_parts_mut(memory.base, memory.current_length)
}

fn check_memory_init_bounds(
    instance: &Instance,
    initializers: &[MemoryInitializer],
) -> Result<(), InstantiationError> {
    for init in initializers {
        let start = get_memory_init_start(init, instance);
        unsafe {
            let mem_slice = get_memory_slice(init, instance);
            if mem_slice.get_mut(start..start + init.data.len()).is_none() {
                return Err(InstantiationError::Link(LinkError(
                    "memory out of bounds: data segment does not fit".into(),
                )));
            }
        }
    }

    Ok(())
}

fn initialize_memories(
    instance: &Instance,
    initializers: &[MemoryInitializer],
) -> Result<(), InstantiationError> {
    for init in initializers {
        let memory = instance.get_memory(init.memory_index);

        let start = get_memory_init_start(init, instance);
        if start
            .checked_add(init.data.len())
            .map_or(true, |end| end > memory.current_length)
        {
            return Err(InstantiationError::Trap(Trap::wasm(
                ir::TrapCode::HeapOutOfBounds,
            )));
        }

        unsafe {
            let mem_slice = get_memory_slice(init, instance);
            let end = start + init.data.len();
            let to_init = &mut mem_slice[start..end];
            to_init.copy_from_slice(&init.data);
        }
    }

    Ok(())
}

fn check_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
    check_table_init_bounds(instance)?;

    match &instance.module.memory_initialization {
        Some(MemoryInitialization::Paged { .. }) | None => {
            // Bounds were checked at compile-time
        }
        Some(MemoryInitialization::OutOfBounds) => {
            return Err(InstantiationError::Link(LinkError(
                "memory out of bounds: data segment does not fit".into(),
            )));
        }
        Some(MemoryInitialization::Segmented(initializers)) => {
            check_memory_init_bounds(instance, initializers)?;
        }
    }

    Ok(())
}

fn initialize_instance(
    instance: &Instance,
    is_bulk_memory: bool,
) -> Result<(), InstantiationError> {
    // If bulk memory is not enabled, bounds check the data and element segments before
    // making any changes. With bulk memory enabled, initializers are processed
    // in-order and side effects are observed up to the point of an out-of-bounds
    // initializer, so the early checking is not desired.
    if !is_bulk_memory {
        check_init_bounds(instance)?;
    }

    // Initialize the tables
    initialize_tables(instance)?;

    // Initialize the memories
    match &instance.module.memory_initialization {
        Some(MemoryInitialization::Paged { page_size, map }) => {
            for (index, pages) in map {
                let memory = instance.memory(index);

                for (page_index, page) in pages.iter().enumerate() {
                    if let Some(data) = page {
                        // Bounds checking should have occurred when the module was compiled
                        // The data should always be page sized
                        assert!((page_index * page_size) < memory.current_length);
                        assert_eq!(data.len(), *page_size);

                        unsafe {
                            ptr::copy_nonoverlapping(
                                data.as_ptr(),
                                memory.base.add(page_index * page_size),
                                data.len(),
                            );
                        }
                    }
                }
            }
        }
        Some(MemoryInitialization::OutOfBounds) => {
            return Err(InstantiationError::Trap(Trap::wasm(
                ir::TrapCode::HeapOutOfBounds,
            )))
        }
        Some(MemoryInitialization::Segmented(initializers)) => {
            initialize_memories(instance, initializers)?;
        }
        None => {}
    }

    Ok(())
}
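For orientation, the three memory-initialization strategies that `initialize_instance` dispatches on can be mirrored by a simplified, self-contained sketch; the real `MemoryInitialization` type lives in `wasmtime_environ` and uses richer types (e.g. index maps keyed by defined memory index), so the shapes below are illustrative only:

// Illustrative mirror of the three initialization strategies.
enum MemoryInit {
    // Data split at compile time into page-sized, bounds-checked chunks;
    // entry i holds the data for page i, if any.
    Paged { page_size: usize, pages: Vec<Option<Vec<u8>>> },
    // Raw data segments applied (and bounds-checked) at instantiation time:
    // (start offset, data) pairs.
    Segmented(Vec<(usize, Vec<u8>)>),
    // A segment was statically known to be out of bounds; instantiation traps.
    OutOfBounds,
}

fn apply(init: &MemoryInit, memory: &mut [u8]) -> Result<(), &'static str> {
    match init {
        MemoryInit::Paged { page_size, pages } => {
            for (i, page) in pages.iter().enumerate() {
                if let Some(data) = page {
                    memory[i * page_size..i * page_size + data.len()].copy_from_slice(data);
                }
            }
            Ok(())
        }
        MemoryInit::Segmented(segments) => {
            for (start, data) in segments {
                let end = start + data.len();
                memory
                    .get_mut(*start..end)
                    .ok_or("data segment does not fit")?
                    .copy_from_slice(data);
            }
            Ok(())
        }
        MemoryInit::OutOfBounds => Err("out of bounds data segment"),
    }
}

fn main() {
    let mut memory = vec![0u8; 8 * 4096];
    let init = MemoryInit::Paged {
        page_size: 4096,
        pages: vec![None, Some(vec![0xAB; 4096]), None],
    };
    apply(&init, &mut memory).unwrap();
    assert_eq!(memory[4096], 0xAB); // page 1 was initialized, page 0 stayed zero
}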

unsafe fn initialize_vmcontext(
    instance: &Instance,
    functions: &[VMFunctionImport],
@@ -350,157 +572,6 @@ impl OnDemandInstanceAllocator {
        }
        Ok(memories)
    }

    fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
        for init in &instance.module.table_elements {
            let start = Self::get_table_init_start(init, instance);
            let table = instance.get_table(init.table_index);

            let size = usize::try_from(table.size()).unwrap();
            if size < start + init.elements.len() {
                return Err(InstantiationError::Link(LinkError(
                    "table out of bounds: elements segment does not fit".to_owned(),
                )));
            }
        }

        Ok(())
    }

    fn get_memory_init_start(init: &OwnedDataInitializer, instance: &Instance) -> usize {
        let mut start = init.location.offset;

        if let Some(base) = init.location.base {
            let val = unsafe {
                if let Some(def_index) = instance.module.defined_global_index(base) {
                    *instance.global(def_index).as_u32()
                } else {
                    *(*instance.imported_global(base).from).as_u32()
                }
            };
            start += usize::try_from(val).unwrap();
        }

        start
    }

    unsafe fn get_memory_slice<'instance>(
        init: &OwnedDataInitializer,
        instance: &'instance Instance,
    ) -> &'instance mut [u8] {
        let memory = if let Some(defined_memory_index) = instance
            .module
            .defined_memory_index(init.location.memory_index)
        {
            instance.memory(defined_memory_index)
        } else {
            let import = instance.imported_memory(init.location.memory_index);
            let foreign_instance = (&mut *(import).vmctx).instance();
            let foreign_memory = &mut *(import).from;
            let foreign_index = foreign_instance.memory_index(foreign_memory);
            foreign_instance.memory(foreign_index)
        };
        slice::from_raw_parts_mut(memory.base, memory.current_length)
    }

    fn check_memory_init_bounds(
        instance: &Instance,
        data_initializers: &[OwnedDataInitializer],
    ) -> Result<(), InstantiationError> {
        for init in data_initializers {
            let start = Self::get_memory_init_start(init, instance);
            unsafe {
                let mem_slice = Self::get_memory_slice(init, instance);
                if mem_slice.get_mut(start..start + init.data.len()).is_none() {
                    return Err(InstantiationError::Link(LinkError(
                        "memory out of bounds: data segment does not fit".into(),
                    )));
                }
            }
        }

        Ok(())
    }

    fn get_table_init_start(init: &TableElements, instance: &Instance) -> usize {
        let mut start = init.offset;

        if let Some(base) = init.base {
            let val = unsafe {
                if let Some(def_index) = instance.module.defined_global_index(base) {
                    *instance.global(def_index).as_u32()
                } else {
                    *(*instance.imported_global(base).from).as_u32()
                }
            };
            start += usize::try_from(val).unwrap();
        }

        start
    }

    fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
        for init in &instance.module.table_elements {
            let start = Self::get_table_init_start(init, instance);
            let table = instance.get_table(init.table_index);

            if start
                .checked_add(init.elements.len())
                .map_or(true, |end| end > table.size() as usize)
            {
                return Err(InstantiationError::Trap(Trap::wasm(
                    ir::TrapCode::TableOutOfBounds,
                )));
            }

            for (i, func_idx) in init.elements.iter().enumerate() {
                let item = match table.element_type() {
                    TableElementType::Func => instance
                        .get_caller_checked_anyfunc(*func_idx)
                        .map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| {
                            f as *const VMCallerCheckedAnyfunc as *mut VMCallerCheckedAnyfunc
                        })
                        .into(),
                    TableElementType::Val(_) => {
                        assert!(*func_idx == FuncIndex::reserved_value());
                        TableElement::ExternRef(None)
                    }
                };
                table.set(u32::try_from(start + i).unwrap(), item).unwrap();
            }
        }

        Ok(())
    }

    /// Initialize the table memory from the provided initializers.
    fn initialize_memories(
        instance: &Instance,
        data_initializers: &[OwnedDataInitializer],
    ) -> Result<(), InstantiationError> {
        for init in data_initializers {
            let memory = instance.get_memory(init.location.memory_index);

            let start = Self::get_memory_init_start(init, instance);
            if start
                .checked_add(init.data.len())
                .map_or(true, |end| end > memory.current_length)
            {
                return Err(InstantiationError::Trap(Trap::wasm(
                    ir::TrapCode::HeapOutOfBounds,
                )));
            }

            unsafe {
                let mem_slice = Self::get_memory_slice(init, instance);
                let end = start + init.data.len();
                let to_init = &mut mem_slice[start..end];
                to_init.copy_from_slice(&init.data);
            }
        }

        Ok(())
    }
}

unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
@@ -561,23 +632,8 @@ unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
        &self,
        handle: &InstanceHandle,
        is_bulk_memory: bool,
        data_initializers: &Arc<[OwnedDataInitializer]>,
    ) -> Result<(), InstantiationError> {
        // Check initializer bounds before initializing anything. Only do this
        // when bulk memory is disabled, since the bulk memory proposal changes
        // instantiation such that the intermediate results of failed
        // initializations are visible.
        if !is_bulk_memory {
            Self::check_table_init_bounds(handle.instance())?;
            Self::check_memory_init_bounds(handle.instance(), data_initializers.as_ref())?;
        }

        // Apply fallible initializers. Note that this can "leak" state even if
        // it fails.
        Self::initialize_tables(handle.instance())?;
        Self::initialize_memories(handle.instance(), data_initializers.as_ref())?;

        Ok(())
        initialize_instance(handle.instance(), is_bulk_memory)
    }

    unsafe fn deallocate(&self, handle: &InstanceHandle) {

@@ -8,13 +8,10 @@
//! when modules can be constrained based on configurable limits.

use super::{
    initialize_vmcontext, FiberStackError, InstanceAllocationRequest, InstanceAllocator,
    InstanceHandle, InstantiationError,
};
use crate::{
    instance::Instance, table::max_table_element_size, Memory, Mmap, OnDemandInstanceAllocator,
    Table, VMContext,
    initialize_instance, initialize_vmcontext, FiberStackError, InstanceAllocationRequest,
    InstanceAllocator, InstanceHandle, InstantiationError,
};
use crate::{instance::Instance, table::max_table_element_size, Memory, Mmap, Table, VMContext};
use rand::Rng;
use std::cell::RefCell;
use std::cmp::min;
@@ -23,8 +20,7 @@ use std::mem;
use std::sync::{Arc, Mutex};
use wasmtime_environ::{
    entity::{EntitySet, PrimaryMap},
    MemoryStyle, Module, ModuleTranslation, OwnedDataInitializer, Tunables, VMOffsets,
    WASM_PAGE_SIZE,
    MemoryStyle, Module, ModuleTranslation, Tunables, VMOffsets, WASM_PAGE_SIZE,
};

cfg_if::cfg_if! {
@@ -35,6 +31,8 @@ cfg_if::cfg_if! {
        mod uffd;
        use uffd as imp;
        use imp::{PageFaultHandler, reset_guard_page};
        use super::{check_init_bounds, initialize_tables};
        use wasmtime_environ::MemoryInitialization;
        use std::sync::atomic::{AtomicBool, Ordering};
    } else if #[cfg(target_os = "linux")] {
        mod linux;
@@ -979,31 +977,29 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
        &self,
        handle: &InstanceHandle,
        is_bulk_memory: bool,
        data_initializers: &Arc<[OwnedDataInitializer]>,
    ) -> Result<(), InstantiationError> {
        // TODO: refactor this implementation
        let instance = handle.instance();

        // Check initializer bounds before initializing anything. Only do this
        // when bulk memory is disabled, since the bulk memory proposal changes
        // instantiation such that the intermediate results of failed
        // initializations are visible.
        if !is_bulk_memory {
            OnDemandInstanceAllocator::check_table_init_bounds(handle.instance())?;
            OnDemandInstanceAllocator::check_memory_init_bounds(
                handle.instance(),
                data_initializers.as_ref(),
            )?;
        cfg_if::cfg_if! {
            if #[cfg(all(feature = "uffd", target_os = "linux"))] {
                match instance.module.memory_initialization {
                    Some(MemoryInitialization::Paged{ .. }) => {
                        if !is_bulk_memory {
                            check_init_bounds(instance)?;
                        }

                        // Initialize the tables
                        initialize_tables(instance)?;

                        // Don't initialize the memory; the fault handler will fill the pages when accessed
                        Ok(())
                    },
                    _ => initialize_instance(instance, is_bulk_memory)
                }
            } else {
                initialize_instance(instance, is_bulk_memory)
            }
        }

        // Apply fallible initializers. Note that this can "leak" state even if
        // it fails.
        OnDemandInstanceAllocator::initialize_tables(handle.instance())?;
        OnDemandInstanceAllocator::initialize_memories(
            handle.instance(),
            data_initializers.as_ref(),
        )?;

        Ok(())
    }

    unsafe fn deallocate(&self, handle: &InstanceHandle) {

@@ -20,7 +20,9 @@ use std::sync::{
};
use std::thread;
use userfaultfd::{Event, FeatureFlags, IoctlFlags, Uffd, UffdBuilder};
use wasmtime_environ::{wasm::DefinedMemoryIndex, WASM_PAGE_SIZE};
use wasmtime_environ::{entity::EntityRef, wasm::DefinedMemoryIndex, MemoryInitialization};

const WASM_PAGE_SIZE: usize = wasmtime_environ::WASM_PAGE_SIZE as usize;

pub unsafe fn make_accessible(_addr: *mut u8, _len: usize) -> bool {
    // A no-op when userfaultfd is used
@@ -191,7 +193,7 @@ impl AddressLocator {
            let index = (addr - self.memories_start) / self.memory_size;
            let memory_index = index % self.max_memories;
            let memory_start = self.memories_start + (index * self.memory_size);
            let page_index = (addr - memory_start) / (WASM_PAGE_SIZE as usize);
            let page_index = (addr - memory_start) / WASM_PAGE_SIZE;
            let instance = self.get_instance(index / self.max_memories);

            let init_page_index = instance
@@ -210,8 +212,8 @@ impl AddressLocator {
                });

            return Some(AddressLocation::MemoryPage {
                page_addr: (memory_start + page_index * (WASM_PAGE_SIZE as usize)) as _,
                len: WASM_PAGE_SIZE as usize,
                page_addr: (memory_start + page_index * WASM_PAGE_SIZE) as _,
                len: WASM_PAGE_SIZE,
                instance,
                memory_index,
                page_index: init_page_index,
@@ -250,18 +252,98 @@ impl AddressLocator {
        }
    }

fn wake_guard_page_access(uffd: &Uffd, page_addr: *const u8, len: usize) -> Result<(), String> {
    unsafe {
        // Set the page to NONE to induce a SIGSEGV for the access on the next retry
        region::protect(page_addr, len, region::Protection::NONE)
            .map_err(|e| format!("failed to change guard page protection: {}", e))?;
unsafe fn wake_guard_page_access(
    uffd: &Uffd,
    page_addr: *const u8,
    len: usize,
) -> Result<(), String> {
    // Set the page to NONE to induce a SIGSEGV for the access on the next retry
    region::protect(page_addr, len, region::Protection::NONE)
        .map_err(|e| format!("failed to change guard page protection: {}", e))?;

        uffd.wake(page_addr as _, len).map_err(|e| {
    uffd.wake(page_addr as _, len).map_err(|e| {
        format!(
            "failed to wake page at {:p} with length {}: {}",
            page_addr, len, e
        )
    })?;

    Ok(())
}

unsafe fn initialize_wasm_page(
    uffd: &Uffd,
    instance: &Instance,
    page_addr: *const u8,
    memory_index: usize,
    page_index: usize,
) -> Result<(), String> {
    if let Some(MemoryInitialization::Paged { page_size, map }) =
        &instance.module.memory_initialization
    {
        let memory_index = DefinedMemoryIndex::new(memory_index);
        let memory = instance.memory(memory_index);
        let pages = &map[memory_index];
        debug_assert_eq!(WASM_PAGE_SIZE % page_size, 0);

        let count = WASM_PAGE_SIZE / page_size;
        let start = page_index * count;

        for i in start..start + count {
            let dst = memory.base.add(i * page_size);

            match pages.get(i) {
                Some(Some(data)) => {
                    log::trace!(
                        "copying page initialization data from {:p} to {:p} with length {}",
                        data,
                        dst,
                        page_size
                    );

                    // Copy the page data without waking
                    uffd.copy(data.as_ptr() as _, dst as _, *page_size, false)
                        .map_err(|e| {
                            format!(
                                "failed to copy page from {:p} to {:p} with length {}: {}",
                                data, dst, page_size, e
                            )
                        })?;
                }
                _ => {
                    log::trace!("zeroing page at {:p} with length {}", dst, page_size);

                    // No data, zero the page without waking
                    uffd.zeropage(dst as _, *page_size, false).map_err(|e| {
                        format!(
                            "failed to zero page at {:p} with length {}: {}",
                            dst, page_size, e
                        )
                    })?;
                }
            }
        }

        // Finally wake the entire wasm page
        uffd.wake(page_addr as _, WASM_PAGE_SIZE).map_err(|e| {
            format!(
                "failed to wake page at {:p} with length {}: {}",
                page_addr, len, e
                page_addr, WASM_PAGE_SIZE, e
            )
        })?;
        })
    } else {
        log::trace!(
            "initialization data is not paged; zeroing Wasm page at {:p}",
            page_addr
        );

        uffd.zeropage(page_addr as _, WASM_PAGE_SIZE, true)
            .map_err(|e| {
                format!(
                    "failed to zero page at {:p} with length {}: {}",
                    page_addr, WASM_PAGE_SIZE, e
                )
            })?;

        Ok(())
    }
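A worked example of the page arithmetic in `initialize_wasm_page` above, assuming the usual 64 KiB Wasm page size and a hypothetical 4 KiB initialization page size:

fn main() {
    // Assumed sizes: 64 KiB Wasm pages, 4 KiB initialization pages.
    let wasm_page_size = 65536usize;
    let init_page_size = 4096usize;
    assert_eq!(wasm_page_size % init_page_size, 0);

    // Mirrors `count` and `start` in initialize_wasm_page.
    let count = wasm_page_size / init_page_size; // 16 init pages per Wasm page
    let faulting_wasm_page = 2;
    let start = faulting_wasm_page * count;

    // Wasm page 2 is populated from initialization pages 32..48,
    // i.e. byte offsets 0x20000..0x30000 of the linear memory.
    assert_eq!((start, start + count), (32, 48));
    assert_eq!(start * init_page_size, 2 * wasm_page_size);
}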
@@ -327,13 +409,13 @@ fn handler_thread(

                match page_index {
                    Some(page_index) => {
                        // TODO: copy the memory initialization data rather than zero the page
                        uffd.zeropage(page_addr as _, len, true).map_err(|e| {
                            format!(
                                "failed to zero page at {:p} with length {}: {}",
                                page_addr, len, e
                            )
                        })?;
                        initialize_wasm_page(
                            &uffd,
                            instance,
                            page_addr,
                            memory_index,
                            page_index,
                        )?;
                    }
                    None => {
                        log::trace!("out of bounds memory access at {:p}", access_addr);
@@ -529,7 +611,7 @@ mod test {
            locator.memories_end,
            locator.memories_start + instances.memories.mapping.len()
        );
        assert_eq!(locator.memory_size, (WASM_PAGE_SIZE * 10) as usize);
        assert_eq!(locator.memory_size, WASM_PAGE_SIZE * 10);
        assert_eq!(locator.max_memories, 2);
        assert_eq!(
            locator.tables_start,
@@ -634,7 +716,7 @@ mod test {
                page_index,
            }) => {
                assert_eq!(page_addr, memory_start as _);
                assert_eq!(len, WASM_PAGE_SIZE as usize);
                assert_eq!(len, WASM_PAGE_SIZE);
                assert_eq!(mem_index, memory_index);
                assert_eq!(page_index, Some(0));
            }
@@ -642,7 +724,7 @@ mod test {
        }

        // Test for access to second page
        match locator.get_location(memory_start + 1024 + WASM_PAGE_SIZE as usize) {
        match locator.get_location(memory_start + 1024 + WASM_PAGE_SIZE) {
            Some(AddressLocation::MemoryPage {
                page_addr,
                len,
@@ -650,8 +732,8 @@ mod test {
                memory_index: mem_index,
                page_index,
            }) => {
                assert_eq!(page_addr, (memory_start + WASM_PAGE_SIZE as usize) as _);
                assert_eq!(len, WASM_PAGE_SIZE as usize);
                assert_eq!(page_addr, (memory_start + WASM_PAGE_SIZE) as _);
                assert_eq!(len, WASM_PAGE_SIZE);
                assert_eq!(mem_index, memory_index);
                assert_eq!(page_index, Some(1));
            }
@@ -659,7 +741,7 @@ mod test {
        }

        // Test for guard page
        match locator.get_location(memory_start + 10 + 9 * WASM_PAGE_SIZE as usize) {
        match locator.get_location(memory_start + 10 + 9 * WASM_PAGE_SIZE) {
            Some(AddressLocation::MemoryPage {
                page_addr,
                len,
@@ -667,11 +749,8 @@ mod test {
                memory_index: mem_index,
                page_index,
            }) => {
                assert_eq!(
                    page_addr,
                    (memory_start + (9 * WASM_PAGE_SIZE as usize)) as _
                );
                assert_eq!(len, WASM_PAGE_SIZE as usize);
                assert_eq!(page_addr, (memory_start + (9 * WASM_PAGE_SIZE)) as _);
                assert_eq!(len, WASM_PAGE_SIZE);
                assert_eq!(mem_index, memory_index);
                assert_eq!(page_index, None);
            }