Remove support for userfaultfd (#4040)

This commit removes support for the `userfaultfd` or "uffd" syscall on
Linux. This support was originally added for users migrating from Lucet
to Wasmtime, but the recently developed kernel support for
copy-on-write memory initialization wound up being more appropriate
for these use cases than userfaultfd. The main reasons for moving to
copy-on-write initialization are:

* The `userfaultfd` feature was never really intended for this style
  of use case with wasm and was susceptible to subtle and rare bugs
  that were extremely difficult to track down. We were never 100%
  certain that there were kernel bugs related to userfaultfd, but the
  suspicion never went away.

* Handling faults with userfaultfd was always slow and single-threaded.
  Only one thread could handle faults, and a round trip to user space
  to handle a fault is inherently slower than handling it entirely in
  the kernel. The single-threaded aspect in particular presented a
  significant scaling bottleneck for embeddings that want to run many
  wasm instances in parallel.

* One of the major benefits of userfaultfd was lazy initialization of
  wasm linear memory which is also achieved with the copy-on-write
  initialization support we have right now.

* One of the suspected benefits of userfaultfd was less frobbing of
  the kernel vma structures when wasm modules are instantiated.
  Currently the copy-on-write support has a mitigation where we attempt
  to reuse memory images where possible to avoid changing vma
  structures. When comparing this to userfaultfd's performance it was
  found that kernel modifications of vmas aren't a worrisome
  bottleneck, so copy-on-write is suitable for this as well.

Overall there are no remaining benefits that userfaultfd gives that
copy-on-write doesn't, and copy-on-write solves one of the major
downsides of userfaultfd: the scaling bottleneck of a single
fault-handling thread. Additionally copy-on-write support seems much
more robust in terms of kernel implementation since it only uses
standard memory-management syscalls, which are heavily exercised.
Finally, copy-on-write support provides a new bonus where read-only
memory in WebAssembly can be mapped directly to the same page in the
kernel's page cache, even amongst many wasm instances of the same
module, which was never possible with userfaultfd.
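
For illustration only, here is a minimal sketch of the mmap technique
that copy-on-write initialization is built on (this is not Wasmtime's
actual implementation, and the `map_image_cow` helper is an assumption
for the example): a prepared memory image is mapped `MAP_PRIVATE`, so
the kernel populates pages lazily on first access and shares clean,
read-only pages between mappings.

    use std::fs::File;
    // NOTE: `rustix::mm` is the module path in recent rustix releases;
    // older versions exposed these items from `rustix::io`.
    use rustix::mm::{mmap, MapFlags, ProtFlags};

    /// Maps `len` bytes of a prepared memory image copy-on-write.
    ///
    /// Reads are served from the shared page cache; the first write to
    /// a page copies it privately for this mapping, leaving the image
    /// intact for other instances of the same module.
    unsafe fn map_image_cow(
        image: &File,
        len: usize,
    ) -> rustix::io::Result<*mut std::ffi::c_void> {
        mmap(
            std::ptr::null_mut(), // let the kernel pick the address
            len,
            ProtFlags::READ | ProtFlags::WRITE,
            MapFlags::PRIVATE, // copy-on-write: first write copies the page
            image,
            0, // offset into the image file
        )
    }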

In light of all this it's expected that all users of userfaultfd should
migrate to the copy-on-write initialization of Wasmtime (which is
enabled by default).
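
As a sketch of what that migration looks like in an embedding (assuming
no other configuration is in play), no code changes should be required
at all since copy-on-write initialization is on by default; the
`Config::memory_init_cow` knob referenced in the diff below is only
needed to make the choice explicit or to opt out:

    use wasmtime::{Config, Engine};

    fn main() -> anyhow::Result<()> {
        let mut config = Config::new();
        // Copy-on-write memory initialization is the default; pass
        // `false` here to opt out instead.
        config.memory_init_cow(true);
        let _engine = Engine::new(&config)?;
        Ok(())
    }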
Commit 3f3afb455e (parent 5774e068b7)
Alex Crichton, 2022-04-18 12:42:26 -05:00, committed via GitHub
19 changed files with 45 additions and 1058 deletions


@@ -36,9 +36,6 @@ rustix = "0.33.6"
[target.'cfg(target_os = "windows")'.dependencies]
winapi = { version = "0.3.7", features = ["winbase", "memoryapi", "errhandlingapi", "handleapi"] }
[target.'cfg(target_os = "linux")'.dependencies]
userfaultfd = { version = "0.4.1", optional = true }
[build-dependencies]
cc = "1.0"
@@ -54,9 +51,6 @@ async = ["wasmtime-fiber"]
# Enables support for the pooling instance allocator
pooling-allocator = []
# Enables support for userfaultfd in the pooling allocator when building on Linux
uffd = ["userfaultfd", "pooling-allocator"]
# Enables trap handling using POSIX signals instead of Mach exceptions on MacOS.
# It is useful for applications that do not bind their own exception ports and
# need portable signal handling.


@@ -16,8 +16,7 @@ fn main() {
// `#[cfg(memory_init_cow)]` will work.
let family = env::var("CARGO_CFG_TARGET_FAMILY").unwrap();
let memory_init_cow = env::var("CARGO_FEATURE_MEMORY_INIT_COW").is_ok();
let is_uffd = env::var("CARGO_FEATURE_UFFD").is_ok();
if &family == "unix" && memory_init_cow && !is_uffd {
if &family == "unix" && memory_init_cow {
println!("cargo:rustc-cfg=memory_init_cow");
}
}


@@ -363,9 +363,9 @@ fn initialize_memories(instance: &mut Instance, module: &Module) -> Result<(), I
},
&mut |memory_index, init| {
// If this initializer applies to a defined memory but that memory
// doesn't need initialization, due to something like uffd or
// copy-on-write pre-initializing it via mmap magic, then this
// initializer can be skipped entirely.
// doesn't need initialization, due to something like copy-on-write
// pre-initializing it via mmap magic, then this initializer can be
// skipped entirely.
if let Some(memory_index) = module.defined_memory_index(memory_index) {
if !instance.memories[memory_index].needs_init() {
return true;


@@ -30,10 +30,6 @@ cfg_if::cfg_if! {
if #[cfg(windows)] {
mod windows;
use windows as imp;
} else if #[cfg(all(feature = "uffd", target_os = "linux"))] {
mod uffd;
use uffd as imp;
use imp::initialize_memory_pool;
} else if #[cfg(target_os = "linux")] {
mod linux;
use linux as imp;
@@ -205,9 +201,6 @@ impl Default for PoolingAllocationStrategy {
/// structure depending on the limits used to create the pool.
///
/// The pool maintains a free list for fast instance allocation.
///
/// The userfault handler relies on how instances are stored in the mapping,
/// so make sure the uffd implementation is kept up-to-date.
#[derive(Debug)]
struct InstancePool {
mapping: Mmap,
@@ -456,7 +449,7 @@ impl InstancePool {
for ((def_mem_idx, memory), base) in
memories.iter_mut().zip(self.memories.get(instance_index))
{
let mut memory = mem::take(memory);
let memory = mem::take(memory);
assert!(memory.is_static());
match memory {
@@ -475,16 +468,6 @@ impl InstancePool {
}
_ => {
// Reset any faulted guard pages as the physical
// memory may be reused for another instance in
// the future.
#[cfg(all(feature = "uffd", target_os = "linux"))]
memory
.reset_guard_pages()
.expect("failed to reset guard pages");
// require mutable on all platforms, not just uffd
drop(&mut memory);
let size = memory.byte_size();
drop(memory);
decommit_memory_pages(base, size)
@@ -667,9 +650,6 @@ impl InstancePool {
///
/// Each instance index into the pool returns an iterator over the base addresses
/// of the instance's linear memories.
///
/// The userfault handler relies on how memories are stored in the mapping,
/// so make sure the uffd implementation is kept up-to-date.
#[derive(Debug)]
struct MemoryPool {
mapping: Mmap,
@@ -778,10 +758,6 @@ impl MemoryPool {
max_memory_size: (instance_limits.memory_pages as usize) * (WASM_PAGE_SIZE as usize),
};
// uffd support requires some special setup for the memory pool
#[cfg(all(feature = "uffd", target_os = "linux"))]
initialize_memory_pool(&pool)?;
Ok(pool)
}
@@ -1044,14 +1020,11 @@ impl StackPool {
/// Note: the resource pools are manually dropped so that the fault handler terminates correctly.
#[derive(Debug)]
pub struct PoolingInstanceAllocator {
// This is manually dropped so that the pools unmap their memory before the page fault handler drops.
instances: mem::ManuallyDrop<InstancePool>,
instances: InstancePool,
#[cfg(all(feature = "async", unix))]
stacks: StackPool,
#[cfg(all(feature = "async", windows))]
stack_size: usize,
#[cfg(all(feature = "uffd", target_os = "linux"))]
_fault_handler: imp::PageFaultHandler,
}
impl PoolingInstanceAllocator {
@@ -1068,33 +1041,18 @@ impl PoolingInstanceAllocator {
let instances = InstancePool::new(strategy, &instance_limits, tunables)?;
#[cfg(all(feature = "uffd", target_os = "linux"))]
let _fault_handler = imp::PageFaultHandler::new(&instances)?;
drop(stack_size); // suppress unused warnings w/o async feature
Ok(Self {
instances: mem::ManuallyDrop::new(instances),
instances: instances,
#[cfg(all(feature = "async", unix))]
stacks: StackPool::new(&instance_limits, stack_size)?,
#[cfg(all(feature = "async", windows))]
stack_size,
#[cfg(all(feature = "uffd", target_os = "linux"))]
_fault_handler,
})
}
}
impl Drop for PoolingInstanceAllocator {
fn drop(&mut self) {
// Manually drop the pools before the fault handler (if uffd is enabled)
// This ensures that any fault handler thread monitoring the pool memory terminates
unsafe {
mem::ManuallyDrop::drop(&mut self.instances);
}
}
}
unsafe impl InstanceAllocator for PoolingInstanceAllocator {
fn validate(&self, module: &Module) -> Result<()> {
self.instances.validate_memory_plans(module)?;
@@ -1132,28 +1090,7 @@ unsafe impl InstanceAllocator for PoolingInstanceAllocator {
is_bulk_memory: bool,
) -> Result<(), InstantiationError> {
let instance = handle.instance_mut();
cfg_if::cfg_if! {
if #[cfg(all(feature = "uffd", target_os = "linux"))] {
match &module.memory_initialization {
wasmtime_environ::MemoryInitialization::Paged { .. } => {
if !is_bulk_memory {
super::check_init_bounds(instance, module)?;
}
// Initialize the tables
super::initialize_tables(instance, module)?;
// Don't initialize the memory; the fault handler will back the pages when accessed
Ok(())
},
_ => initialize_instance(instance, module, is_bulk_memory)
}
} else {
initialize_instance(instance, module, is_bulk_memory)
}
}
initialize_instance(instance, module, is_bulk_memory)
}
unsafe fn deallocate(&self, handle: &InstanceHandle) {


@@ -1,655 +0,0 @@
//! This module implements user space page fault handling with the `userfaultfd` ("uffd") system call on Linux.
//!
//! Handling page faults for memory accesses in regions relating to WebAssembly instances
//! enables the runtime to protect guard pages in user space rather than kernel space (i.e. without `mprotect`).
//!
//! Additionally, linear memories can be lazy-initialized upon first access.
//!
//! Handling faults in user space is slower than handling faults in the kernel. However,
//! in use cases where there is a high number of concurrently executing instances, handling
//! faults in user space rarely requires changing memory protection levels. This can improve
//! concurrency by not taking kernel memory manager locks and may decrease TLB shootdowns,
//! as fewer page table entries need to continually change.
//!
//! Here's how the `uffd` feature works:
//!
//! 1. A user fault file descriptor is created to monitor specific areas of the address space.
//! 2. A thread is spawned to continually read events from the user fault file descriptor.
//! 3. When a page fault event is received, the handler thread calculates where the fault occurred:
//! a) If the fault occurs on a linear memory page, it is handled by either copying the page from
//! initialization data or zeroing it.
//! b) If the fault occurs on a guard page, the protection level of the guard page is changed to
//! force the kernel to signal SIGBUS on the next retry. The faulting page is recorded so the
//! protection level can be reset in the future.
//! 4. Faults to address space relating to an instance may occur from both Wasmtime (e.g. instance
//! initialization) or from WebAssembly code (e.g. reading from or writing to linear memory),
//! therefore the user fault handling must do as little work as possible to handle the fault.
//! 5. When the pooling allocator is dropped, it will drop the memory mappings relating to the pool; this
//! generates unmap events for the fault handling thread, which responds by decrementing the mapping
//! count. When the count reaches zero, the user fault handling thread will gracefully terminate.
//!
//! This feature requires a Linux kernel 4.11 or newer to use.
use super::{InstancePool, MemoryPool};
use crate::instance::Instance;
use anyhow::{bail, Context, Result};
use rustix::io::{madvise, Advice};
use std::thread;
use userfaultfd::{Event, FeatureFlags, IoctlFlags, Uffd, UffdBuilder};
use wasmtime_environ::{DefinedMemoryIndex, EntityRef, MemoryInitialization};
const WASM_PAGE_SIZE: usize = wasmtime_environ::WASM_PAGE_SIZE as usize;
fn decommit(addr: *mut u8, len: usize) -> Result<()> {
if len == 0 {
return Ok(());
}
unsafe {
// On Linux, this tells the kernel to discard the backing of the pages in the range.
// If the discarded pages are part of a uffd region, then the next access will fault
// and the user fault handler will receive the event.
// If the pages are not monitored by uffd, the kernel will zero the page on next access,
// as if it were mmap'd for the first time.
madvise(addr as _, len, Advice::LinuxDontNeed).context("madvise failed to decommit")?;
}
Ok(())
}
pub fn commit_memory_pages(_addr: *mut u8, _len: usize) -> Result<()> {
// A no-op as memory pages remain READ|WRITE with uffd
Ok(())
}
pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
decommit(addr, len)
}
pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
// A no-op as table pages remain READ|WRITE
Ok(())
}
pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
decommit(addr, len)
}
#[cfg(feature = "async")]
pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
// A no-op as stack pages remain READ|WRITE
Ok(())
}
#[cfg(feature = "async")]
pub fn decommit_stack_pages(addr: *mut u8, len: usize) -> Result<()> {
decommit(addr, len)
}
/// This is used to initialize the memory pool when uffd is enabled.
///
/// Without uffd, all of the memory pool's pages are initially protected with `NONE` to treat the entire
/// range as guard pages. When an instance is created, the initial pages of the memory are
/// changed to `READ_WRITE`.
///
//! With uffd, however, the potentially accessible pages of each linear memory are made `READ_WRITE` and
/// the page fault handler will detect an out of bounds access and treat the page, temporarily,
/// as a guard page.
pub(super) fn initialize_memory_pool(pool: &MemoryPool) -> Result<()> {
if pool.memory_reservation_size == 0 || pool.max_memory_size == 0 {
return Ok(());
}
for i in 0..pool.max_instances {
for base in pool.get(i) {
unsafe {
region::protect(
base as _,
pool.max_memory_size,
region::Protection::READ_WRITE,
)
.context("failed to initialize memory pool for uffd")?;
}
}
}
Ok(())
}
/// This is used to reset a linear memory's guard page back to read-write as the page might be accessible
/// again in the future depending on how the linear memory grows.
fn reset_guard_page(addr: *mut u8, len: usize) -> Result<()> {
unsafe {
region::protect(addr, len, region::Protection::READ_WRITE)
.context("failed to reset guard page")
}
}
/// Represents a location of a page fault within monitored regions of memory.
enum FaultLocation {
/// The address location is in a WebAssembly linear memory page.
/// The fault handler will copy the pages from initialization data if necessary.
MemoryPage {
/// The address of the page being accessed.
page_addr: *mut u8,
/// The length of the page being accessed.
len: usize,
/// The instance related to the memory page that was accessed.
instance: *mut Instance,
/// The index of the memory that was accessed.
memory_index: DefinedMemoryIndex,
/// The Wasm page index to initialize if the access was not a guard page.
page_index: Option<usize>,
},
}
/// Used to resolve fault addresses to a location.
///
/// This implementation relies heavily on how the linear memory pool organizes its memory.
///
/// `usize` is used here instead of pointers to keep this `Send` as it gets sent to the handler thread.
struct FaultLocator {
instances_start: usize,
instance_size: usize,
max_instances: usize,
memories_mapping_start: usize,
memories_start: usize,
memories_end: usize,
memory_size: usize,
max_memories: usize,
}
impl FaultLocator {
fn new(instances: &InstancePool) -> Self {
let instances_start = instances.mapping.as_ptr() as usize;
let memories_start =
instances.memories.mapping.as_ptr() as usize + instances.memories.initial_memory_offset;
let memories_end =
instances.memories.mapping.as_ptr() as usize + instances.memories.mapping.len();
// Should always have instances
debug_assert!(instances_start != 0);
Self {
instances_start,
instance_size: instances.instance_size,
memories_mapping_start: instances.memories.mapping.as_ptr() as usize,
max_instances: instances.max_instances,
memories_start,
memories_end,
memory_size: instances.memories.memory_reservation_size,
max_memories: instances.memories.max_memories,
}
}
/// This is super-duper unsafe as it is used from the handler thread
/// to access instance data without any locking primitives.
///
/// It is assumed that the thread that owns the instance being accessed is
/// currently suspended waiting on a fault to be handled.
///
/// Of course a stray faulting memory access from a thread that does not own
/// the instance might introduce a race, but this implementation considers
/// such to be a serious soundness bug not originating in this code.
///
/// If the assumption holds true, accessing the instance data from the handler thread
/// should, in theory, be safe.
unsafe fn get_instance(&self, index: usize) -> *mut Instance {
debug_assert!(index < self.max_instances);
(self.instances_start + (index * self.instance_size)) as *mut Instance
}
unsafe fn locate(&self, addr: usize) -> Option<FaultLocation> {
// Check for a linear memory location
if addr >= self.memories_start && addr < self.memories_end {
let index = (addr - self.memories_start) / self.memory_size;
let memory_index = DefinedMemoryIndex::new(index % self.max_memories);
let memory_start = self.memories_start + (index * self.memory_size);
let page_index = (addr - memory_start) / WASM_PAGE_SIZE;
let instance = self.get_instance(index / self.max_memories);
let init_page_index = (*instance).memories.get(memory_index).and_then(|m| {
if (addr - memory_start) < m.byte_size() {
Some(page_index)
} else {
None
}
});
return Some(FaultLocation::MemoryPage {
page_addr: (memory_start + page_index * WASM_PAGE_SIZE) as _,
len: WASM_PAGE_SIZE,
instance,
memory_index,
page_index: init_page_index,
});
}
None
}
}
/// This is called following a fault on a guard page.
///
/// Because the region being monitored is protected read-write, this needs to set the
/// protection level to `NONE` before waking the page.
///
/// This will cause the kernel to raise a SIGBUS when retrying the fault.
unsafe fn wake_guard_page_access(uffd: &Uffd, page_addr: *const u8, len: usize) -> Result<()> {
// Set the page to NONE to induce a SIGBUS for the access on the next retry
region::protect(page_addr, len, region::Protection::NONE)
.context("failed to change guard page protection")?;
uffd.wake(page_addr as _, len)
.context("failed to wake guard page access")?;
Ok(())
}
/// This is called to initialize a linear memory page (64 KiB).
///
/// If paged initialization is used for the module, then we can instruct the kernel to back the page with
/// what is already stored in the initialization data; if the page isn't in the initialization data,
/// it will be zeroed instead.
///
/// If paged initialization isn't being used, we zero the page. Initialization happens
/// at module instantiation in this case and the segment data will be then copied to the zeroed page.
unsafe fn initialize_wasm_page(
uffd: &Uffd,
instance: &Instance,
page_addr: *const u8,
memory_index: DefinedMemoryIndex,
page_index: usize,
) -> Result<()> {
// Check for paged initialization and copy the page if present in the initialization data
if let MemoryInitialization::Paged { map, .. } =
&instance.runtime_info.module().memory_initialization
{
let memory_index = instance.module().memory_index(memory_index);
let pages = &map[memory_index];
let pos = pages.binary_search_by_key(&((page_index * WASM_PAGE_SIZE) as u64), |k| k.offset);
if let Ok(i) = pos {
let data = instance.wasm_data(pages[i].data.clone());
debug_assert_eq!(data.len(), WASM_PAGE_SIZE);
log::trace!(
"copying linear memory page from {:p} to {:p}",
data.as_ptr(),
page_addr
);
uffd.copy(data.as_ptr() as _, page_addr as _, WASM_PAGE_SIZE, true)
.context("failed to copy linear memory page")?;
return Ok(());
}
}
log::trace!("zeroing linear memory page at {:p}", page_addr);
uffd.zeropage(page_addr as _, WASM_PAGE_SIZE, true)
.context("failed to zero linear memory page")?;
Ok(())
}
unsafe fn handle_page_fault(
uffd: &Uffd,
locator: &FaultLocator,
addr: *mut std::ffi::c_void,
) -> Result<()> {
match locator.locate(addr as usize) {
Some(FaultLocation::MemoryPage {
page_addr,
len,
instance,
memory_index,
page_index,
}) => {
log::trace!(
"handling fault in linear memory at address {:p} on page {:p}",
addr,
page_addr
);
match page_index {
Some(page_index) => {
initialize_wasm_page(&uffd, &*instance, page_addr, memory_index, page_index)?;
}
None => {
log::trace!("out of bounds memory access at {:p}", addr);
// Record the guard page fault so the page protection level can be reset later
(*instance).memories[memory_index].record_guard_page_fault(
page_addr,
len,
reset_guard_page,
);
wake_guard_page_access(&uffd, page_addr, len)?;
}
}
}
None => {
bail!(
"failed to locate fault address {:p} in registered memory regions",
addr
);
}
}
Ok(())
}
fn fault_handler_thread(uffd: Uffd, locator: FaultLocator) -> Result<()> {
loop {
match uffd.read_event().expect("failed to read event") {
Some(Event::Unmap { start, end }) => {
log::trace!("memory region unmapped: {:p}-{:p}", start, end);
let (start, end) = (start as usize, end as usize);
if start == locator.memories_mapping_start && end == locator.memories_end {
break;
} else {
panic!("unexpected memory region unmapped");
}
}
Some(Event::Pagefault { addr, .. }) => unsafe {
handle_page_fault(&uffd, &locator, addr as _)?
},
Some(_) => continue,
None => bail!("no event was read from the user fault descriptor"),
}
}
log::trace!("fault handler thread has successfully terminated");
Ok(())
}
#[derive(Debug)]
pub struct PageFaultHandler {
thread: Option<thread::JoinHandle<Result<()>>>,
}
impl PageFaultHandler {
pub(super) fn new(instances: &InstancePool) -> Result<Self> {
let uffd = UffdBuilder::new()
.close_on_exec(true)
.require_features(FeatureFlags::EVENT_UNMAP)
.create()
.context("failed to create user fault descriptor")?;
// Register the linear memory pool with the userfault fd
let start = instances.memories.mapping.as_ptr();
let len = instances.memories.mapping.len();
let thread = if !start.is_null() && len > 0 {
let ioctls = uffd
.register(start as _, len)
.context("failed to register user fault range")?;
if !ioctls.contains(IoctlFlags::WAKE | IoctlFlags::COPY | IoctlFlags::ZEROPAGE) {
bail!(
"required user fault ioctls not supported by the kernel; found: {:?}",
ioctls,
);
}
log::trace!(
"user fault handling enabled on linear memory pool at {:p} with size {}",
start,
len
);
let locator = FaultLocator::new(&instances);
Some(
thread::Builder::new()
.name("page fault handler".into())
.spawn(move || fault_handler_thread(uffd, locator))
.context("failed to spawn page fault handler thread")?,
)
} else {
log::trace!("user fault handling disabled as there is no linear memory pool");
None
};
Ok(Self { thread })
}
}
impl Drop for PageFaultHandler {
fn drop(&mut self) {
// The handler thread should terminate once all monitored regions of memory are unmapped.
// The pooling instance allocator ensures that the regions are unmapped prior to dropping
// the page fault handler.
if let Some(thread) = self.thread.take() {
thread
.join()
.expect("failed to join page fault handler thread")
.expect("fault handler thread failed");
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::{
Imports, InstanceAllocationRequest, InstanceLimits, PoolingAllocationStrategy, Store,
StorePtr,
};
use std::sync::atomic::AtomicU64;
use std::sync::Arc;
use wasmtime_environ::{Memory, MemoryPlan, MemoryStyle, Module, Tunables};
#[cfg(target_pointer_width = "64")]
#[test]
fn test_address_locator() {
let instance_limits = InstanceLimits {
count: 3,
tables: 0,
memories: 2,
table_elements: 0,
memory_pages: 2,
size: 1000,
..Default::default()
};
let tunables = Tunables {
static_memory_bound: 10,
static_memory_offset_guard_size: 0,
guard_before_linear_memory: false,
..Tunables::default()
};
let instances = InstancePool::new(
PoolingAllocationStrategy::Random,
&instance_limits,
&tunables,
)
.expect("should allocate");
let locator = FaultLocator::new(&instances);
assert_eq!(locator.instances_start, instances.mapping.as_ptr() as usize);
assert_eq!(locator.instance_size, 1008);
assert_eq!(locator.max_instances, 3);
assert_eq!(
locator.memories_start,
instances.memories.mapping.as_ptr() as usize
);
assert_eq!(
locator.memories_end,
locator.memories_start + instances.memories.mapping.len()
);
assert_eq!(locator.memory_size, WASM_PAGE_SIZE * 10);
assert_eq!(locator.max_memories, 2);
unsafe {
assert!(locator.locate(0).is_none());
assert!(locator.locate(locator.memories_end).is_none());
let mut module = Module::new();
for _ in 0..instance_limits.memories {
module.memory_plans.push(MemoryPlan {
memory: Memory {
minimum: 2,
maximum: Some(2),
shared: false,
memory64: false,
},
style: MemoryStyle::Static { bound: 1 },
offset_guard_size: 0,
pre_guard_size: 0,
});
}
// An InstanceAllocationRequest with a module must also have
// a non-null StorePtr. Here we mock just enough of a store
// to satisfy this test.
struct MockStore {
table: crate::VMExternRefActivationsTable,
info: MockModuleInfo,
}
unsafe impl Store for MockStore {
fn vmruntime_limits(&self) -> *mut crate::VMRuntimeLimits {
std::ptr::null_mut()
}
fn externref_activations_table(
&mut self,
) -> (
&mut crate::VMExternRefActivationsTable,
&dyn crate::ModuleInfoLookup,
) {
(&mut self.table, &self.info)
}
fn memory_growing(
&mut self,
_current: usize,
_desired: usize,
_maximum: Option<usize>,
) -> Result<bool, anyhow::Error> {
Ok(true)
}
fn memory_grow_failed(&mut self, _error: &anyhow::Error) {}
fn table_growing(
&mut self,
_current: u32,
_desired: u32,
_maximum: Option<u32>,
) -> Result<bool, anyhow::Error> {
Ok(true)
}
fn table_grow_failed(&mut self, _error: &anyhow::Error) {}
fn out_of_gas(&mut self) -> Result<(), anyhow::Error> {
Ok(())
}
fn epoch_ptr(&self) -> *const AtomicU64 {
std::ptr::null()
}
fn new_epoch(&mut self) -> Result<u64, anyhow::Error> {
Ok(0)
}
}
struct MockModuleInfo;
impl crate::ModuleInfoLookup for MockModuleInfo {
fn lookup(&self, _pc: usize) -> Option<Arc<dyn crate::ModuleInfo>> {
None
}
}
let mut mock_store = MockStore {
table: crate::VMExternRefActivationsTable::new(),
info: MockModuleInfo,
};
let mut handles = Vec::new();
let module = Arc::new(module);
// Allocate the maximum number of instances with the maximum number of memories
for _ in 0..instances.max_instances {
handles.push(
instances
.allocate(InstanceAllocationRequest {
runtime_info: &super::super::test::empty_runtime_info(module.clone()),
imports: Imports {
functions: &[],
tables: &[],
memories: &[],
globals: &[],
},
host_state: Box::new(()),
store: StorePtr::new(&mut mock_store),
})
.expect("instance should allocate"),
);
}
// Validate memory locations
for instance_index in 0..instances.max_instances {
for memory_index in 0..instances.memories.max_memories {
let memory_start = locator.memories_start
+ (instance_index * locator.memory_size * locator.max_memories)
+ (memory_index * locator.memory_size);
// Test for access to first page
match locator.locate(memory_start + 10000) {
Some(FaultLocation::MemoryPage {
page_addr,
len,
instance: _,
memory_index: mem_index,
page_index,
}) => {
assert_eq!(page_addr, memory_start as _);
assert_eq!(len, WASM_PAGE_SIZE);
assert_eq!(mem_index, DefinedMemoryIndex::new(memory_index));
assert_eq!(page_index, Some(0));
}
_ => panic!("expected a memory page location"),
}
// Test for access to second page
match locator.locate(memory_start + 1024 + WASM_PAGE_SIZE) {
Some(FaultLocation::MemoryPage {
page_addr,
len,
instance: _,
memory_index: mem_index,
page_index,
}) => {
assert_eq!(page_addr, (memory_start + WASM_PAGE_SIZE) as _);
assert_eq!(len, WASM_PAGE_SIZE);
assert_eq!(mem_index, DefinedMemoryIndex::new(memory_index));
assert_eq!(page_index, Some(1));
}
_ => panic!("expected a memory page location"),
}
// Test for guard page
match locator.locate(memory_start + 10 + 9 * WASM_PAGE_SIZE) {
Some(FaultLocation::MemoryPage {
page_addr,
len,
instance: _,
memory_index: mem_index,
page_index,
}) => {
assert_eq!(page_addr, (memory_start + (9 * WASM_PAGE_SIZE)) as _);
assert_eq!(len, WASM_PAGE_SIZE);
assert_eq!(mem_index, DefinedMemoryIndex::new(memory_index));
assert_eq!(page_index, None);
}
_ => panic!("expected a memory page location"),
}
}
}
for handle in handles.drain(..) {
instances.deallocate(&handle);
}
}
}
}


@@ -276,11 +276,6 @@ pub enum Memory {
/// The image management, if any, for this memory. Owned here and
/// returned to the pooling allocator when termination occurs.
memory_image: Option<MemoryImageSlot>,
/// Stores the pages in the linear memory that have faulted as guard pages when using the `uffd` feature.
/// These pages need their protection level reset before the memory can grow.
#[cfg(all(feature = "uffd", target_os = "linux"))]
guard_page_faults: Vec<(usize, usize, fn(*mut u8, usize) -> Result<()>)>,
},
/// A "dynamic" memory whose data is managed at runtime and lifetime is tied
@@ -340,8 +335,6 @@ impl Memory {
size: minimum,
make_accessible,
memory_image,
#[cfg(all(feature = "uffd", target_os = "linux"))]
guard_page_faults: Vec::new(),
})
}
@@ -531,17 +524,6 @@ impl Memory {
}
}
#[cfg(all(feature = "uffd", target_os = "linux"))]
{
if self.is_static() {
// Reset any faulted guard pages before growing the memory.
if let Err(e) = self.reset_guard_pages() {
store.memory_grow_failed(&e);
return Ok(None);
}
}
}
match self {
Memory::Static {
base,
@@ -606,53 +588,6 @@ impl Memory {
Memory::Dynamic(mem) => mem.vmmemory(),
}
}
/// Records a faulted guard page in a static memory.
///
/// This is used to track faulted guard pages that need to be reset for the uffd feature.
///
/// This function will panic if called on a dynamic memory.
#[cfg(all(feature = "uffd", target_os = "linux"))]
pub(crate) fn record_guard_page_fault(
&mut self,
page_addr: *mut u8,
size: usize,
reset: fn(*mut u8, usize) -> Result<()>,
) {
match self {
Memory::Static {
guard_page_faults, ..
} => {
guard_page_faults.push((page_addr as usize, size, reset));
}
Memory::Dynamic(_) => {
unreachable!("dynamic memories should not have guard page faults")
}
}
}
/// Resets the previously faulted guard pages of a static memory.
///
/// This is used to reset the protection of any guard pages that were previously faulted.
///
/// This function will panic if called on a dynamic memory.
#[cfg(all(feature = "uffd", target_os = "linux"))]
pub(crate) fn reset_guard_pages(&mut self) -> Result<()> {
match self {
Memory::Static {
guard_page_faults, ..
} => {
for (addr, len, reset) in guard_page_faults.drain(..) {
reset(addr as *mut u8, len)?;
}
}
Memory::Dynamic(_) => {
unreachable!("dynamic memories should not have guard page faults")
}
}
Ok(())
}
}
// The default memory representation is an empty memory that cannot grow.
@@ -663,8 +598,6 @@ impl Default for Memory {
size: 0,
make_accessible: Some(|_, _| unreachable!()),
memory_image: None,
#[cfg(all(feature = "uffd", target_os = "linux"))]
guard_page_faults: Vec::new(),
}
}
}


@@ -89,9 +89,6 @@ async = ["wasmtime-fiber", "wasmtime-runtime/async", "async-trait"]
# Enables support for the pooling instance allocation strategy
pooling-allocator = ["wasmtime-runtime/pooling-allocator"]
# Enables userfaultfd support in the runtime's pooling allocator when building on Linux
uffd = ["wasmtime-runtime/uffd", "pooling-allocator"]
# Enables support for all architectures in Cranelift, allowing
# cross-compilation using the `wasmtime` crate's API, notably the
# `Engine::precompile_module` function.
@@ -106,8 +103,7 @@ posix-signals-on-macos = ["wasmtime-runtime/posix-signals-on-macos"]
# compatible linear memories. For more information see the documentation of
# `Config::memory_init_cow`.
#
# Enabling this feature has no effect on unsupported platforms or when the
# `uffd` feature is enabled.
# Enabling this feature has no effect on unsupported platforms.
memory-init-cow = ["wasmtime-runtime/memory-init-cow"]
# Enables runtime support necessary to capture backtraces of WebAssembly code


@@ -97,7 +97,6 @@ pub struct Config {
pub(crate) async_support: bool,
pub(crate) module_version: ModuleVersionStrategy,
pub(crate) parallel_compilation: bool,
pub(crate) paged_memory_initialization: bool,
pub(crate) memory_init_cow: bool,
pub(crate) memory_guaranteed_dense_image_size: u64,
pub(crate) force_memory_init_memfd: bool,
@@ -132,8 +131,6 @@ impl Config {
async_support: false,
module_version: ModuleVersionStrategy::default(),
parallel_compilation: true,
// Default to paged memory initialization when using uffd on linux
paged_memory_initialization: cfg!(all(target_os = "linux", feature = "uffd")),
memory_init_cow: true,
memory_guaranteed_dense_image_size: 16 << 20,
force_memory_init_memfd: false,
@@ -822,27 +819,6 @@ impl Config {
self
}
/// Sets whether or not an attempt is made to initialize linear memories by page.
///
/// This setting is `false` by default and Wasmtime initializes linear memories
/// by copying individual data segments from the compiled module.
///
/// Setting this to `true` will cause compilation to attempt to organize the
/// data segments into WebAssembly pages and linear memories are initialized by
/// copying each page rather than individual data segments.
///
/// Modules that import a memory or have data segments that use a global base
/// will continue to be initialized by copying each data segment individually.
///
/// When combined with the `uffd` feature on Linux, this will allow Wasmtime
/// to delay initialization of a linear memory page until it is accessed
/// for the first time during WebAssembly execution; this may improve
/// instantiation performance as a result.
pub fn paged_memory_initialization(&mut self, value: bool) -> &mut Self {
self.paged_memory_initialization = value;
self
}
/// Configures the maximum size, in bytes, where a linear memory is
/// considered static, above which it'll be considered dynamic.
///
@@ -1342,7 +1318,6 @@ impl Clone for Config {
async_stack_size: self.async_stack_size,
module_version: self.module_version.clone(),
parallel_compilation: self.parallel_compilation,
paged_memory_initialization: self.paged_memory_initialization,
memory_init_cow: self.memory_init_cow,
memory_guaranteed_dense_image_size: self.memory_guaranteed_dense_image_size,
force_memory_init_memfd: self.force_memory_init_memfd,


@@ -265,12 +265,6 @@
//! * `vtune` - Enabled by default, this feature compiles in support for VTune
//! profiling of JIT code.
//!
//! * `uffd` - Not enabled by default. This feature enables `userfaultfd` support
//! when using the pooling instance allocator. As handling page faults in user space
//! comes with a performance penalty, this feature should only be enabled when kernel
//! lock contention is hampering multithreading throughput. This feature is only
//! supported on Linux and requires a Linux kernel version 4.11 or higher.
//!
//! * `all-arch` - Not enabled by default. This feature compiles in support for
//! all architectures for both the JIT compiler and the `wasmtime compile` CLI
//! command.


@@ -387,12 +387,6 @@ impl Module {
.compiler()
.emit_obj(&translation, &types, funcs, tunables, &mut obj)?;
// If configured, attempt to use paged memory initialization
// instead of the default mode of memory initialization
if engine.config().paged_memory_initialization {
translation.try_paged_init();
}
// If configured attempt to use static memory initialization which
// can either at runtime be implemented as a single memcpy to
// initialize memory or otherwise enabling virtual-memory-tricks


@@ -48,9 +48,9 @@ impl ModuleRegistry {
// If there's not actually any functions in this module then we may
// still need to preserve it for its data segments. Instances of this
// module will hold a pointer to the data stored in the module itself,
// and for schemes like uffd this performs lazy initialization which
// could use the module in the future. For that reason we continue to
// register empty modules and retain them.
// and for schemes that perform lazy initialization which could use the
// module in the future. For that reason we continue to register empty
// modules and retain them.
if compiled_module.finished_functions().len() == 0 {
self.modules_without_code.push(compiled_module.clone());
return;