Merge pull request from GHSA-wh6w-3828-g9qf

* Unconditionally use `MemoryImageSlot`

This commit removes the internal branching within the pooling instance
allocator that sometimes used a `MemoryImageSlot` and sometimes did not.
Instead the slot is now used unconditionally in all situations on all
platforms. This fixes an issue where the state of a slot could get
corrupted if the modules instantiated into it switched from having images
to not having an image, or vice versa.

The bulk of this commit is the removal of the `memory-init-cow`
compile-time feature in addition to adding Windows support to the
`cow.rs` file.
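
To illustrate the failure mode described above, here is a small self-contained
sketch. It is a toy model only: the `Slot` type and its single field are
invented for this example and are not the wasmtime-runtime API; the real
`MemoryImageSlot` tracks considerably more state.

// Toy model of the bug (hypothetical types, not the wasmtime-runtime API).
// The pooling allocator reuses a fixed virtual-memory slot across
// instantiations. If only image-backed modules go through the slot's
// bookkeeping, a previous image can stay mapped when an image-less module
// reuses the slot.

#[derive(Default)]
struct Slot {
    /// Bytes of a previously instantiated module's heap image still mapped.
    image_bytes_mapped: usize,
}

impl Slot {
    /// Unconditional path: every instantiation, with or without an image,
    /// goes through the slot, so leftovers are always cleared first.
    fn instantiate(&mut self, image_len: Option<usize>) {
        if self.image_bytes_mapped != 0 {
            self.image_bytes_mapped = 0; // remap the old image as zeroes
        }
        if let Some(len) = image_len {
            self.image_bytes_mapped = len; // map the new image copy-on-write
        }
    }
}

fn main() {
    let mut slot = Slot::default();
    slot.instantiate(Some(4096)); // module with a heap image occupies the slot
    // Previously, a module *without* an image bypassed the slot entirely,
    // leaving the 4096 image bytes mapped; now the slot is always reset.
    slot.instantiate(None);
    assert_eq!(slot.image_bytes_mapped, 0);
}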

* Fix compile on Unix

* Add a stricter assertion for static memory bounds

Double-check that, when a memory is allocated, the configuration it requires
is satisfied by the pooling allocator.
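
In units, the added check converts the static plan's page bound into bytes and
compares it against the per-memory reservation of the pool. A minimal sketch of
that arithmetic follows (illustrative function and parameter names, not the
exact source):

// Illustrative form of the double-check: a static memory plan's bound,
// converted from Wasm pages to bytes, must fit within the bytes the pooling
// allocator reserves for each memory slot.

const WASM_PAGE_SIZE: u64 = 65_536;

fn check_static_bound(bound_in_pages: u64, slot_bytes: u64) {
    let bound_in_bytes = bound_in_pages * WASM_PAGE_SIZE;
    assert!(
        bound_in_bytes <= slot_bytes,
        "memory size allocated per-memory is too small to satisfy the static bound"
    );
}

fn main() {
    // A 16 MiB per-memory reservation comfortably satisfies a 4-page static bound.
    check_static_bound(4, 16 * 1024 * 1024);
}
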
Alex Crichton
2022-11-10 11:34:38 -06:00
committed by GitHub
parent 47fa1ad6a8
commit 3535acbf3b
16 changed files with 245 additions and 331 deletions

View File

@@ -181,13 +181,11 @@ default = [
     "vtune",
     "wasi-nn",
     "pooling-allocator",
-    "memory-init-cow",
 ]
 jitdump = ["wasmtime/jitdump"]
 vtune = ["wasmtime/vtune"]
 wasi-crypto = ["dep:wasmtime-wasi-crypto"]
 wasi-nn = ["dep:wasmtime-wasi-nn"]
-memory-init-cow = ["wasmtime/memory-init-cow", "wasmtime-cli-flags/memory-init-cow"]
 pooling-allocator = ["wasmtime/pooling-allocator", "wasmtime-cli-flags/pooling-allocator"]
 all-arch = ["wasmtime/all-arch"]
 posix-signals-on-macos = ["wasmtime/posix-signals-on-macos"]

View File

@@ -33,9 +33,8 @@ cap-std = { workspace = true, optional = true }
 wasi-common = { workspace = true, optional = true }

 [features]
-default = ['jitdump', 'wat', 'wasi', 'cache', 'parallel-compilation', 'memory-init-cow']
+default = ['jitdump', 'wat', 'wasi', 'cache', 'parallel-compilation']
 jitdump = ["wasmtime/jitdump"]
 cache = ["wasmtime/cache"]
 parallel-compilation = ['wasmtime/parallel-compilation']
 wasi = ['wasi-cap-std-sync', 'wasmtime-wasi', 'cap-std', 'wasi-common']
-memory-init-cow = ["wasmtime/memory-init-cow"]

View File

@@ -25,5 +25,4 @@ default = [
     "wasmtime/parallel-compilation",
 ]
 pooling-allocator = []
-memory-init-cow = []
 component-model = []

View File

@@ -218,7 +218,6 @@ pub struct CommonOptions {
     /// Disable the default of attempting to initialize linear memory via a
     /// copy-on-write mapping
-    #[cfg(feature = "memory-init-cow")]
     #[clap(long)]
     pub disable_memory_init_cow: bool,
@@ -324,7 +323,6 @@ impl CommonOptions {
         config.epoch_interruption(self.epoch_interruption);
         config.generate_address_map(!self.disable_address_map);
-        #[cfg(feature = "memory-init-cow")]
         config.memory_init_cow(!self.disable_memory_init_cow);
         #[cfg(feature = "pooling-allocator")]

View File

@@ -23,7 +23,7 @@ thiserror = "1.0.4"
 cfg-if = "1.0"
 rand = "0.8.3"
 anyhow = { workspace = true }
-memfd = { version = "0.6.1", optional = true }
+memfd = "0.6.1"
 paste = "1.0.3"
 encoding_rs = { version = "0.8.31", optional = true }
@@ -52,8 +52,6 @@ cc = "1.0"
 maintenance = { status = "actively-developed" }

 [features]
-memory-init-cow = ['memfd']
 async = ["wasmtime-fiber"]

 # Enables support for the pooling instance allocator

View File

@@ -14,13 +14,4 @@ fn main() {
     println!("cargo:rerun-if-changed=src/helpers.c");
     build.file("src/helpers.c");
     build.compile("wasmtime-helpers");
-
-    // Check to see if we are on Unix and the `memory-init-cow` feature is
-    // active. If so, enable the `memory_init_cow` rustc cfg so
-    // `#[cfg(memory_init_cow)]` will work.
-    let family = env::var("CARGO_CFG_TARGET_FAMILY").unwrap();
-    let memory_init_cow = env::var("CARGO_FEATURE_MEMORY_INIT_COW").is_ok();
-    if &family == "unix" && memory_init_cow {
-        println!("cargo:rustc-cfg=memory_init_cow");
-    }
 }

View File

@@ -1,11 +1,12 @@
//! Copy-on-write initialization support: creation of backing images for //! Copy-on-write initialization support: creation of backing images for
//! modules, and logic to support mapping these backing images into memory. //! modules, and logic to support mapping these backing images into memory.
#![cfg_attr(not(unix), allow(unused_imports, unused_variables))]
use crate::InstantiationError; use crate::InstantiationError;
use crate::MmapVec; use crate::MmapVec;
use anyhow::Result; use anyhow::Result;
use libc::c_void; use libc::c_void;
use rustix::fd::AsRawFd;
use std::fs::File; use std::fs::File;
use std::sync::Arc; use std::sync::Arc;
use std::{convert::TryFrom, ops::Range}; use std::{convert::TryFrom, ops::Range};
@@ -60,24 +61,34 @@ pub struct MemoryImage {

 #[derive(Debug)]
 enum FdSource {
+    #[cfg(unix)]
     Mmap(Arc<File>),

     #[cfg(target_os = "linux")]
     Memfd(memfd::Memfd),
 }

 impl FdSource {
+    #[cfg(unix)]
     fn as_file(&self) -> &File {
         match self {
-            FdSource::Mmap(file) => file,
+            FdSource::Mmap(ref file) => file,
             #[cfg(target_os = "linux")]
-            FdSource::Memfd(memfd) => memfd.as_file(),
+            FdSource::Memfd(ref memfd) => memfd.as_file(),
         }
     }
 }

 impl PartialEq for FdSource {
     fn eq(&self, other: &FdSource) -> bool {
-        self.as_file().as_raw_fd() == other.as_file().as_raw_fd()
+        cfg_if::cfg_if! {
+            if #[cfg(unix)] {
+                use rustix::fd::AsRawFd;
+                self.as_file().as_raw_fd() == other.as_file().as_raw_fd()
+            } else {
+                drop(other);
+                match *self {}
+            }
+        }
     }
 }
@@ -111,6 +122,7 @@ impl MemoryImage {
         // files, but for now this is still a Linux-specific region of Wasmtime.
         // Some work will be needed to get this file compiling for macOS and
         // Windows.
+        #[cfg(not(windows))]
         if let Some(mmap) = mmap {
             let start = mmap.as_ptr() as usize;
             let end = start + mmap.len();
@@ -185,6 +197,42 @@ impl MemoryImage {
             }
         }
     }
+
+    unsafe fn map_at(&self, base: usize) -> Result<()> {
+        cfg_if::cfg_if! {
+            if #[cfg(unix)] {
+                let ptr = rustix::mm::mmap(
+                    (base + self.linear_memory_offset) as *mut c_void,
+                    self.len,
+                    rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
+                    rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
+                    self.fd.as_file(),
+                    self.fd_offset,
+                )?;
+                assert_eq!(ptr as usize, base + self.linear_memory_offset);
+                Ok(())
+            } else {
+                match self.fd {}
+            }
+        }
+    }
+
+    unsafe fn remap_as_zeros_at(&self, base: usize) -> Result<()> {
+        cfg_if::cfg_if! {
+            if #[cfg(unix)] {
+                let ptr = rustix::mm::mmap_anonymous(
+                    (base + self.linear_memory_offset) as *mut c_void,
+                    self.len,
+                    rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
+                    rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
+                )?;
+                assert_eq!(ptr as usize, base + self.linear_memory_offset);
+                Ok(())
+            } else {
+                match self.fd {}
+            }
+        }
+    }
 }

 #[cfg(target_os = "linux")]
@@ -374,6 +422,17 @@ impl MemoryImageSlot {
         }
     }

+    pub(crate) fn dummy() -> MemoryImageSlot {
+        MemoryImageSlot {
+            base: 0,
+            static_size: 0,
+            image: None,
+            accessible: 0,
+            dirty: false,
+            clear_on_drop: false,
+        }
+    }
+
     /// Inform the MemoryImageSlot that it should *not* clear the underlying
     /// address space when dropped. This should be used only when the
     /// caller will clear or reuse the address space in some other
@@ -396,10 +455,7 @@ impl MemoryImageSlot {
         }

         // Otherwise use `mprotect` to make the new pages read/write.
-        self.set_protection(
-            self.accessible..size_bytes,
-            rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE,
-        )?;
+        self.set_protection(self.accessible..size_bytes, true)?;
         self.accessible = size_bytes;

         Ok(())
@@ -444,14 +500,9 @@ impl MemoryImageSlot {
         if self.image.as_ref() != maybe_image {
             if let Some(image) = &self.image {
                 unsafe {
-                    let ptr = rustix::mm::mmap_anonymous(
-                        (self.base + image.linear_memory_offset) as *mut c_void,
-                        image.len,
-                        rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
-                        rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
-                    )
-                    .map_err(|e| InstantiationError::Resource(e.into()))?;
-                    assert_eq!(ptr as usize, self.base + image.linear_memory_offset);
+                    image
+                        .remap_as_zeros_at(self.base)
+                        .map_err(|e| InstantiationError::Resource(e.into()))?;
                 }
                 self.image = None;
             }
@@ -461,10 +512,7 @@ impl MemoryImageSlot {
         // appropriate. First up is to grow the read/write portion of memory if
         // it's not large enough to accommodate `initial_size_bytes`.
         if self.accessible < initial_size_bytes {
-            self.set_protection(
-                self.accessible..initial_size_bytes,
-                rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE,
-            )
+            self.set_protection(self.accessible..initial_size_bytes, true)
                 .map_err(|e| InstantiationError::Resource(e.into()))?;
             self.accessible = initial_size_bytes;
         }
@@ -480,10 +528,7 @@ impl MemoryImageSlot {
         if initial_size_bytes < self.accessible {
             match style {
                 MemoryStyle::Static { .. } => {
-                    self.set_protection(
-                        initial_size_bytes..self.accessible,
-                        rustix::mm::MprotectFlags::empty(),
-                    )
+                    self.set_protection(initial_size_bytes..self.accessible, false)
                         .map_err(|e| InstantiationError::Resource(e.into()))?;
                     self.accessible = initial_size_bytes;
                 }
@@ -503,16 +548,9 @@ impl MemoryImageSlot {
             );
             if image.len > 0 {
                 unsafe {
-                    let ptr = rustix::mm::mmap(
-                        (self.base + image.linear_memory_offset) as *mut c_void,
-                        image.len,
-                        rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
-                        rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
-                        image.fd.as_file(),
-                        image.fd_offset,
-                    )
-                    .map_err(|e| InstantiationError::Resource(e.into()))?;
-                    assert_eq!(ptr as usize, self.base + image.linear_memory_offset);
+                    image
+                        .map_at(self.base)
+                        .map_err(|e| InstantiationError::Resource(e.into()))?;
                 }
             }
         }
@@ -675,13 +713,35 @@ impl MemoryImageSlot {
} }
} }
fn set_protection(&self, range: Range<usize>, flags: rustix::mm::MprotectFlags) -> Result<()> { fn set_protection(&self, range: Range<usize>, readwrite: bool) -> Result<()> {
assert!(range.start <= range.end); assert!(range.start <= range.end);
assert!(range.end <= self.static_size); assert!(range.end <= self.static_size);
let mprotect_start = self.base.checked_add(range.start).unwrap(); let start = self.base.checked_add(range.start).unwrap();
if range.len() > 0 { if range.len() == 0 {
return Ok(());
}
unsafe { unsafe {
rustix::mm::mprotect(mprotect_start as *mut _, range.len(), flags)?; cfg_if::cfg_if! {
if #[cfg(unix)] {
let flags = if readwrite {
rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE
} else {
rustix::mm::MprotectFlags::empty()
};
rustix::mm::mprotect(start as *mut _, range.len(), flags)?;
} else {
use windows_sys::Win32::System::Memory::*;
let failure = if readwrite {
VirtualAlloc(start as _, range.len(), MEM_COMMIT, PAGE_READWRITE).is_null()
} else {
VirtualFree(start as _, range.len(), MEM_DECOMMIT) == 0
};
if failure {
return Err(std::io::Error::last_os_error().into());
}
}
} }
} }
@@ -701,6 +761,8 @@ impl MemoryImageSlot {
     /// inaccessible. Used both during instantiate and during drop.
     fn reset_with_anon_memory(&mut self) -> Result<()> {
         unsafe {
+            cfg_if::cfg_if! {
+                if #[cfg(unix)] {
                     let ptr = rustix::mm::mmap_anonymous(
                         self.base as *mut c_void,
                         self.static_size,
@@ -708,6 +770,13 @@ impl MemoryImageSlot {
                         rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
                     )?;
                     assert_eq!(ptr as usize, self.base);
+                } else {
+                    use windows_sys::Win32::System::Memory::*;
+                    if VirtualFree(self.base as _, self.static_size, MEM_DECOMMIT) == 0 {
+                        return Err(std::io::Error::last_os_error().into());
+                    }
+                }
+            }
         }

         self.image = None;

View File

@@ -1,76 +0,0 @@
//! Shims for MemoryImageSlot when the copy-on-write memory initialization is
//! not included. Enables unconditional use of the type and its methods
//! throughout higher-level code.
use crate::{InstantiationError, MmapVec};
use anyhow::Result;
use std::sync::Arc;
use wasmtime_environ::{DefinedMemoryIndex, MemoryStyle, Module};
/// A shim for the memory image container when support is not included.
pub enum ModuleMemoryImages {}
/// A shim for an individual memory image.
#[allow(dead_code)]
pub enum MemoryImage {}
impl ModuleMemoryImages {
/// Construct a new set of memory images. This variant is used
/// when cow support is not included; it always returns no
/// images.
pub fn new(_: &Module, _: &[u8], _: Option<&MmapVec>) -> Result<Option<ModuleMemoryImages>> {
Ok(None)
}
/// Get the memory image for a particular memory.
pub fn get_memory_image(&self, _: DefinedMemoryIndex) -> Option<&Arc<MemoryImage>> {
match *self {}
}
}
/// A placeholder for MemoryImageSlot when we have not included the pooling
/// allocator.
///
/// To allow MemoryImageSlot to be unconditionally passed around in various
/// places (e.g. a `Memory`), we define a zero-sized type when memory is
/// not included in the build.
#[derive(Debug)]
pub struct MemoryImageSlot {
_priv: (),
}
#[allow(dead_code)]
impl MemoryImageSlot {
pub(crate) fn create(_: *mut libc::c_void, _: usize, _: usize) -> Self {
panic!("create() on invalid MemoryImageSlot");
}
pub(crate) fn instantiate(
&mut self,
_: usize,
_: Option<&Arc<MemoryImage>>,
_: &MemoryStyle,
) -> Result<Self, InstantiationError> {
unreachable!();
}
pub(crate) fn no_clear_on_drop(&mut self) {
unreachable!();
}
pub(crate) fn clear_and_remain_ready(&mut self, _keep_resident: usize) -> Result<()> {
unreachable!();
}
pub(crate) fn has_image(&self) -> bool {
unreachable!();
}
pub(crate) fn is_dirty(&self) -> bool {
unreachable!();
}
pub(crate) fn set_heap_limit(&mut self, _: usize) -> Result<()> {
unreachable!();
}
}

View File

@@ -36,7 +36,7 @@ cfg_if::cfg_if! {
     }
 }

-use imp::{commit_memory_pages, commit_table_pages, decommit_memory_pages, decommit_table_pages};
+use imp::{commit_table_pages, decommit_table_pages};

 #[cfg(all(feature = "async", unix))]
 use imp::{commit_stack_pages, reset_stack_pages_to_zero};
@@ -104,11 +104,7 @@ pub enum PoolingAllocationStrategy {

 impl Default for PoolingAllocationStrategy {
     fn default() -> Self {
-        if cfg!(memory_init_cow) {
-            Self::ReuseAffinity
-        } else {
-            Self::NextAvailable
-        }
+        Self::ReuseAffinity
     }
 }
@@ -309,6 +305,18 @@ impl InstancePool {
                 .defined_memory_index(memory_index)
                 .expect("should be a defined memory since we skipped imported ones");

+            // Double-check that the runtime requirements of the memory are
+            // satisfied by the configuration of this pooling allocator. This
+            // should be returned as an error through `validate_memory_plans`
+            // but double-check here to be sure.
+            match plan.style {
+                MemoryStyle::Static { bound } => {
+                    let bound = bound * u64::from(WASM_PAGE_SIZE);
+                    assert!(bound <= (self.memories.memory_size as u64));
+                }
+                MemoryStyle::Dynamic { .. } => {}
+            }
+
             let memory = unsafe {
                 std::slice::from_raw_parts_mut(
                     self.memories.get_base(instance_index, defined_index),
@@ -316,13 +324,12 @@ impl InstancePool {
                 )
             };

-            if let Some(image) = runtime_info
-                .memory_image(defined_index)
-                .map_err(|err| InstantiationError::Resource(err.into()))?
-            {
-                let mut slot = self
-                    .memories
-                    .take_memory_image_slot(instance_index, defined_index);
+            let mut slot = self
+                .memories
+                .take_memory_image_slot(instance_index, defined_index);
+            let image = runtime_info
+                .memory_image(defined_index)
+                .map_err(|err| InstantiationError::Resource(err.into()))?;
             let initial_size = plan.memory.minimum * WASM_PAGE_SIZE as u64;

             // If instantiation fails, we can propagate the error
@@ -338,23 +345,13 @@ impl InstancePool {
             // the process to continue, because we never perform a
             // mmap that would leave an open space for someone
             // else to come in and map something.
-                slot.instantiate(initial_size as usize, Some(image), &plan.style)
-                    .map_err(|e| InstantiationError::Resource(e.into()))?;
+            slot.instantiate(initial_size as usize, image, &plan.style)
+                .map_err(|e| InstantiationError::Resource(e.into()))?;

-                memories.push(
-                    Memory::new_static(plan, memory, None, Some(slot), unsafe {
-                        &mut *store.unwrap()
-                    })
-                    .map_err(InstantiationError::Resource)?,
-                );
-            } else {
-                memories.push(
-                    Memory::new_static(plan, memory, Some(commit_memory_pages), None, unsafe {
-                        &mut *store.unwrap()
-                    })
-                    .map_err(InstantiationError::Resource)?,
-                );
-            }
+            memories.push(
+                Memory::new_static(plan, memory, slot, unsafe { &mut *store.unwrap() })
+                    .map_err(InstantiationError::Resource)?,
+            );
         }

         Ok(())
@@ -367,12 +364,8 @@
     ) {
         // Decommit any linear memories that were used.
        let memories = mem::take(memories);
-        for ((def_mem_idx, mut memory), base) in
-            memories.into_iter().zip(self.memories.get(instance_index))
-        {
-            assert!(memory.is_static());
-            let size = memory.byte_size();
-            if let Some(mut image) = memory.unwrap_static_image() {
+        for (def_mem_idx, memory) in memories {
+            let mut image = memory.unwrap_static_image();
             // Reset the image slot. If there is any error clearing the
             // image, just drop it here, and let the drop handler for the
             // slot unmap in a way that retains the address space
@@ -384,10 +377,6 @@
                 self.memories
                     .return_memory_image_slot(instance_index, def_mem_idx, image);
             }
-            } else {
-                // Otherwise, decommit the memory pages.
-                decommit_memory_pages(base, size).expect("failed to decommit linear memory pages");
-            }
         }
     }
@@ -496,11 +485,9 @@
             .iter()
             .skip(module.num_imported_memories)
         {
-            match &plan.style {
+            match plan.style {
                 MemoryStyle::Static { bound } => {
-                    let memory_size_pages =
-                        (self.memories.memory_size as u64) / u64::from(WASM_PAGE_SIZE);
-                    if memory_size_pages < *bound {
+                    if (self.memories.memory_size as u64) < bound {
                         bail!(
                             "memory size allocated per-memory is too small to \
                              satisfy static bound of {bound:#x} pages"
@@ -695,11 +682,7 @@
         let mapping = Mmap::accessible_reserved(0, allocation_size)
             .context("failed to create memory pool mapping")?;

-        let num_image_slots = if cfg!(memory_init_cow) {
-            max_instances * max_memories
-        } else {
-            0
-        };
+        let num_image_slots = max_instances * max_memories;
         let image_slots: Vec<_> = std::iter::repeat_with(|| Mutex::new(None))
             .take(num_image_slots)
             .collect();
@@ -727,6 +710,7 @@
         unsafe { self.mapping.as_mut_ptr().offset(offset as isize) }
     }

+    #[cfg(test)]
     fn get<'a>(&'a self, instance_index: usize) -> impl Iterator<Item = *mut u8> + 'a {
         (0..self.max_memories)
             .map(move |i| self.get_base(instance_index, DefinedMemoryIndex::from_u32(i as u32)))

View File

@@ -1,7 +1,6 @@
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use rustix::mm::{mprotect, MprotectFlags};
fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> { fn decommit(addr: *mut u8, len: usize) -> Result<()> {
if len == 0 { if len == 0 {
return Ok(()); return Ok(());
} }
@@ -11,11 +10,6 @@ fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
         if #[cfg(target_os = "linux")] {
             use rustix::mm::{madvise, Advice};

-            if protect {
-                mprotect(addr.cast(), len, MprotectFlags::empty())
-                    .context("failed to protect memory pages")?;
-            }
-
             // On Linux, this is enough to cause the kernel to initialize
             // the pages to 0 on next access
             madvise(addr as _, len, Advice::LinuxDontNeed)
@@ -30,11 +24,7 @@ fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
             mmap_anonymous(
                 addr as _,
                 len,
-                if protect {
-                    ProtFlags::empty()
-                } else {
-                    ProtFlags::READ | ProtFlags::WRITE
-                },
+                ProtFlags::READ | ProtFlags::WRITE,
                 MapFlags::PRIVATE | MapFlags::FIXED,
             )
             .context("mmap failed to remap pages: {}")?;
@@ -45,29 +35,13 @@ fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
     Ok(())
 }

-pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
-    if len == 0 {
-        return Ok(());
-    }
-
-    // Just change the protection level to READ|WRITE
-    unsafe {
-        mprotect(addr.cast(), len, MprotectFlags::READ | MprotectFlags::WRITE)
-            .context("failed to make linear memory pages read/write")
-    }
-}
-
-pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
-    decommit(addr, len, true)
-}
-
 pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
     // A no-op as table pages remain READ|WRITE
     Ok(())
 }

 pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
-    decommit(addr, len, false)
+    decommit(addr, len)
 }

 #[cfg(feature = "async")]
@@ -78,5 +52,5 @@ pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {

 #[cfg(feature = "async")]
 pub fn reset_stack_pages_to_zero(addr: *mut u8, len: usize) -> Result<()> {
-    decommit(addr, len, false)
+    decommit(addr, len)
 }

View File

@@ -29,14 +29,6 @@ pub fn decommit(addr: *mut u8, len: usize) -> Result<()> {
     Ok(())
 }

-pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
-    commit(addr, len)
-}
-
-pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
-    decommit(addr, len)
-}
-
 pub fn commit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
     commit(addr, len)
 }

View File

@@ -82,16 +82,9 @@ pub use crate::vmcontext::{
 mod module_id;
 pub use module_id::{CompiledModuleId, CompiledModuleIdAllocator};

-#[cfg(memory_init_cow)]
 mod cow;
-#[cfg(memory_init_cow)]
 pub use crate::cow::{MemoryImage, MemoryImageSlot, ModuleMemoryImages};
-
-#[cfg(not(memory_init_cow))]
-mod cow_disabled;
-#[cfg(not(memory_init_cow))]
-pub use crate::cow_disabled::{MemoryImage, MemoryImageSlot, ModuleMemoryImages};

 /// Version number of this crate.
 pub const VERSION: &str = env!("CARGO_PKG_VERSION");

View File

@@ -10,6 +10,7 @@ use crate::Store;
 use anyhow::Error;
 use anyhow::{bail, format_err, Result};
 use std::convert::TryFrom;
+use std::mem;
 use std::sync::atomic::Ordering;
 use std::sync::{Arc, RwLock};
 use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM32_MAX_PAGES, WASM64_MAX_PAGES};
@@ -351,14 +352,9 @@ struct StaticMemory {
     /// The current size, in bytes, of this memory.
     size: usize,

-    /// A callback which makes portions of `base` accessible for when memory
-    /// is grown. Otherwise it's expected that accesses to `base` will
-    /// fault.
-    make_accessible: Option<fn(*mut u8, usize) -> Result<()>>,
-
     /// The image management, if any, for this memory. Owned here and
     /// returned to the pooling allocator when termination occurs.
-    memory_image: Option<MemoryImageSlot>,
+    memory_image: MemoryImageSlot,
 }
impl StaticMemory { impl StaticMemory {
@@ -366,8 +362,7 @@ impl StaticMemory {
base: &'static mut [u8], base: &'static mut [u8],
initial_size: usize, initial_size: usize,
maximum_size: Option<usize>, maximum_size: Option<usize>,
make_accessible: Option<fn(*mut u8, usize) -> Result<()>>, memory_image: MemoryImageSlot,
memory_image: Option<MemoryImageSlot>,
) -> Result<Self> { ) -> Result<Self> {
if base.len() < initial_size { if base.len() < initial_size {
bail!( bail!(
@@ -384,16 +379,9 @@ impl StaticMemory {
             _ => base,
         };

-        if let Some(make_accessible) = make_accessible {
-            if initial_size > 0 {
-                make_accessible(base.as_mut_ptr(), initial_size)?;
-            }
-        }
-
         Ok(Self {
             base,
             size: initial_size,
-            make_accessible,
             memory_image,
         })
     }
@@ -413,21 +401,7 @@ impl RuntimeLinearMemory for StaticMemory {
         // prior to arriving here.
         assert!(new_byte_size <= self.base.len());

-        // Actually grow the memory.
-        if let Some(image) = &mut self.memory_image {
-            image.set_heap_limit(new_byte_size)?;
-        } else {
-            let make_accessible = self
-                .make_accessible
-                .expect("make_accessible must be Some if this is not a CoW memory");
-
-            // Operating system can fail to make memory accessible.
-            let old_byte_size = self.byte_size();
-            make_accessible(
-                unsafe { self.base.as_mut_ptr().add(old_byte_size) },
-                new_byte_size - old_byte_size,
-            )?;
-        }
+        self.memory_image.set_heap_limit(new_byte_size)?;

         // Update our accounting of the available size.
         self.size = new_byte_size;
@@ -442,11 +416,7 @@ impl RuntimeLinearMemory for StaticMemory {
     }

     fn needs_init(&self) -> bool {
-        if let Some(slot) = &self.memory_image {
-            !slot.has_image()
-        } else {
-            true
-        }
+        !self.memory_image.has_image()
     }

     fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
@@ -627,13 +597,11 @@ impl Memory {
     pub fn new_static(
         plan: &MemoryPlan,
         base: &'static mut [u8],
-        make_accessible: Option<fn(*mut u8, usize) -> Result<()>>,
-        memory_image: Option<MemoryImageSlot>,
+        memory_image: MemoryImageSlot,
         store: &mut dyn Store,
     ) -> Result<Self> {
         let (minimum, maximum) = Self::limit_new(plan, Some(store))?;
-        let pooled_memory =
-            StaticMemory::new(base, minimum, maximum, make_accessible, memory_image)?;
+        let pooled_memory = StaticMemory::new(base, minimum, maximum, memory_image)?;
         let allocation = Box::new(pooled_memory);

         let allocation: Box<dyn RuntimeLinearMemory> = if plan.memory.shared {
             // FIXME: since the pooling allocator owns the memory allocation
@@ -794,25 +762,13 @@ impl Memory {
         self.0.vmmemory()
     }

-    /// Check if the inner implementation of [`Memory`] is a memory created with
-    /// [`Memory::new_static()`].
-    #[cfg(feature = "pooling-allocator")]
-    pub fn is_static(&mut self) -> bool {
-        let as_any = self.0.as_any_mut();
-        as_any.downcast_ref::<StaticMemory>().is_some()
-    }
-
     /// Consume the memory, returning its [`MemoryImageSlot`] if any is present.
     /// The image should only be present for a subset of memories created with
     /// [`Memory::new_static()`].
     #[cfg(feature = "pooling-allocator")]
-    pub fn unwrap_static_image(mut self) -> Option<MemoryImageSlot> {
-        let as_any = self.0.as_any_mut();
-        if let Some(m) = as_any.downcast_mut::<StaticMemory>() {
-            std::mem::take(&mut m.memory_image)
-        } else {
-            None
-        }
+    pub fn unwrap_static_image(mut self) -> MemoryImageSlot {
+        let mem = self.0.as_any_mut().downcast_mut::<StaticMemory>().unwrap();
+        mem::replace(&mut mem.memory_image, MemoryImageSlot::dummy())
     }

     /// If the [Memory] is a [SharedMemory], unwrap it and return a clone to

View File

@@ -62,7 +62,6 @@ default = [
     'parallel-compilation',
     'cranelift',
     'pooling-allocator',
-    'memory-init-cow',
     'vtune',
 ]
@@ -104,13 +103,6 @@ all-arch = ["wasmtime-cranelift?/all-arch"]
 # need portable signal handling.
 posix-signals-on-macos = ["wasmtime-runtime/posix-signals-on-macos"]

-# Enables, on supported platforms, the usage of copy-on-write initialization of
-# compatible linear memories. For more information see the documentation of
-# `Config::memory_init_cow`.
-#
-# Enabling this feature has no effect on unsupported platforms.
-memory-init-cow = ["wasmtime-runtime/memory-init-cow"]
-
 # Enables in-progress support for the component model. Note that this feature is
 # in-progress, buggy, and incomplete. This is primarily here for internal
 # testing purposes.

View File

@@ -1340,8 +1340,6 @@ impl Config {
     ///
     /// [`Module::deserialize_file`]: crate::Module::deserialize_file
     /// [`Module`]: crate::Module
-    #[cfg(feature = "memory-init-cow")]
-    #[cfg_attr(nightlydoc, doc(cfg(feature = "memory-init-cow")))]
     pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
         self.memory_init_cow = enable;
         self
@@ -1367,8 +1365,6 @@ impl Config {
     /// on.
     ///
     /// This option is disabled by default.
-    #[cfg(feature = "memory-init-cow")]
-    #[cfg_attr(nightlydoc, doc(cfg(feature = "memory-init-cow")))]
     pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
         self.force_memory_init_memfd = enable;
         self
@@ -1409,8 +1405,6 @@ impl Config {
     /// as the maximum module initial memory content size.
     ///
     /// By default this value is 16 MiB.
-    #[cfg(feature = "memory-init-cow")]
-    #[cfg_attr(nightlydoc, doc(cfg(feature = "memory-init-cow")))]
     pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
         self.memory_guaranteed_dense_image_size = size_in_bytes;
         self
@@ -1683,11 +1677,7 @@ impl PoolingAllocationConfig {
     /// Configures the method by which slots in the pooling allocator are
     /// allocated to instances
     ///
-    /// This defaults to [`PoolingAllocationStrategy::ReuseAffinity`] when the
-    /// `memory-init-cow` feature of Wasmtime is enabled, which is enabled by
-    /// default. Otherwise it defaults to
-    /// [`PoolingAllocationStrategy::NextAvailable`] Otherwise it defaults to
-    /// [`PoolingAllocationStrategy::NextAvailable`].
+    /// This defaults to [`PoolingAllocationStrategy::ReuseAffinity`].
     pub fn strategy(&mut self, strategy: PoolingAllocationStrategy) -> &mut Self {
         self.config.strategy = strategy;
         self

View File

@@ -579,6 +579,63 @@ fn drop_externref_global_during_module_init() -> Result<()> {
     Ok(())
 }

+#[test]
+fn switch_image_and_non_image() -> Result<()> {
+    let mut pool = PoolingAllocationConfig::default();
+    pool.instance_count(1);
+    let mut c = Config::new();
+    c.allocation_strategy(InstanceAllocationStrategy::Pooling(pool));
+    let engine = Engine::new(&c)?;
+    let module1 = Module::new(
+        &engine,
+        r#"
+            (module
+                (memory 1)
+                (func (export "load") (param i32) (result i32)
+                    local.get 0
+                    i32.load
+                )
+            )
+        "#,
+    )?;
+    let module2 = Module::new(
+        &engine,
+        r#"
+            (module
+                (memory (export "memory") 1)
+                (data (i32.const 0) "1234")
+            )
+        "#,
+    )?;
+
+    let assert_zero = || -> Result<()> {
+        let mut store = Store::new(&engine, ());
+        let instance = Instance::new(&mut store, &module1, &[])?;
+        let func = instance.get_typed_func::<i32, i32, _>(&mut store, "load")?;
+        assert_eq!(func.call(&mut store, 0)?, 0);
+        Ok(())
+    };
+
+    // Initialize with a heap image and make sure the next instance, without an
+    // image, is zeroed
+    Instance::new(&mut Store::new(&engine, ()), &module2, &[])?;
+    assert_zero()?;
+
+    // ... transition back to heap image and do this again
+    Instance::new(&mut Store::new(&engine, ()), &module2, &[])?;
+    assert_zero()?;
+
+    // And go back to an image and make sure it's read/write on the host.
+    let mut store = Store::new(&engine, ());
+    let instance = Instance::new(&mut store, &module2, &[])?;
+    let memory = instance.get_memory(&mut store, "memory").unwrap();
+    let mem = memory.data_mut(&mut store);
+    assert!(mem.starts_with(b"1234"));
+    mem[..6].copy_from_slice(b"567890");
+
+    Ok(())
+}
+
 #[test]
 #[cfg(target_pointer_width = "64")]
 fn instance_too_large() -> Result<()> {