Merge pull request from GHSA-wh6w-3828-g9qf

* Unconditionally use `MemoryImageSlot`

This commit removes the internal branching within the pooling instance
allocator that would sometimes use a `MemoryImageSlot` and sometimes not.
Instead, a `MemoryImageSlot` is now used unconditionally in all situations
on all platforms. This fixes an issue where the state of a slot could get
corrupted if modules being instantiated switched from having images to
not having an image, or vice versa.
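As a rough sketch of the hazard (a hypothetical, much-simplified model — not Wasmtime's actual types): if the reset path runs only for image-backed slots, a slot that previously held an image can be handed to an image-less module with the old mapping still in place. Using the slot type unconditionally makes the teardown uniform:

// Hypothetical toy model of the bug class; not Wasmtime's real API.
#[derive(Default)]
struct Slot {
    image: Option<&'static str>, // name of the mapped image, if any
}

impl Slot {
    // Unconditional path: always reconcile the old image state before
    // installing the new one, whether or not an image is present.
    fn instantiate(&mut self, image: Option<&'static str>) {
        if self.image != image {
            // In the real allocator this remaps the range as zeros.
            self.image = None;
        }
        self.image = image;
    }
}

fn main() {
    let mut slot = Slot::default();
    slot.instantiate(Some("module-a")); // image-backed module
    slot.instantiate(None);             // module without an image
    assert!(slot.image.is_none());      // no stale image survives
}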

The bulk of this commit is the removal of the `memory-init-cow`
compile-time feature and the addition of Windows support to the
`cow.rs` file.
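The Windows support leans on the `cfg_if` crate to branch at compile time, in the same shape the diff below uses. A minimal sketch of that dispatch pattern (assumes the `cfg-if` crate as a dependency; `platform_name` is an illustrative function, not from this commit):

// Minimal sketch of the compile-time dispatch pattern used in `cow.rs`.
fn platform_name() -> &'static str {
    cfg_if::cfg_if! {
        if #[cfg(unix)] {
            "unix"
        } else {
            "windows (or other non-unix)"
        }
    }
}

fn main() {
    println!("compiled for: {}", platform_name());
}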

* Fix compile on Unix

* Add a stricter assertion for static memory bounds

Double-check that, when a memory is allocated, the configuration it
requires is satisfied by the pooling allocator.
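A minimal sketch of the arithmetic behind that assertion (constants and names are illustrative; the real check lives in the pooling allocator's allocation path):

// Illustrative only: mirrors the added assertion that a static memory's
// bound, converted from wasm pages to bytes, fits in a pool slot.
const WASM_PAGE_SIZE: u64 = 65536;

fn assert_static_bound_fits(bound_pages: u64, pool_slot_bytes: u64) {
    let bound_bytes = bound_pages * WASM_PAGE_SIZE;
    assert!(
        bound_bytes <= pool_slot_bytes,
        "static bound of {bound_pages:#x} pages exceeds pool slot size"
    );
}

fn main() {
    // 10 pages fit exactly in a 10-page slot.
    assert_static_bound_fits(10, 10 * WASM_PAGE_SIZE);
}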
Alex Crichton
2022-11-10 11:34:38 -06:00
committed by GitHub
parent 47fa1ad6a8
commit 3535acbf3b
16 changed files with 245 additions and 331 deletions


@@ -23,7 +23,7 @@ thiserror = "1.0.4"
cfg-if = "1.0"
rand = "0.8.3"
anyhow = { workspace = true }
memfd = { version = "0.6.1", optional = true }
memfd = "0.6.1"
paste = "1.0.3"
encoding_rs = { version = "0.8.31", optional = true }
@@ -52,8 +52,6 @@ cc = "1.0"
maintenance = { status = "actively-developed" }
[features]
memory-init-cow = ['memfd']
async = ["wasmtime-fiber"]
# Enables support for the pooling instance allocator


@@ -14,13 +14,4 @@ fn main() {
println!("cargo:rerun-if-changed=src/helpers.c");
build.file("src/helpers.c");
build.compile("wasmtime-helpers");
// Check to see if we are on Unix and the `memory-init-cow` feature is
// active. If so, enable the `memory_init_cow` rustc cfg so
// `#[cfg(memory_init_cow)]` will work.
let family = env::var("CARGO_CFG_TARGET_FAMILY").unwrap();
let memory_init_cow = env::var("CARGO_FEATURE_MEMORY_INIT_COW").is_ok();
if &family == "unix" && memory_init_cow {
println!("cargo:rustc-cfg=memory_init_cow");
}
}


@@ -1,11 +1,12 @@
//! Copy-on-write initialization support: creation of backing images for
//! modules, and logic to support mapping these backing images into memory.
#![cfg_attr(not(unix), allow(unused_imports, unused_variables))]
use crate::InstantiationError;
use crate::MmapVec;
use anyhow::Result;
use libc::c_void;
use rustix::fd::AsRawFd;
use std::fs::File;
use std::sync::Arc;
use std::{convert::TryFrom, ops::Range};
@@ -60,24 +61,34 @@ pub struct MemoryImage {
#[derive(Debug)]
enum FdSource {
#[cfg(unix)]
Mmap(Arc<File>),
#[cfg(target_os = "linux")]
Memfd(memfd::Memfd),
}
impl FdSource {
#[cfg(unix)]
fn as_file(&self) -> &File {
match self {
FdSource::Mmap(file) => file,
FdSource::Mmap(ref file) => file,
#[cfg(target_os = "linux")]
FdSource::Memfd(memfd) => memfd.as_file(),
FdSource::Memfd(ref memfd) => memfd.as_file(),
}
}
}
impl PartialEq for FdSource {
fn eq(&self, other: &FdSource) -> bool {
self.as_file().as_raw_fd() == other.as_file().as_raw_fd()
cfg_if::cfg_if! {
if #[cfg(unix)] {
use rustix::fd::AsRawFd;
self.as_file().as_raw_fd() == other.as_file().as_raw_fd()
} else {
drop(other);
match *self {}
}
}
}
}
@@ -111,6 +122,7 @@ impl MemoryImage {
// files, but for now this is still a Linux-specific region of Wasmtime.
// Some work will be needed to get this file compiling for macOS and
// Windows.
#[cfg(not(windows))]
if let Some(mmap) = mmap {
let start = mmap.as_ptr() as usize;
let end = start + mmap.len();
@@ -185,6 +197,42 @@ impl MemoryImage {
}
}
}
unsafe fn map_at(&self, base: usize) -> Result<()> {
cfg_if::cfg_if! {
if #[cfg(unix)] {
let ptr = rustix::mm::mmap(
(base + self.linear_memory_offset) as *mut c_void,
self.len,
rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
self.fd.as_file(),
self.fd_offset,
)?;
assert_eq!(ptr as usize, base + self.linear_memory_offset);
Ok(())
} else {
match self.fd {}
}
}
}
unsafe fn remap_as_zeros_at(&self, base: usize) -> Result<()> {
cfg_if::cfg_if! {
if #[cfg(unix)] {
let ptr = rustix::mm::mmap_anonymous(
(base + self.linear_memory_offset) as *mut c_void,
self.len,
rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
)?;
assert_eq!(ptr as usize, base + self.linear_memory_offset);
Ok(())
} else {
match self.fd {}
}
}
}
}
#[cfg(target_os = "linux")]
@@ -374,6 +422,17 @@ impl MemoryImageSlot {
}
}
pub(crate) fn dummy() -> MemoryImageSlot {
MemoryImageSlot {
base: 0,
static_size: 0,
image: None,
accessible: 0,
dirty: false,
clear_on_drop: false,
}
}
/// Inform the MemoryImageSlot that it should *not* clear the underlying
/// address space when dropped. This should be used only when the
/// caller will clear or reuse the address space in some other
@@ -396,10 +455,7 @@ impl MemoryImageSlot {
}
// Otherwise use `mprotect` to make the new pages read/write.
self.set_protection(
self.accessible..size_bytes,
rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE,
)?;
self.set_protection(self.accessible..size_bytes, true)?;
self.accessible = size_bytes;
Ok(())
@@ -444,14 +500,9 @@ impl MemoryImageSlot {
if self.image.as_ref() != maybe_image {
if let Some(image) = &self.image {
unsafe {
let ptr = rustix::mm::mmap_anonymous(
(self.base + image.linear_memory_offset) as *mut c_void,
image.len,
rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
)
.map_err(|e| InstantiationError::Resource(e.into()))?;
assert_eq!(ptr as usize, self.base + image.linear_memory_offset);
image
.remap_as_zeros_at(self.base)
.map_err(|e| InstantiationError::Resource(e.into()))?;
}
self.image = None;
}
@@ -461,11 +512,8 @@ impl MemoryImageSlot {
// appropriate. First up is to grow the read/write portion of memory if
// it's not large enough to accommodate `initial_size_bytes`.
if self.accessible < initial_size_bytes {
self.set_protection(
self.accessible..initial_size_bytes,
rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE,
)
.map_err(|e| InstantiationError::Resource(e.into()))?;
self.set_protection(self.accessible..initial_size_bytes, true)
.map_err(|e| InstantiationError::Resource(e.into()))?;
self.accessible = initial_size_bytes;
}
@@ -480,11 +528,8 @@ impl MemoryImageSlot {
if initial_size_bytes < self.accessible {
match style {
MemoryStyle::Static { .. } => {
self.set_protection(
initial_size_bytes..self.accessible,
rustix::mm::MprotectFlags::empty(),
)
.map_err(|e| InstantiationError::Resource(e.into()))?;
self.set_protection(initial_size_bytes..self.accessible, false)
.map_err(|e| InstantiationError::Resource(e.into()))?;
self.accessible = initial_size_bytes;
}
MemoryStyle::Dynamic { .. } => {}
@@ -503,16 +548,9 @@ impl MemoryImageSlot {
);
if image.len > 0 {
unsafe {
let ptr = rustix::mm::mmap(
(self.base + image.linear_memory_offset) as *mut c_void,
image.len,
rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
image.fd.as_file(),
image.fd_offset,
)
.map_err(|e| InstantiationError::Resource(e.into()))?;
assert_eq!(ptr as usize, self.base + image.linear_memory_offset);
image
.map_at(self.base)
.map_err(|e| InstantiationError::Resource(e.into()))?;
}
}
}
@@ -675,13 +713,35 @@ impl MemoryImageSlot {
}
}
fn set_protection(&self, range: Range<usize>, flags: rustix::mm::MprotectFlags) -> Result<()> {
fn set_protection(&self, range: Range<usize>, readwrite: bool) -> Result<()> {
assert!(range.start <= range.end);
assert!(range.end <= self.static_size);
let mprotect_start = self.base.checked_add(range.start).unwrap();
if range.len() > 0 {
unsafe {
rustix::mm::mprotect(mprotect_start as *mut _, range.len(), flags)?;
let start = self.base.checked_add(range.start).unwrap();
if range.len() == 0 {
return Ok(());
}
unsafe {
cfg_if::cfg_if! {
if #[cfg(unix)] {
let flags = if readwrite {
rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE
} else {
rustix::mm::MprotectFlags::empty()
};
rustix::mm::mprotect(start as *mut _, range.len(), flags)?;
} else {
use windows_sys::Win32::System::Memory::*;
let failure = if readwrite {
VirtualAlloc(start as _, range.len(), MEM_COMMIT, PAGE_READWRITE).is_null()
} else {
VirtualFree(start as _, range.len(), MEM_DECOMMIT) == 0
};
if failure {
return Err(std::io::Error::last_os_error().into());
}
}
}
}
@@ -701,13 +761,22 @@ impl MemoryImageSlot {
/// inaccessible. Used both during instantiate and during drop.
fn reset_with_anon_memory(&mut self) -> Result<()> {
unsafe {
let ptr = rustix::mm::mmap_anonymous(
self.base as *mut c_void,
self.static_size,
rustix::mm::ProtFlags::empty(),
rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
)?;
assert_eq!(ptr as usize, self.base);
cfg_if::cfg_if! {
if #[cfg(unix)] {
let ptr = rustix::mm::mmap_anonymous(
self.base as *mut c_void,
self.static_size,
rustix::mm::ProtFlags::empty(),
rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
)?;
assert_eq!(ptr as usize, self.base);
} else {
use windows_sys::Win32::System::Memory::*;
if VirtualFree(self.base as _, self.static_size, MEM_DECOMMIT) == 0 {
return Err(std::io::Error::last_os_error().into());
}
}
}
}
self.image = None;

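The non-Unix arms above (`match self.fd {}` and the `PartialEq` body) lean on `FdSource` having no variants on those targets: a match over an uninhabited enum has zero arms yet is exhaustive, so the compiler accepts the branch and proves it unreachable. A standalone illustration of the pattern (`Never` and `absurd` are hypothetical names for this sketch):

// Uninhabited-enum pattern, as used by `FdSource` on non-Unix targets.
enum Never {} // no variants, so no value of this type can ever exist

fn absurd(n: Never) -> u32 {
    // Exhaustive match with zero arms: type-checks as returning anything.
    match n {}
}

fn main() {
    // `absurd` can never actually be called, since no `Never` can be
    // constructed; we can only name it.
    let _f: fn(Never) -> u32 = absurd;
}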

@@ -1,76 +0,0 @@
//! Shims for MemoryImageSlot when the copy-on-write memory initialization is
//! not included. Enables unconditional use of the type and its methods
//! throughout higher-level code.
use crate::{InstantiationError, MmapVec};
use anyhow::Result;
use std::sync::Arc;
use wasmtime_environ::{DefinedMemoryIndex, MemoryStyle, Module};
/// A shim for the memory image container when support is not included.
pub enum ModuleMemoryImages {}
/// A shim for an individual memory image.
#[allow(dead_code)]
pub enum MemoryImage {}
impl ModuleMemoryImages {
/// Construct a new set of memory images. This variant is used
/// when cow support is not included; it always returns no
/// images.
pub fn new(_: &Module, _: &[u8], _: Option<&MmapVec>) -> Result<Option<ModuleMemoryImages>> {
Ok(None)
}
/// Get the memory image for a particular memory.
pub fn get_memory_image(&self, _: DefinedMemoryIndex) -> Option<&Arc<MemoryImage>> {
match *self {}
}
}
/// A placeholder for MemoryImageSlot when we have not included the pooling
/// allocator.
///
/// To allow MemoryImageSlot to be unconditionally passed around in various
/// places (e.g. a `Memory`), we define a zero-sized type when memory is
/// not included in the build.
#[derive(Debug)]
pub struct MemoryImageSlot {
_priv: (),
}
#[allow(dead_code)]
impl MemoryImageSlot {
pub(crate) fn create(_: *mut libc::c_void, _: usize, _: usize) -> Self {
panic!("create() on invalid MemoryImageSlot");
}
pub(crate) fn instantiate(
&mut self,
_: usize,
_: Option<&Arc<MemoryImage>>,
_: &MemoryStyle,
) -> Result<Self, InstantiationError> {
unreachable!();
}
pub(crate) fn no_clear_on_drop(&mut self) {
unreachable!();
}
pub(crate) fn clear_and_remain_ready(&mut self, _keep_resident: usize) -> Result<()> {
unreachable!();
}
pub(crate) fn has_image(&self) -> bool {
unreachable!();
}
pub(crate) fn is_dirty(&self) -> bool {
unreachable!();
}
pub(crate) fn set_heap_limit(&mut self, _: usize) -> Result<()> {
unreachable!();
}
}


@@ -36,7 +36,7 @@ cfg_if::cfg_if! {
}
}
use imp::{commit_memory_pages, commit_table_pages, decommit_memory_pages, decommit_table_pages};
use imp::{commit_table_pages, decommit_table_pages};
#[cfg(all(feature = "async", unix))]
use imp::{commit_stack_pages, reset_stack_pages_to_zero};
@@ -104,11 +104,7 @@ pub enum PoolingAllocationStrategy {
impl Default for PoolingAllocationStrategy {
fn default() -> Self {
if cfg!(memory_init_cow) {
Self::ReuseAffinity
} else {
Self::NextAvailable
}
Self::ReuseAffinity
}
}
@@ -309,6 +305,18 @@ impl InstancePool {
.defined_memory_index(memory_index)
.expect("should be a defined memory since we skipped imported ones");
// Double-check that the runtime requirements of the memory are
// satisfied by the configuration of this pooling allocator. This
// should be returned as an error through `validate_memory_plans`
// but double-check here to be sure.
match plan.style {
MemoryStyle::Static { bound } => {
let bound = bound * u64::from(WASM_PAGE_SIZE);
assert!(bound <= (self.memories.memory_size as u64));
}
MemoryStyle::Dynamic { .. } => {}
}
let memory = unsafe {
std::slice::from_raw_parts_mut(
self.memories.get_base(instance_index, defined_index),
@@ -316,45 +324,34 @@ impl InstancePool {
)
};
if let Some(image) = runtime_info
let mut slot = self
.memories
.take_memory_image_slot(instance_index, defined_index);
let image = runtime_info
.memory_image(defined_index)
.map_err(|err| InstantiationError::Resource(err.into()))?
{
let mut slot = self
.memories
.take_memory_image_slot(instance_index, defined_index);
let initial_size = plan.memory.minimum * WASM_PAGE_SIZE as u64;
.map_err(|err| InstantiationError::Resource(err.into()))?;
let initial_size = plan.memory.minimum * WASM_PAGE_SIZE as u64;
// If instantiation fails, we can propagate the error
// upward and drop the slot. This will cause the Drop
// handler to attempt to map the range with PROT_NONE
// memory, to reserve the space while releasing any
// stale mappings. The next use of this slot will then
// create a new slot that will try to map over
// this, returning errors as well if the mapping
// errors persist. The unmap-on-drop is best effort;
// if it fails, then we can still soundly continue
// using the rest of the pool and allowing the rest of
// the process to continue, because we never perform a
// mmap that would leave an open space for someone
// else to come in and map something.
slot.instantiate(initial_size as usize, Some(image), &plan.style)
.map_err(|e| InstantiationError::Resource(e.into()))?;
// If instantiation fails, we can propagate the error
// upward and drop the slot. This will cause the Drop
// handler to attempt to map the range with PROT_NONE
// memory, to reserve the space while releasing any
// stale mappings. The next use of this slot will then
// create a new slot that will try to map over
// this, returning errors as well if the mapping
// errors persist. The unmap-on-drop is best effort;
// if it fails, then we can still soundly continue
// using the rest of the pool and allowing the rest of
// the process to continue, because we never perform a
// mmap that would leave an open space for someone
// else to come in and map something.
slot.instantiate(initial_size as usize, image, &plan.style)
.map_err(|e| InstantiationError::Resource(e.into()))?;
memories.push(
Memory::new_static(plan, memory, None, Some(slot), unsafe {
&mut *store.unwrap()
})
memories.push(
Memory::new_static(plan, memory, slot, unsafe { &mut *store.unwrap() })
.map_err(InstantiationError::Resource)?,
);
} else {
memories.push(
Memory::new_static(plan, memory, Some(commit_memory_pages), None, unsafe {
&mut *store.unwrap()
})
.map_err(InstantiationError::Resource)?,
);
}
);
}
Ok(())
@@ -367,26 +364,18 @@ impl InstancePool {
) {
// Decommit any linear memories that were used.
let memories = mem::take(memories);
for ((def_mem_idx, mut memory), base) in
memories.into_iter().zip(self.memories.get(instance_index))
{
assert!(memory.is_static());
let size = memory.byte_size();
if let Some(mut image) = memory.unwrap_static_image() {
// Reset the image slot. If there is any error clearing the
// image, just drop it here, and let the drop handler for the
// slot unmap in a way that retains the address space
// reservation.
if image
.clear_and_remain_ready(self.linear_memory_keep_resident)
.is_ok()
{
self.memories
.return_memory_image_slot(instance_index, def_mem_idx, image);
}
} else {
// Otherwise, decommit the memory pages.
decommit_memory_pages(base, size).expect("failed to decommit linear memory pages");
for (def_mem_idx, memory) in memories {
let mut image = memory.unwrap_static_image();
// Reset the image slot. If there is any error clearing the
// image, just drop it here, and let the drop handler for the
// slot unmap in a way that retains the address space
// reservation.
if image
.clear_and_remain_ready(self.linear_memory_keep_resident)
.is_ok()
{
self.memories
.return_memory_image_slot(instance_index, def_mem_idx, image);
}
}
}
@@ -496,11 +485,9 @@ impl InstancePool {
.iter()
.skip(module.num_imported_memories)
{
match &plan.style {
match plan.style {
MemoryStyle::Static { bound } => {
let memory_size_pages =
(self.memories.memory_size as u64) / u64::from(WASM_PAGE_SIZE);
if memory_size_pages < *bound {
if (self.memories.memory_size as u64) < bound {
bail!(
"memory size allocated per-memory is too small to \
satisfy static bound of {bound:#x} pages"
@@ -695,11 +682,7 @@ impl MemoryPool {
let mapping = Mmap::accessible_reserved(0, allocation_size)
.context("failed to create memory pool mapping")?;
let num_image_slots = if cfg!(memory_init_cow) {
max_instances * max_memories
} else {
0
};
let num_image_slots = max_instances * max_memories;
let image_slots: Vec<_> = std::iter::repeat_with(|| Mutex::new(None))
.take(num_image_slots)
.collect();
@@ -727,6 +710,7 @@ impl MemoryPool {
unsafe { self.mapping.as_mut_ptr().offset(offset as isize) }
}
#[cfg(test)]
fn get<'a>(&'a self, instance_index: usize) -> impl Iterator<Item = *mut u8> + 'a {
(0..self.max_memories)
.map(move |i| self.get_base(instance_index, DefinedMemoryIndex::from_u32(i as u32)))


@@ -1,7 +1,6 @@
use anyhow::{Context, Result};
use rustix::mm::{mprotect, MprotectFlags};
fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
fn decommit(addr: *mut u8, len: usize) -> Result<()> {
if len == 0 {
return Ok(());
}
@@ -11,11 +10,6 @@ fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
if #[cfg(target_os = "linux")] {
use rustix::mm::{madvise, Advice};
if protect {
mprotect(addr.cast(), len, MprotectFlags::empty())
.context("failed to protect memory pages")?;
}
// On Linux, this is enough to cause the kernel to initialize
// the pages to 0 on next access
madvise(addr as _, len, Advice::LinuxDontNeed)
@@ -30,11 +24,7 @@ fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
mmap_anonymous(
addr as _,
len,
if protect {
ProtFlags::empty()
} else {
ProtFlags::READ | ProtFlags::WRITE
},
ProtFlags::READ | ProtFlags::WRITE,
MapFlags::PRIVATE | MapFlags::FIXED,
)
.context("mmap failed to remap pages: {}")?;
@@ -45,29 +35,13 @@ fn decommit(addr: *mut u8, len: usize, protect: bool) -> Result<()> {
Ok(())
}
pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
if len == 0 {
return Ok(());
}
// Just change the protection level to READ|WRITE
unsafe {
mprotect(addr.cast(), len, MprotectFlags::READ | MprotectFlags::WRITE)
.context("failed to make linear memory pages read/write")
}
}
pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
decommit(addr, len, true)
}
pub fn commit_table_pages(_addr: *mut u8, _len: usize) -> Result<()> {
// A no-op as table pages remain READ|WRITE
Ok(())
}
pub fn decommit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
decommit(addr, len, false)
decommit(addr, len)
}
#[cfg(feature = "async")]
@@ -78,5 +52,5 @@ pub fn commit_stack_pages(_addr: *mut u8, _len: usize) -> Result<()> {
#[cfg(feature = "async")]
pub fn reset_stack_pages_to_zero(addr: *mut u8, len: usize) -> Result<()> {
decommit(addr, len, false)
decommit(addr, len)
}
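For reference, the simplified Unix `decommit` above relies on two facts: on Linux, `madvise(MADV_DONTNEED)` on a private anonymous mapping makes the kernel supply zero pages on the next access, and elsewhere an anonymous `MAP_FIXED` remap achieves the same effect. A Unix-only standalone sketch using the `libc` crate rather than `rustix` (assumed dependency; error handling abbreviated):

// Hedged sketch of the decommit strategy; assumes a page-aligned,
// private anonymous mapping.
fn decommit(addr: *mut u8, len: usize) -> std::io::Result<()> {
    if len == 0 {
        return Ok(());
    }
    unsafe {
        #[cfg(target_os = "linux")]
        {
            // Kernel will hand back zero pages lazily on the next touch.
            if libc::madvise(addr.cast(), len, libc::MADV_DONTNEED) != 0 {
                return Err(std::io::Error::last_os_error());
            }
        }
        #[cfg(all(unix, not(target_os = "linux")))]
        {
            // Map fresh anonymous zero pages over the range.
            let ret = libc::mmap(
                addr.cast(),
                len,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_FIXED,
                -1,
                0,
            );
            if ret == libc::MAP_FAILED {
                return Err(std::io::Error::last_os_error());
            }
        }
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    // Reserve one 64 KiB region, then decommit it.
    let len = 65536;
    let base = unsafe {
        libc::mmap(
            std::ptr::null_mut(),
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANON,
            -1,
            0,
        )
    };
    assert_ne!(base, libc::MAP_FAILED);
    decommit(base.cast(), len)
}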


@@ -29,14 +29,6 @@ pub fn decommit(addr: *mut u8, len: usize) -> Result<()> {
Ok(())
}
pub fn commit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
commit(addr, len)
}
pub fn decommit_memory_pages(addr: *mut u8, len: usize) -> Result<()> {
decommit(addr, len)
}
pub fn commit_table_pages(addr: *mut u8, len: usize) -> Result<()> {
commit(addr, len)
}


@@ -82,16 +82,9 @@ pub use crate::vmcontext::{
mod module_id;
pub use module_id::{CompiledModuleId, CompiledModuleIdAllocator};
#[cfg(memory_init_cow)]
mod cow;
#[cfg(memory_init_cow)]
pub use crate::cow::{MemoryImage, MemoryImageSlot, ModuleMemoryImages};
#[cfg(not(memory_init_cow))]
mod cow_disabled;
#[cfg(not(memory_init_cow))]
pub use crate::cow_disabled::{MemoryImage, MemoryImageSlot, ModuleMemoryImages};
/// Version number of this crate.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");


@@ -10,6 +10,7 @@ use crate::Store;
use anyhow::Error;
use anyhow::{bail, format_err, Result};
use std::convert::TryFrom;
use std::mem;
use std::sync::atomic::Ordering;
use std::sync::{Arc, RwLock};
use wasmtime_environ::{MemoryPlan, MemoryStyle, WASM32_MAX_PAGES, WASM64_MAX_PAGES};
@@ -351,14 +352,9 @@ struct StaticMemory {
/// The current size, in bytes, of this memory.
size: usize,
/// A callback which makes portions of `base` accessible for when memory
/// is grown. Otherwise it's expected that accesses to `base` will
/// fault.
make_accessible: Option<fn(*mut u8, usize) -> Result<()>>,
/// The image management, if any, for this memory. Owned here and
/// returned to the pooling allocator when termination occurs.
memory_image: Option<MemoryImageSlot>,
memory_image: MemoryImageSlot,
}
impl StaticMemory {
@@ -366,8 +362,7 @@ impl StaticMemory {
base: &'static mut [u8],
initial_size: usize,
maximum_size: Option<usize>,
make_accessible: Option<fn(*mut u8, usize) -> Result<()>>,
memory_image: Option<MemoryImageSlot>,
memory_image: MemoryImageSlot,
) -> Result<Self> {
if base.len() < initial_size {
bail!(
@@ -384,16 +379,9 @@ impl StaticMemory {
_ => base,
};
if let Some(make_accessible) = make_accessible {
if initial_size > 0 {
make_accessible(base.as_mut_ptr(), initial_size)?;
}
}
Ok(Self {
base,
size: initial_size,
make_accessible,
memory_image,
})
}
@@ -413,21 +401,7 @@ impl RuntimeLinearMemory for StaticMemory {
// prior to arriving here.
assert!(new_byte_size <= self.base.len());
// Actually grow the memory.
if let Some(image) = &mut self.memory_image {
image.set_heap_limit(new_byte_size)?;
} else {
let make_accessible = self
.make_accessible
.expect("make_accessible must be Some if this is not a CoW memory");
// Operating system can fail to make memory accessible.
let old_byte_size = self.byte_size();
make_accessible(
unsafe { self.base.as_mut_ptr().add(old_byte_size) },
new_byte_size - old_byte_size,
)?;
}
self.memory_image.set_heap_limit(new_byte_size)?;
// Update our accounting of the available size.
self.size = new_byte_size;
@@ -442,11 +416,7 @@ impl RuntimeLinearMemory for StaticMemory {
}
fn needs_init(&self) -> bool {
if let Some(slot) = &self.memory_image {
!slot.has_image()
} else {
true
}
!self.memory_image.has_image()
}
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
@@ -627,13 +597,11 @@ impl Memory {
pub fn new_static(
plan: &MemoryPlan,
base: &'static mut [u8],
make_accessible: Option<fn(*mut u8, usize) -> Result<()>>,
memory_image: Option<MemoryImageSlot>,
memory_image: MemoryImageSlot,
store: &mut dyn Store,
) -> Result<Self> {
let (minimum, maximum) = Self::limit_new(plan, Some(store))?;
let pooled_memory =
StaticMemory::new(base, minimum, maximum, make_accessible, memory_image)?;
let pooled_memory = StaticMemory::new(base, minimum, maximum, memory_image)?;
let allocation = Box::new(pooled_memory);
let allocation: Box<dyn RuntimeLinearMemory> = if plan.memory.shared {
// FIXME: since the pooling allocator owns the memory allocation
@@ -794,25 +762,13 @@ impl Memory {
self.0.vmmemory()
}
/// Check if the inner implementation of [`Memory`] is a memory created with
/// [`Memory::new_static()`].
#[cfg(feature = "pooling-allocator")]
pub fn is_static(&mut self) -> bool {
let as_any = self.0.as_any_mut();
as_any.downcast_ref::<StaticMemory>().is_some()
}
/// Consume the memory, returning its [`MemoryImageSlot`] if any is present.
/// The image should only be present for a subset of memories created with
/// [`Memory::new_static()`].
#[cfg(feature = "pooling-allocator")]
pub fn unwrap_static_image(mut self) -> Option<MemoryImageSlot> {
let as_any = self.0.as_any_mut();
if let Some(m) = as_any.downcast_mut::<StaticMemory>() {
std::mem::take(&mut m.memory_image)
} else {
None
}
pub fn unwrap_static_image(mut self) -> MemoryImageSlot {
let mem = self.0.as_any_mut().downcast_mut::<StaticMemory>().unwrap();
mem::replace(&mut mem.memory_image, MemoryImageSlot::dummy())
}
/// If the [Memory] is a [SharedMemory], unwrap it and return a clone to