Refactor shared memory internals, expose embedder methods (#5311)

This commit refactors the internals of `wasmtime_runtime::SharedMemory`
a bit to expose the functions that the `wasmtime::SharedMemory` layer
needs to invoke. Notably, some items that previously lived inside the
`RwLock`, such as the type and the `VMMemoryDefinition`, are moved out
of it. Additionally, the organization of the `atomic_*` methods has been
reworked so that there is a single method for the `wasmtime`-layer
abstraction to call into, which everything else uses as well.
Alex Crichton
2022-11-22 10:51:55 -06:00
committed by GitHub
parent 0a2a0444b3
commit 6ce2ac19b8
9 changed files with 401 additions and 246 deletions
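
As a rough illustration of the layout this refactor moves toward (a minimal sketch with hypothetical field and method names and stub types, not the actual wasmtime code), only the state that needs synchronization stays behind the `RwLock`, the type and the `VMMemoryDefinition` sit next to it, and the `atomic_*` operations funnel through a single entry point:

use std::sync::{Arc, RwLock};

// Hypothetical stand-ins for the real wasmtime types.
struct MemoryType;
struct VMMemoryDefinition;
struct MmapMemory;
enum Trap { HeapMisaligned, MemoryOutOfBounds }

// Sketch of the post-refactor layout: only the growable backing memory sits
// behind the RwLock; the type and the VMMemoryDefinition live outside it so
// they can be read without taking the lock.
struct SharedMemoryInner {
    memory: RwLock<MmapMemory>,
    ty: MemoryType,
    def: Box<VMMemoryDefinition>,
}

#[derive(Clone)]
pub struct SharedMemory(Arc<SharedMemoryInner>);

impl SharedMemory {
    /// Single method that the `wasmtime`-layer wrapper (and any internal
    /// callers) would funnel atomic notifications through.
    pub fn atomic_notify(&self, addr: u64, count: u32) -> Result<u32, Trap> {
        // Validate `addr`, then wake up to `count` waiters at that address.
        let _ = (addr, count);
        todo!()
    }
}
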


@@ -12,7 +12,7 @@ use std::ptr::NonNull;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::u32;
pub use vm_host_func_context::VMHostFuncContext;
-use wasmtime_environ::{DefinedMemoryIndex, Trap};
+use wasmtime_environ::DefinedMemoryIndex;
pub const VMCONTEXT_MAGIC: u32 = u32::from_le_bytes(*b"core");
@@ -248,30 +248,6 @@ impl VMMemoryDefinition {
            current_length: other.current_length().into(),
        }
    }
-    /// In the configurations where bounds checks were elided in JIT code (because
-    /// we are using static memories with virtual memory guard pages) this manual
-    /// check is here so we don't segfault from Rust. For other configurations,
-    /// these checks are required anyways.
-    pub fn validate_addr(
-        &self,
-        addr: u64,
-        access_size: u64,
-        access_alignment: u64,
-    ) -> Result<*const u8, Trap> {
-        debug_assert!(access_alignment.is_power_of_two());
-        if !(addr % access_alignment == 0) {
-            return Err(Trap::HeapMisaligned);
-        }
-        let length = u64::try_from(self.current_length()).unwrap();
-        if !(addr.saturating_add(access_size) < length) {
-            return Err(Trap::MemoryOutOfBounds);
-        }
-        // SAFETY: checked above that the address is in bounds
-        Ok(unsafe { self.base.add(addr as usize) })
-    }
}
#[cfg(test)]
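
For context, the `validate_addr` helper removed above performed a manual alignment and bounds check before an address was handed to an atomic operation. A self-contained sketch of that check (using a stub `Trap` enum rather than `wasmtime_environ::Trap`, and keeping the same bounds condition as the removed code):

#[derive(Debug, PartialEq)]
enum Trap {
    HeapMisaligned,
    MemoryOutOfBounds,
}

/// Check that an `access_size`-byte access at `addr` into a linear memory of
/// `length` bytes is suitably aligned and in bounds, mirroring the condition
/// used by the removed `validate_addr`.
fn validate(addr: u64, access_size: u64, access_alignment: u64, length: u64) -> Result<(), Trap> {
    debug_assert!(access_alignment.is_power_of_two());
    if addr % access_alignment != 0 {
        return Err(Trap::HeapMisaligned);
    }
    // Same condition as the removed code: `!(addr.saturating_add(access_size) < length)`.
    if addr.saturating_add(access_size) >= length {
        return Err(Trap::MemoryOutOfBounds);
    }
    Ok(())
}

fn main() {
    // An aligned 4-byte access near the start of a 64 KiB memory is accepted...
    assert_eq!(validate(8, 4, 4, 65536), Ok(()));
    // ...while a misaligned address traps.
    assert_eq!(validate(6, 4, 4, 65536), Err(Trap::HeapMisaligned));
}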