Refactor shared memory internals, expose embedder methods (#5311)
This commit refactors the internals of `wasmtime_runtime::SharedMemory` to expose the functions that the `wasmtime::SharedMemory` layer needs to invoke. Notably, some items — such as the memory type and the `VMMemoryDefinition` — are moved out of the `RwLock` they previously lived in. Additionally, the organization around the `atomic_*` methods has been redone so that the `wasmtime`-layer abstraction has a single method to call into, which everything else uses as well.
This commit is contained in:
@@ -1,5 +1,7 @@
|
||||
use anyhow::Result;
|
||||
use rayon::prelude::*;
|
||||
use std::sync::atomic::{AtomicU32, Ordering::SeqCst};
|
||||
use std::time::{Duration, Instant};
|
||||
use wasmtime::*;
|
||||
|
||||
fn module(engine: &Engine) -> Result<Module> {
|
||||
@@ -462,3 +464,105 @@ fn memory64_maximum_minimum() -> Result<()> {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
fn shared_memory_basics() -> Result<()> {
    let eng = Engine::default();

    // Shared memories must be constructed from a `shared` memory type whose
    // maximum is at least the minimum; every other shape is rejected.
    assert!(SharedMemory::new(&eng, MemoryType::new(1, None)).is_err());
    assert!(SharedMemory::new(&eng, MemoryType::new(1, Some(1))).is_err());
    assert!(SharedMemory::new(&eng, MemoryType::new64(1, None)).is_err());
    assert!(SharedMemory::new(&eng, MemoryType::new64(1, Some(1))).is_err());
    assert!(SharedMemory::new(&eng, MemoryType::shared(1, 0)).is_err());

    // A one-page memory capped at one page: check the reported type, sizes,
    // backing data length, and that growing past the maximum fails.
    let mem = SharedMemory::new(&eng, MemoryType::shared(1, 1))?;
    assert!(mem.ty().is_shared());
    assert_eq!(mem.ty().minimum(), 1);
    assert_eq!(mem.ty().maximum(), Some(1));
    assert_eq!(mem.size(), 1);
    assert_eq!(mem.data_size(), 65536);
    assert_eq!(mem.data().len(), 65536);
    assert!(mem.grow(1).is_err());

    // Unaligned addresses trap for every atomic operation.
    assert_eq!(mem.atomic_notify(1, 100), Err(Trap::HeapMisaligned));
    assert_eq!(mem.atomic_wait32(1, 100, None), Err(Trap::HeapMisaligned));
    assert_eq!(mem.atomic_wait64(1, 100, None), Err(Trap::HeapMisaligned));

    // Addresses beyond the end of linear memory trap as out-of-bounds.
    assert_eq!(
        mem.atomic_notify(1 << 20, 100),
        Err(Trap::MemoryOutOfBounds)
    );
    assert_eq!(
        mem.atomic_wait32(1 << 20, 100, None),
        Err(Trap::MemoryOutOfBounds)
    );
    assert_eq!(
        mem.atomic_wait64(1 << 20, 100, None),
        Err(Trap::MemoryOutOfBounds)
    );

    // Aligned, in-bounds operations succeed; memory is zero-initialized, so
    // waiting on a nonzero expected value reports an immediate mismatch.
    assert_eq!(mem.atomic_notify(8, 100), Ok(0));
    assert_eq!(mem.atomic_wait32(8, 1, None), Ok(WaitResult::Mismatch));
    assert_eq!(mem.atomic_wait64(8, 1, None), Ok(WaitResult::Mismatch));

    // Waiting on the value actually stored (zero) with a deadline a few
    // nanoseconds away should time out rather than block.
    let deadline = Instant::now() + Duration::new(0, 100);
    assert_eq!(
        mem.atomic_wait32(8, 0, Some(deadline)),
        Ok(WaitResult::TimedOut)
    );
    assert_eq!(
        mem.atomic_wait64(8, 0, Some(deadline)),
        Ok(WaitResult::TimedOut)
    );

    Ok(())
}
|
||||
|
||||
#[test]
fn shared_memory_wait_notify() -> Result<()> {
    // Hammer a futex-style mutex built on shared linear memory from several
    // threads and verify that no increment of the protected counter is lost.
    const THREADS: usize = 8;
    const COUNT: usize = 100_000;

    let engine = Engine::default();
    let memory = SharedMemory::new(&engine, MemoryType::shared(1, 1))?;
    // Word at offset 0: the counter protected by the lock.
    // Word at offset 4: the lock word itself (0 = free, 1 = held).
    let data = unsafe { &*(memory.data().as_ptr() as *const AtomicU32) };
    let locked = unsafe { &*(memory.data().as_ptr().add(4) as *const AtomicU32) };

    // Note that `SeqCst` is used here to not think much about the orderings
    // here, and it also somewhat more closely mirrors what's happening in wasm.
    let lock = || {
        while locked.swap(1, SeqCst) == 1 {
            // Wait at offset 4, where the lock word lives, so this thread is
            // woken by `unlock`'s notify below. (Waiting at offset 0 would
            // target the counter instead and miss wakeups.)
            memory.atomic_wait32(4, 1, None).unwrap();
        }
    };
    let unlock = || {
        locked.store(0, SeqCst);
        memory.atomic_notify(4, 1).unwrap();
    };

    // Scoped threads let the closures borrow `memory`/`data`/`locked`
    // without `Arc` or `'static` bounds.
    std::thread::scope(|s| {
        for _ in 0..THREADS {
            s.spawn(|| {
                for _ in 0..COUNT {
                    lock();
                    // Non-atomic-looking load+store is fine: the lock makes
                    // the read-modify-write exclusive.
                    let next = data.load(SeqCst) + 1;
                    data.store(next, SeqCst);
                    unlock();
                }
            });
        }
    });

    assert_eq!(data.load(SeqCst), (THREADS * COUNT) as u32);

    Ok(())
}
|
||||
|
||||
@@ -107,7 +107,7 @@ fn test_probe_shared_memory_size() -> Result<()> {
|
||||
let mut store = Store::new(&engine, ());
|
||||
let instance = Instance::new(&mut store, &module, &[])?;
|
||||
let size_fn = instance.get_typed_func::<(), i32>(&mut store, "size")?;
|
||||
let mut shared_memory = instance.get_shared_memory(&mut store, "memory").unwrap();
|
||||
let shared_memory = instance.get_shared_memory(&mut store, "memory").unwrap();
|
||||
|
||||
assert_eq!(size_fn.call(&mut store, ())?, 1);
|
||||
assert_eq!(shared_memory.size(), 1);
|
||||
@@ -244,7 +244,7 @@ fn test_memory_size_accessibility() -> Result<()> {
|
||||
let shared_memory = SharedMemory::new(&engine, MemoryType::shared(1, NUM_GROW_OPS as u32))?;
|
||||
let done = Arc::new(AtomicBool::new(false));
|
||||
|
||||
let mut grow_memory = shared_memory.clone();
|
||||
let grow_memory = shared_memory.clone();
|
||||
let grow_thread = std::thread::spawn(move || {
|
||||
for i in 0..NUM_GROW_OPS {
|
||||
if grow_memory.grow(1).is_err() {
|
||||
|
||||
Reference in New Issue
Block a user