wiggle: add initial support for shared memory (#5225)
This change is the first in a series of changes to support shared memory in Wiggle. Because Wiggle was written under the assumption of single-threaded guest-side access, this change introduces a `shared` field on guest memories to flag when that assumption does not hold. For now, `shared` is always set to `false`; once a few more pieces are in place, it will be set dynamically when a shared memory is detected, e.g., in a change like #5054.

Using the `shared` field, we can now decide to load Wiggle values differently under the new assumption. This change turns the guest `T::read` and `T::write` calls into `Relaxed` atomic loads and stores in order to maintain WebAssembly's expected memory consistency guarantees. We choose Rust's `Relaxed` to match the `Unordered` memory consistency described in the [memory model] section of the ECMAScript spec, on which WebAssembly's shared-memory semantics are based. These relaxed accesses are performed unconditionally: we expect the performance difference between an additional branch and a relaxed access to be negligible, so checking whether the memory is shared on every access would buy little.

[memory model]: https://tc39.es/ecma262/multipage/memory-model.html#sec-memory-model

Since 128-bit scalar types have no `Atomic*` equivalents, their `T::read` and `T::write` implementations are removed here; they were unused by any WASI implementation in the project.
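To make the approach concrete, here is a minimal, self-contained sketch of the access pattern this commit adopts (not Wiggle's actual code), using `u32` as the example width; the `Aligned` wrapper is a local helper introduced here to guarantee alignment:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    // Stand-in for a 4-byte, u32-aligned region of guest linear memory.
    #[repr(align(4))]
    struct Aligned([u8; 4]);
    let mut mem = Aligned([0u8; 4]);
    let host_ptr: *mut u8 = mem.0.as_mut_ptr();

    // SAFETY: the pointer is valid, aligned, and large enough for a u32;
    // in Wiggle, `validate_size_align` establishes this before the cast.
    let atomic: &AtomicU32 = unsafe { &*(host_ptr.cast::<AtomicU32>()) };

    // Write: serialize to wasm's little-endian byte order, then store with
    // `Relaxed` ordering (matching the spec's `Unordered` accesses).
    atomic.store(0x1234_5678u32.to_le(), Ordering::Relaxed);

    // Read: `Relaxed` load, then deserialize from little-endian.
    let val = u32::from_le(atomic.load(Ordering::Relaxed));
    assert_eq!(val, 0x1234_5678);
}
```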
@@ -118,7 +118,7 @@ fn generate_func(
         };
         let (mem, ctx) = mem.data_and_store_mut(&mut caller);
         let ctx = get_cx(ctx);
-        let mem = #rt::wasmtime::WasmtimeGuestMemory::new(mem);
+        let mem = #rt::wasmtime::WasmtimeGuestMemory::new(mem, false);
         Ok(<#ret_ty>::from(#abi_func(ctx, &mem #(, #arg_names)*) #await_ ?))
     };
 
@@ -1,5 +1,8 @@
 use crate::{region::Region, GuestError, GuestPtr};
 use std::mem;
+use std::sync::atomic::{
+    AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicU16, AtomicU32, AtomicU64, AtomicU8, Ordering,
+};
 
 /// A trait for types which are used to report errors. Each type used in the
 /// first result position of an interface function is used, by convention, to
@@ -60,9 +63,9 @@ pub unsafe trait GuestTypeTransparent<'a>: GuestType<'a> {
     fn validate(ptr: *mut Self) -> Result<(), GuestError>;
 }
 
-macro_rules! primitives {
-    ($($i:ident)*) => ($(
-        impl<'a> GuestType<'a> for $i {
+macro_rules! integer_primitives {
+    ($([$ty:ident, $ty_atomic:ident],)*) => ($(
+        impl<'a> GuestType<'a> for $ty {
             fn guest_size() -> u32 { mem::size_of::<Self>() as u32 }
             fn guest_align() -> usize { mem::align_of::<Self>() }
 
@@ -88,7 +91,14 @@ macro_rules! primitives {
                 if ptr.mem().is_mut_borrowed(region) {
                     return Err(GuestError::PtrBorrowed(region));
                 }
-                Ok(unsafe { <$i>::from_le_bytes(*host_ptr.cast::<[u8; mem::size_of::<Self>()]>()) })
+                // If the accessed memory is shared, we need to load the bytes
+                // with the correct memory consistency. We could check if the
+                // memory is shared each time, but we expect little performance
+                // difference between an additional branch and a relaxed memory
+                // access and thus always do the relaxed access here.
+                let atomic_value_ref: &$ty_atomic =
+                    unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
+                Ok($ty::from_le(atomic_value_ref.load(Ordering::Relaxed)))
             }
 
             #[inline]
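As a sanity check on the read path above, this standalone snippet (again using `u32` and a local `Aligned` helper, not Wiggle code) shows that the new relaxed-load path decodes the same value as the old `from_le_bytes` path on both little- and big-endian hosts:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    // Four bytes of "guest memory" holding 0x0403_0201 in little-endian.
    #[repr(align(4))]
    struct Aligned([u8; 4]);
    let mem = Aligned([0x01, 0x02, 0x03, 0x04]);

    // Old path: plain, non-atomic byte copy.
    let plain = u32::from_le_bytes(mem.0);

    // New path: cast the (aligned) pointer and do a relaxed atomic load.
    let atomic: &AtomicU32 = unsafe { &*(mem.0.as_ptr().cast::<AtomicU32>()) };
    let relaxed = u32::from_le(atomic.load(Ordering::Relaxed));

    assert_eq!(plain, relaxed);
    assert_eq!(relaxed, 0x0403_0201);
}
```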
@@ -107,16 +117,21 @@ macro_rules! primitives {
                 if ptr.mem().is_shared_borrowed(region) || ptr.mem().is_mut_borrowed(region) {
                     return Err(GuestError::PtrBorrowed(region));
                 }
-                unsafe {
-                    *host_ptr.cast::<[u8; mem::size_of::<Self>()]>() = <$i>::to_le_bytes(val);
-                }
+                // If the accessed memory is shared, we need to store the bytes
+                // with the correct memory consistency. We could check if the
+                // memory is shared each time, but we expect little performance
+                // difference between an additional branch and a relaxed memory
+                // access and thus always do the relaxed access here.
+                let atomic_value_ref: &$ty_atomic =
+                    unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
+                atomic_value_ref.store(val.to_le(), Ordering::Relaxed);
                 Ok(())
             }
         }
 
-        unsafe impl<'a> GuestTypeTransparent<'a> for $i {
+        unsafe impl<'a> GuestTypeTransparent<'a> for $ty {
             #[inline]
-            fn validate(_ptr: *mut $i) -> Result<(), GuestError> {
+            fn validate(_ptr: *mut $ty) -> Result<(), GuestError> {
                 // All bit patterns are safe, nothing to do here
                 Ok(())
             }
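And the mirror-image check for the write path: after a relaxed store of `val.to_le()`, the raw bytes in memory match what the old non-atomic `to_le_bytes` write produced. Again a standalone sketch, not Wiggle code:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    #[repr(align(4))]
    struct Aligned([u8; 4]);
    let mut mem = Aligned([0u8; 4]);
    let val: u32 = 0xCAFE_F00D;

    // New path: convert to little-endian first, then do a relaxed store.
    {
        let atomic: &AtomicU32 =
            unsafe { &*(mem.0.as_mut_ptr().cast::<AtomicU32>()) };
        atomic.store(val.to_le(), Ordering::Relaxed);
    }

    // The bytes now in memory are exactly what the old `to_le_bytes`
    // write would have produced.
    assert_eq!(mem.0, val.to_le_bytes());
}
```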
@@ -125,13 +140,94 @@ macro_rules! primitives {
         )*)
 }
 
-primitives! {
+macro_rules! float_primitives {
+    ($([$ty:ident, $ty_unsigned:ident, $ty_atomic:ident],)*) => ($(
+        impl<'a> GuestType<'a> for $ty {
+            fn guest_size() -> u32 { mem::size_of::<Self>() as u32 }
+            fn guest_align() -> usize { mem::align_of::<Self>() }
+
+            #[inline]
+            fn read(ptr: &GuestPtr<'a, Self>) -> Result<Self, GuestError> {
+                // Any bit pattern for any primitive implemented with this
+                // macro is safe, so our `validate_size_align` method will
+                // guarantee that if we are given a pointer it's valid for the
+                // size of our type as well as properly aligned. Consequently we
+                // should be able to safely read the pointer just after we
+                // validated it, returning it along here.
+                let offset = ptr.offset();
+                let size = Self::guest_size();
+                let host_ptr = ptr.mem().validate_size_align(
+                    offset,
+                    Self::guest_align(),
+                    size,
+                )?;
+                let region = Region {
+                    start: offset,
+                    len: size,
+                };
+                if ptr.mem().is_mut_borrowed(region) {
+                    return Err(GuestError::PtrBorrowed(region));
+                }
+                // If the accessed memory is shared, we need to load the bytes
+                // with the correct memory consistency. We could check if the
+                // memory is shared each time, but we expect little performance
+                // difference between an additional branch and a relaxed memory
+                // access and thus always do the relaxed access here.
+                let atomic_value_ref: &$ty_atomic =
+                    unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
+                let value = $ty_unsigned::from_le(atomic_value_ref.load(Ordering::Relaxed));
+                Ok($ty::from_bits(value))
+            }
+
+            #[inline]
+            fn write(ptr: &GuestPtr<'_, Self>, val: Self) -> Result<(), GuestError> {
+                let offset = ptr.offset();
+                let size = Self::guest_size();
+                let host_ptr = ptr.mem().validate_size_align(
+                    offset,
+                    Self::guest_align(),
+                    size,
+                )?;
+                let region = Region {
+                    start: offset,
+                    len: size,
+                };
+                if ptr.mem().is_shared_borrowed(region) || ptr.mem().is_mut_borrowed(region) {
+                    return Err(GuestError::PtrBorrowed(region));
+                }
+                // If the accessed memory is shared, we need to store the bytes
+                // with the correct memory consistency. We could check if the
+                // memory is shared each time, but we expect little performance
+                // difference between an additional branch and a relaxed memory
+                // access and thus always do the relaxed access here.
+                let atomic_value_ref: &$ty_atomic =
+                    unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
+                let le_value = $ty_unsigned::to_le(val.to_bits());
+                atomic_value_ref.store(le_value, Ordering::Relaxed);
+                Ok(())
+            }
+        }
+
+        unsafe impl<'a> GuestTypeTransparent<'a> for $ty {
+            #[inline]
+            fn validate(_ptr: *mut $ty) -> Result<(), GuestError> {
+                // All bit patterns are safe, nothing to do here
+                Ok(())
+            }
+        }
+
+    )*)
+}
+
+integer_primitives! {
     // signed
-    i8 i16 i32 i64 i128
+    [i8, AtomicI8], [i16, AtomicI16], [i32, AtomicI32], [i64, AtomicI64],
     // unsigned
-    u8 u16 u32 u64 u128
-    // floats
-    f32 f64
+    [u8, AtomicU8], [u16, AtomicU16], [u32, AtomicU32], [u64, AtomicU64],
 }
+
+float_primitives! {
+    [f32, u32, AtomicU32], [f64, u64, AtomicU64],
+}
 
 // Support pointers-to-pointers where pointers are always 32-bits in wasm land
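Because floats have no `Atomic*` types, the macro round-trips through the same-width unsigned integer. A standalone sketch of that bit-level round trip for `f64` via `AtomicU64` (the `Aligned` helper is local to this example); note the value survives bit-for-bit, which matters for NaN payloads:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

fn main() {
    #[repr(align(8))]
    struct Aligned([u8; 8]);
    let mut mem = Aligned([0u8; 8]);
    let val: f64 = -0.5;

    // Write: f64 -> u64 bits -> little-endian -> relaxed store.
    {
        let atomic: &AtomicU64 =
            unsafe { &*(mem.0.as_mut_ptr().cast::<AtomicU64>()) };
        atomic.store(u64::to_le(val.to_bits()), Ordering::Relaxed);
    }

    // Read: relaxed load -> from little-endian -> u64 bits -> f64.
    let atomic: &AtomicU64 = unsafe { &*(mem.0.as_ptr().cast::<AtomicU64>()) };
    let back = f64::from_bits(u64::from_le(atomic.load(Ordering::Relaxed)));
    assert_eq!(back.to_bits(), val.to_bits()); // exact bit-for-bit round trip
}
```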
@@ -192,6 +192,12 @@ pub unsafe trait GuestMemory: Send + Sync {
     /// `GuestStr` are implemented correctly, a shared `BorrowHandle` should only be
     /// unborrowed once.
     fn shared_unborrow(&self, h: BorrowHandle);
 
+    /// Check if the underlying memory is shared across multiple threads; e.g.,
+    /// with a WebAssembly shared memory.
+    fn is_shared_memory(&self) -> bool {
+        false
+    }
 }
 
 /// A handle to a borrow on linear memory. It is produced by `{mut, shared}_borrow` and
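The new trait method defaults to `false`, so existing `GuestMemory` implementations keep compiling unchanged and only shared memories need to opt in. A toy illustration of this default-method pattern (not the real wiggle trait):

```rust
// Toy trait demonstrating the defaulted method, not the real `GuestMemory`.
trait MemoryInfo {
    // Defaulting to `false` keeps every existing implementor compiling.
    fn is_shared_memory(&self) -> bool {
        false
    }
}

struct PlainMemory;
impl MemoryInfo for PlainMemory {} // inherits the `false` default

struct SharedWasmMemory;
impl MemoryInfo for SharedWasmMemory {
    fn is_shared_memory(&self) -> bool {
        true // a shared wasm memory opts in by overriding
    }
}

fn main() {
    assert!(!PlainMemory.is_shared_memory());
    assert!(SharedWasmMemory.is_shared_memory());
}
```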
@@ -6,10 +6,11 @@ use crate::{BorrowHandle, GuestError, GuestMemory, Region};
 pub struct WasmtimeGuestMemory<'a> {
     mem: &'a mut [u8],
     bc: BorrowChecker,
+    shared: bool,
 }
 
 impl<'a> WasmtimeGuestMemory<'a> {
-    pub fn new(mem: &'a mut [u8]) -> Self {
+    pub fn new(mem: &'a mut [u8], shared: bool) -> Self {
         Self {
             mem,
             // Wiggle does not expose any methods for functions to re-enter
@@ -22,6 +23,7 @@ impl<'a> WasmtimeGuestMemory<'a> {
             // integrated fully with wasmtime:
             // https://github.com/bytecodealliance/wasmtime/issues/1917
             bc: BorrowChecker::new(),
+            shared,
         }
     }
 }
@@ -51,4 +53,7 @@ unsafe impl GuestMemory for WasmtimeGuestMemory<'_> {
     fn mut_unborrow(&self, h: BorrowHandle) {
         self.bc.mut_unborrow(h)
     }
+    fn is_shared_memory(&self) -> bool {
+        self.shared
+    }
 }
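Putting the pieces together, a minimal mock (hypothetical types mirroring the diff above, not the real Wiggle API) of how the flag flows from the constructor to `is_shared_memory`:

```rust
// Hypothetical stand-ins mirroring `WasmtimeGuestMemory` and its trait impl.
trait GuestMemoryLike {
    fn is_shared_memory(&self) -> bool {
        false
    }
}

struct MockGuestMemory<'a> {
    mem: &'a mut [u8],
    shared: bool,
}

impl<'a> MockGuestMemory<'a> {
    fn new(mem: &'a mut [u8], shared: bool) -> Self {
        Self { mem, shared }
    }
}

impl GuestMemoryLike for MockGuestMemory<'_> {
    fn is_shared_memory(&self) -> bool {
        self.shared
    }
}

fn main() {
    let mut linear_memory = vec![0u8; 64 * 1024];
    // The generated glue currently always passes `false`; a follow-up
    // (e.g., #5054) would pass `true` for actual shared wasm memories.
    let guest_mem = MockGuestMemory::new(&mut linear_memory, false);
    assert!(!guest_mem.is_shared_memory());
    assert_eq!(guest_mem.mem.len(), 64 * 1024);
}
```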