diff --git a/crates/wiggle/generate/src/wasmtime.rs b/crates/wiggle/generate/src/wasmtime.rs
index 50ece48207..be36037546 100644
--- a/crates/wiggle/generate/src/wasmtime.rs
+++ b/crates/wiggle/generate/src/wasmtime.rs
@@ -118,7 +118,7 @@ fn generate_func(
         };
         let (mem , ctx) = mem.data_and_store_mut(&mut caller);
         let ctx = get_cx(ctx);
-        let mem = #rt::wasmtime::WasmtimeGuestMemory::new(mem);
+        let mem = #rt::wasmtime::WasmtimeGuestMemory::new(mem, false);
         Ok(<#ret_ty>::from(#abi_func(ctx, &mem #(, #arg_names)*) #await_ ?))
     };
 
diff --git a/crates/wiggle/src/guest_type.rs b/crates/wiggle/src/guest_type.rs
index 0fae52491e..3145038e3c 100644
--- a/crates/wiggle/src/guest_type.rs
+++ b/crates/wiggle/src/guest_type.rs
@@ -1,5 +1,8 @@
 use crate::{region::Region, GuestError, GuestPtr};
 use std::mem;
+use std::sync::atomic::{
+    AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicU16, AtomicU32, AtomicU64, AtomicU8, Ordering,
+};
 
 /// A trait for types which are used to report errors. Each type used in the
 /// first result position of an interface function is used, by convention, to
@@ -60,9 +63,9 @@ pub unsafe trait GuestTypeTransparent<'a>: GuestType<'a> {
     fn validate(ptr: *mut Self) -> Result<(), GuestError>;
 }
 
-macro_rules! primitives {
-    ($($i:ident)*) => ($(
-        impl<'a> GuestType<'a> for $i {
+macro_rules! integer_primitives {
+    ($([$ty:ident, $ty_atomic:ident],)*) => ($(
+        impl<'a> GuestType<'a> for $ty {
             fn guest_size() -> u32 { mem::size_of::<Self>() as u32 }
             fn guest_align() -> usize { mem::align_of::<Self>() }
 
@@ -88,7 +91,14 @@ macro_rules! primitives {
                 if ptr.mem().is_mut_borrowed(region) {
                     return Err(GuestError::PtrBorrowed(region));
                 }
-                Ok(unsafe { <$i>::from_le_bytes(*host_ptr.cast::<[u8; mem::size_of::<$i>()]>()) })
+                // If the accessed memory is shared, we need to load the bytes
+                // with the correct memory consistency. We could check if the
+                // memory is shared each time, but we expect little performance
+                // difference between an additional branch and a relaxed memory
+                // access and thus always do the relaxed access here.
+                let atomic_value_ref: &$ty_atomic =
+                    unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
+                Ok($ty::from_le(atomic_value_ref.load(Ordering::Relaxed)))
             }
 
             #[inline]
@@ -107,16 +117,21 @@ macro_rules! primitives {
                 if ptr.mem().is_shared_borrowed(region) || ptr.mem().is_mut_borrowed(region) {
                     return Err(GuestError::PtrBorrowed(region));
                 }
-                unsafe {
-                    *host_ptr.cast::<[u8; mem::size_of::<$i>()]>() = <$i>::to_le_bytes(val);
-                }
+                // If the accessed memory is shared, we need to store the bytes
+                // with the correct memory consistency. We could check if the
+                // memory is shared each time, but we expect little performance
+                // difference between an additional branch and a relaxed memory
+                // access and thus always do the relaxed access here.
+                let atomic_value_ref: &$ty_atomic =
+                    unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
+                atomic_value_ref.store(val.to_le(), Ordering::Relaxed);
                 Ok(())
             }
         }
 
-        unsafe impl<'a> GuestTypeTransparent<'a> for $i {
+        unsafe impl<'a> GuestTypeTransparent<'a> for $ty {
             #[inline]
-            fn validate(_ptr: *mut $i) -> Result<(), GuestError> {
+            fn validate(_ptr: *mut $ty) -> Result<(), GuestError> {
                 // All bit patterns are safe, nothing to do here
                 Ok(())
             }
@@ -125,13 +140,94 @@ macro_rules! primitives {
     )*)
 }
 
-primitives! {
+macro_rules! float_primitives {
+    ($([$ty:ident, $ty_unsigned:ident, $ty_atomic:ident],)*) => ($(
+        impl<'a> GuestType<'a> for $ty {
+            fn guest_size() -> u32 { mem::size_of::<Self>() as u32 }
+            fn guest_align() -> usize { mem::align_of::<Self>() }
+
+            #[inline]
+            fn read(ptr: &GuestPtr<'a, Self>) -> Result<Self, GuestError> {
+                // Any bit pattern for any primitive implemented with this
+                // macro is safe, so our `validate_size_align` method will
+                // guarantee that if we are given a pointer it's valid for the
+                // size of our type as well as properly aligned. Consequently we
+                // should be able to safely read the pointer just after we
+                // validated it, returning it along here.
+                let offset = ptr.offset();
+                let size = Self::guest_size();
+                let host_ptr = ptr.mem().validate_size_align(
+                    offset,
+                    Self::guest_align(),
+                    size,
+                )?;
+                let region = Region {
+                    start: offset,
+                    len: size,
+                };
+                if ptr.mem().is_mut_borrowed(region) {
+                    return Err(GuestError::PtrBorrowed(region));
+                }
+                // If the accessed memory is shared, we need to load the bytes
+                // with the correct memory consistency. We could check if the
+                // memory is shared each time, but we expect little performance
+                // difference between an additional branch and a relaxed memory
+                // access and thus always do the relaxed access here.
+                let atomic_value_ref: &$ty_atomic =
+                    unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
+                let value = $ty_unsigned::from_le(atomic_value_ref.load(Ordering::Relaxed));
+                Ok($ty::from_bits(value))
+            }
+
+            #[inline]
+            fn write(ptr: &GuestPtr<'_, Self>, val: Self) -> Result<(), GuestError> {
+                let offset = ptr.offset();
+                let size = Self::guest_size();
+                let host_ptr = ptr.mem().validate_size_align(
+                    offset,
+                    Self::guest_align(),
+                    size,
+                )?;
+                let region = Region {
+                    start: offset,
+                    len: size,
+                };
+                if ptr.mem().is_shared_borrowed(region) || ptr.mem().is_mut_borrowed(region) {
+                    return Err(GuestError::PtrBorrowed(region));
+                }
+                // If the accessed memory is shared, we need to store the bytes
+                // with the correct memory consistency. We could check if the
+                // memory is shared each time, but we expect little performance
+                // difference between an additional branch and a relaxed memory
+                // access and thus always do the relaxed access here.
+                let atomic_value_ref: &$ty_atomic =
+                    unsafe { &*(host_ptr.cast::<$ty_atomic>()) };
+                let le_value = $ty_unsigned::to_le(val.to_bits());
+                atomic_value_ref.store(le_value, Ordering::Relaxed);
+                Ok(())
+            }
+        }
+
+        unsafe impl<'a> GuestTypeTransparent<'a> for $ty {
+            #[inline]
+            fn validate(_ptr: *mut $ty) -> Result<(), GuestError> {
+                // All bit patterns are safe, nothing to do here
+                Ok(())
+            }
+        }
+
+    )*)
+}
+
+integer_primitives! {
     // signed
-    i8 i16 i32 i64 i128
+    [i8, AtomicI8], [i16, AtomicI16], [i32, AtomicI32], [i64, AtomicI64],
     // unsigned
-    u8 u16 u32 u64 u128
-    // floats
-    f32 f64
+    [u8, AtomicU8], [u16, AtomicU16], [u32, AtomicU32], [u64, AtomicU64],
+}
+
+float_primitives! {
+    [f32, u32, AtomicU32], [f64, u64, AtomicU64],
 }
 
 // Support pointers-to-pointers where pointers are always 32-bits in wasm land
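To make the new primitive access pattern concrete, here is a minimal, self-contained sketch of what `integer_primitives!` and `float_primitives!` expand to for one 32-bit slot. `relaxed_read_u32` and `relaxed_write_f32` are hypothetical names, not wiggle APIs, and the alignment/validity checks that `validate_size_align` performs in the real code are merely assumed here:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// Assumed (checked by wiggle's `validate_size_align`, not here): `host_ptr`
// is valid for four bytes and four-byte aligned.
fn relaxed_read_u32(host_ptr: *mut u8) -> u32 {
    let atomic: &AtomicU32 = unsafe { &*host_ptr.cast::<AtomicU32>() };
    // Guest memory is little-endian, so fix up the byte order after loading.
    u32::from_le(atomic.load(Ordering::Relaxed))
}

fn relaxed_write_f32(host_ptr: *mut u8, val: f32) {
    // Floats have no atomic counterpart, so their bits travel through the
    // same-width unsigned atomic, mirroring `float_primitives!`.
    let atomic: &AtomicU32 = unsafe { &*host_ptr.cast::<AtomicU32>() };
    atomic.store(val.to_bits().to_le(), Ordering::Relaxed);
}

fn main() {
    let mut cell: u32 = 0; // stand-in for a four-byte-aligned guest slot
    let ptr = (&mut cell as *mut u32).cast::<u8>();
    relaxed_write_f32(ptr, 1.5);
    assert_eq!(f32::from_bits(relaxed_read_u32(ptr)), 1.5);
}
```

Relaxed ordering appears sufficient here because only the indivisibility of each individual load and store is needed; the code makes no cross-location ordering promises about guest memory.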
diff --git a/crates/wiggle/src/lib.rs b/crates/wiggle/src/lib.rs
index 9867e893f0..79b02e4d3a 100644
--- a/crates/wiggle/src/lib.rs
+++ b/crates/wiggle/src/lib.rs
@@ -192,6 +192,12 @@ pub unsafe trait GuestMemory: Send + Sync {
     /// `GuestStr` are implemented correctly, a shared `BorrowHandle` should only be
     /// unborrowed once.
     fn shared_unborrow(&self, h: BorrowHandle);
+
+    /// Check if the underlying memory is shared across multiple threads; e.g.,
+    /// with a WebAssembly shared memory.
+    fn is_shared_memory(&self) -> bool {
+        false
+    }
 }
 
 /// A handle to a borrow on linear memory. It is produced by `{mut, shared}_borrow` and
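Because the new trait method has a default body, this is a backward-compatible addition: every existing `GuestMemory` implementor compiles unchanged and conservatively reports `false`, and only shared-memory-backed implementations override it, as `WasmtimeGuestMemory` does in the next file. A toy illustration of that pattern, with made-up names (`Memory`, `Unshared`, `SharedBacked`):

```rust
// Illustrative only; these names are not part of wiggle.
trait Memory {
    fn is_shared_memory(&self) -> bool {
        false // conservative default: memory assumed not shared
    }
}

struct Unshared; // pre-existing implementor: compiles with no changes
impl Memory for Unshared {}

struct SharedBacked; // e.g., backed by a WebAssembly shared memory
impl Memory for SharedBacked {
    fn is_shared_memory(&self) -> bool {
        true
    }
}

fn main() {
    assert!(!Unshared.is_shared_memory());
    assert!(SharedBacked.is_shared_memory());
}
```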
diff --git a/crates/wiggle/src/wasmtime.rs b/crates/wiggle/src/wasmtime.rs
index dd9de48abd..c4f53584b3 100644
--- a/crates/wiggle/src/wasmtime.rs
+++ b/crates/wiggle/src/wasmtime.rs
@@ -6,10 +6,11 @@ use crate::{BorrowHandle, GuestError, GuestMemory, Region};
 pub struct WasmtimeGuestMemory<'a> {
     mem: &'a mut [u8],
     bc: BorrowChecker,
+    shared: bool,
 }
 
 impl<'a> WasmtimeGuestMemory<'a> {
-    pub fn new(mem: &'a mut [u8]) -> Self {
+    pub fn new(mem: &'a mut [u8], shared: bool) -> Self {
         Self {
             mem,
             // Wiggle does not expose any methods for functions to re-enter
@@ -22,6 +23,7 @@ impl<'a> WasmtimeGuestMemory<'a> {
             // integrated fully with wasmtime:
             // https://github.com/bytecodealliance/wasmtime/issues/1917
             bc: BorrowChecker::new(),
+            shared,
         }
     }
 }
@@ -51,4 +53,7 @@ unsafe impl GuestMemory for WasmtimeGuestMemory<'_> {
     fn mut_unborrow(&self, h: BorrowHandle) {
         self.bc.mut_unborrow(h)
     }
+    fn is_shared_memory(&self) -> bool {
+        self.shared
+    }
 }
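With the extra constructor argument, the embedder states up front whether the wrapped bytes alias a shared memory; the generated glue in the first hunk passes `false` for now. A hedged sketch of a hand-written call site follows; the `adapt` wrapper is hypothetical, and only `WasmtimeGuestMemory::new` comes from this patch:

```rust
use wiggle::wasmtime::WasmtimeGuestMemory;

// Hypothetical embedder-side helper: `shared` should be true iff `mem`
// aliases a WebAssembly shared memory, so that `is_shared_memory()` reports
// that other threads may be mutating the bytes concurrently.
fn adapt<'a>(mem: &'a mut [u8], shared: bool) -> WasmtimeGuestMemory<'a> {
    WasmtimeGuestMemory::new(mem, shared)
}
```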