Add a first-class StoreId type to Wasmtime (#4204)
* Add a first-class `StoreId` type to Wasmtime

This commit adds a `StoreId` type to uniquely identify a store internally within Wasmtime. This hasn't been created previously as it was never really needed, but I've run across a case for its usage in the component model, so I've gone ahead and split out a commit to add this type.

While I was here in this file I opted to improve some other miscellaneous things as well:

* Notes were added to the `Index` impls that unchecked indexing could be used in theory if we ever need it one day.
* The check in `Index` for the same store should now be a bit lighter on codegen: instead of having a `panic!()` in the codegen for each `Index` there's now an out-of-line version which is `#[cold]`. This should improve codegen since calling a function with no arguments is slightly more efficient than calling the panic macro with one string argument.
* An `assert!` guarded with a `cfg(debug_assertions)` was changed to a `debug_assert!`.
* Allocation of a `StoreId` was refactored to a method on the `StoreId` type itself.

* Review comments

* Fix an ordering
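As background for the codegen point above, here is a minimal standalone sketch (not the Wasmtime code itself; names are illustrative) of the pattern of hoisting a panic out of an inlined hot path behind a `#[cold]` function with no arguments:

#[cold]
fn id_mismatch() {
    // The panic machinery and its string argument live out of line,
    // in a function the optimizer knows is rarely taken.
    panic!("object used with the wrong store");
}

#[inline]
fn check(actual: u64, expected: u64) {
    // The inlined hot path is just a comparison plus a plain call
    // with no arguments.
    if actual != expected {
        id_mismatch();
    }
}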
@@ -4,7 +4,7 @@ use std::fmt;
 use std::marker;
 use std::num::NonZeroU64;
 use std::ops::{Index, IndexMut};
-use std::sync::atomic::{AtomicU64, Ordering::SeqCst};
+use std::sync::atomic::{AtomicU64, Ordering::Relaxed};
 
 // This is defined here, in a private submodule, so we can explicitly reexport
 // it only as `pub(crate)`. This avoids a ton of
@@ -14,7 +14,7 @@ use std::sync::atomic::{AtomicU64, Ordering::SeqCst};
 pub struct InstanceId(pub(super) usize);
 
 pub struct StoreData {
-    id: NonZeroU64,
+    id: StoreId,
     funcs: Vec<crate::func::FuncData>,
     tables: Vec<wasmtime_runtime::ExportTable>,
     globals: Vec<wasmtime_runtime::ExportGlobal>,
@@ -50,18 +50,8 @@ impl_store_data! {
 
 impl StoreData {
     pub fn new() -> StoreData {
-        static NEXT_ID: AtomicU64 = AtomicU64::new(0);
-
-        // Only allow 2^63 stores at which point we start panicking to prevent
-        // overflow. This should still last us to effectively the end of time.
-        let id = NEXT_ID.fetch_add(1, SeqCst);
-        if id & (1 << 63) != 0 {
-            NEXT_ID.store(1 << 63, SeqCst);
-            panic!("store id allocator overflow");
-        }
-
         StoreData {
-            id: NonZeroU64::new(id + 1).unwrap(),
+            id: StoreId::allocate(),
             funcs: Vec::new(),
             tables: Vec::new(),
             globals: Vec::new(),
@@ -72,6 +62,10 @@ impl StoreData {
         }
     }
 
+    pub fn id(&self) -> StoreId {
+        self.id
+    }
+
     pub fn insert<T>(&mut self, data: T) -> Stored<T>
     where
         T: StoredData,
@@ -93,14 +87,12 @@ impl StoreData {
     where
         T: StoredData,
     {
-        if id.store_id() != self.id {
+        if id.store_id != self.id {
             return false;
         }
         // This should be true as an invariant of our API, but double-check with
         // debug assertions enabled.
-        if cfg!(debug_assertions) {
-            assert!(id.index() < T::list(self).len());
-        }
+        debug_assert!(id.index() < T::list(self).len());
         true
     }
 
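For reference, the two forms in this hunk behave identically: `debug_assert!` is defined in the standard library as an assertion wrapped in `if cfg!(debug_assertions)`. A minimal sketch showing the equivalence:

fn check(len: usize, index: usize) {
    // Old form: explicit guard on the configuration flag.
    if cfg!(debug_assertions) {
        assert!(index < len);
    }

    // New form: same behavior, the guard is built into the macro.
    debug_assert!(index < len);
}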
@@ -121,10 +113,12 @@ where
 
     #[inline]
     fn index(&self, index: Stored<T>) -> &Self::Output {
-        assert!(
-            index.store_id() == self.id,
-            "object used with the wrong store"
-        );
+        index.assert_belongs_to(self.id);
+        // Note that if this is ever a performance bottleneck it should be safe
+        // to use unchecked indexing here because presence of a `Stored<T>` is
+        // proof of an item having been inserted into a store and lists in
+        // stores are never shrunk. After the store check above the actual index
+        // should always be valid.
         &T::list(self)[index.index()]
     }
 }
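The note in this hunk is about slice indexing; a self-contained sketch of what the contemplated unchecked variant would look like, under the invariant the comment describes (this is not part of the commit):

/// Checked vs. unchecked slice indexing. `get_unchecked` skips the
/// bounds check, so the caller must guarantee `i < items.len()`.
fn lookup(items: &[u32], i: usize) -> u32 {
    // Checked: panics on out-of-bounds (what the diff does today).
    let checked = items[i];

    // Unchecked: safe only because the caller guarantees `i` is in
    // bounds, e.g. insertion handed out the index and the list never
    // shrinks.
    let unchecked = unsafe { *items.get_unchecked(i) };

    debug_assert_eq!(checked, unchecked);
    checked
}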
@@ -135,10 +129,9 @@ where
 {
     #[inline]
     fn index_mut(&mut self, index: Stored<T>) -> &mut Self::Output {
-        assert!(
-            index.store_id() == self.id,
-            "object used with the wrong store"
-        );
+        index.assert_belongs_to(self.id);
+        // Note that this could be unchecked indexing, see the note in `Index`
+        // above.
         &mut T::list_mut(self)[index.index()]
     }
 }
@@ -191,15 +184,50 @@ where
     }
 }
 
+/// A unique identifier to get attached to a store.
+///
+/// This identifier is embedded into the `Stored<T>` structure and is used to
+/// identify the original store that items come from. For example a `Memory` is
+/// owned by a `Store` and will embed a `StoreId` internally to say which store
+/// it came from. Comparisons with this value are how panics are generated for
+/// mismatching the item that a store belongs to.
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub struct StoreId(NonZeroU64);
+
+impl StoreId {
+    /// Allocates a new unique identifier for a store that has never before been
+    /// used in this process.
+    fn allocate() -> StoreId {
+        static NEXT_ID: AtomicU64 = AtomicU64::new(0);
+
+        // Only allow 2^63 stores at which point we start panicking to prevent
+        // overflow.
+        //
+        // If a store is created once per microsecond then this will last the
+        // current process for 584,540 years before overflowing.
+        //
+        // Also note the usage of `Relaxed` ordering here which should be ok
+        // since we're only looking for atomicity on this counter and this
+        // otherwise isn't used to synchronize memory stored anywhere else.
+        let id = NEXT_ID.fetch_add(1, Relaxed);
+        if id & (1 << 63) != 0 {
+            NEXT_ID.store(1 << 63, Relaxed);
+            panic!("store id allocator overflow");
+        }
+
+        StoreId(NonZeroU64::new(id + 1).unwrap())
+    }
+}
+
 #[repr(C)] // used by reference in the C API
 pub struct Stored<T> {
-    store_id: NonZeroU64,
+    store_id: StoreId,
     index: usize,
     _marker: marker::PhantomData<fn() -> T>,
 }
 
 impl<T> Stored<T> {
-    fn new(store_id: NonZeroU64, index: usize) -> Stored<T> {
+    fn new(store_id: StoreId, index: usize) -> Stored<T> {
         Stored {
             store_id,
             index,
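The allocator added above is self-contained enough to run in isolation. A standalone sketch of the same counter pattern, with names that are illustrative rather than from the commit:

use std::num::NonZeroU64;
use std::sync::atomic::{AtomicU64, Ordering::Relaxed};

fn allocate_id() -> NonZeroU64 {
    static NEXT: AtomicU64 = AtomicU64::new(0);

    // `Relaxed` suffices: the counter only needs atomicity and is not
    // used to synchronize any other memory.
    let id = NEXT.fetch_add(1, Relaxed);

    // Cap at 2^63 allocations. Parking the counter back at 2^63 keeps
    // concurrent callers panicking here instead of eventually wrapping
    // the u64 around to already-handed-out ids.
    if id & (1 << 63) != 0 {
        NEXT.store(1 << 63, Relaxed);
        panic!("id allocator overflow");
    }

    // `id + 1` is nonzero for every id below the cap, so this `unwrap`
    // cannot fail (and the +1 is what makes NonZeroU64 usable at all).
    NonZeroU64::new(id + 1).unwrap()
}

fn main() {
    assert_eq!(allocate_id().get(), 1);
    assert_eq!(allocate_id().get(), 2);
}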
@@ -207,8 +235,12 @@ impl<T> Stored<T> {
         }
     }
 
-    fn store_id(&self) -> NonZeroU64 {
-        self.store_id
+    #[inline]
+    fn assert_belongs_to(&self, store: StoreId) {
+        if self.store_id == store {
+            return;
+        }
+        store_id_mismatch();
     }
 
     fn index(&self) -> usize {
@@ -216,6 +248,11 @@ impl<T> Stored<T> {
     }
 }
 
+#[cold]
+fn store_id_mismatch() {
+    panic!("object used with the wrong store");
+}
+
 impl<T> PartialEq for Stored<T> {
     fn eq(&self, other: &Stored<T>) -> bool {
         self.store_id == other.store_id && self.index == other.index
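The panic this helper centralizes is what embedders see when an object is used with a store other than its own. A hedged usage sketch against the public `wasmtime` API of roughly this era (exact method availability may differ by version):

use wasmtime::{Engine, Func, Store};

fn main() {
    let engine = Engine::default();
    let mut store_a = Store::new(&engine, ());
    let store_b = Store::new(&engine, ());

    // `f` embeds store_a's id inside its `Stored<FuncData>` handle.
    let f = Func::wrap(&mut store_a, || {});

    // Looking the function up through a different store trips the id
    // comparison and panics with "object used with the wrong store".
    let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
        f.ty(&store_b)
    }));
    assert!(result.is_err());
}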
@@ -232,6 +269,6 @@ impl<T> Clone for Stored<T> {
 
 impl<T> fmt::Debug for Stored<T> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "store={}, index={}", self.store_id(), self.index())
+        write!(f, "store={}, index={}", self.store_id.0, self.index())
     }
 }