Validate alignment in the canonical ABI (#4238)

This commit updates the lifting and lowering done by Wasmtime to
validate that pointers are correctly aligned. Previously alignment was
ignored entirely because I wasn't sure how it would all work out.
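
The rule being enforced is the canonical ABI's: a value of type `T`
stored at `ptr` requires `ptr % T::align() == 0`. A minimal standalone
sketch of that check (names here are illustrative, not Wasmtime's API):

```rust
/// Illustrative helper, not Wasmtime's API: the canonical ABI requires a
/// value's address to be a multiple of its type's alignment.
fn check_align(ptr: usize, align: usize) -> Result<(), String> {
    if ptr % align != 0 {
        return Err(format!("pointer {ptr:#x} not aligned to {align}"));
    }
    Ok(())
}

fn main() {
    assert!(check_align(0x1000, 4).is_ok()); // valid address for a u32
    assert!(check_align(0x1003, 4).is_err()); // now rejected with a trap
}
```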

To be extra safe I haven't actually modified any loads/stores; they all
remain unaligned. If this becomes a performance issue we can investigate
aligned loads and stores, but otherwise I believe the requisite
locations are now guarded with traps, and I've also added debug asserts
to catch possible future mistakes.
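
For context on why the loads/stores can safely stay unaligned: decoding
goes through byte copies and `from_le_bytes`, which carries no alignment
requirement, so the debug asserts document the ABI invariant rather than
guard memory safety. A sketch of the pattern (`load_u32` is a
hypothetical helper, not code from this commit):

```rust
/// Sketch of the unaligned-load pattern: copying bytes out of the guest
/// memory slice and decoding with from_le_bytes is valid at any offset,
/// unlike casting to *const u32 and dereferencing.
fn load_u32(memory: &[u8], offset: usize) -> Option<u32> {
    // Alignment is validated (and trapped on) before we ever get here;
    // this assert only catches future mistakes in debug builds.
    debug_assert!(offset % 4 == 0);
    let bytes: [u8; 4] = memory.get(offset..offset + 4)?.try_into().ok()?;
    Some(u32::from_le_bytes(bytes))
}

fn main() {
    let mem = [0u8; 8];
    assert_eq!(load_u32(&mem, 4), Some(0));
}
```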
Alex Crichton, 2022-06-07 13:34:34 -05:00 (committed by GitHub)
parent 8ca3af0e37, commit 0b4448a423
6 changed files with 493 additions and 77 deletions


@@ -230,12 +230,15 @@ where
 fn validate_inbounds<T: ComponentType>(memory: &[u8], ptr: &ValRaw) -> Result<usize> {
     // FIXME: needs memory64 support
     let ptr = usize::try_from(ptr.get_u32())?;
+    if ptr % usize::try_from(T::align())? != 0 {
+        bail!("pointer not aligned");
+    }
     let end = match ptr.checked_add(T::size()) {
         Some(n) => n,
-        None => bail!("return pointer size overflow"),
+        None => bail!("pointer size overflow"),
     };
     if end > memory.len() {
-        bail!("return pointer out of bounds")
+        bail!("pointer out of bounds")
     }
     Ok(ptr)
 }
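
Concretely, a misaligned pointer now fails before any read happens. A
self-contained rendition of the checks above, using plain integers in
place of Wasmtime's internal `ValRaw`/`ComponentType` machinery
(illustrative only):

```rust
// Mirrors the order of checks above: alignment, then overflow, then bounds.
fn validate_inbounds(mem_len: usize, ptr: usize, size: usize, align: usize) -> Result<usize, &'static str> {
    if ptr % align != 0 {
        return Err("pointer not aligned");
    }
    let end = ptr.checked_add(size).ok_or("pointer size overflow")?;
    if end > mem_len {
        return Err("pointer out of bounds");
    }
    Ok(ptr)
}

fn main() {
    assert_eq!(validate_inbounds(65536, 8, 4, 4), Ok(8)); // aligned, in bounds
    assert_eq!(validate_inbounds(65536, 6, 4, 4), Err("pointer not aligned"));
}
```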


@@ -83,7 +83,7 @@ impl Options {
         // Invoke the wasm malloc function using its raw and statically known
         // signature.
         let result = unsafe {
-            usize::try_from(crate::TypedFunc::<(u32, u32, u32, u32), u32>::call_raw(
+            crate::TypedFunc::<(u32, u32, u32, u32), u32>::call_raw(
                 store,
                 realloc,
                 (
@@ -92,9 +92,14 @@ impl Options {
                     old_align,
                     u32::try_from(new_size)?,
                 ),
-            )?)?
+            )?
         };
+        if result % old_align != 0 {
+            bail!("realloc return: result not aligned");
+        }
+        let result = usize::try_from(result)?;
         let memory = self.memory_mut(store.0);
         let result_slice = match memory.get_mut(result..).and_then(|s| s.get_mut(..new_size)) {
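
The new `result % old_align` check encodes a contract on the guest's
realloc: whatever pointer it returns must honor the alignment it was
handed. A hypothetical illustration of that contract with made-up
addresses:

```rust
// Hypothetical values; the host now rejects a realloc return that does
// not satisfy the requested alignment instead of trusting the pointer.
fn check_realloc_return(result: u32, align: u32) -> Result<u32, &'static str> {
    if result % align != 0 {
        return Err("realloc return: result not aligned");
    }
    Ok(result)
}

fn main() {
    assert!(check_realloc_return(0x1008, 8).is_ok());
    assert!(check_realloc_return(0x1004, 8).is_err()); // 8-byte alignment violated
}
```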


@@ -236,7 +236,11 @@ where
 fn lift_heap_result(store: &StoreOpaque, options: &Options, dst: &ValRaw) -> Result<Return> {
     assert!(Return::flatten_count() > MAX_STACK_RESULTS);
     // FIXME: needs to read an i64 for memory64
-    let ptr = usize::try_from(dst.get_u32()).unwrap();
+    let ptr = usize::try_from(dst.get_u32())?;
+    if ptr % usize::try_from(Return::align())? != 0 {
+        bail!("return pointer not aligned");
+    }
     let memory = Memory::new(store, options);
     let bytes = memory
         .as_slice()
@@ -704,6 +708,7 @@ macro_rules! integers {
             }
             fn store<T>(&self, memory: &mut MemoryMut<'_, T>, offset: usize) -> Result<()> {
+                debug_assert!(offset % Self::size() == 0);
                 *memory.get(offset) = self.to_le_bytes();
                 Ok(())
             }
@@ -717,6 +722,7 @@
             #[inline]
             fn load(_mem: &Memory<'_>, bytes: &[u8]) -> Result<Self> {
+                debug_assert!((bytes.as_ptr() as usize) % Self::size() == 0);
                 Ok($primitive::from_le_bytes(bytes.try_into().unwrap()))
             }
         }
@@ -780,6 +786,7 @@ macro_rules! floats {
             }
             fn store<T>(&self, memory: &mut MemoryMut<'_, T>, offset: usize) -> Result<()> {
+                debug_assert!(offset % Self::size() == 0);
                 let ptr = memory.get(offset);
                 *ptr = canonicalize(*self).to_bits().to_le_bytes();
                 Ok(())
@@ -794,6 +801,7 @@
             #[inline]
             fn load(_mem: &Memory<'_>, bytes: &[u8]) -> Result<Self> {
+                debug_assert!((bytes.as_ptr() as usize) % Self::size() == 0);
                 Ok(canonicalize($float::from_le_bytes(bytes.try_into().unwrap())))
             }
         }
@@ -838,6 +846,7 @@ unsafe impl Lower for bool {
     }
     fn store<T>(&self, memory: &mut MemoryMut<'_, T>, offset: usize) -> Result<()> {
+        debug_assert!(offset % Self::size() == 0);
         memory.get::<1>(offset)[0] = *self as u8;
         Ok(())
     }
@@ -894,6 +903,7 @@ unsafe impl Lower for char {
     }
     fn store<T>(&self, memory: &mut MemoryMut<'_, T>, offset: usize) -> Result<()> {
+        debug_assert!(offset % Self::size() == 0);
         *memory.get::<4>(offset) = u32::from(*self).to_le_bytes();
         Ok(())
     }
@@ -907,6 +917,7 @@ unsafe impl Lift for char {
     #[inline]
     fn load(_memory: &Memory<'_>, bytes: &[u8]) -> Result<Self> {
+        debug_assert!((bytes.as_ptr() as usize) % Self::size() == 0);
         let bits = u32::from_le_bytes(bytes.try_into().unwrap());
         Ok(char::try_from(bits)?)
     }
@@ -948,6 +959,7 @@ unsafe impl Lower for str {
     }
     fn store<T>(&self, mem: &mut MemoryMut<'_, T>, offset: usize) -> Result<()> {
+        debug_assert!(offset % (Self::align() as usize) == 0);
         let (ptr, len) = lower_string(mem, self)?;
         // FIXME: needs memory64 handling
         *mem.get(offset + 0) = (ptr as i32).to_le_bytes();
@@ -1109,6 +1121,7 @@ unsafe impl Lift for WasmStr {
     }
     fn load(memory: &Memory<'_>, bytes: &[u8]) -> Result<Self> {
+        debug_assert!((bytes.as_ptr() as usize) % (Self::align() as usize) == 0);
         // FIXME: needs memory64 treatment
         let ptr = u32::from_le_bytes(bytes[..4].try_into().unwrap());
         let len = u32::from_le_bytes(bytes[4..].try_into().unwrap());
@@ -1160,6 +1173,7 @@ where
     }
     fn store<U>(&self, mem: &mut MemoryMut<'_, U>, offset: usize) -> Result<()> {
+        debug_assert!(offset % (Self::align() as usize) == 0);
         let (ptr, len) = lower_list(mem, self)?;
         *mem.get(offset + 0) = (ptr as i32).to_le_bytes();
         *mem.get(offset + 4) = (len as i32).to_le_bytes();
@@ -1225,6 +1239,9 @@ impl<T: Lift> WasmList<T> {
             Some(n) if n <= memory.as_slice().len() => {}
             _ => bail!("list pointer/length out of bounds of memory"),
         }
+        if ptr % usize::try_from(T::align())? != 0 {
+            bail!("list pointer is not aligned")
+        }
         Ok(WasmList {
             ptr,
             len,
@@ -1324,6 +1341,7 @@ unsafe impl<T: Lift> Lift for WasmList<T> {
     }
     fn load(memory: &Memory<'_>, bytes: &[u8]) -> Result<Self> {
+        debug_assert!((bytes.as_ptr() as usize) % (Self::align() as usize) == 0);
         // FIXME: needs memory64 treatment
         let ptr = u32::from_le_bytes(bytes[..4].try_into().unwrap());
         let len = u32::from_le_bytes(bytes[4..].try_into().unwrap());
@@ -1432,6 +1450,7 @@ where
     }
     fn store<U>(&self, mem: &mut MemoryMut<'_, U>, offset: usize) -> Result<()> {
+        debug_assert!(offset % (Self::align() as usize) == 0);
         match self {
             None => {
                 mem.get::<1>(offset)[0] = 0;
@@ -1458,6 +1477,7 @@ where
     }
     fn load(memory: &Memory<'_>, bytes: &[u8]) -> Result<Self> {
+        debug_assert!((bytes.as_ptr() as usize) % (Self::align() as usize) == 0);
         let discrim = bytes[0];
         let payload = &bytes[align_to(1, T::align())..];
         match discrim {
@@ -1555,6 +1575,7 @@ where
     }
     fn store<U>(&self, mem: &mut MemoryMut<'_, U>, offset: usize) -> Result<()> {
+        debug_assert!(offset % (Self::align() as usize) == 0);
         match self {
             Ok(e) => {
                 mem.get::<1>(offset)[0] = 0;
@@ -1602,7 +1623,8 @@ where
     }
     fn load(memory: &Memory<'_>, bytes: &[u8]) -> Result<Self> {
-        let align = <Result<T, E> as ComponentType>::align();
+        debug_assert!((bytes.as_ptr() as usize) % (Self::align() as usize) == 0);
+        let align = Self::align();
         let discrim = bytes[0];
         let payload = &bytes[align_to(1, align)..];
         match discrim {
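
The `align_to(1, align)` calls above compute where the payload lands
after the 1-byte discriminant of an option or result. A sketch of that
helper's behavior, assuming it is the usual round-up-to-multiple helper
and that alignments are powers of two (as canonical-ABI alignments are):

```rust
/// Sketch: round `offset` up to the next multiple of `align`.
fn align_to(offset: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (offset + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(align_to(1, 4), 4); // e.g. Result<u32, _>: payload at byte 4
    assert_eq!(align_to(1, 1), 1); // e.g. Result<bool, _>: payload at byte 1
}
```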
@@ -1674,9 +1696,8 @@ macro_rules! impl_component_ty_for_tuples {
             }
             fn store<U>(&self, memory: &mut MemoryMut<'_, U>, mut offset: usize) -> Result<()> {
+                debug_assert!(offset % (Self::align() as usize) == 0);
                 let ($($t,)*) = self;
-                // TODO: this requires that `offset` is aligned which we may not
-                // want to do
                 $($t.store(memory, next_field::<$t>(&mut offset))?;)*
                 Ok(())
             }
@@ -1691,6 +1712,7 @@ macro_rules! impl_component_ty_for_tuples {
             }
             fn load(memory: &Memory<'_>, bytes: &[u8]) -> Result<Self> {
+                debug_assert!((bytes.as_ptr() as usize) % (Self::align() as usize) == 0);
                 let mut offset = 0;
                 $(let $t = $t::load(memory, &bytes[next_field::<$t>(&mut offset)..][..$t::size()])?;)*
                 Ok(($($t,)*))