wasi: avoid buffer underflow with shared memory (#5543)
* wasi: avoid buffer underflow with shared memory

  This change fixes an issue identified when using wasi-threads to perform file reads. In order to maintain Rust safety guarantees in the presence of WebAssembly shared memory, which can be modified concurrently by any of the running threads, the WASI implementations of `fd_read` and `fd_pread` were given special code paths when shared memory is detected: in these cases, the data is first read into a host-limited buffer and then subsequently copied into linear memory.

  The problem was that the rather complex logic for doing this "buffer then copy" idea for multiple IO vectors could fail due to buffer underflow. If, e.g., a read was limited by the host to 64K (or even if the read returned fewer bytes than the total buffer size), the `UnsafeGuestSlice::copy_from_slice` logic would fail, complaining that the sizes of the two buffers were unequal. This change both simplifies and fixes the logic:

  - only the first IO vector is filled; this could represent a performance penalty for threaded programs, but the "buffer then copy" idea already imposes a non-trivial overhead. This simplifies the logic, allowing us to...
  - resize the shared memory buffer to the exact number of bytes read

* review: early return when no IO vectors passed to shared memory

* fix: add empty RoFlags on early exit
This commit is contained in:
@@ -533,12 +533,12 @@ impl wasi_unstable::WasiUnstable for WasiCtx {
|
|||||||
.get_file_mut(u32::from(fd))?
|
.get_file_mut(u32::from(fd))?
|
||||||
.get_cap_mut(FileCaps::READ)?;
|
.get_cap_mut(FileCaps::READ)?;
|
||||||
|
|
||||||
let iovs: Vec<wiggle::UnsafeGuestSlice<u8>> = iovs
|
let iovs: Vec<wiggle::GuestPtr<[u8]>> = iovs
|
||||||
.iter()
|
.iter()
|
||||||
.map(|iov_ptr| {
|
.map(|iov_ptr| {
|
||||||
let iov_ptr = iov_ptr?;
|
let iov_ptr = iov_ptr?;
|
||||||
let iov: types::Iovec = iov_ptr.read()?;
|
let iov: types::Iovec = iov_ptr.read()?;
|
||||||
Ok(iov.buf.as_array(iov.buf_len).as_unsafe_slice_mut()?)
|
Ok(iov.buf.as_array(iov.buf_len))
|
||||||
})
|
})
|
||||||
.collect::<Result<_, Error>>()?;
|
.collect::<Result<_, Error>>()?;
|
||||||
|
|
||||||
@@ -560,24 +560,20 @@ impl wasi_unstable::WasiUnstable for WasiCtx {
|
|||||||
.and_then(|s| Some(s.is_shared_memory()))
|
.and_then(|s| Some(s.is_shared_memory()))
|
||||||
.unwrap_or(false);
|
.unwrap_or(false);
|
||||||
let bytes_read: u64 = if is_shared_memory {
|
let bytes_read: u64 = if is_shared_memory {
|
||||||
// Read into an intermediate buffer.
|
// For shared memory, read into an intermediate buffer. Only the
|
||||||
let total_available_size = iovs.iter().fold(0, |a, s| a + s.len());
|
// first iov will be filled and even then the read is capped by the
|
||||||
let mut buffer = vec![0; total_available_size.min(MAX_SHARED_BUFFER_SIZE)];
|
// `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
|
||||||
let bytes_read = f.read_vectored(&mut [IoSliceMut::new(&mut buffer)]).await?;
|
let iov = iovs.into_iter().next();
|
||||||
|
if let Some(iov) = iov {
|
||||||
// Copy the intermediate buffer into the Wasm shared memory--`iov`
|
let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
|
||||||
// by `iov`.
|
let bytes_read = f.read_vectored(&mut [IoSliceMut::new(&mut buffer)]).await?;
|
||||||
let mut data_to_write = &buffer[0..];
|
iov.get_range(0..bytes_read.try_into()?)
|
||||||
for iov in iovs.into_iter() {
|
.expect("it should always be possible to slice the iov smaller")
|
||||||
let len = data_to_write.len().min(iov.len());
|
.copy_from_slice(&buffer[0..bytes_read.try_into()?])?;
|
||||||
iov.copy_from_slice(&data_to_write[0..len])?;
|
bytes_read
|
||||||
data_to_write = &data_to_write[len..];
|
} else {
|
||||||
if data_to_write.is_empty() {
|
return Ok(0);
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bytes_read
|
|
||||||
} else {
|
} else {
|
||||||
// Convert all of the unsafe guest slices to safe ones--this uses
|
// Convert all of the unsafe guest slices to safe ones--this uses
|
||||||
// Wiggle's internal borrow checker to ensure no overlaps. We assume
|
// Wiggle's internal borrow checker to ensure no overlaps. We assume
|
||||||
@@ -610,12 +606,12 @@ impl wasi_unstable::WasiUnstable for WasiCtx {
|
|||||||
.get_file_mut(u32::from(fd))?
|
.get_file_mut(u32::from(fd))?
|
||||||
.get_cap_mut(FileCaps::READ | FileCaps::SEEK)?;
|
.get_cap_mut(FileCaps::READ | FileCaps::SEEK)?;
|
||||||
|
|
||||||
let iovs: Vec<wiggle::UnsafeGuestSlice<u8>> = iovs
|
let iovs: Vec<wiggle::GuestPtr<[u8]>> = iovs
|
||||||
.iter()
|
.iter()
|
||||||
.map(|iov_ptr| {
|
.map(|iov_ptr| {
|
||||||
let iov_ptr = iov_ptr?;
|
let iov_ptr = iov_ptr?;
|
||||||
let iov: types::Iovec = iov_ptr.read()?;
|
let iov: types::Iovec = iov_ptr.read()?;
|
||||||
Ok(iov.buf.as_array(iov.buf_len).as_unsafe_slice_mut()?)
|
Ok(iov.buf.as_array(iov.buf_len))
|
||||||
})
|
})
|
||||||
.collect::<Result<_, Error>>()?;
|
.collect::<Result<_, Error>>()?;
|
||||||
|
|
||||||
@@ -637,26 +633,22 @@ impl wasi_unstable::WasiUnstable for WasiCtx {
|
|||||||
.and_then(|s| Some(s.is_shared_memory()))
|
.and_then(|s| Some(s.is_shared_memory()))
|
||||||
.unwrap_or(false);
|
.unwrap_or(false);
|
||||||
let bytes_read: u64 = if is_shared_memory {
|
let bytes_read: u64 = if is_shared_memory {
|
||||||
// Read into an intermediate buffer.
|
// For shared memory, read into an intermediate buffer. Only the
|
||||||
let total_available_size = iovs.iter().fold(0, |a, s| a + s.len());
|
// first iov will be filled and even then the read is capped by the
|
||||||
let mut buffer = vec![0; total_available_size.min(MAX_SHARED_BUFFER_SIZE)];
|
// `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
|
||||||
let bytes_read = f
|
let iov = iovs.into_iter().next();
|
||||||
.read_vectored_at(&mut [IoSliceMut::new(&mut buffer)], offset)
|
if let Some(iov) = iov {
|
||||||
.await?;
|
let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
|
||||||
|
let bytes_read = f
|
||||||
// Copy the intermediate buffer into the Wasm shared memory--`iov`
|
.read_vectored_at(&mut [IoSliceMut::new(&mut buffer)], offset)
|
||||||
// by `iov`.
|
.await?;
|
||||||
let mut data_to_write = &buffer[0..];
|
iov.get_range(0..bytes_read.try_into()?)
|
||||||
for iov in iovs.into_iter() {
|
.expect("it should always be possible to slice the iov smaller")
|
||||||
let len = data_to_write.len().min(iov.len());
|
.copy_from_slice(&buffer[0..bytes_read.try_into()?])?;
|
||||||
iov.copy_from_slice(&data_to_write[0..len])?;
|
bytes_read
|
||||||
data_to_write = &data_to_write[len..];
|
} else {
|
||||||
if data_to_write.is_empty() {
|
return Ok(0);
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bytes_read
|
|
||||||
} else {
|
} else {
|
||||||
// Convert all of the unsafe guest slices to safe ones--this uses
|
// Convert all of the unsafe guest slices to safe ones--this uses
|
||||||
// Wiggle's internal borrow checker to ensure no overlaps. We assume
|
// Wiggle's internal borrow checker to ensure no overlaps. We assume
|
||||||
|
|||||||
@@ -299,12 +299,12 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
|
|||||||
.get_file_mut(u32::from(fd))?
|
.get_file_mut(u32::from(fd))?
|
||||||
.get_cap_mut(FileCaps::READ)?;
|
.get_cap_mut(FileCaps::READ)?;
|
||||||
|
|
||||||
let iovs: Vec<wiggle::UnsafeGuestSlice<u8>> = iovs
|
let iovs: Vec<wiggle::GuestPtr<[u8]>> = iovs
|
||||||
.iter()
|
.iter()
|
||||||
.map(|iov_ptr| {
|
.map(|iov_ptr| {
|
||||||
let iov_ptr = iov_ptr?;
|
let iov_ptr = iov_ptr?;
|
||||||
let iov: types::Iovec = iov_ptr.read()?;
|
let iov: types::Iovec = iov_ptr.read()?;
|
||||||
Ok(iov.buf.as_array(iov.buf_len).as_unsafe_slice_mut()?)
|
Ok(iov.buf.as_array(iov.buf_len))
|
||||||
})
|
})
|
||||||
.collect::<Result<_, Error>>()?;
|
.collect::<Result<_, Error>>()?;
|
||||||
|
|
||||||
@@ -326,24 +326,20 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
|
|||||||
.and_then(|s| Some(s.is_shared_memory()))
|
.and_then(|s| Some(s.is_shared_memory()))
|
||||||
.unwrap_or(false);
|
.unwrap_or(false);
|
||||||
let bytes_read: u64 = if is_shared_memory {
|
let bytes_read: u64 = if is_shared_memory {
|
||||||
// Read into an intermediate buffer.
|
// For shared memory, read into an intermediate buffer. Only the
|
||||||
let total_available_size = iovs.iter().fold(0, |a, s| a + s.len());
|
// first iov will be filled and even then the read is capped by the
|
||||||
let mut buffer = vec![0; total_available_size.min(MAX_SHARED_BUFFER_SIZE)];
|
// `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
|
||||||
let bytes_read = f.read_vectored(&mut [IoSliceMut::new(&mut buffer)]).await?;
|
let iov = iovs.into_iter().next();
|
||||||
|
if let Some(iov) = iov {
|
||||||
// Copy the intermediate buffer into the Wasm shared memory--`iov`
|
let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
|
||||||
// by `iov`.
|
let bytes_read = f.read_vectored(&mut [IoSliceMut::new(&mut buffer)]).await?;
|
||||||
let mut data_to_write = &buffer[0..];
|
iov.get_range(0..bytes_read.try_into()?)
|
||||||
for iov in iovs.into_iter() {
|
.expect("it should always be possible to slice the iov smaller")
|
||||||
let len = data_to_write.len().min(iov.len());
|
.copy_from_slice(&buffer[0..bytes_read.try_into()?])?;
|
||||||
iov.copy_from_slice(&data_to_write[0..len])?;
|
bytes_read
|
||||||
data_to_write = &data_to_write[len..];
|
} else {
|
||||||
if data_to_write.is_empty() {
|
return Ok(0);
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bytes_read
|
|
||||||
} else {
|
} else {
|
||||||
// Convert all of the unsafe guest slices to safe ones--this uses
|
// Convert all of the unsafe guest slices to safe ones--this uses
|
||||||
// Wiggle's internal borrow checker to ensure no overlaps. We assume
|
// Wiggle's internal borrow checker to ensure no overlaps. We assume
|
||||||
@@ -376,12 +372,12 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
|
|||||||
.get_file_mut(u32::from(fd))?
|
.get_file_mut(u32::from(fd))?
|
||||||
.get_cap_mut(FileCaps::READ | FileCaps::SEEK)?;
|
.get_cap_mut(FileCaps::READ | FileCaps::SEEK)?;
|
||||||
|
|
||||||
let iovs: Vec<wiggle::UnsafeGuestSlice<u8>> = iovs
|
let iovs: Vec<wiggle::GuestPtr<[u8]>> = iovs
|
||||||
.iter()
|
.iter()
|
||||||
.map(|iov_ptr| {
|
.map(|iov_ptr| {
|
||||||
let iov_ptr = iov_ptr?;
|
let iov_ptr = iov_ptr?;
|
||||||
let iov: types::Iovec = iov_ptr.read()?;
|
let iov: types::Iovec = iov_ptr.read()?;
|
||||||
Ok(iov.buf.as_array(iov.buf_len).as_unsafe_slice_mut()?)
|
Ok(iov.buf.as_array(iov.buf_len))
|
||||||
})
|
})
|
||||||
.collect::<Result<_, Error>>()?;
|
.collect::<Result<_, Error>>()?;
|
||||||
|
|
||||||
@@ -403,26 +399,22 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
|
|||||||
.and_then(|s| Some(s.is_shared_memory()))
|
.and_then(|s| Some(s.is_shared_memory()))
|
||||||
.unwrap_or(false);
|
.unwrap_or(false);
|
||||||
let bytes_read: u64 = if is_shared_memory {
|
let bytes_read: u64 = if is_shared_memory {
|
||||||
// Read into an intermediate buffer.
|
// For shared memory, read into an intermediate buffer. Only the
|
||||||
let total_available_size = iovs.iter().fold(0, |a, s| a + s.len());
|
// first iov will be filled and even then the read is capped by the
|
||||||
let mut buffer = vec![0; total_available_size.min(MAX_SHARED_BUFFER_SIZE)];
|
// `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
|
||||||
let bytes_read = f
|
let iov = iovs.into_iter().next();
|
||||||
.read_vectored_at(&mut [IoSliceMut::new(&mut buffer)], offset)
|
if let Some(iov) = iov {
|
||||||
.await?;
|
let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
|
||||||
|
let bytes_read = f
|
||||||
// Copy the intermediate buffer into the Wasm shared memory--`iov`
|
.read_vectored_at(&mut [IoSliceMut::new(&mut buffer)], offset)
|
||||||
// by `iov`.
|
.await?;
|
||||||
let mut data_to_write = &buffer[0..];
|
iov.get_range(0..bytes_read.try_into()?)
|
||||||
for iov in iovs.into_iter() {
|
.expect("it should always be possible to slice the iov smaller")
|
||||||
let len = data_to_write.len().min(iov.len());
|
.copy_from_slice(&buffer[0..bytes_read.try_into()?])?;
|
||||||
iov.copy_from_slice(&data_to_write[0..len])?;
|
bytes_read
|
||||||
data_to_write = &data_to_write[len..];
|
} else {
|
||||||
if data_to_write.is_empty() {
|
return Ok(0);
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bytes_read
|
|
||||||
} else {
|
} else {
|
||||||
// Convert all of the unsafe guest slices to safe ones--this uses
|
// Convert all of the unsafe guest slices to safe ones--this uses
|
||||||
// Wiggle's internal borrow checker to ensure no overlaps. We assume
|
// Wiggle's internal borrow checker to ensure no overlaps. We assume
|
||||||
@@ -1169,12 +1161,12 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
|
|||||||
.get_file_mut(u32::from(fd))?
|
.get_file_mut(u32::from(fd))?
|
||||||
.get_cap_mut(FileCaps::READ)?;
|
.get_cap_mut(FileCaps::READ)?;
|
||||||
|
|
||||||
let iovs: Vec<wiggle::UnsafeGuestSlice<u8>> = ri_data
|
let iovs: Vec<wiggle::GuestPtr<[u8]>> = ri_data
|
||||||
.iter()
|
.iter()
|
||||||
.map(|iov_ptr| {
|
.map(|iov_ptr| {
|
||||||
let iov_ptr = iov_ptr?;
|
let iov_ptr = iov_ptr?;
|
||||||
let iov: types::Iovec = iov_ptr.read()?;
|
let iov: types::Iovec = iov_ptr.read()?;
|
||||||
Ok(iov.buf.as_array(iov.buf_len).as_unsafe_slice_mut()?)
|
Ok(iov.buf.as_array(iov.buf_len))
|
||||||
})
|
})
|
||||||
.collect::<Result<_, Error>>()?;
|
.collect::<Result<_, Error>>()?;
|
||||||
|
|
||||||
@@ -1196,26 +1188,22 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
|
|||||||
.and_then(|s| Some(s.is_shared_memory()))
|
.and_then(|s| Some(s.is_shared_memory()))
|
||||||
.unwrap_or(false);
|
.unwrap_or(false);
|
||||||
let (bytes_read, ro_flags) = if is_shared_memory {
|
let (bytes_read, ro_flags) = if is_shared_memory {
|
||||||
// Read into an intermediate buffer.
|
// For shared memory, read into an intermediate buffer. Only the
|
||||||
let total_available_size = iovs.iter().fold(0, |a, s| a + s.len());
|
// first iov will be filled and even then the read is capped by the
|
||||||
let mut buffer = vec![0; total_available_size.min(MAX_SHARED_BUFFER_SIZE)];
|
// `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
|
||||||
let (bytes_read, ro_flags) = f
|
let iov = iovs.into_iter().next();
|
||||||
.sock_recv(&mut [IoSliceMut::new(&mut buffer)], RiFlags::from(ri_flags))
|
if let Some(iov) = iov {
|
||||||
.await?;
|
let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
|
||||||
|
let (bytes_read, ro_flags) = f
|
||||||
// Copy the intermediate buffer into the Wasm shared memory--`iov`
|
.sock_recv(&mut [IoSliceMut::new(&mut buffer)], RiFlags::from(ri_flags))
|
||||||
// by `iov`.
|
.await?;
|
||||||
let mut data_to_write = &buffer[0..];
|
iov.get_range(0..bytes_read.try_into()?)
|
||||||
for iov in iovs.into_iter() {
|
.expect("it should always be possible to slice the iov smaller")
|
||||||
let len = data_to_write.len().min(iov.len());
|
.copy_from_slice(&buffer[0..bytes_read.try_into()?])?;
|
||||||
iov.copy_from_slice(&data_to_write[0..len])?;
|
(bytes_read, ro_flags)
|
||||||
data_to_write = &data_to_write[len..];
|
} else {
|
||||||
if data_to_write.is_empty() {
|
return Ok((0, RoFlags::empty().into()));
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
(bytes_read, ro_flags)
|
|
||||||
} else {
|
} else {
|
||||||
// Convert all of the unsafe guest slices to safe ones--this uses
|
// Convert all of the unsafe guest slices to safe ones--this uses
|
||||||
// Wiggle's internal borrow checker to ensure no overlaps. We assume
|
// Wiggle's internal borrow checker to ensure no overlaps. We assume
|
||||||
|
|||||||
Reference in New Issue
Block a user