diff --git a/crates/wasi-common/src/snapshots/preview_0.rs b/crates/wasi-common/src/snapshots/preview_0.rs
index 0295777daa..763f685fcc 100644
--- a/crates/wasi-common/src/snapshots/preview_0.rs
+++ b/crates/wasi-common/src/snapshots/preview_0.rs
@@ -533,12 +533,12 @@ impl wasi_unstable::WasiUnstable for WasiCtx {
             .get_file_mut(u32::from(fd))?
             .get_cap_mut(FileCaps::READ)?;
 
-        let iovs: Vec<wiggle::UnsafeGuestSlice<'a, u8>> = iovs
+        let iovs: Vec<wiggle::GuestPtr<'a, [u8]>> = iovs
             .iter()
             .map(|iov_ptr| {
                 let iov_ptr = iov_ptr?;
                 let iov: types::Iovec = iov_ptr.read()?;
-                Ok(iov.buf.as_array(iov.buf_len).as_unsafe_slice_mut()?)
+                Ok(iov.buf.as_array(iov.buf_len))
             })
             .collect::<Result<_, Error>>()?;
 
@@ -560,24 +560,20 @@ impl wasi_unstable::WasiUnstable for WasiCtx {
             .and_then(|s| Some(s.is_shared_memory()))
             .unwrap_or(false);
         let bytes_read: u64 = if is_shared_memory {
-            // Read into an intermediate buffer.
-            let total_available_size = iovs.iter().fold(0, |a, s| a + s.len());
-            let mut buffer = vec![0; total_available_size.min(MAX_SHARED_BUFFER_SIZE)];
-            let bytes_read = f.read_vectored(&mut [IoSliceMut::new(&mut buffer)]).await?;
-
-            // Copy the intermediate buffer into the Wasm shared memory--`iov`
-            // by `iov`.
-            let mut data_to_write = &buffer[0..];
-            for iov in iovs.into_iter() {
-                let len = data_to_write.len().min(iov.len());
-                iov.copy_from_slice(&data_to_write[0..len])?;
-                data_to_write = &data_to_write[len..];
-                if data_to_write.is_empty() {
-                    break;
-                }
+            // For shared memory, read into an intermediate buffer. Only the
+            // first iov will be filled and even then the read is capped by the
+            // `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
+            let iov = iovs.into_iter().next();
+            if let Some(iov) = iov {
+                let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
+                let bytes_read = f.read_vectored(&mut [IoSliceMut::new(&mut buffer)]).await?;
+                iov.get_range(0..bytes_read.try_into()?)
+                    .expect("it should always be possible to slice the iov smaller")
+                    .copy_from_slice(&buffer[0..bytes_read.try_into()?])?;
+                bytes_read
+            } else {
+                return Ok(0);
             }
-
-            bytes_read
         } else {
             // Convert all of the unsafe guest slices to safe ones--this uses
             // Wiggle's internal borrow checker to ensure no overlaps. We assume
@@ -610,12 +606,12 @@ impl wasi_unstable::WasiUnstable for WasiCtx {
             .get_file_mut(u32::from(fd))?
             .get_cap_mut(FileCaps::READ | FileCaps::SEEK)?;
 
-        let iovs: Vec<wiggle::UnsafeGuestSlice<'a, u8>> = iovs
+        let iovs: Vec<wiggle::GuestPtr<'a, [u8]>> = iovs
             .iter()
             .map(|iov_ptr| {
                 let iov_ptr = iov_ptr?;
                 let iov: types::Iovec = iov_ptr.read()?;
-                Ok(iov.buf.as_array(iov.buf_len).as_unsafe_slice_mut()?)
+                Ok(iov.buf.as_array(iov.buf_len))
             })
             .collect::<Result<_, Error>>()?;
 
@@ -637,26 +633,22 @@ impl wasi_unstable::WasiUnstable for WasiCtx {
             .and_then(|s| Some(s.is_shared_memory()))
             .unwrap_or(false);
         let bytes_read: u64 = if is_shared_memory {
-            // Read into an intermediate buffer.
-            let total_available_size = iovs.iter().fold(0, |a, s| a + s.len());
-            let mut buffer = vec![0; total_available_size.min(MAX_SHARED_BUFFER_SIZE)];
-            let bytes_read = f
-                .read_vectored_at(&mut [IoSliceMut::new(&mut buffer)], offset)
-                .await?;
-
-            // Copy the intermediate buffer into the Wasm shared memory--`iov`
-            // by `iov`.
-            let mut data_to_write = &buffer[0..];
-            for iov in iovs.into_iter() {
-                let len = data_to_write.len().min(iov.len());
-                iov.copy_from_slice(&data_to_write[0..len])?;
-                data_to_write = &data_to_write[len..];
-                if data_to_write.is_empty() {
-                    break;
-                }
+            // For shared memory, read into an intermediate buffer. Only the
+            // first iov will be filled and even then the read is capped by the
+            // `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
+            let iov = iovs.into_iter().next();
+            if let Some(iov) = iov {
+                let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
+                let bytes_read = f
+                    .read_vectored_at(&mut [IoSliceMut::new(&mut buffer)], offset)
+                    .await?;
+                iov.get_range(0..bytes_read.try_into()?)
+                    .expect("it should always be possible to slice the iov smaller")
+                    .copy_from_slice(&buffer[0..bytes_read.try_into()?])?;
+                bytes_read
+            } else {
+                return Ok(0);
             }
-
-            bytes_read
         } else {
             // Convert all of the unsafe guest slices to safe ones--this uses
             // Wiggle's internal borrow checker to ensure no overlaps. We assume
diff --git a/crates/wasi-common/src/snapshots/preview_1.rs b/crates/wasi-common/src/snapshots/preview_1.rs
index 0eee1df3a1..eac6d8544e 100644
--- a/crates/wasi-common/src/snapshots/preview_1.rs
+++ b/crates/wasi-common/src/snapshots/preview_1.rs
@@ -299,12 +299,12 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
             .get_file_mut(u32::from(fd))?
             .get_cap_mut(FileCaps::READ)?;
 
-        let iovs: Vec<wiggle::UnsafeGuestSlice<'a, u8>> = iovs
+        let iovs: Vec<wiggle::GuestPtr<'a, [u8]>> = iovs
             .iter()
             .map(|iov_ptr| {
                 let iov_ptr = iov_ptr?;
                 let iov: types::Iovec = iov_ptr.read()?;
-                Ok(iov.buf.as_array(iov.buf_len).as_unsafe_slice_mut()?)
+                Ok(iov.buf.as_array(iov.buf_len))
             })
             .collect::<Result<_, Error>>()?;
 
@@ -326,24 +326,20 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
             .and_then(|s| Some(s.is_shared_memory()))
             .unwrap_or(false);
         let bytes_read: u64 = if is_shared_memory {
-            // Read into an intermediate buffer.
-            let total_available_size = iovs.iter().fold(0, |a, s| a + s.len());
-            let mut buffer = vec![0; total_available_size.min(MAX_SHARED_BUFFER_SIZE)];
-            let bytes_read = f.read_vectored(&mut [IoSliceMut::new(&mut buffer)]).await?;
-
-            // Copy the intermediate buffer into the Wasm shared memory--`iov`
-            // by `iov`.
-            let mut data_to_write = &buffer[0..];
-            for iov in iovs.into_iter() {
-                let len = data_to_write.len().min(iov.len());
-                iov.copy_from_slice(&data_to_write[0..len])?;
-                data_to_write = &data_to_write[len..];
-                if data_to_write.is_empty() {
-                    break;
-                }
+            // For shared memory, read into an intermediate buffer. Only the
+            // first iov will be filled and even then the read is capped by the
+            // `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
+            let iov = iovs.into_iter().next();
+            if let Some(iov) = iov {
+                let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
+                let bytes_read = f.read_vectored(&mut [IoSliceMut::new(&mut buffer)]).await?;
+                iov.get_range(0..bytes_read.try_into()?)
+                    .expect("it should always be possible to slice the iov smaller")
+                    .copy_from_slice(&buffer[0..bytes_read.try_into()?])?;
+                bytes_read
+            } else {
+                return Ok(0);
             }
-
-            bytes_read
         } else {
             // Convert all of the unsafe guest slices to safe ones--this uses
             // Wiggle's internal borrow checker to ensure no overlaps. We assume
@@ -376,12 +372,12 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
             .get_file_mut(u32::from(fd))?
             .get_cap_mut(FileCaps::READ | FileCaps::SEEK)?;
 
-        let iovs: Vec<wiggle::UnsafeGuestSlice<'a, u8>> = iovs
+        let iovs: Vec<wiggle::GuestPtr<'a, [u8]>> = iovs
             .iter()
             .map(|iov_ptr| {
                 let iov_ptr = iov_ptr?;
                 let iov: types::Iovec = iov_ptr.read()?;
-                Ok(iov.buf.as_array(iov.buf_len).as_unsafe_slice_mut()?)
+                Ok(iov.buf.as_array(iov.buf_len))
             })
             .collect::<Result<_, Error>>()?;
 
@@ -403,26 +399,22 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
             .and_then(|s| Some(s.is_shared_memory()))
             .unwrap_or(false);
         let bytes_read: u64 = if is_shared_memory {
-            // Read into an intermediate buffer.
-            let total_available_size = iovs.iter().fold(0, |a, s| a + s.len());
-            let mut buffer = vec![0; total_available_size.min(MAX_SHARED_BUFFER_SIZE)];
-            let bytes_read = f
-                .read_vectored_at(&mut [IoSliceMut::new(&mut buffer)], offset)
-                .await?;
-
-            // Copy the intermediate buffer into the Wasm shared memory--`iov`
-            // by `iov`.
-            let mut data_to_write = &buffer[0..];
-            for iov in iovs.into_iter() {
-                let len = data_to_write.len().min(iov.len());
-                iov.copy_from_slice(&data_to_write[0..len])?;
-                data_to_write = &data_to_write[len..];
-                if data_to_write.is_empty() {
-                    break;
-                }
+            // For shared memory, read into an intermediate buffer. Only the
+            // first iov will be filled and even then the read is capped by the
+            // `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
+            let iov = iovs.into_iter().next();
+            if let Some(iov) = iov {
+                let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
+                let bytes_read = f
+                    .read_vectored_at(&mut [IoSliceMut::new(&mut buffer)], offset)
+                    .await?;
+                iov.get_range(0..bytes_read.try_into()?)
+                    .expect("it should always be possible to slice the iov smaller")
+                    .copy_from_slice(&buffer[0..bytes_read.try_into()?])?;
+                bytes_read
+            } else {
+                return Ok(0);
             }
-
-            bytes_read
         } else {
             // Convert all of the unsafe guest slices to safe ones--this uses
             // Wiggle's internal borrow checker to ensure no overlaps. We assume
@@ -1169,12 +1161,12 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
             .get_file_mut(u32::from(fd))?
             .get_cap_mut(FileCaps::READ)?;
 
-        let iovs: Vec<wiggle::UnsafeGuestSlice<'a, u8>> = ri_data
+        let iovs: Vec<wiggle::GuestPtr<'a, [u8]>> = ri_data
             .iter()
             .map(|iov_ptr| {
                 let iov_ptr = iov_ptr?;
                 let iov: types::Iovec = iov_ptr.read()?;
-                Ok(iov.buf.as_array(iov.buf_len).as_unsafe_slice_mut()?)
+                Ok(iov.buf.as_array(iov.buf_len))
             })
             .collect::<Result<_, Error>>()?;
 
@@ -1196,26 +1188,22 @@ impl wasi_snapshot_preview1::WasiSnapshotPreview1 for WasiCtx {
             .and_then(|s| Some(s.is_shared_memory()))
             .unwrap_or(false);
         let (bytes_read, ro_flags) = if is_shared_memory {
-            // Read into an intermediate buffer.
-            let total_available_size = iovs.iter().fold(0, |a, s| a + s.len());
-            let mut buffer = vec![0; total_available_size.min(MAX_SHARED_BUFFER_SIZE)];
-            let (bytes_read, ro_flags) = f
-                .sock_recv(&mut [IoSliceMut::new(&mut buffer)], RiFlags::from(ri_flags))
-                .await?;
-
-            // Copy the intermediate buffer into the Wasm shared memory--`iov`
-            // by `iov`.
-            let mut data_to_write = &buffer[0..];
-            for iov in iovs.into_iter() {
-                let len = data_to_write.len().min(iov.len());
-                iov.copy_from_slice(&data_to_write[0..len])?;
-                data_to_write = &data_to_write[len..];
-                if data_to_write.is_empty() {
-                    break;
-                }
+            // For shared memory, read into an intermediate buffer. Only the
+            // first iov will be filled and even then the read is capped by the
+            // `MAX_SHARED_BUFFER_SIZE`, so users are expected to re-call.
+            let iov = iovs.into_iter().next();
+            if let Some(iov) = iov {
+                let mut buffer = vec![0; (iov.len() as usize).min(MAX_SHARED_BUFFER_SIZE)];
+                let (bytes_read, ro_flags) = f
+                    .sock_recv(&mut [IoSliceMut::new(&mut buffer)], RiFlags::from(ri_flags))
+                    .await?;
+                iov.get_range(0..bytes_read.try_into()?)
+                    .expect("it should always be possible to slice the iov smaller")
+                    .copy_from_slice(&buffer[0..bytes_read.try_into()?])?;
+                (bytes_read, ro_flags)
+            } else {
+                return Ok((0, RoFlags::empty().into()));
             }
-
-            (bytes_read, ro_flags)
         } else {
             // Convert all of the unsafe guest slices to safe ones--this uses
             // Wiggle's internal borrow checker to ensure no overlaps. We assume
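
Note on the behavior the new comments describe: when the guest memory is shared, the host reads into a private intermediate buffer, fills only the first iovec, and caps each transfer at `MAX_SHARED_BUFFER_SIZE`, so a short read no longer implies end-of-stream and callers are expected to call back in for the rest. The sketch below only illustrates that re-call pattern from the caller's side; `read_fully` and `read_once` are hypothetical names, not part of this patch or of the wasi-common API.

```rust
/// Hypothetical helper showing the "re-call" expectation: each call may fill
/// only part of `buf` (at most one iovec, capped by the host's
/// MAX_SHARED_BUFFER_SIZE), so keep calling until the buffer is full or a
/// zero-length read signals there is nothing more to deliver.
///
/// `read_once` stands in for a single fd_read/fd_pread/sock_recv call made
/// with one iovec pointing at the still-unfilled tail of `buf`.
fn read_fully<E>(
    mut read_once: impl FnMut(&mut [u8]) -> Result<usize, E>,
    buf: &mut [u8],
) -> Result<usize, E> {
    let mut filled = 0;
    while filled < buf.len() {
        let n = read_once(&mut buf[filled..])?;
        if n == 0 {
            break; // end of stream, or nothing more available right now
        }
        filled += n;
    }
    Ok(filled)
}
```

A caller that treats any short read as end-of-file would silently truncate data when its buffers live in a shared memory, which is why the comment calls out the re-call expectation explicitly.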