More code review changes.

* Add more overflow checks in table/memory initialization (the checked-arithmetic pattern is sketched below the commit header).
* Add a comment for `with_allocation_strategy` explaining which `Config` options are ignored under the pooling strategy.
* Fix Wasmtime `Table` to not panic for type mismatches in `fill`/`copy`.
* Add tests for that fix.
Peter Huene
2021-03-05 00:47:49 -08:00
parent a4084db096
commit a7190764e1
5 changed files with 152 additions and 77 deletions
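
All of the new overflow checks below follow the same two-step shape: compute the segment start with `checked_add` instead of plain `+`, then compare the (also checked) end against the table or memory size before writing anything. A minimal standalone sketch of that pattern; the `segment_bounds` helper and `SegmentError` type are illustrative only, not Wasmtime APIs:

    // Sketch of the checked segment-bounds computation used in this commit.
    #[derive(Debug, PartialEq)]
    enum SegmentError {
        BaseOverflow, // static offset + global base does not fit in a usize
        OutOfBounds,  // segment would extend past the end of the table/memory
    }

    // Compute the segment's start (offset plus optional global base) and verify
    // that start + len fits within `limit`, using checked arithmetic throughout.
    fn segment_bounds(
        offset: usize,
        global_base: Option<u32>,
        len: usize,
        limit: usize,
    ) -> Result<std::ops::Range<usize>, SegmentError> {
        let start = match global_base {
            Some(base) => offset
                .checked_add(base as usize)
                .ok_or(SegmentError::BaseOverflow)?,
            None => offset,
        };
        match start.checked_add(len) {
            Some(end) if end <= limit => Ok(start..end),
            _ => Err(SegmentError::OutOfBounds),
        }
    }

    fn main() {
        // Overflow in the base computation is reported, not wrapped.
        assert_eq!(
            segment_bounds(usize::MAX, Some(1), 0, 10),
            Err(SegmentError::BaseOverflow)
        );
        // A segment that does not fit is rejected before anything is written.
        assert_eq!(segment_bounds(8, None, 4, 10), Err(SegmentError::OutOfBounds));
        // In-bounds segments yield the range to initialize.
        assert_eq!(segment_bounds(2, None, 3, 10), Ok(2..5));
    }

In the diff itself, a base overflow or an ill-fitting segment surfaces as a `LinkError` during the bounds checks and as a `TableOutOfBounds`/`HeapOutOfBounds` trap during initialization.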

@@ -168,10 +168,12 @@ pub unsafe trait InstanceAllocator: Send + Sync {
     unsafe fn deallocate_fiber_stack(&self, stack: *mut u8);
 }
 
-fn get_table_init_start(init: &TableInitializer, instance: &Instance) -> usize {
-    let mut start = init.offset;
-
-    if let Some(base) = init.base {
+fn get_table_init_start(
+    init: &TableInitializer,
+    instance: &Instance,
+) -> Result<usize, InstantiationError> {
+    match init.base {
+        Some(base) => {
             let val = unsafe {
                 if let Some(def_index) = instance.module.defined_global_index(base) {
                     *instance.global(def_index).as_u32()
@@ -179,22 +181,32 @@ fn get_table_init_start(init: &TableInitializer, instance: &Instance) -> usize {
                     *(*instance.imported_global(base).from).as_u32()
                 }
             };
-        start += usize::try_from(val).unwrap();
-    }
-
-    start
+
+            init.offset.checked_add(val as usize).ok_or_else(|| {
+                InstantiationError::Link(LinkError(
+                    "element segment global base overflows".to_owned(),
+                ))
+            })
+        }
+        None => Ok(init.offset),
+    }
 }
 
 fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError> {
     for init in &instance.module.table_initializers {
-        let start = get_table_init_start(init, instance);
         let table = instance.get_table(init.table_index);
-        let size = usize::try_from(table.size()).unwrap();
-        if size < start + init.elements.len() {
+        let start = get_table_init_start(init, instance)?;
+        let end = start.checked_add(init.elements.len());
+
+        match end {
+            Some(end) if end <= table.size() as usize => {
+                // Initializer is in bounds
+            }
+            _ => {
                 return Err(InstantiationError::Link(LinkError(
                     "table out of bounds: elements segment does not fit".to_owned(),
-                )));
+                )))
+            }
         }
     }
@@ -203,18 +215,12 @@ fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError
 fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
     for init in &instance.module.table_initializers {
-        let start = get_table_init_start(init, instance);
         let table = instance.get_table(init.table_index);
+        let start = get_table_init_start(init, instance)?;
+        let end = start.checked_add(init.elements.len());
 
-        if start
-            .checked_add(init.elements.len())
-            .map_or(true, |end| end > table.size() as usize)
-        {
-            return Err(InstantiationError::Trap(Trap::wasm(
-                ir::TrapCode::TableOutOfBounds,
-            )));
-        }
-
+        match end {
+            Some(end) if end <= table.size() as usize => {
                 for (i, func_idx) in init.elements.iter().enumerate() {
                     let item = match table.element_type() {
                         TableElementType::Func => instance
@@ -231,14 +237,23 @@ fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
                     table.set(u32::try_from(start + i).unwrap(), item).unwrap();
                 }
             }
+            _ => {
+                return Err(InstantiationError::Trap(Trap::wasm(
+                    ir::TrapCode::TableOutOfBounds,
+                )))
+            }
+        }
     }
 
     Ok(())
 }
 
-fn get_memory_init_start(init: &MemoryInitializer, instance: &Instance) -> usize {
-    let mut start = init.offset;
-
-    if let Some(base) = init.base {
+fn get_memory_init_start(
+    init: &MemoryInitializer,
+    instance: &Instance,
+) -> Result<usize, InstantiationError> {
+    match init.base {
+        Some(base) => {
             let val = unsafe {
                 if let Some(def_index) = instance.module.defined_global_index(base) {
                     *instance.global(def_index).as_u32()
@@ -246,10 +261,13 @@ fn get_memory_init_start(init: &MemoryInitializer, instance: &Instance) -> usize
                     *(*instance.imported_global(base).from).as_u32()
                 }
             };
-        start += usize::try_from(val).unwrap();
-    }
-
-    start
+
+            init.offset.checked_add(val as usize).ok_or_else(|| {
+                InstantiationError::Link(LinkError("data segment global base overflows".to_owned()))
+            })
+        }
+        None => Ok(init.offset),
+    }
 }
 
 unsafe fn get_memory_slice<'instance>(
@@ -267,7 +285,7 @@ unsafe fn get_memory_slice<'instance>(
         let foreign_index = foreign_instance.memory_index(foreign_memory);
         foreign_instance.memory(foreign_index)
     };
-    slice::from_raw_parts_mut(memory.base, memory.current_length)
+    &mut *ptr::slice_from_raw_parts_mut(memory.base, memory.current_length)
 }
 
 fn check_memory_init_bounds(
@@ -275,13 +293,18 @@ fn check_memory_init_bounds(
     initializers: &[MemoryInitializer],
 ) -> Result<(), InstantiationError> {
     for init in initializers {
-        let start = get_memory_init_start(init, instance);
-        unsafe {
-            let mem_slice = get_memory_slice(init, instance);
-            if mem_slice.get_mut(start..start + init.data.len()).is_none() {
+        let memory = instance.get_memory(init.memory_index);
+        let start = get_memory_init_start(init, instance)?;
+        let end = start.checked_add(init.data.len());
+
+        match end {
+            Some(end) if end <= memory.current_length => {
+                // Initializer is in bounds
+            }
+            _ => {
                 return Err(InstantiationError::Link(LinkError(
                     "memory out of bounds: data segment does not fit".into(),
-                )));
+                )))
             }
         }
     }
@@ -295,22 +318,19 @@ fn initialize_memories(
 ) -> Result<(), InstantiationError> {
     for init in initializers {
         let memory = instance.get_memory(init.memory_index);
+        let start = get_memory_init_start(init, instance)?;
+        let end = start.checked_add(init.data.len());
 
-        let start = get_memory_init_start(init, instance);
-        if start
-            .checked_add(init.data.len())
-            .map_or(true, |end| end > memory.current_length)
-        {
-            return Err(InstantiationError::Trap(Trap::wasm(
-                ir::TrapCode::HeapOutOfBounds,
-            )));
-        }
-
-        unsafe {
-            let mem_slice = get_memory_slice(init, instance);
-            let end = start + init.data.len();
-            let to_init = &mut mem_slice[start..end];
-            to_init.copy_from_slice(&init.data);
+        match end {
+            Some(end) if end <= memory.current_length => {
+                let mem_slice = unsafe { get_memory_slice(init, instance) };
+                mem_slice[start..end].copy_from_slice(&init.data);
+            }
+            _ => {
+                return Err(InstantiationError::Trap(Trap::wasm(
+                    ir::TrapCode::HeapOutOfBounds,
+                )))
+            }
         }
     }

@@ -826,6 +826,12 @@ impl Config {
     }
 
     /// Sets the instance allocation strategy to use.
+    ///
+    /// When using the pooling instance allocation strategy, all linear memories will be created as "static".
+    ///
+    /// This means the [`Config::static_memory_maximum_size`] and [`Config::static_memory_guard_size`] options
+    /// will be ignored in favor of [`InstanceLimits::memory_reservation_size`] when the pooling instance
+    /// allocation strategy is used.
     pub fn with_allocation_strategy(
         &mut self,
         strategy: InstanceAllocationStrategy,
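
For context, a rough sketch of how an embedder might opt into the pooling strategy this comment describes. Only `Config::with_allocation_strategy` and `InstanceLimits::memory_reservation_size` appear in the diff above; the shape of the `Pooling` variant, the `PoolingAllocationStrategy` and `ModuleLimits` names, and the chosen numbers are assumptions about the API of this era and may not match it exactly:

    use wasmtime::*;

    fn main() {
        let mut config = Config::new();
        // Assumed variant/field names for the pooling allocator configuration.
        config.with_allocation_strategy(InstanceAllocationStrategy::Pooling {
            strategy: PoolingAllocationStrategy::NextAvailable,
            module_limits: ModuleLimits::default(),
            instance_limits: InstanceLimits {
                // With pooling, each linear memory is "static" and sized by this
                // reservation; static_memory_maximum_size/guard_size are ignored.
                memory_reservation_size: 0x1000_0000,
                ..Default::default()
            },
        });
    }

Host objects created through `create_handle` (see the comment added further down) still use the default allocator, so they do not count against these instance limits.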

@@ -547,10 +547,13 @@ impl Table {
             bail!("cross-`Store` table copies are not supported");
         }
 
+        if dst_table.ty() != src_table.ty() {
+            bail!("tables do not have the same element type");
+        }
+
         // NB: We must use the `dst_table`'s `wasmtime_handle` for the
         // `dst_table_index` and vice versa for `src_table` since each table can
         // come from different modules.
         let dst_table_index = dst_table.wasmtime_table_index();
         let dst_table_index = dst_table.instance.get_defined_table(dst_table_index);
@@ -579,6 +582,11 @@ impl Table {
             bail!("cross-`Store` table fills are not supported");
         }
 
+        // Ensure the fill value is the correct type
+        if self.ty().element() != &val.ty() {
+            bail!("mismatched element fill type");
+        }
+
         let table_index = self.wasmtime_table_index();
         self.instance
             .handle

@@ -27,6 +27,8 @@ pub(crate) fn create_handle(
     unsafe {
         // Use the default allocator when creating handles associated with host objects
+        // The configured instance allocator should only be used when creating module instances
+        // as we don't want host objects to count towards instance limits.
         let handle = store
             .engine()
             .config()

@@ -11,3 +11,42 @@ fn get_none() {
     }
     assert!(table.get(1).is_none());
 }
+
+#[test]
+fn fill_wrong() {
+    let store = Store::default();
+    let ty = TableType::new(ValType::FuncRef, Limits::new(1, None));
+    let table = Table::new(&store, ty, Val::FuncRef(None)).unwrap();
+    assert_eq!(
+        table
+            .fill(0, Val::ExternRef(None), 1)
+            .map_err(|e| e.to_string())
+            .unwrap_err(),
+        "mismatched element fill type"
+    );
+    let ty = TableType::new(ValType::ExternRef, Limits::new(1, None));
+    let table = Table::new(&store, ty, Val::ExternRef(None)).unwrap();
+    assert_eq!(
+        table
+            .fill(0, Val::FuncRef(None), 1)
+            .map_err(|e| e.to_string())
+            .unwrap_err(),
+        "mismatched element fill type"
+    );
+}
+
+#[test]
+fn copy_wrong() {
+    let store = Store::default();
+    let ty = TableType::new(ValType::FuncRef, Limits::new(1, None));
+    let table1 = Table::new(&store, ty, Val::FuncRef(None)).unwrap();
+    let ty = TableType::new(ValType::ExternRef, Limits::new(1, None));
+    let table2 = Table::new(&store, ty, Val::ExternRef(None)).unwrap();
+    assert_eq!(
+        Table::copy(&table1, 0, &table2, 0, 1)
+            .map_err(|e| e.to_string())
+            .unwrap_err(),
+        "tables do not have the same element type"
+    );
+}