From a7190764e12ad0e69a06a2296d2e8171edba8d58 Mon Sep 17 00:00:00 2001 From: Peter Huene <peter@huene.dev> Date: Fri, 5 Mar 2021 00:47:49 -0800 Subject: [PATCH] More code review changes. * Add more overflow checks in table/memory initialization. * Comment for `with_allocation_strategy` to explain ignored `Config` options. * Fix Wasmtime `Table` to not panic for type mismatches in `fill`/`copy`. * Add tests for that fix. --- crates/runtime/src/instance/allocator.rs | 172 ++++++++++-------- crates/wasmtime/src/config.rs | 6 + crates/wasmtime/src/externals.rs | 10 +- .../wasmtime/src/trampoline/create_handle.rs | 2 + tests/all/table.rs | 39 ++++ 5 files changed, 152 insertions(+), 77 deletions(-) diff --git a/crates/runtime/src/instance/allocator.rs b/crates/runtime/src/instance/allocator.rs index c70105dfb5..31ced83f5b 100644 --- a/crates/runtime/src/instance/allocator.rs +++ b/crates/runtime/src/instance/allocator.rs @@ -168,33 +168,45 @@ pub unsafe trait InstanceAllocator: Send + Sync { unsafe fn deallocate_fiber_stack(&self, stack: *mut u8); } -fn get_table_init_start(init: &TableInitializer, instance: &Instance) -> usize { - let mut start = init.offset; +fn get_table_init_start( + init: &TableInitializer, + instance: &Instance, +) -> Result<usize, InstantiationError> { + match init.base { + Some(base) => { + let val = unsafe { + if let Some(def_index) = instance.module.defined_global_index(base) { + *instance.global(def_index).as_u32() + } else { + *(*instance.imported_global(base).from).as_u32() + } + }; - if let Some(base) = init.base { - let val = unsafe { - if let Some(def_index) = instance.module.defined_global_index(base) { - *instance.global(def_index).as_u32() - } else { - *(*instance.imported_global(base).from).as_u32() - } - }; - start += usize::try_from(val).unwrap(); + init.offset.checked_add(val as usize).ok_or_else(|| { + InstantiationError::Link(LinkError( + "element segment global base overflows".to_owned(), + )) + }) + } + None => Ok(init.offset), } - - start } fn 
check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError> { for init in &instance.module.table_initializers { - let start = get_table_init_start(init, instance); let table = instance.get_table(init.table_index); + let start = get_table_init_start(init, instance)?; + let end = start.checked_add(init.elements.len()); - let size = usize::try_from(table.size()).unwrap(); - if size < start + init.elements.len() { - return Err(InstantiationError::Link(LinkError( - "table out of bounds: elements segment does not fit".to_owned(), - ))); + match end { + Some(end) if end <= table.size() as usize => { + // Initializer is in bounds + } + _ => { + return Err(InstantiationError::Link(LinkError( + "table out of bounds: elements segment does not fit".to_owned(), + ))) + } } } @@ -203,53 +215,59 @@ fn check_table_init_bounds(instance: &Instance) -> Result<(), InstantiationError fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> { for init in &instance.module.table_initializers { - let start = get_table_init_start(init, instance); let table = instance.get_table(init.table_index); + let start = get_table_init_start(init, instance)?; + let end = start.checked_add(init.elements.len()); - if start - .checked_add(init.elements.len()) - .map_or(true, |end| end > table.size() as usize) - { - return Err(InstantiationError::Trap(Trap::wasm( - ir::TrapCode::TableOutOfBounds, - ))); - } - - for (i, func_idx) in init.elements.iter().enumerate() { - let item = match table.element_type() { - TableElementType::Func => instance - .get_caller_checked_anyfunc(*func_idx) - .map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| { - f as *const VMCallerCheckedAnyfunc as *mut VMCallerCheckedAnyfunc - }) - .into(), - TableElementType::Val(_) => { - assert!(*func_idx == FuncIndex::reserved_value()); - TableElement::ExternRef(None) + match end { + Some(end) if end <= table.size() as usize => { + for (i, func_idx) in init.elements.iter().enumerate() { + let item = 
match table.element_type() { + TableElementType::Func => instance + .get_caller_checked_anyfunc(*func_idx) + .map_or(ptr::null_mut(), |f: &VMCallerCheckedAnyfunc| { + f as *const VMCallerCheckedAnyfunc as *mut VMCallerCheckedAnyfunc + }) + .into(), + TableElementType::Val(_) => { + assert!(*func_idx == FuncIndex::reserved_value()); + TableElement::ExternRef(None) + } + }; + table.set(u32::try_from(start + i).unwrap(), item).unwrap(); } - }; - table.set(u32::try_from(start + i).unwrap(), item).unwrap(); + } + _ => { + return Err(InstantiationError::Trap(Trap::wasm( + ir::TrapCode::TableOutOfBounds, + ))) + } } } Ok(()) } -fn get_memory_init_start(init: &MemoryInitializer, instance: &Instance) -> usize { - let mut start = init.offset; +fn get_memory_init_start( + init: &MemoryInitializer, + instance: &Instance, +) -> Result<usize, InstantiationError> { + match init.base { + Some(base) => { + let val = unsafe { + if let Some(def_index) = instance.module.defined_global_index(base) { + *instance.global(def_index).as_u32() + } else { + *(*instance.imported_global(base).from).as_u32() + } + }; - if let Some(base) = init.base { - let val = unsafe { - if let Some(def_index) = instance.module.defined_global_index(base) { - *instance.global(def_index).as_u32() - } else { - *(*instance.imported_global(base).from).as_u32() - } - }; - start += usize::try_from(val).unwrap(); + init.offset.checked_add(val as usize).ok_or_else(|| { + InstantiationError::Link(LinkError("data segment global base overflows".to_owned())) + }) + } + None => Ok(init.offset), } - - start } unsafe fn get_memory_slice<'instance>( @@ -267,7 +285,7 @@ unsafe fn get_memory_slice<'instance>( let foreign_index = foreign_instance.memory_index(foreign_memory); foreign_instance.memory(foreign_index) }; - slice::from_raw_parts_mut(memory.base, memory.current_length) + &mut *ptr::slice_from_raw_parts_mut(memory.base, memory.current_length) } fn check_memory_init_bounds( instance: &Instance, initializers: 
&[MemoryInitializer], ) -> Result<(), InstantiationError> { for init in initializers { - let start = get_memory_init_start(init, instance); - unsafe { - let mem_slice = get_memory_slice(init, instance); - if mem_slice.get_mut(start..start + init.data.len()).is_none() { + let memory = instance.get_memory(init.memory_index); + let start = get_memory_init_start(init, instance)?; + let end = start.checked_add(init.data.len()); + + match end { + Some(end) if end <= memory.current_length => { + // Initializer is in bounds + } + _ => { return Err(InstantiationError::Link(LinkError( "memory out of bounds: data segment does not fit".into(), - ))); + ))) } } } @@ -295,22 +318,19 @@ fn initialize_memories( ) -> Result<(), InstantiationError> { for init in initializers { let memory = instance.get_memory(init.memory_index); + let start = get_memory_init_start(init, instance)?; + let end = start.checked_add(init.data.len()); - let start = get_memory_init_start(init, instance); - if start - .checked_add(init.data.len()) - .map_or(true, |end| end > memory.current_length) - { - return Err(InstantiationError::Trap(Trap::wasm( - ir::TrapCode::HeapOutOfBounds, - ))); - } - - unsafe { - let mem_slice = get_memory_slice(init, instance); - let end = start + init.data.len(); - let to_init = &mut mem_slice[start..end]; - to_init.copy_from_slice(&init.data); + match end { + Some(end) if end <= memory.current_length => { + let mem_slice = unsafe { get_memory_slice(init, instance) }; + mem_slice[start..end].copy_from_slice(&init.data); + } + _ => { + return Err(InstantiationError::Trap(Trap::wasm( + ir::TrapCode::HeapOutOfBounds, + ))) + } } } diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs index fdce7b9202..f3135dd6c8 100644 --- a/crates/wasmtime/src/config.rs +++ b/crates/wasmtime/src/config.rs @@ -826,6 +826,12 @@ impl Config { } /// Sets the instance allocation strategy to use. 
+ /// + /// When using the pooling instance allocation strategy, all linear memories will be created as "static". + /// + /// This means the [`Config::static_memory_maximum_size`] and [`Config::static_memory_guard_size`] options + /// will be ignored in favor of [`InstanceLimits::memory_reservation_size`] when the pooling instance + /// allocation strategy is used. pub fn with_allocation_strategy( &mut self, strategy: InstanceAllocationStrategy, diff --git a/crates/wasmtime/src/externals.rs b/crates/wasmtime/src/externals.rs index e71fb7d75c..665acf9f02 100644 --- a/crates/wasmtime/src/externals.rs +++ b/crates/wasmtime/src/externals.rs @@ -547,10 +547,13 @@ impl Table { bail!("cross-`Store` table copies are not supported"); } + if dst_table.ty() != src_table.ty() { + bail!("tables do not have the same element type"); + } + // NB: We must use the `dst_table`'s `wasmtime_handle` for the // `dst_table_index` and vice versa for `src_table` since each table can // come from different modules. - let dst_table_index = dst_table.wasmtime_table_index(); let dst_table_index = dst_table.instance.get_defined_table(dst_table_index); @@ -579,6 +582,11 @@ impl Table { bail!("cross-`Store` table fills are not supported"); } + // Ensure the fill value is the correct type + if self.ty().element() != &val.ty() { + bail!("mismatched element fill type"); + } + let table_index = self.wasmtime_table_index(); self.instance .handle diff --git a/crates/wasmtime/src/trampoline/create_handle.rs b/crates/wasmtime/src/trampoline/create_handle.rs index 71595ff729..4ecf261b1e 100644 --- a/crates/wasmtime/src/trampoline/create_handle.rs +++ b/crates/wasmtime/src/trampoline/create_handle.rs @@ -27,6 +27,8 @@ pub(crate) fn create_handle( unsafe { // Use the default allocator when creating handles associated with host objects + // The configured instance allocator should only be used when creating module instances + // as we don't want host objects to count towards instance limits. 
let handle = store .engine() .config() diff --git a/tests/all/table.rs b/tests/all/table.rs index 26830c4ea9..abdddccc4a 100644 --- a/tests/all/table.rs +++ b/tests/all/table.rs @@ -11,3 +11,42 @@ fn get_none() { } assert!(table.get(1).is_none()); } + +#[test] +fn fill_wrong() { + let store = Store::default(); + let ty = TableType::new(ValType::FuncRef, Limits::new(1, None)); + let table = Table::new(&store, ty, Val::FuncRef(None)).unwrap(); + assert_eq!( + table + .fill(0, Val::ExternRef(None), 1) + .map_err(|e| e.to_string()) + .unwrap_err(), + "mismatched element fill type" + ); + + let ty = TableType::new(ValType::ExternRef, Limits::new(1, None)); + let table = Table::new(&store, ty, Val::ExternRef(None)).unwrap(); + assert_eq!( + table + .fill(0, Val::FuncRef(None), 1) + .map_err(|e| e.to_string()) + .unwrap_err(), + "mismatched element fill type" + ); +} + +#[test] +fn copy_wrong() { + let store = Store::default(); + let ty = TableType::new(ValType::FuncRef, Limits::new(1, None)); + let table1 = Table::new(&store, ty, Val::FuncRef(None)).unwrap(); + let ty = TableType::new(ValType::ExternRef, Limits::new(1, None)); + let table2 = Table::new(&store, ty, Val::ExternRef(None)).unwrap(); + assert_eq!( + Table::copy(&table1, 0, &table2, 0, 1) + .map_err(|e| e.to_string()) + .unwrap_err(), + "tables do not have the same element type" + ); +}