diff --git a/build.rs b/build.rs
index 4d2961e9a5..1942deec89 100644
--- a/build.rs
+++ b/build.rs
@@ -186,7 +186,8 @@ fn ignore(testsuite: &str, testname: &str, strategy: &str) -> bool {
             | ("bulk_memory_operations", "table_init")
             | ("bulk_memory_operations", "elem")
             | ("bulk_memory_operations", "memory_copy")
-            | ("bulk_memory_operations", "memory_fill") => return false,
+            | ("bulk_memory_operations", "memory_fill")
+            | ("bulk_memory_operations", "linking") => return false,
             ("bulk_memory_operations", _) => return true,
             _ => {}
 
diff --git a/crates/api/src/instance.rs b/crates/api/src/instance.rs
index 065627dc51..546a1bb45d 100644
--- a/crates/api/src/instance.rs
+++ b/crates/api/src/instance.rs
@@ -26,11 +26,15 @@ fn instantiate(
     let mut resolver = SimpleResolver { imports };
     unsafe {
         let instance = compiled_module
-            .instantiate(&mut resolver)
+            .instantiate(
+                config.validating_config.operator_config.enable_bulk_memory,
+                &mut resolver,
+            )
             .map_err(|e| -> Error {
                 match e {
                     InstantiationError::StartTrap(trap) => Trap::from_jit(trap).into(),
-                    e @ InstantiationError::TableOutOfBounds(_) => {
+                    e @ InstantiationError::TableOutOfBounds(_)
+                    | e @ InstantiationError::MemoryOutOfBounds(_) => {
                         let msg = e.to_string();
                         if config.validating_config.operator_config.enable_bulk_memory {
                             Trap::new(msg).into()
diff --git a/crates/api/src/trampoline/create_handle.rs b/crates/api/src/trampoline/create_handle.rs
index ea87ee394d..d3d295de9d 100644
--- a/crates/api/src/trampoline/create_handle.rs
+++ b/crates/api/src/trampoline/create_handle.rs
@@ -42,6 +42,12 @@ pub(crate) fn create_handle(
         &data_initializers,
         signatures.into_boxed_slice(),
         None,
+        store
+            .engine()
+            .config()
+            .validating_config
+            .operator_config
+            .enable_bulk_memory,
         state,
     )?)
 }
diff --git a/crates/jit/src/instantiate.rs b/crates/jit/src/instantiate.rs
index f860fb2f8d..957f1c4099 100644
--- a/crates/jit/src/instantiate.rs
+++ b/crates/jit/src/instantiate.rs
@@ -205,6 +205,7 @@ impl CompiledModule {
     /// See `InstanceHandle::new`
     pub unsafe fn instantiate(
         &self,
+        is_bulk_memory: bool,
         resolver: &mut dyn Resolver,
     ) -> Result<InstanceHandle, InstantiationError> {
         let data_initializers = self
@@ -224,6 +225,7 @@ impl CompiledModule {
             &data_initializers,
             self.signatures.clone(),
             self.dbg_jit_registration.as_ref().map(|r| Rc::clone(&r)),
+            is_bulk_memory,
             Box::new(()),
         )
     }
@@ -277,9 +279,10 @@ pub unsafe fn instantiate(
     data: &[u8],
     resolver: &mut dyn Resolver,
     debug_info: bool,
+    is_bulk_memory: bool,
     profiler: Option<&Arc<Mutex<Box<dyn ProfilingAgent + Send>>>>,
 ) -> Result<InstanceHandle, SetupError> {
-    let instance =
-        CompiledModule::new(compiler, data, debug_info, profiler)?.instantiate(resolver)?;
+    let instance = CompiledModule::new(compiler, data, debug_info, profiler)?
+        .instantiate(is_bulk_memory, resolver)?;
     Ok(instance)
 }
diff --git a/crates/runtime/src/instance.rs b/crates/runtime/src/instance.rs
index 2d13b65d7a..5bf035f985 100644
--- a/crates/runtime/src/instance.rs
+++ b/crates/runtime/src/instance.rs
@@ -21,6 +21,7 @@ use more_asserts::assert_lt;
 use std::alloc::{self, Layout};
 use std::any::Any;
 use std::cell::{Cell, RefCell};
+use std::cmp;
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::convert::TryFrom;
@@ -810,6 +811,7 @@ impl InstanceHandle {
         data_initializers: &[DataInitializer<'_>],
         vmshared_signatures: BoxedSlice<SignatureIndex, VMSharedSignatureIndex>,
         dbg_jit_registration: Option<Rc<GdbJitImageRegistration>>,
+        is_bulk_memory: bool,
         host_state: Box<dyn Any>,
     ) -> Result<Self, InstantiationError> {
         let tables = create_tables(&module);
@@ -904,9 +906,14 @@ impl InstanceHandle {
             VMBuiltinFunctionsArray::initialized(),
         );
 
-        // Check initializer bounds before initializing anything.
-        check_table_init_bounds(instance)?;
-        check_memory_init_bounds(instance, data_initializers)?;
+        // Check initializer bounds before initializing anything. Only do this
+        // when bulk memory is disabled, since the bulk memory proposal changes
+        // instantiation such that the intermediate results of failed
+        // initializations are visible.
+        if !is_bulk_memory {
+            check_table_init_bounds(instance)?;
+            check_memory_init_bounds(instance, data_initializers)?;
+        }
 
         // Apply the initializers.
         initialize_tables(instance)?;
@@ -1133,9 +1140,9 @@ fn check_memory_init_bounds(
         unsafe {
             let mem_slice = get_memory_slice(init, instance);
             if mem_slice.get_mut(start..start + init.data.len()).is_none() {
-                return Err(InstantiationError::Link(LinkError(
-                    "data segment does not fit".to_owned(),
-                )));
+                return Err(InstantiationError::MemoryOutOfBounds(
+                    "data segment does not fit".into(),
+                ));
             }
         }
     }
@@ -1179,11 +1186,29 @@ fn initialize_tables(instance: &Instance) -> Result<(), InstantiationError> {
         let start = get_table_init_start(init, instance);
         let table = instance.get_table(init.table_index);
 
+        // Even if there aren't any elements being initialized, if it's out of
+        // bounds, then we report an error. This is required by the bulk memory
+        // proposal and the way that it uses `table.init` during instantiation.
+        if start > table.size() as usize {
+            return Err(InstantiationError::TableOutOfBounds(
+                "active element segment does not fit in table".into(),
+            ));
+        }
+
         for (i, func_idx) in init.elements.iter().enumerate() {
             let anyfunc = instance.get_caller_checked_anyfunc(*func_idx);
             table
                 .set(u32::try_from(start + i).unwrap(), anyfunc)
-                .unwrap();
+                // Note that when bulk memory is disabled, this will never fail
+                // since we bounds check table element initialization before
+                // doing any table slot writes. However when bulk memory is
+                // enabled, these become runtime traps and the intermediate
+                // table slot writes are visible.
+                .map_err(|()| {
+                    InstantiationError::TableOutOfBounds(
+                        "active element segment does not fit in table".into(),
+                    )
+                })?;
         }
     }
 
@@ -1239,8 +1264,25 @@ fn initialize_memories(
         let start = get_memory_init_start(init, instance);
         unsafe {
             let mem_slice = get_memory_slice(init, instance);
-            let to_init = &mut mem_slice[start..start + init.data.len()];
-            to_init.copy_from_slice(init.data);
+
+            let end = start.saturating_add(init.data.len());
+            let actual_end = cmp::min(mem_slice.len(), end);
+            let num_uncopied = end - actual_end;
+
+            let to_init = &mut mem_slice[start..actual_end];
+            to_init.copy_from_slice(&init.data[..init.data.len() - num_uncopied]);
+
+            // Note that if bulk memory is disabled, we'll never hit this case
+            // because we did an eager bounds check in
+            // `check_memory_init_bounds`. However when bulk memory is enabled,
+            // we need the incremental writes up to the OOB trap to be visible,
+            // since this is dictated by the updated spec for the bulk memory
+            // proposal.
+            if num_uncopied > 0 {
+                return Err(InstantiationError::MemoryOutOfBounds(
+                    "active data segment does not fit in memory".into(),
+                ));
+            }
         }
     }
 
@@ -1307,6 +1349,13 @@ pub enum InstantiationError {
     #[error("Table out of bounds error: {0}")]
     TableOutOfBounds(String),
 
+    /// A memory out of bounds error.
+    ///
+    /// Depending on whether bulk memory is enabled or not, this is either a
+    /// link error or a trap raised during instantiation.
+    #[error("Memory out of bounds error: {0}")]
+    MemoryOutOfBounds(String),
+
     /// A wasm link error occured.
     #[error("Failed to link module")]
     Link(#[from] LinkError),
diff --git a/tests/instantiate.rs b/tests/instantiate.rs
index b9bd6e1064..a72a2df878 100644
--- a/tests/instantiate.rs
+++ b/tests/instantiate.rs
@@ -24,7 +24,17 @@ fn test_environ_translate() {
     let cache_config = CacheConfig::new_cache_disabled();
     let mut compiler = Compiler::new(isa, CompilationStrategy::Auto, cache_config);
     unsafe {
-        let instance = instantiate(&mut compiler, &data, &mut resolver, false, None);
+        let instance = instantiate(
+            &mut compiler,
+            &data,
+            &mut resolver,
+            // Debug info.
+            false,
+            // Bulk memory.
+            false,
+            // Profiler.
+            None,
+        );
         assert!(instance.is_ok());
     }
 }
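
For reviewers, a rough sketch of the observable behavior this change implements, written against the public `wasmtime` API rather than the internal crates touched above. The `Config::wasm_bulk_memory`, `Engine::new`, `Store::new`, `Module::new`, and `Instance::new` calls, and the use of the `wat` crate to assemble the module, are assumptions about the API surface of this vintage and may need adjusting; the point is only to illustrate that an out-of-bounds active data segment now surfaces as a trap during instantiation, with the in-bounds prefix already written, instead of failing eagerly as a link error.

// Sketch only: the API names used below are assumed from the wasmtime
// public API of roughly this era and may differ slightly in practice.
use wasmtime::{Config, Engine, Instance, Module, Store};

fn main() -> anyhow::Result<()> {
    // Turn the bulk memory proposal on so instantiation uses the new
    // `memory.init`-style semantics for active data segments.
    let mut config = Config::new();
    config.wasm_bulk_memory(true);
    let engine = Engine::new(&config);
    let store = Store::new(&engine);

    // One 64 KiB page of memory, plus an active data segment that starts in
    // bounds at offset 65534 but runs past the end of the page.
    let wasm = wat::parse_str(
        r#"(module
             (memory 1)
             (data (i32.const 65534) "spills past the end of memory"))"#,
    )?;
    let module = Module::new(&store, &wasm)?;

    // With bulk memory enabled, the two in-bounds bytes are written and then
    // instantiation fails (InstantiationError::MemoryOutOfBounds, mapped to a
    // trap in crates/api/src/instance.rs). With bulk memory disabled, the same
    // module fails the eager bounds check and reports a link error before any
    // memory is written.
    let err = Instance::new(&module, &[]).unwrap_err();
    println!("instantiation failed as expected: {}", err);
    Ok(())
}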