diff --git a/lib/environ/src/tunables.rs b/lib/environ/src/tunables.rs
index c2769bf2aa..20ddf7d2a4 100644
--- a/lib/environ/src/tunables.rs
+++ b/lib/environ/src/tunables.rs
@@ -14,12 +14,20 @@ pub struct Tunables {
 
 impl Default for Tunables {
     fn default() -> Self {
         Self {
+            #[cfg(target_pointer_width = "32")]
+            /// Size in wasm pages of the bound for static memories.
+            static_memory_bound: 0x4000,
+            #[cfg(target_pointer_width = "64")]
             /// Size in wasm pages of the bound for static memories.
             ///
             /// When we allocate 4 GiB of address space, we can avoid the
             /// need for explicit bounds checks.
             static_memory_bound: 0x1_0000,
 
+            #[cfg(target_pointer_width = "32")]
+            /// Size in bytes of the offset guard for static memories.
+            static_memory_offset_guard_size: 0x1_0000,
+            #[cfg(target_pointer_width = "64")]
             /// Size in bytes of the offset guard for static memories.
             ///
             /// Allocating 2 GiB of address space lets us translate wasm
diff --git a/lib/environ/src/vmoffsets.rs b/lib/environ/src/vmoffsets.rs
index 4192afc534..30e413f34a 100644
--- a/lib/environ/src/vmoffsets.rs
+++ b/lib/environ/src/vmoffsets.rs
@@ -2,33 +2,46 @@
 //! module.
 
 use crate::module::Module;
+use cast;
 use cranelift_codegen::ir;
 use cranelift_wasm::{
     DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, FuncIndex, GlobalIndex, MemoryIndex,
     SignatureIndex, TableIndex,
 };
 
+#[cfg(target_pointer_width = "32")]
+fn cast_to_u32(sz: usize) -> u32 {
+    cast::u32(sz)
+}
+#[cfg(target_pointer_width = "64")]
+fn cast_to_u32(sz: usize) -> u32 {
+    match cast::u32(sz) {
+        Ok(x) => x,
+        Err(_) => panic!("overflow in cast from usize to u32"),
+    }
+}
+
 /// This class computes offsets to fields within `VMContext` and other
 /// related structs that JIT code accesses directly.
 pub struct VMOffsets {
     /// The size in bytes of a pointer on the target.
     pub pointer_size: u8,
     /// The number of signature declarations in the module.
-    pub num_signature_ids: u64,
+    pub num_signature_ids: u32,
     /// The number of imported functions in the module.
-    pub num_imported_functions: u64,
+    pub num_imported_functions: u32,
     /// The number of imported tables in the module.
-    pub num_imported_tables: u64,
+    pub num_imported_tables: u32,
     /// The number of imported memories in the module.
-    pub num_imported_memories: u64,
+    pub num_imported_memories: u32,
     /// The number of imported globals in the module.
-    pub num_imported_globals: u64,
+    pub num_imported_globals: u32,
     /// The number of defined tables in the module.
-    pub num_defined_tables: u64,
+    pub num_defined_tables: u32,
     /// The number of defined memories in the module.
-    pub num_defined_memories: u64,
+    pub num_defined_memories: u32,
     /// The number of defined globals in the module.
-    pub num_defined_globals: u64,
+    pub num_defined_globals: u32,
 }
 
 impl VMOffsets {
@@ -36,14 +49,14 @@
     pub fn new(pointer_size: u8, module: &Module) -> Self {
         Self {
             pointer_size,
-            num_signature_ids: module.signatures.len() as u64,
-            num_imported_functions: module.imported_funcs.len() as u64,
-            num_imported_tables: module.imported_tables.len() as u64,
-            num_imported_memories: module.imported_memories.len() as u64,
-            num_imported_globals: module.imported_globals.len() as u64,
-            num_defined_tables: module.table_plans.len() as u64,
-            num_defined_memories: module.memory_plans.len() as u64,
-            num_defined_globals: module.globals.len() as u64,
+            num_signature_ids: cast_to_u32(module.signatures.len()),
+            num_imported_functions: cast_to_u32(module.imported_funcs.len()),
+            num_imported_tables: cast_to_u32(module.imported_tables.len()),
+            num_imported_memories: cast_to_u32(module.imported_memories.len()),
+            num_imported_globals: cast_to_u32(module.imported_globals.len()),
+            num_defined_tables: cast_to_u32(module.table_plans.len()),
+            num_defined_memories: cast_to_u32(module.memory_plans.len()),
+            num_defined_globals: cast_to_u32(module.globals.len()),
         }
     }
 }
@@ -236,224 +249,264 @@
 /// Offsets for `VMContext`.
 impl VMOffsets {
     /// The offset of the `signature_ids` array.
-    pub fn vmctx_signature_ids_begin(&self) -> u64 {
+    pub fn vmctx_signature_ids_begin(&self) -> u32 {
         0
     }
 
     /// The offset of the `tables` array.
     #[allow(clippy::erasing_op)]
-    pub fn vmctx_imported_functions_begin(&self) -> u64 {
+    pub fn vmctx_imported_functions_begin(&self) -> u32 {
         self.vmctx_signature_ids_begin()
-            + self.num_signature_ids * u64::from(self.size_of_vmshared_signature_index())
+            .checked_add(
+                self.num_signature_ids
+                    .checked_mul(u32::from(self.size_of_vmshared_signature_index()))
+                    .unwrap(),
+            )
+            .unwrap()
     }
 
     /// The offset of the `tables` array.
     #[allow(clippy::identity_op)]
-    pub fn vmctx_imported_tables_begin(&self) -> u64 {
+    pub fn vmctx_imported_tables_begin(&self) -> u32 {
         self.vmctx_imported_functions_begin()
-            + self.num_imported_functions * u64::from(self.size_of_vmfunction_import())
+            .checked_add(
+                self.num_imported_functions
+                    .checked_mul(u32::from(self.size_of_vmfunction_import()))
+                    .unwrap(),
+            )
+            .unwrap()
     }
 
     /// The offset of the `memories` array.
-    pub fn vmctx_imported_memories_begin(&self) -> u64 {
+    pub fn vmctx_imported_memories_begin(&self) -> u32 {
         self.vmctx_imported_tables_begin()
-            + self.num_imported_tables * u64::from(self.size_of_vmtable_import())
+            .checked_add(
+                self.num_imported_tables
+                    .checked_mul(u32::from(self.size_of_vmtable_import()))
+                    .unwrap(),
+            )
+            .unwrap()
     }
 
     /// The offset of the `globals` array.
-    pub fn vmctx_imported_globals_begin(&self) -> u64 {
+    pub fn vmctx_imported_globals_begin(&self) -> u32 {
         self.vmctx_imported_memories_begin()
-            + self.num_imported_memories * u64::from(self.size_of_vmmemory_import())
+            .checked_add(
+                self.num_imported_memories
+                    .checked_mul(u32::from(self.size_of_vmmemory_import()))
+                    .unwrap(),
+            )
+            .unwrap()
    }
 
     /// The offset of the `tables` array.
-    pub fn vmctx_tables_begin(&self) -> u64 {
+    pub fn vmctx_tables_begin(&self) -> u32 {
         self.vmctx_imported_globals_begin()
-            + self.num_imported_globals * u64::from(self.size_of_vmglobal_import())
+            .checked_add(
+                self.num_imported_globals
+                    .checked_mul(u32::from(self.size_of_vmglobal_import()))
+                    .unwrap(),
+            )
+            .unwrap()
     }
 
     /// The offset of the `memories` array.
-    pub fn vmctx_memories_begin(&self) -> u64 {
+    pub fn vmctx_memories_begin(&self) -> u32 {
         self.vmctx_tables_begin()
-            + self.num_defined_tables * u64::from(self.size_of_vmtable_definition())
+            .checked_add(
+                self.num_defined_tables
+                    .checked_mul(u32::from(self.size_of_vmtable_definition()))
+                    .unwrap(),
+            )
+            .unwrap()
     }
 
     /// The offset of the `globals` array.
-    pub fn vmctx_globals_begin(&self) -> u64 {
+    pub fn vmctx_globals_begin(&self) -> u32 {
         self.vmctx_memories_begin()
-            + self.num_defined_memories * u64::from(self.size_of_vmmemory_definition())
+            .checked_add(
+                self.num_defined_memories
+                    .checked_mul(u32::from(self.size_of_vmmemory_definition()))
+                    .unwrap(),
+            )
+            .unwrap()
     }
 
     /// Return the size of the `VMContext` allocation.
     #[allow(dead_code)]
-    pub fn size_of_vmctx(&self) -> u64 {
+    pub fn size_of_vmctx(&self) -> u32 {
         self.vmctx_globals_begin()
-            + self.num_defined_globals * u64::from(self.size_of_vmglobal_definition())
+            .checked_add(
+                self.num_defined_globals
+                    .checked_mul(u32::from(self.size_of_vmglobal_definition()))
+                    .unwrap(),
+            )
+            .unwrap()
     }
 
     /// Return the offset to `VMSharedSignatureId` index `index`.
-    pub fn vmctx_vmshared_signature_id(&self, index: SignatureIndex) -> u64 {
-        assert!(u64::from(index.as_u32()) < self.num_signature_ids);
+    pub fn vmctx_vmshared_signature_id(&self, index: SignatureIndex) -> u32 {
+        assert!(u32::from(index.as_u32()) < self.num_signature_ids);
         self.vmctx_signature_ids_begin()
             .checked_add(
-                u64::from(index.as_u32())
-                    .checked_mul(u64::from(self.size_of_vmshared_signature_index()))
+                u32::from(index.as_u32())
+                    .checked_mul(u32::from(self.size_of_vmshared_signature_index()))
                     .unwrap(),
             )
             .unwrap()
     }
 
     /// Return the offset to `VMFunctionImport` index `index`.
-    pub fn vmctx_vmfunction_import(&self, index: FuncIndex) -> u64 {
-        assert!(u64::from(index.as_u32()) < self.num_imported_functions);
+    pub fn vmctx_vmfunction_import(&self, index: FuncIndex) -> u32 {
+        assert!(u32::from(index.as_u32()) < self.num_imported_functions);
         self.vmctx_imported_functions_begin()
             .checked_add(
-                u64::from(index.as_u32())
-                    .checked_mul(u64::from(self.size_of_vmfunction_import()))
+                u32::from(index.as_u32())
+                    .checked_mul(u32::from(self.size_of_vmfunction_import()))
                     .unwrap(),
             )
             .unwrap()
     }
 
     /// Return the offset to `VMTableImport` index `index`.
-    pub fn vmctx_vmtable_import(&self, index: TableIndex) -> u64 {
-        assert!(u64::from(index.as_u32()) < self.num_imported_tables);
+    pub fn vmctx_vmtable_import(&self, index: TableIndex) -> u32 {
+        assert!(u32::from(index.as_u32()) < self.num_imported_tables);
         self.vmctx_imported_tables_begin()
             .checked_add(
-                u64::from(index.as_u32())
-                    .checked_mul(u64::from(self.size_of_vmtable_import()))
+                u32::from(index.as_u32())
+                    .checked_mul(u32::from(self.size_of_vmtable_import()))
                     .unwrap(),
             )
             .unwrap()
     }
 
     /// Return the offset to `VMMemoryImport` index `index`.
-    pub fn vmctx_vmmemory_import(&self, index: MemoryIndex) -> u64 {
-        assert!(u64::from(index.as_u32()) < self.num_imported_memories);
+    pub fn vmctx_vmmemory_import(&self, index: MemoryIndex) -> u32 {
+        assert!(u32::from(index.as_u32()) < self.num_imported_memories);
         self.vmctx_imported_memories_begin()
             .checked_add(
-                u64::from(index.as_u32())
-                    .checked_mul(u64::from(self.size_of_vmmemory_import()))
+                u32::from(index.as_u32())
+                    .checked_mul(u32::from(self.size_of_vmmemory_import()))
                     .unwrap(),
             )
             .unwrap()
     }
 
     /// Return the offset to `VMGlobalImport` index `index`.
-    pub fn vmctx_vmglobal_import(&self, index: GlobalIndex) -> u64 {
-        assert!(u64::from(index.as_u32()) < self.num_imported_globals);
+    pub fn vmctx_vmglobal_import(&self, index: GlobalIndex) -> u32 {
+        assert!(u32::from(index.as_u32()) < self.num_imported_globals);
         self.vmctx_imported_globals_begin()
             .checked_add(
-                u64::from(index.as_u32())
-                    .checked_mul(u64::from(self.size_of_vmglobal_import()))
+                u32::from(index.as_u32())
+                    .checked_mul(u32::from(self.size_of_vmglobal_import()))
                     .unwrap(),
             )
             .unwrap()
     }
 
     /// Return the offset to `VMTableDefinition` index `index`.
-    pub fn vmctx_vmtable_definition(&self, index: DefinedTableIndex) -> u64 {
-        assert!(u64::from(index.as_u32()) < self.num_defined_tables);
+    pub fn vmctx_vmtable_definition(&self, index: DefinedTableIndex) -> u32 {
+        assert!(u32::from(index.as_u32()) < self.num_defined_tables);
         self.vmctx_tables_begin()
             .checked_add(
-                u64::from(index.as_u32())
-                    .checked_mul(u64::from(self.size_of_vmtable_definition()))
+                u32::from(index.as_u32())
+                    .checked_mul(u32::from(self.size_of_vmtable_definition()))
                     .unwrap(),
             )
             .unwrap()
     }
 
     /// Return the offset to `VMMemoryDefinition` index `index`.
-    pub fn vmctx_vmmemory_definition(&self, index: DefinedMemoryIndex) -> u64 {
-        assert!(u64::from(index.as_u32()) < self.num_defined_memories);
+    pub fn vmctx_vmmemory_definition(&self, index: DefinedMemoryIndex) -> u32 {
+        assert!(u32::from(index.as_u32()) < self.num_defined_memories);
         self.vmctx_memories_begin()
             .checked_add(
-                u64::from(index.as_u32())
-                    .checked_mul(u64::from(self.size_of_vmmemory_definition()))
+                u32::from(index.as_u32())
+                    .checked_mul(u32::from(self.size_of_vmmemory_definition()))
                     .unwrap(),
             )
             .unwrap()
     }
 
     /// Return the offset to the `VMGlobalDefinition` index `index`.
-    pub fn vmctx_vmglobal_definition(&self, index: DefinedGlobalIndex) -> u64 {
-        assert!(u64::from(index.as_u32()) < self.num_defined_globals);
+    pub fn vmctx_vmglobal_definition(&self, index: DefinedGlobalIndex) -> u32 {
+        assert!(u32::from(index.as_u32()) < self.num_defined_globals);
         self.vmctx_globals_begin()
             .checked_add(
-                u64::from(index.as_u32())
-                    .checked_mul(u64::from(self.size_of_vmglobal_definition()))
+                u32::from(index.as_u32())
+                    .checked_mul(u32::from(self.size_of_vmglobal_definition()))
                     .unwrap(),
             )
             .unwrap()
     }
 
     /// Return the offset to the `body` field in `*const VMFunctionBody` index `index`.
-    pub fn vmctx_vmfunction_import_body(&self, index: FuncIndex) -> u64 {
+    pub fn vmctx_vmfunction_import_body(&self, index: FuncIndex) -> u32 {
         self.vmctx_vmfunction_import(index)
-            .checked_add(u64::from(self.vmfunction_import_body()))
+            .checked_add(u32::from(self.vmfunction_import_body()))
             .unwrap()
     }
 
     /// Return the offset to the `vmctx` field in `*const VMFunctionBody` index `index`.
-    pub fn vmctx_vmfunction_import_vmctx(&self, index: FuncIndex) -> u64 {
+    pub fn vmctx_vmfunction_import_vmctx(&self, index: FuncIndex) -> u32 {
         self.vmctx_vmfunction_import(index)
-            .checked_add(u64::from(self.vmfunction_import_vmctx()))
+            .checked_add(u32::from(self.vmfunction_import_vmctx()))
             .unwrap()
     }
 
     /// Return the offset to the `from` field in `VMTableImport` index `index`.
-    pub fn vmctx_vmtable_import_from(&self, index: TableIndex) -> u64 {
+    pub fn vmctx_vmtable_import_from(&self, index: TableIndex) -> u32 {
         self.vmctx_vmtable_import(index)
-            .checked_add(u64::from(self.vmtable_import_from()))
+            .checked_add(u32::from(self.vmtable_import_from()))
             .unwrap()
     }
 
     /// Return the offset to the `base` field in `VMTableDefinition` index `index`.
-    pub fn vmctx_vmtable_definition_base(&self, index: DefinedTableIndex) -> u64 {
+    pub fn vmctx_vmtable_definition_base(&self, index: DefinedTableIndex) -> u32 {
         self.vmctx_vmtable_definition(index)
-            .checked_add(u64::from(self.vmtable_definition_base()))
+            .checked_add(u32::from(self.vmtable_definition_base()))
             .unwrap()
     }
 
     /// Return the offset to the `current_elements` field in `VMTableDefinition` index `index`.
-    pub fn vmctx_vmtable_definition_current_elements(&self, index: DefinedTableIndex) -> u64 {
+    pub fn vmctx_vmtable_definition_current_elements(&self, index: DefinedTableIndex) -> u32 {
         self.vmctx_vmtable_definition(index)
-            .checked_add(u64::from(self.vmtable_definition_current_elements()))
+            .checked_add(u32::from(self.vmtable_definition_current_elements()))
             .unwrap()
     }
 
     /// Return the offset to the `from` field in `VMMemoryImport` index `index`.
-    pub fn vmctx_vmmemory_import_from(&self, index: MemoryIndex) -> u64 {
+    pub fn vmctx_vmmemory_import_from(&self, index: MemoryIndex) -> u32 {
         self.vmctx_vmmemory_import(index)
-            .checked_add(u64::from(self.vmmemory_import_from()))
+            .checked_add(u32::from(self.vmmemory_import_from()))
             .unwrap()
     }
 
     /// Return the offset to the `vmctx` field in `VMMemoryImport` index `index`.
-    pub fn vmctx_vmmemory_import_vmctx(&self, index: MemoryIndex) -> u64 {
+    pub fn vmctx_vmmemory_import_vmctx(&self, index: MemoryIndex) -> u32 {
         self.vmctx_vmmemory_import(index)
-            .checked_add(u64::from(self.vmmemory_import_vmctx()))
+            .checked_add(u32::from(self.vmmemory_import_vmctx()))
             .unwrap()
     }
 
     /// Return the offset to the `base` field in `VMMemoryDefinition` index `index`.
-    pub fn vmctx_vmmemory_definition_base(&self, index: DefinedMemoryIndex) -> u64 {
+    pub fn vmctx_vmmemory_definition_base(&self, index: DefinedMemoryIndex) -> u32 {
         self.vmctx_vmmemory_definition(index)
-            .checked_add(u64::from(self.vmmemory_definition_base()))
+            .checked_add(u32::from(self.vmmemory_definition_base()))
             .unwrap()
     }
 
     /// Return the offset to the `current_length` field in `VMMemoryDefinition` index `index`.
-    pub fn vmctx_vmmemory_definition_current_length(&self, index: DefinedMemoryIndex) -> u64 {
+    pub fn vmctx_vmmemory_definition_current_length(&self, index: DefinedMemoryIndex) -> u32 {
         self.vmctx_vmmemory_definition(index)
-            .checked_add(u64::from(self.vmmemory_definition_current_length()))
+            .checked_add(u32::from(self.vmmemory_definition_current_length()))
             .unwrap()
     }
 
     /// Return the offset to the `from` field in `VMGlobalImport` index `index`.
-    pub fn vmctx_vmglobal_import_from(&self, index: GlobalIndex) -> u64 {
+    pub fn vmctx_vmglobal_import_from(&self, index: GlobalIndex) -> u32 {
         self.vmctx_vmglobal_import(index)
-            .checked_add(u64::from(self.vmglobal_import_from()))
+            .checked_add(u32::from(self.vmglobal_import_from()))
             .unwrap()
     }
 }
diff --git a/lib/jit/src/link.rs b/lib/jit/src/link.rs
index 46786554fb..be88aba381 100644
--- a/lib/jit/src/link.rs
+++ b/lib/jit/src/link.rs
@@ -347,6 +347,10 @@ fn relocate(
                     .unwrap();
                 write_unaligned(reloc_address as *mut u32, reloc_delta_u32);
             },
+            #[cfg(target_pointer_width = "32")]
+            Reloc::X86CallPCRel4 => {
+                // ignore
+            }
             _ => panic!("unsupported reloc kind"),
         }
     }
diff --git a/lib/runtime/signalhandlers/CMakeLists.txt b/lib/runtime/signalhandlers/CMakeLists.txt
index 5fb9a0457a..b1568f2676 100644
--- a/lib/runtime/signalhandlers/CMakeLists.txt
+++ b/lib/runtime/signalhandlers/CMakeLists.txt
@@ -1,7 +1,11 @@
 cmake_minimum_required(VERSION 3.0)
 
 project(SignalHandlers CXX)
 
-set(CMAKE_CXX_FLAGS "-std=c++11 -fno-exceptions -fno-rtti -fPIC")
+if( CMAKE_SIZEOF_VOID_P EQUAL 8 )
+  set(CMAKE_CXX_FLAGS "-std=c++11 -fno-exceptions -fno-rtti -fPIC")
+else( CMAKE_SIZEOF_VOID_P EQUAL 8 )
+  set(CMAKE_CXX_FLAGS "-m32 -std=c++11 -fno-exceptions -fno-rtti -fPIC")
+endif( CMAKE_SIZEOF_VOID_P EQUAL 8 )
 
 add_library(SignalHandlers STATIC SignalHandlers.cpp)
diff --git a/lib/runtime/src/instance.rs b/lib/runtime/src/instance.rs
index bf2e77d717..b52e470874 100644
--- a/lib/runtime/src/instance.rs
+++ b/lib/runtime/src/instance.rs
@@ -27,7 +27,7 @@ use cranelift_wasm::{
 use indexmap;
 use std::borrow::ToOwned;
 use std::boxed::Box;
-use std::collections::{hash_map, HashMap};
+use std::collections::HashMap;
 use std::rc::Rc;
 use std::string::{String, ToString};
 use wasmtime_environ::{DataInitializer, Module, TableElements, VMOffsets};
@@ -619,17 +619,7 @@ impl Instance {
         let vmctx_globals = create_globals(&module);
 
-        let offsets = VMOffsets {
-            pointer_size: mem::size_of::<*const u8>() as u8,
-            num_signature_ids: vmshared_signatures.len() as u64,
-            num_imported_functions: imports.functions.len() as u64,
-            num_imported_tables: imports.tables.len() as u64,
-            num_imported_memories: imports.memories.len() as u64,
-            num_imported_globals: imports.globals.len() as u64,
-            num_defined_tables: tables.len() as u64,
-            num_defined_memories: memories.len() as u64,
-            num_defined_globals: vmctx_globals.len() as u64,
-        };
+        let offsets = VMOffsets::new(mem::size_of::<*const u8>() as u8, &module);
 
         let mut contents_mmap = Mmap::with_at_least(
             mem::size_of::()
@@ -710,7 +700,7 @@
         // Collect the exports for the global export map.
         for (field, decl) in &module.exports {
-            use hash_map::Entry::*;
+            use std::collections::hash_map::Entry::*;
             let cell: &RefCell>> = contents.global_exports.borrow();
             let map: &mut HashMap> =
diff --git a/lib/runtime/src/sig_registry.rs b/lib/runtime/src/sig_registry.rs
index 73d8cf1439..646cac25c9 100644
--- a/lib/runtime/src/sig_registry.rs
+++ b/lib/runtime/src/sig_registry.rs
@@ -29,6 +29,9 @@ impl SignatureRegistry {
         match self.signature_hash.entry(sig.clone()) {
             hash_map::Entry::Occupied(entry) => *entry.get(),
             hash_map::Entry::Vacant(entry) => {
+                #[cfg(target_pointer_width = "32")]
+                let sig_id = VMSharedSignatureIndex::new(cast::u32(len));
+                #[cfg(target_pointer_width = "64")]
                 let sig_id = VMSharedSignatureIndex::new(cast::u32(len).unwrap());
                 entry.insert(sig_id);
                 sig_id
diff --git a/lib/runtime/src/vmcontext.rs b/lib/runtime/src/vmcontext.rs
index f0fd91195c..1a4e5f4326 100644
--- a/lib/runtime/src/vmcontext.rs
+++ b/lib/runtime/src/vmcontext.rs
@@ -409,7 +409,7 @@ mod test_vmshared_signature_index {
 impl VMSharedSignatureIndex {
     /// Create a new `VMSharedSignatureIndex`.
     pub fn new(value: u32) -> Self {
-        Self(value)
+        VMSharedSignatureIndex(value)
     }
 }
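
The recurring pattern in the `vmoffsets.rs` changes above is that every offset is now computed in `u32` with `checked_add`/`checked_mul`, so a module large enough to overflow the 32-bit offset space panics up front instead of silently wrapping on a 32-bit host. The standalone sketch below illustrates that pattern using only the standard library (`u32::try_from` standing in for the `cast` crate used by the patch); the struct and field names (`Offsets`, `globals_begin`, `global_size`, `global_offset`) are simplified placeholders, not the real `VMOffsets` layout.

```rust
use std::convert::TryFrom;

/// Simplified stand-in for the `VMOffsets`-style arithmetic above: all
/// offsets are `u32`, and any arithmetic that would overflow panics
/// instead of silently wrapping.
struct Offsets {
    globals_begin: u32,       // hypothetical start of the globals area
    global_size: u32,         // hypothetical size of one global slot
    num_defined_globals: u32,
}

impl Offsets {
    fn new(globals_begin: u32, global_size: u32, num_defined_globals: usize) -> Self {
        // Plays the role of the patch's `cast_to_u32`: convert usize -> u32,
        // panicking on overflow rather than truncating.
        let num_defined_globals =
            u32::try_from(num_defined_globals).expect("overflow in cast from usize to u32");
        Self {
            globals_begin,
            global_size,
            num_defined_globals,
        }
    }

    /// Offset of global `index`: begin + index * size, overflow-checked.
    fn global_offset(&self, index: u32) -> u32 {
        assert!(index < self.num_defined_globals);
        self.globals_begin
            .checked_add(index.checked_mul(self.global_size).unwrap())
            .unwrap()
    }
}

fn main() {
    let offsets = Offsets::new(64, 8, 4);
    assert_eq!(offsets.global_offset(3), 64 + 3 * 8);
}
```

The same conversion is what the patch's `cast_to_u32` helper and the `sig_registry.rs` hunk express with the `cast` crate: on 64-bit hosts `cast::u32(usize)` yields a `Result` that is unwrapped (or matched) with a panic on overflow, while on 32-bit hosts `usize` already fits in `u32`, so no fallible conversion is needed and the two `#[cfg(target_pointer_width = "...")]` variants differ only in that unwrapping.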