diff --git a/cranelift/codegen/src/ir/dfg.rs b/cranelift/codegen/src/ir/dfg.rs
index 861f3147c5..f64cf896c4 100644
--- a/cranelift/codegen/src/ir/dfg.rs
+++ b/cranelift/codegen/src/ir/dfg.rs
@@ -7,8 +7,8 @@ use crate::ir::extfunc::ExtFuncData;
 use crate::ir::instructions::{BranchInfo, CallInfo, InstructionData};
 use crate::ir::{types, ConstantData, ConstantPool, Immediate};
 use crate::ir::{
-    Block, FuncRef, Inst, SigRef, Signature, Type, Value, ValueLabelAssignments, ValueList,
-    ValueListPool,
+    Block, FuncRef, Inst, SigRef, Signature, SourceLoc, Type, Value, ValueLabelAssignments,
+    ValueList, ValueListPool,
 };
 use crate::isa::TargetIsa;
 use crate::packed_option::ReservedValue;
@@ -153,6 +153,14 @@ impl DataFlowGraph {
             self.values_labels = Some(HashMap::new());
         }
     }
+
+    /// Inserts a `ValueLabelAssignments::Alias` for `to_alias` if debug info
+    /// collection is enabled.
+    pub fn add_value_label_alias(&mut self, to_alias: Value, from: SourceLoc, value: Value) {
+        if let Some(values_labels) = self.values_labels.as_mut() {
+            values_labels.insert(to_alias, ir::ValueLabelAssignments::Alias { from, value });
+        }
+    }
 }
 
 /// Resolve value aliases.
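The new helper centralizes the label-aliasing that `compute_addr` previously open-coded (removed in the heap legalizer below). A minimal sketch of the intended call pattern, where `pos`, `addr_ty`, and `original` are illustrative names rather than anything from the patch:

    // After widening a value, alias its debug labels back to the original
    // so debuginfo keeps tracking the variable across the extension.
    let extended = pos.ins().uextend(addr_ty, original);
    let loc = pos.srcloc();
    pos.func.dfg.add_value_label_alias(extended, loc, original);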
diff --git a/cranelift/codegen/src/legalizer/heap.rs b/cranelift/codegen/src/legalizer/heap.rs
index 26a0640f08..a208baee25 100644
--- a/cranelift/codegen/src/legalizer/heap.rs
+++ b/cranelift/codegen/src/legalizer/heap.rs
@@ -64,8 +64,10 @@ fn dynamic_addr(
     let mut pos = FuncCursor::new(func).at_inst(inst);
     pos.use_srcloc(inst);
 
+    let offset = cast_offset_to_pointer_ty(offset, offset_ty, addr_ty, &mut pos);
+
     // Start with the bounds check. Trap if `offset + access_size > bound`.
-    let bound = pos.ins().global_value(offset_ty, bound_gv);
+    let bound = pos.ins().global_value(addr_ty, bound_gv);
     let (cc, lhs, bound) = if access_size == 1 {
         // `offset > bound - 1` is the same as `offset >= bound`.
         (IntCC::UnsignedGreaterThanOrEqual, offset, bound)
@@ -76,7 +78,7 @@ fn dynamic_addr(
         (IntCC::UnsignedGreaterThan, offset, adj_bound)
     } else {
         // We need an overflow check for the adjusted offset.
-        let access_size_val = pos.ins().iconst(offset_ty, access_size as i64);
+        let access_size_val = pos.ins().iconst(addr_ty, access_size as i64);
         let (adj_offset, overflow) = pos.ins().iadd_ifcout(offset, access_size_val);
         pos.ins().trapif(
             isa.unsigned_add_overflow_condition(),
@@ -100,7 +102,6 @@ fn dynamic_addr(
         heap,
         addr_ty,
         offset,
-        offset_ty,
         pos.func,
         spectre_oob_comparison,
     );
@@ -111,7 +112,7 @@ fn static_addr(
     isa: &dyn TargetIsa,
     inst: ir::Inst,
     heap: ir::Heap,
-    offset: ir::Value,
+    mut offset: ir::Value,
     access_size: u32,
     bound: u64,
     func: &mut ir::Function,
@@ -156,6 +157,7 @@ fn static_addr(
     // `bound - access_size >= 4GB` we can omit a bounds check.
     let limit = bound - access_size;
     let mut spectre_oob_comparison = None;
+    offset = cast_offset_to_pointer_ty(offset, offset_ty, addr_ty, &mut pos);
     if offset_ty != ir::types::I32 || limit < 0xffff_ffff {
         let (cc, lhs, limit_imm) = if limit & 1 == 1 {
             // Prefer testing `offset >= limit - 1` when limit is odd because an even number is
@@ -169,7 +171,7 @@ fn static_addr(
         let oob = pos.ins().icmp_imm(cc, lhs, limit_imm);
         pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
         if isa.flags().enable_heap_access_spectre_mitigation() {
-            let limit = pos.ins().iconst(offset_ty, limit_imm);
+            let limit = pos.ins().iconst(addr_ty, limit_imm);
             spectre_oob_comparison = Some((cc, lhs, limit));
         }
     }
@@ -180,20 +182,46 @@ fn static_addr(
         heap,
         addr_ty,
         offset,
-        offset_ty,
         pos.func,
         spectre_oob_comparison,
     );
 }
 
+fn cast_offset_to_pointer_ty(
+    offset: ir::Value,
+    offset_ty: ir::Type,
+    addr_ty: ir::Type,
+    pos: &mut FuncCursor,
+) -> ir::Value {
+    if offset_ty == addr_ty {
+        return offset;
+    }
+    // Note that using 64-bit heaps on a 32-bit host is not currently supported;
+    // it would require at least a bounds check here to ensure that the
+    // truncation from 64 to 32 bits doesn't lose any upper bits. For now,
+    // though, we're mostly interested in the 32-bit-heaps-on-64-bit-hosts case.
+    assert!(offset_ty.bits() < addr_ty.bits());
+
+    // Convert `offset` to `addr_ty`.
+    let extended_offset = pos.ins().uextend(addr_ty, offset);
+
+    // Add a debug value-label alias so that debuginfo can keep tracking the
+    // extended value just like the original offset.
+    let loc = pos.srcloc();
+    pos.func
+        .dfg
+        .add_value_label_alias(extended_offset, loc, offset);
+
+    extended_offset
+}
+
 /// Emit code for the base address computation of a `heap_addr` instruction.
 fn compute_addr(
     isa: &dyn TargetIsa,
     inst: ir::Inst,
     heap: ir::Heap,
     addr_ty: ir::Type,
-    mut offset: ir::Value,
-    offset_ty: ir::Type,
+    offset: ir::Value,
     func: &mut ir::Function,
     // If we are performing Spectre mitigation with conditional selects, the
     // values to compare and the condition code that indicates an out-of bounds
@@ -201,24 +229,10 @@
     // speculatively safe address (a zero / null pointer) instead.
     spectre_oob_comparison: Option<(IntCC, ir::Value, ir::Value)>,
 ) {
+    debug_assert_eq!(func.dfg.value_type(offset), addr_ty);
     let mut pos = FuncCursor::new(func).at_inst(inst);
     pos.use_srcloc(inst);
 
-    // Convert `offset` to `addr_ty`.
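For reference, the dynamic bounds check emitted above behaves like the following host-side model. This is a sketch only: the real code emits CLIF rather than running this logic, and `heap_min_size` stands in for the heap's guaranteed minimum bound, which is what makes the middle arm's subtraction safe.

    fn is_out_of_bounds(offset: u64, access_size: u64, bound: u64, heap_min_size: u64) -> bool {
        if access_size == 1 {
            // `offset > bound - 1` is the same as `offset >= bound`.
            offset >= bound
        } else if access_size <= heap_min_size {
            // `bound >= heap_min_size >= access_size`, so this cannot wrap.
            offset > bound - access_size
        } else {
            // The adjusted offset may overflow, hence the emitted
            // `iadd_ifcout` + `trapif` pair in the real code.
            match offset.checked_add(access_size) {
                Some(adjusted) => adjusted > bound,
                None => true,
            }
        }
    }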
-    if offset_ty != addr_ty {
-        let labels_value = offset;
-        offset = pos.ins().uextend(addr_ty, offset);
-        if let Some(values_labels) = pos.func.dfg.values_labels.as_mut() {
-            values_labels.insert(
-                offset,
-                ir::ValueLabelAssignments::Alias {
-                    from: pos.func.srclocs[inst],
-                    value: labels_value,
-                },
-            );
-        }
-    }
-
     // Add the heap base address
     let base = if isa.flags().enable_pinned_reg() && isa.flags().use_pinned_reg_as_heap_base() {
         pos.ins().get_pinned_reg(isa.pointer_type())
diff --git a/cranelift/codegen/src/verifier/mod.rs b/cranelift/codegen/src/verifier/mod.rs
index d1850ba010..c1721a38fd 100644
--- a/cranelift/codegen/src/verifier/mod.rs
+++ b/cranelift/codegen/src/verifier/mod.rs
@@ -439,14 +439,13 @@ impl<'a> Verifier<'a> {
                 .nonfatal((heap, format!("invalid bound global value {}", bound_gv)));
         }
 
-        let index_type = heap_data.index_type;
         let bound_type = self.func.global_values[bound_gv].global_type(isa);
-        if index_type != bound_type {
+        if pointer_type != bound_type {
             errors.report((
                 heap,
                 format!(
-                    "heap index type {} differs from the type of its bound, {}",
-                    index_type, bound_type
+                    "heap pointer type {} differs from the type of its bound, {}",
+                    pointer_type, bound_type
                 ),
             ));
         }
diff --git a/cranelift/filetests/filetests/isa/aarch64/heap_addr.clif b/cranelift/filetests/filetests/isa/aarch64/heap_addr.clif
index 32031aac0d..f78ec80b42 100644
--- a/cranelift/filetests/filetests/isa/aarch64/heap_addr.clif
+++ b/cranelift/filetests/filetests/isa/aarch64/heap_addr.clif
@@ -5,7 +5,7 @@ target aarch64
 
 function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
     gv0 = vmctx
-    gv1 = load.i32 notrap aligned gv0
+    gv1 = load.i64 notrap aligned gv0
     heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
 
 block0(v0: i64, v1: i32):
@@ -16,13 +16,14 @@ block0(v0: i64, v1: i32):
 ; check: Block 0:
 ; check: stp fp, lr, [sp, #-16]!
 ; nextln: mov fp, sp
-; nextln: ldr w2, [x0]
-; nextln: add w2, w2, #0
-; nextln: subs wzr, w1, w2
+; nextln: mov w2, w1
+; nextln: ldr x3, [x0]
+; nextln: mov x3, x3
+; nextln: subs xzr, x2, x3
 ; nextln: b.ls label1 ; b label2
 ; check: Block 1:
 ; check: add x0, x0, x1, UXTW
-; nextln: subs wzr, w1, w2
+; nextln: subs xzr, x2, x3
 ; nextln: movz x1, #0
 ; nextln: csel x0, x1, x0, hi
 ; nextln: ldp fp, lr, [sp], #16
@@ -42,11 +43,12 @@ block0(v0: i64, v1: i32):
 ; check: Block 0:
 ; check: stp fp, lr, [sp, #-16]!
 ; nextln: mov fp, sp
-; nextln: subs wzr, w1, #65536
+; nextln: mov w2, w1
+; nextln: subs xzr, x2, #65536
 ; nextln: b.ls label1 ; b label2
 ; check: Block 1:
 ; check: add x0, x0, x1, UXTW
-; nextln: subs wzr, w1, #65536
+; nextln: subs xzr, x2, #65536
 ; nextln: movz x1, #0
 ; nextln: csel x0, x1, x0, hi
 ; nextln: ldp fp, lr, [sp], #16
diff --git a/cranelift/filetests/filetests/isa/s390x/heap_addr.clif b/cranelift/filetests/filetests/isa/s390x/heap_addr.clif
index 659ba71ae6..1eda3470cd 100644
--- a/cranelift/filetests/filetests/isa/s390x/heap_addr.clif
+++ b/cranelift/filetests/filetests/isa/s390x/heap_addr.clif
@@ -3,7 +3,7 @@ target s390x
 
 function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
     gv0 = vmctx
-    gv1 = load.i32 notrap aligned gv0
+    gv1 = load.i64 notrap aligned gv0
     heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
 
 block0(v0: i64, v1: i32):
@@ -12,15 +12,15 @@ block0(v0: i64, v1: i32):
 }
 
 ; check: Block 0:
-; check: l %r4, 0(%r2)
-; nextln: ahi %r4, 0
-; nextln: clr %r3, %r4
+; check: llgfr %r3, %r3
+; nextln: lg %r4, 0(%r2)
+; nextln: aghi %r4, 0
+; nextln: clgr %r3, %r4
 ; nextln: jgnh label1 ; jg label2
 ; check: Block 1:
-; check: llgfr %r5, %r3
-; nextln: agr %r2, %r5
+; check: agr %r2, %r3
 ; nextln: lghi %r5, 0
-; nextln: clr %r3, %r4
+; nextln: clgr %r3, %r4
 ; nextln: locgrh %r2, %r5
 ; nextln: br %r14
 ; check: Block 2:
@@ -36,13 +36,13 @@ block0(v0: i64, v1: i32):
 }
 
 ; check: Block 0:
-; check: clfi %r3, 65536
+; check: llgfr %r3, %r3
+; nextln: clgfi %r3, 65536
 ; nextln: jgnh label1 ; jg label2
 ; check: Block 1:
-; check: llgfr %r4, %r3
-; nextln: agr %r2, %r4
+; check: agr %r2, %r3
 ; nextln: lghi %r4, 0
-; nextln: clfi %r3, 65536
+; nextln: clgfi %r3, 65536
 ; nextln: locgrh %r2, %r4
 ; nextln: br %r14
 ; check: Block 2:
diff --git a/cranelift/filetests/filetests/isa/x64/heap.clif b/cranelift/filetests/filetests/isa/x64/heap.clif
index 35660c826a..2c77bc7ec2 100644
--- a/cranelift/filetests/filetests/isa/x64/heap.clif
+++ b/cranelift/filetests/filetests/isa/x64/heap.clif
@@ -4,19 +4,20 @@ target x86_64 machinst
 
 function %f(i32, i64 vmctx) -> i64 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
-    gv2 = load.i32 notrap aligned gv0+8
+    gv2 = load.i64 notrap aligned gv0+8
     heap0 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i32
 
 block0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 0x8000
-    ; check: movl 8(%rsi), %ecx
-    ; nextln: movq %rdi, %rax
-    ; nextln: addl $$32768, %eax
+    ; check: movl %edi, %ecx
+    ; nextln: movq 8(%rsi), %rdi
+    ; nextln: movq %rcx, %rax
+    ; nextln: addq $$32768, %rax
     ; nextln: jnb ; ud2 heap_oob ;
-    ; nextln: cmpl %ecx, %eax
+    ; nextln: cmpq %rdi, %rax
    ; nextln: jbe label1; j label2
    ; check: Block 1:
-
+
    return v2
 }
diff --git a/cranelift/filetests/filetests/isa/x86/legalize-heaps.clif b/cranelift/filetests/filetests/isa/x86/legalize-heaps.clif
index f833f3b3ca..242a0f8dfa 100644
--- a/cranelift/filetests/filetests/isa/x86/legalize-heaps.clif
+++ b/cranelift/filetests/filetests/isa/x86/legalize-heaps.clif
@@ -10,7 +10,7 @@ function %heap_addrs(i32, i64, i64 vmctx) {
     gv0 = iadd_imm.i64 gv4, 64
     gv1 = iadd_imm.i64 gv4, 72
     gv2 = iadd_imm.i64 gv4, 80
-    gv3 = load.i32 notrap aligned gv4+88
+    gv3 = load.i64 notrap aligned gv4+88
 
     heap0 = static gv0, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
     heap1 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i32
@@ -38,15 +38,15 @@ block0(v0: i32, v1: i64, v3: i64):
     ; check: v4 = iadd v13, v12
 
     v5 = heap_addr.i64 heap1, v0, 0
-    ; check: v14 = icmp_imm ugt v0, 0x0001_0000
-    ; check: brz v14, $(resume_1=$BB)
+    ; check: v14 = uextend.i64 v0
+    ; check: v15 = icmp_imm ugt v14, 0x0001_0000
+    ; check: brz v15, $(resume_1=$BB)
     ; nextln: jump $(trap_1=$BB)
     ; check: $trap_1:
     ; nextln: trap heap_oob
     ; check: $resume_1:
-    ; check: v15 = uextend.i64 v0
     ; check: v16 = iadd_imm.i64 v3, 64
-    ; check: v5 = iadd v16, v15
+    ; check: v5 = iadd v16, v14
 
     v6 = heap_addr.i64 heap2, v1, 0
     ; check: v19 = iconst.i64 0x0001_0000_0000
@@ -70,30 +70,30 @@ block0(v0: i32, v1: i64, v3: i64):
     ; check: v7 = iadd v21, v1
 
     v8 = heap_addr.i64 heap4, v0, 0
-    ; check: v22 = load.i32 notrap aligned v3+88
-    ; check: v23 = iadd_imm v22, 0
-    ; check: v24 = icmp.i32 ugt v0, v23
-    ; check: brz v24, $(resume_4=$BB)
+    ; check: v22 = uextend.i64 v0
+    ; check: v23 = load.i64 notrap aligned v3+88
+    ; check: v24 = iadd_imm v23, 0
+    ; check: v25 = icmp ugt v22, v24
+    ; check: brz v25, $(resume_4=$BB)
     ; nextln: jump $(trap_4=$BB)
     ; check: $trap_4:
     ; nextln: trap heap_oob
     ; check: $resume_4:
-    ; check: v25 = uextend.i64 v0
     ; check: v26 = iadd_imm.i64 v3, 72
-    ; check: v8 = iadd v26, v25
+    ; check: v8 = iadd v26, v22
 
     v9 = heap_addr.i64 heap5, v0, 0
-    ; check: v27 = load.i32 notrap aligned v3+88
-    ; check: v28 = iadd_imm v27, 0
-    ; check: v29 = icmp.i32 ugt v0, v28
-    ; check: brz v29, $(resume_5=$BB)
+    ; check: v27 = uextend.i64 v0
+    ; check: v28 = load.i64 notrap aligned v3+88
+    ; check: v29 = iadd_imm v28, 0
+    ; check: v30 = icmp ugt v27, v29
+    ; check: brz v30, $(resume_5=$BB)
     ; nextln: jump $(trap_5=$BB)
     ; check: $trap_5:
     ; nextln: trap heap_oob
     ; check: $resume_5:
-    ; check: v30 = uextend.i64 v0
     ; check: v31 = iadd_imm.i64 v3, 72
-    ; check: v9 = iadd v31, v30
+    ; check: v9 = iadd v31, v27
 
     v10 = heap_addr.i64 heap6, v1, 0
     ; check: v32 = iadd_imm.i64 v3, 80
diff --git a/cranelift/filetests/filetests/isa/x86/legalize-memory.clif b/cranelift/filetests/filetests/isa/x86/legalize-memory.clif
index 5e7113b415..11a0f1d20f 100644
--- a/cranelift/filetests/filetests/isa/x86/legalize-memory.clif
+++ b/cranelift/filetests/filetests/isa/x86/legalize-memory.clif
@@ -56,7 +56,7 @@ block0(v0: i32, v999: i64):
     ; Boundscheck should be eliminated.
     ; Checks here are assuming that no pipehole opts fold the load offsets.
     ; nextln: $(xoff=$V) = uextend.i64 v0
-    ; nextln: $(hbase=$V) = iadd_imm v999, 64
+    ; check: $(hbase=$V) = iadd_imm v999, 64
     ; nextln: v1 = iadd $hbase, $xoff
     v2 = load.f32 v1+16
     ; nextln: v2 = load.f32 v1+16
@@ -99,6 +99,7 @@ block0(v0: i32, v999: i64):
 ; check: block0(
     v1 = heap_addr.i64 heap0, v0, 0x8000_0000
     ; Boundscheck code
+    ; check: $(xoff=$V) = uextend.i64 v0
     ; check: $(oob=$V) = icmp
     ; nextln: brz $oob, $(ok=$BB)
     ; nextln: jump $(trap_oob=$BB)
@@ -106,8 +107,7 @@ block0(v0: i32, v999: i64):
     ; nextln: trap heap_oob
     ; check: $ok:
     ; Checks here are assuming that no pipehole opts fold the load offsets.
-    ; nextln: $(xoff=$V) = uextend.i64 v0
-    ; nextln: $(hbase=$V) = iadd_imm.i64 v999, 64
+    ; check: $(hbase=$V) = iadd_imm.i64 v999, 64
     ; nextln: v1 = iadd $hbase, $xoff
 
     v2 = load.f32 v1+0x7fff_ffff
     ; nextln: v2 = load.f32 v1+0x7fff_ffff
diff --git a/cranelift/filetests/filetests/verifier/heap.clif b/cranelift/filetests/filetests/verifier/heap.clif
index ffd6bb7ac4..2a73f4ee8f 100644
--- a/cranelift/filetests/filetests/verifier/heap.clif
+++ b/cranelift/filetests/filetests/verifier/heap.clif
@@ -29,7 +29,7 @@ block0(v0: i64):
 function %heap_bound_type(i64 vmctx) {
     gv0 = vmctx
     gv1 = load.i16 notrap aligned gv0
-    heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap index type i32 differs from the type of its bound, i16
+    heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap pointer type i64 differs from the type of its bound, i16
 
 block0(v0: i64):
     return
diff --git a/cranelift/wasm/src/code_translator.rs b/cranelift/wasm/src/code_translator.rs
index 31efed58da..cc6866fc4d 100644
--- a/cranelift/wasm/src/code_translator.rs
+++ b/cranelift/wasm/src/code_translator.rs
@@ -2155,6 +2155,9 @@ fn get_heap_addr(
     addr_ty: Type,
     builder: &mut FunctionBuilder,
 ) -> (ir::Value, i32) {
+    // This function will need updates for 64-bit memories.
+    debug_assert_eq!(builder.func.dfg.value_type(addr32), I32);
+
     let offset_guard_size: u64 = builder.func.heaps[heap].offset_guard_size.into();
 
     // How exactly the bounds check is performed here and what it's performed
diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs
index d021bf7ba9..7c5f978a4a 100644
--- a/crates/cranelift/src/func_environ.rs
+++ b/crates/cranelift/src/func_environ.rs
@@ -1154,7 +1154,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
                 let heap_bound = func.create_global_value(ir::GlobalValueData::Load {
                     base: ptr,
                     offset: Offset32::new(current_length_offset),
-                    global_type: self.offsets.type_of_vmmemory_definition_current_length(),
+                    global_type: pointer_type,
                     readonly: false,
                 });
                 (
@@ -1417,7 +1417,8 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
                         .vmctx_vmmemory_definition_current_length(def_index),
                 )
                 .unwrap();
-                pos.ins().load(I32, ir::MemFlags::trusted(), base, offset)
+                pos.ins()
+                    .load(pointer_type, ir::MemFlags::trusted(), base, offset)
             }
             None => {
                 let offset = i32::try_from(self.offsets.vmctx_vmmemory_import_from(index)).unwrap();
@@ -1425,7 +1426,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
                     pos.ins()
                         .load(pointer_type, ir::MemFlags::trusted(), base, offset);
                 pos.ins().load(
-                    I32,
+                    pointer_type,
                     ir::MemFlags::trusted(),
                     vmmemory_ptr,
                     i32::from(self.offsets.vmmemory_definition_current_length()),
@@ -1435,7 +1436,12 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
         let current_length_in_pages = pos
             .ins()
             .udiv_imm(current_length_in_bytes, i64::from(WASM_PAGE_SIZE));
-        Ok(current_length_in_pages)
+        if pointer_type == I32 {
+            Ok(current_length_in_pages)
+        } else {
+            assert_eq!(pointer_type, I64);
+            Ok(pos.ins().ireduce(I32, current_length_in_pages))
+        }
     }
 
     fn translate_memory_copy(
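The `memory.size` lowering above now loads the byte length at pointer width, divides by the page size, and, on 64-bit targets, narrows the page count back to `i32` with `ireduce`. A host-side model of why that narrowing is lossless (the function name is illustrative):

    use std::convert::TryFrom;

    const WASM_PAGE_SIZE: usize = 0x1_0000; // 64 KiB

    fn size_in_pages(current_length: usize) -> u32 {
        // A 32-bit wasm memory has at most 65536 pages, so the page count
        // always fits in 32 bits even when the byte length does not.
        u32::try_from(current_length / WASM_PAGE_SIZE).unwrap()
    }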
diff --git a/crates/environ/src/vmoffsets.rs b/crates/environ/src/vmoffsets.rs
index bf21ab0121..c4d2bf113b 100644
--- a/crates/environ/src/vmoffsets.rs
+++ b/crates/environ/src/vmoffsets.rs
@@ -422,23 +422,11 @@ impl VMOffsets {
         1 * self.pointer_size()
     }
 
-    /// The size of the `current_length` field.
-    #[inline]
-    pub fn size_of_vmmemory_definition_current_length(&self) -> u8 {
-        4
-    }
-
     /// Return the size of `VMMemoryDefinition`.
     #[inline]
     pub fn size_of_vmmemory_definition(&self) -> u8 {
         2 * self.pointer_size()
     }
-
-    /// The type of the `current_length` field.
-    #[inline]
-    pub fn type_of_vmmemory_definition_current_length(&self) -> ir::Type {
-        ir::Type::int(u16::from(self.size_of_vmmemory_definition_current_length()) * 8).unwrap()
-    }
 }
 
 /// Offsets for `VMGlobalImport`.
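With `current_length` pointer-sized, `VMMemoryDefinition` becomes two pointer-width fields, which is why `size_of_vmmemory_definition` stays `2 * pointer_size()` and the per-field helpers can be dropped. An illustrative layout check, not part of the patch:

    // Mirror of the struct defined in crates/runtime/src/vmcontext.rs.
    #[repr(C)]
    struct VMMemoryDefinition {
        base: *mut u8,
        current_length: usize,
    }

    fn main() {
        // Two pointer-sized fields: no padding, no separate length helper.
        assert_eq!(
            std::mem::size_of::<VMMemoryDefinition>(),
            2 * std::mem::size_of::<usize>(),
        );
    }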
diff --git a/crates/runtime/src/instance.rs b/crates/runtime/src/instance.rs
index 216979bdf8..a3e578322f 100644
--- a/crates/runtime/src/instance.rs
+++ b/crates/runtime/src/instance.rs
@@ -626,13 +626,11 @@ impl Instance {
         let src_mem = self.get_memory(src_index);
         let dst_mem = self.get_memory(dst_index);
 
-        if src
-            .checked_add(len)
-            .map_or(true, |n| n > src_mem.current_length)
-            || dst
-                .checked_add(len)
-                .map_or(true, |m| m > dst_mem.current_length)
-        {
+        if src.checked_add(len).map_or(true, |n| {
+            usize::try_from(n).unwrap() > src_mem.current_length
+        }) || dst.checked_add(len).map_or(true, |m| {
+            usize::try_from(m).unwrap() > dst_mem.current_length
+        }) {
             return Err(Trap::wasm(ir::TrapCode::HeapOutOfBounds));
         }
 
@@ -664,10 +662,9 @@ impl Instance {
     ) -> Result<(), Trap> {
         let memory = self.get_memory(memory_index);
 
-        if dst
-            .checked_add(len)
-            .map_or(true, |m| m > memory.current_length)
-        {
+        if dst.checked_add(len).map_or(true, |m| {
+            usize::try_from(m).unwrap() > memory.current_length
+        }) {
             return Err(Trap::wasm(ir::TrapCode::HeapOutOfBounds));
         }
 
@@ -726,10 +723,10 @@ impl Instance {
 
         if src
             .checked_add(len)
-            .map_or(true, |n| n as usize > data.len())
-            || dst
-                .checked_add(len)
-                .map_or(true, |m| m > memory.current_length)
+            .map_or(true, |n| usize::try_from(n).unwrap() > data.len())
+            || dst.checked_add(len).map_or(true, |m| {
+                usize::try_from(m).unwrap() > memory.current_length
+            })
         {
             return Err(Trap::wasm(ir::TrapCode::HeapOutOfBounds));
         }
diff --git a/crates/runtime/src/instance/allocator.rs b/crates/runtime/src/instance/allocator.rs
index 647b539e45..a0af4a5055 100644
--- a/crates/runtime/src/instance/allocator.rs
+++ b/crates/runtime/src/instance/allocator.rs
@@ -309,7 +309,7 @@ fn check_memory_init_bounds(
         let end = start.checked_add(init.data.len());
 
         match end {
-            Some(end) if end <= memory.current_length as usize => {
+            Some(end) if end <= memory.current_length => {
                 // Initializer is in bounds
             }
             _ => {
@@ -382,9 +382,8 @@ fn initialize_instance(
         MemoryInitialization::Paged { map, out_of_bounds } => {
             for (index, pages) in map {
                 let memory = instance.memory(index);
-                let slice = unsafe {
-                    slice::from_raw_parts_mut(memory.base, memory.current_length as usize)
-                };
+                let slice =
+                    unsafe { slice::from_raw_parts_mut(memory.base, memory.current_length) };
 
                 for (page_index, page) in pages.iter().enumerate() {
                     if let Some(data) = page {
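The bounds checks above all follow one pattern: widen the 32-bit wasm operands to `usize` before comparing against the now-`usize` length. `usize::try_from` on a `u32` cannot fail on the platforms involved, hence the `unwrap()`. Distilled into a standalone sketch (names illustrative):

    use std::convert::TryFrom;

    /// Returns true when `dst + len` is out of bounds for a memory of
    /// `current_length` bytes.
    fn out_of_bounds(dst: u32, len: u32, current_length: usize) -> bool {
        dst.checked_add(len)
            .map_or(true, |end| usize::try_from(end).unwrap() > current_length)
    }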
diff --git a/crates/runtime/src/memory.rs b/crates/runtime/src/memory.rs
index 5d5dbdc283..e69dd056ee 100644
--- a/crates/runtime/src/memory.rs
+++ b/crates/runtime/src/memory.rs
@@ -155,10 +155,6 @@ impl RuntimeLinearMemory for MmapMemory {
             // Linear memory size would exceed the index range.
             return None;
         }
-        // FIXME: https://github.com/bytecodealliance/wasmtime/issues/3022
-        if new_pages == WASM_MAX_PAGES {
-            return None;
-        }
 
         let delta_bytes = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
         let prev_bytes = usize::try_from(prev_pages).unwrap() * WASM_PAGE_SIZE as usize;
@@ -198,8 +194,7 @@ impl RuntimeLinearMemory for MmapMemory {
     fn vmmemory(&self) -> VMMemoryDefinition {
         VMMemoryDefinition {
             base: unsafe { self.mmap.alloc.as_mut_ptr().add(self.pre_guard_size) },
-            current_length: u32::try_from(self.mmap.size as usize * WASM_PAGE_SIZE as usize)
-                .unwrap(),
+            current_length: self.mmap.size as usize * WASM_PAGE_SIZE as usize,
         }
     }
 }
@@ -276,13 +271,6 @@ impl Memory {
     }
 
     fn limit_new(plan: &MemoryPlan, limiter: Option<&mut dyn ResourceLimiter>) -> Result<()> {
-        // FIXME: https://github.com/bytecodealliance/wasmtime/issues/3022
-        if plan.memory.minimum == WASM_MAX_PAGES {
-            bail!(
-                "memory minimum size of {} pages exceeds memory limits",
-                plan.memory.minimum
-            );
-        }
         if let Some(limiter) = limiter {
             if !limiter.memory_growing(0, plan.memory.minimum, plan.memory.maximum) {
                 bail!(
@@ -374,10 +362,6 @@ impl Memory {
         if new_size > maximum.unwrap_or(WASM_MAX_PAGES) {
             return None;
         }
-        // FIXME: https://github.com/bytecodealliance/wasmtime/issues/3022
-        if new_size == WASM_MAX_PAGES {
-            return None;
-        }
 
         let start = usize::try_from(old_size).unwrap() * WASM_PAGE_SIZE as usize;
         let len = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
@@ -397,7 +381,7 @@ impl Memory {
         match self {
             Memory::Static { base, size, .. } => VMMemoryDefinition {
                 base: base.as_ptr() as *mut _,
-                current_length: u32::try_from(*size as usize * WASM_PAGE_SIZE as usize).unwrap(),
+                current_length: *size as usize * WASM_PAGE_SIZE as usize,
             },
             Memory::Dynamic(mem) => mem.vmmemory(),
         }
diff --git a/crates/runtime/src/vmcontext.rs b/crates/runtime/src/vmcontext.rs
index 5229e3bce7..21660c47dd 100644
--- a/crates/runtime/src/vmcontext.rs
+++ b/crates/runtime/src/vmcontext.rs
@@ -207,7 +207,7 @@ pub struct VMMemoryDefinition {
     pub base: *mut u8,
 
     /// The current logical size of this linear memory in bytes.
-    pub current_length: u32,
+    pub current_length: usize,
 }
 
 #[cfg(test)]
diff --git a/crates/wasmtime/src/memory.rs b/crates/wasmtime/src/memory.rs
index a65b8d29f2..7ea2c96a53 100644
--- a/crates/wasmtime/src/memory.rs
+++ b/crates/wasmtime/src/memory.rs
@@ -318,7 +318,7 @@ impl Memory {
         unsafe {
             let store = store.into();
             let definition = *store[self.0].definition;
-            slice::from_raw_parts(definition.base, definition.current_length as usize)
+            slice::from_raw_parts(definition.base, definition.current_length)
         }
     }
 
@@ -334,7 +334,7 @@ impl Memory {
         unsafe {
            let store = store.into();
            let definition = *store[self.0].definition;
-            slice::from_raw_parts_mut(definition.base, definition.current_length as usize)
+            slice::from_raw_parts_mut(definition.base, definition.current_length)
         }
     }
 
@@ -395,7 +395,7 @@ impl Memory {
     ///
     /// Panics if this memory doesn't belong to `store`.
     pub fn data_size(&self, store: impl AsContext) -> usize {
-        unsafe { (*store.as_context()[self.0].definition).current_length as usize }
+        unsafe { (*store.as_context()[self.0].definition).current_length }
     }
 
     /// Returns the size, in WebAssembly pages, of this wasm memory.
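These runtime changes are what allow the `WASM_MAX_PAGES` FIXMEs above to be deleted: a full 65536-page memory has a byte length of 1 << 32, which overflows `u32` but fits in `usize` on 64-bit hosts. The arithmetic, as a quick sketch:

    fn main() {
        const WASM_PAGE_SIZE: u64 = 0x1_0000; // 64 KiB per page
        const WASM_MAX_PAGES: u64 = 0x1_0000; // 65536 pages
        // 4 GiB: one past u32::MAX, representable as usize on 64-bit hosts.
        assert_eq!(WASM_PAGE_SIZE * WASM_MAX_PAGES, 1u64 << 32);
    }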
diff --git a/crates/wasmtime/src/trampoline/memory.rs b/crates/wasmtime/src/trampoline/memory.rs
index e5e03db211..a8d86a6ca8 100644
--- a/crates/wasmtime/src/trampoline/memory.rs
+++ b/crates/wasmtime/src/trampoline/memory.rs
@@ -3,13 +3,11 @@
 use crate::store::{InstanceId, StoreOpaque};
 use crate::trampoline::create_handle;
 use crate::{Limits, MemoryType};
 use anyhow::{anyhow, Result};
+use std::sync::Arc;
 use wasmtime_environ::entity::PrimaryMap;
 use wasmtime_environ::{wasm, MemoryPlan, MemoryStyle, Module, WASM_PAGE_SIZE};
 use wasmtime_runtime::{RuntimeLinearMemory, RuntimeMemoryCreator, VMMemoryDefinition};
 
-use std::convert::TryFrom;
-use std::sync::Arc;
-
 pub fn create_memory(store: &mut StoreOpaque<'_>, memory: &MemoryType) -> Result<InstanceId> {
     let mut module = Module::new();
 
@@ -49,8 +47,7 @@ impl RuntimeLinearMemory for LinearMemoryProxy {
     fn vmmemory(&self) -> VMMemoryDefinition {
         VMMemoryDefinition {
             base: self.mem.as_ptr(),
-            current_length: u32::try_from(self.mem.size() as usize * WASM_PAGE_SIZE as usize)
-                .unwrap(),
+            current_length: self.mem.size() as usize * WASM_PAGE_SIZE as usize,
         }
     }
 }
diff --git a/tests/all/instance.rs b/tests/all/instance.rs
index 1def40354d..3ba16eee11 100644
--- a/tests/all/instance.rs
+++ b/tests/all/instance.rs
@@ -57,19 +57,27 @@ fn linear_memory_limits() -> Result<()> {
         (module
             (memory 65534)
 
-            (func (export "foo") (result i32)
+            (func (export "grow") (result i32)
                 i32.const 1
                 memory.grow)
+            (func (export "size") (result i32)
+                memory.size)
         )
     "#;
         let module = Module::new(engine, wat)?;
         let mut store = Store::new(engine, ());
         let instance = Instance::new(&mut store, &module, &[])?;
-        let foo = instance.get_typed_func::<(), i32, _>(&mut store, "foo")?;
+        let size = instance.get_typed_func::<(), i32, _>(&mut store, "size")?;
+        let grow = instance.get_typed_func::<(), i32, _>(&mut store, "grow")?;
 
-        assert_eq!(foo.call(&mut store, ())?, 65534);
-        assert_eq!(foo.call(&mut store, ())?, -1);
+        assert_eq!(size.call(&mut store, ())?, 65534);
+        assert_eq!(grow.call(&mut store, ())?, 65534);
+        assert_eq!(size.call(&mut store, ())?, 65535);
+        assert_eq!(grow.call(&mut store, ())?, 65535);
+        assert_eq!(size.call(&mut store, ())?, 65536);
+        assert_eq!(grow.call(&mut store, ())?, -1);
+        assert_eq!(size.call(&mut store, ())?, 65536);
         Ok(())
     }
 }