Change VMMemoryDefinition::current_length to usize (#3134)

* Change VMMemoryDefinition::current_length to `usize`

This commit changes the type of `VMMemoryDefinition::current_length`
from its previous `u32` to `usize`. This is a fairly impactful change
because it also changes the cranelift semantics of "dynamic" heaps:
the bound global value must now match the platform's pointer type
rather than the heap's index type.

The motivation for this change is that the `current_length` field (the
heap's bound) is intended to reflect the current size of the heap,
which is bounded by `usize` on the host platform rather than by `u32`
or `u64`. The previous choice of `u32` couldn't represent a full 4GB
memory because a value of 4GB doesn't fit in the `current_length`
field. By using `usize`, which reflects the host's memory allocation
limits, the field better reflects the size of the heap and allows
Wasmtime to support a full 4GB heap for a wasm program (instead of 4GB
minus one page).
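
As a quick, standalone illustration of the arithmetic (not code from
this commit), a full 4GB size overflows `u32` but fits in `usize` on a
64-bit host:

    fn main() {
        // 4GB expressed in bytes: 65536 wasm pages of 64 KiB each.
        let four_gb: u64 = 65536 * 65536;
        // This is one past u32::MAX, so a u32 `current_length` tops out
        // at "4GB minus one page" when sizes are whole pages.
        assert_eq!(four_gb, u32::MAX as u64 + 1);
        // On a 64-bit host, usize holds the full 4GB size.
        #[cfg(target_pointer_width = "64")]
        assert!(four_gb <= usize::MAX as u64);
    }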

This commit also updates the legalization of the `heap_addr` clif
instruction to appropriately cast the address to the platform's pointer
type, handling bounds checks along the way. The practical impact for
today's targets is that the `uextend` now happens earlier than it did
before, but otherwise there is no intended change in behavior. In the
future, when 64-bit memories are supported, fancier logic will likely
be needed to handle offsets differently (especially for a 64-bit memory
on a 32-bit host).
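
As a rough model of the new ordering for today's 32-bit memories on a
64-bit host (plain Rust rather than the actual legalizer code; names
here are illustrative only): the 32-bit wasm offset is zero-extended to
the host's pointer width first, and the bounds check and address
computation then happen in pointer-sized arithmetic:

    /// Hypothetical sketch mirroring the shape of the legalized code:
    /// extend first, then check `offset + access_size > bound` in usize math.
    fn heap_addr(base: usize, bound: usize, offset: u32, access_size: usize) -> Option<usize> {
        let offset = offset as usize; // the `uextend` now happens up front
        let end = offset.checked_add(access_size)?; // overflow means out of bounds
        if end > bound {
            return None; // corresponds to a heap_oob trap
        }
        base.checked_add(offset) // base + extended offset
    }

    // e.g. heap_addr(0x1000_0000, 0x1_0000, 0xffff, 1) == Some(0x1000_ffff)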

The clif `filetest` changes show the resulting differences in codegen,
and the Wasmtime changes largely consist of removing casts here and
there.

Closes #3022

* Add tests for memory.size at maximum memory size

* Add a dfg helper method
Author:    Alex Crichton
Date:      2021-08-02 13:09:40 -05:00
Committer: GitHub
Parent:    87fefd8a21
Commit:    63a3bbbf5a

19 changed files with 146 additions and 140 deletions


@@ -7,8 +7,8 @@ use crate::ir::extfunc::ExtFuncData;
 use crate::ir::instructions::{BranchInfo, CallInfo, InstructionData};
 use crate::ir::{types, ConstantData, ConstantPool, Immediate};
 use crate::ir::{
-    Block, FuncRef, Inst, SigRef, Signature, Type, Value, ValueLabelAssignments, ValueList,
-    ValueListPool,
+    Block, FuncRef, Inst, SigRef, Signature, SourceLoc, Type, Value, ValueLabelAssignments,
+    ValueList, ValueListPool,
 };
 use crate::isa::TargetIsa;
 use crate::packed_option::ReservedValue;
@@ -153,6 +153,14 @@ impl DataFlowGraph {
             self.values_labels = Some(HashMap::new());
         }
     }
+
+    /// Inserts a `ValueLabelAssignments::Alias` for `to_alias` if debug info
+    /// collection is enabled.
+    pub fn add_value_label_alias(&mut self, to_alias: Value, from: SourceLoc, value: Value) {
+        if let Some(values_labels) = self.values_labels.as_mut() {
+            values_labels.insert(to_alias, ir::ValueLabelAssignments::Alias { from, value });
+        }
+    }
 }
 
 /// Resolve value aliases.


@@ -64,8 +64,10 @@ fn dynamic_addr(
     let mut pos = FuncCursor::new(func).at_inst(inst);
     pos.use_srcloc(inst);
 
+    let offset = cast_offset_to_pointer_ty(offset, offset_ty, addr_ty, &mut pos);
+
     // Start with the bounds check. Trap if `offset + access_size > bound`.
-    let bound = pos.ins().global_value(offset_ty, bound_gv);
+    let bound = pos.ins().global_value(addr_ty, bound_gv);
     let (cc, lhs, bound) = if access_size == 1 {
         // `offset > bound - 1` is the same as `offset >= bound`.
         (IntCC::UnsignedGreaterThanOrEqual, offset, bound)
@@ -76,7 +78,7 @@ fn dynamic_addr(
         (IntCC::UnsignedGreaterThan, offset, adj_bound)
     } else {
         // We need an overflow check for the adjusted offset.
-        let access_size_val = pos.ins().iconst(offset_ty, access_size as i64);
+        let access_size_val = pos.ins().iconst(addr_ty, access_size as i64);
         let (adj_offset, overflow) = pos.ins().iadd_ifcout(offset, access_size_val);
         pos.ins().trapif(
             isa.unsigned_add_overflow_condition(),
@@ -100,7 +102,6 @@ fn dynamic_addr(
         heap,
         addr_ty,
         offset,
-        offset_ty,
         pos.func,
         spectre_oob_comparison,
     );
@@ -111,7 +112,7 @@ fn static_addr(
     isa: &dyn TargetIsa,
     inst: ir::Inst,
     heap: ir::Heap,
-    offset: ir::Value,
+    mut offset: ir::Value,
     access_size: u32,
     bound: u64,
     func: &mut ir::Function,
@@ -156,6 +157,7 @@ fn static_addr(
     // `bound - access_size >= 4GB` we can omit a bounds check.
     let limit = bound - access_size;
     let mut spectre_oob_comparison = None;
+    offset = cast_offset_to_pointer_ty(offset, offset_ty, addr_ty, &mut pos);
     if offset_ty != ir::types::I32 || limit < 0xffff_ffff {
         let (cc, lhs, limit_imm) = if limit & 1 == 1 {
             // Prefer testing `offset >= limit - 1` when limit is odd because an even number is
@@ -169,7 +171,7 @@ fn static_addr(
         let oob = pos.ins().icmp_imm(cc, lhs, limit_imm);
         pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
         if isa.flags().enable_heap_access_spectre_mitigation() {
-            let limit = pos.ins().iconst(offset_ty, limit_imm);
+            let limit = pos.ins().iconst(addr_ty, limit_imm);
             spectre_oob_comparison = Some((cc, lhs, limit));
         }
     }
@@ -180,20 +182,46 @@ fn static_addr(
         heap,
         addr_ty,
         offset,
-        offset_ty,
         pos.func,
         spectre_oob_comparison,
     );
 }
 
+fn cast_offset_to_pointer_ty(
+    offset: ir::Value,
+    offset_ty: ir::Type,
+    addr_ty: ir::Type,
+    pos: &mut FuncCursor,
+) -> ir::Value {
+    if offset_ty == addr_ty {
+        return offset;
+    }
+    // Note that using 64-bit heaps on a 32-bit host is not currently supported,
+    // would require at least a bounds check here to ensure that the truncation
+    // from 64-to-32 bits doesn't lose any upper bits. For now though we're
+    // mostly interested in the 32-bit-heaps-on-64-bit-hosts cast.
+    assert!(offset_ty.bits() < addr_ty.bits());
+
+    // Convert `offset` to `addr_ty`.
+    let extended_offset = pos.ins().uextend(addr_ty, offset);
+
+    // Add debug value-label alias so that debuginfo can name the extended
+    // value as the address
+    let loc = pos.srcloc();
+    pos.func
+        .dfg
+        .add_value_label_alias(extended_offset, loc, offset);
+
+    extended_offset
+}
+
 /// Emit code for the base address computation of a `heap_addr` instruction.
 fn compute_addr(
     isa: &dyn TargetIsa,
     inst: ir::Inst,
     heap: ir::Heap,
     addr_ty: ir::Type,
-    mut offset: ir::Value,
-    offset_ty: ir::Type,
+    offset: ir::Value,
     func: &mut ir::Function,
     // If we are performing Spectre mitigation with conditional selects, the
     // values to compare and the condition code that indicates an out-of bounds
@@ -201,24 +229,10 @@ fn compute_addr(
     // speculatively safe address (a zero / null pointer) instead.
     spectre_oob_comparison: Option<(IntCC, ir::Value, ir::Value)>,
 ) {
+    debug_assert_eq!(func.dfg.value_type(offset), addr_ty);
     let mut pos = FuncCursor::new(func).at_inst(inst);
     pos.use_srcloc(inst);
 
-    // Convert `offset` to `addr_ty`.
-    if offset_ty != addr_ty {
-        let labels_value = offset;
-        offset = pos.ins().uextend(addr_ty, offset);
-        if let Some(values_labels) = pos.func.dfg.values_labels.as_mut() {
-            values_labels.insert(
-                offset,
-                ir::ValueLabelAssignments::Alias {
-                    from: pos.func.srclocs[inst],
-                    value: labels_value,
-                },
-            );
-        }
-    }
-
     // Add the heap base address base
     let base = if isa.flags().enable_pinned_reg() && isa.flags().use_pinned_reg_as_heap_base() {
         pos.ins().get_pinned_reg(isa.pointer_type())


@@ -439,14 +439,13 @@ impl<'a> Verifier<'a> {
                 .nonfatal((heap, format!("invalid bound global value {}", bound_gv)));
         }
 
-        let index_type = heap_data.index_type;
         let bound_type = self.func.global_values[bound_gv].global_type(isa);
-        if index_type != bound_type {
+        if pointer_type != bound_type {
             errors.report((
                 heap,
                 format!(
-                    "heap index type {} differs from the type of its bound, {}",
-                    index_type, bound_type
+                    "heap pointer type {} differs from the type of its bound, {}",
+                    pointer_type, bound_type
                 ),
             ));
         }


@@ -5,7 +5,7 @@ target aarch64
 
 function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
     gv0 = vmctx
-    gv1 = load.i32 notrap aligned gv0
+    gv1 = load.i64 notrap aligned gv0
     heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
 
 block0(v0: i64, v1: i32):
@@ -16,13 +16,14 @@ block0(v0: i64, v1: i32):
 ; check: Block 0:
 ; check: stp fp, lr, [sp, #-16]!
 ; nextln: mov fp, sp
-; nextln: ldr w2, [x0]
-; nextln: add w2, w2, #0
-; nextln: subs wzr, w1, w2
+; nextln: mov w2, w1
+; nextln: ldr x3, [x0]
+; nextln: mov x3, x3
+; nextln: subs xzr, x2, x3
 ; nextln: b.ls label1 ; b label2
 ; check: Block 1:
 ; check: add x0, x0, x1, UXTW
-; nextln: subs wzr, w1, w2
+; nextln: subs xzr, x2, x3
 ; nextln: movz x1, #0
 ; nextln: csel x0, x1, x0, hi
 ; nextln: ldp fp, lr, [sp], #16
@@ -42,11 +43,12 @@ block0(v0: i64, v1: i32):
 ; check: Block 0:
 ; check: stp fp, lr, [sp, #-16]!
 ; nextln: mov fp, sp
-; nextln: subs wzr, w1, #65536
+; nextln: mov w2, w1
+; nextln: subs xzr, x2, #65536
 ; nextln: b.ls label1 ; b label2
 ; check: Block 1:
 ; check: add x0, x0, x1, UXTW
-; nextln: subs wzr, w1, #65536
+; nextln: subs xzr, x2, #65536
 ; nextln: movz x1, #0
 ; nextln: csel x0, x1, x0, hi
 ; nextln: ldp fp, lr, [sp], #16


@@ -3,7 +3,7 @@ target s390x
 
 function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
     gv0 = vmctx
-    gv1 = load.i32 notrap aligned gv0
+    gv1 = load.i64 notrap aligned gv0
     heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
 
 block0(v0: i64, v1: i32):
@@ -12,15 +12,15 @@ block0(v0: i64, v1: i32):
 }
 
 ; check: Block 0:
-; check: l %r4, 0(%r2)
-; nextln: ahi %r4, 0
-; nextln: clr %r3, %r4
+; check: llgfr %r3, %r3
+; nextln: lg %r4, 0(%r2)
+; nextln: aghi %r4, 0
+; nextln: clgr %r3, %r4
 ; nextln: jgnh label1 ; jg label2
 ; check: Block 1:
-; check: llgfr %r5, %r3
-; nextln: agr %r2, %r5
+; check: agr %r2, %r3
 ; nextln: lghi %r5, 0
-; nextln: clr %r3, %r4
+; nextln: clgr %r3, %r4
 ; nextln: locgrh %r2, %r5
 ; nextln: br %r14
 ; check: Block 2:
@@ -36,13 +36,13 @@ block0(v0: i64, v1: i32):
 }
 
 ; check: Block 0:
-; check: clfi %r3, 65536
+; check: llgfr %r3, %r3
+; nextln: clgfi %r3, 65536
 ; nextln: jgnh label1 ; jg label2
 ; check: Block 1:
-; check: llgfr %r4, %r3
-; nextln: agr %r2, %r4
+; check: agr %r2, %r3
 ; nextln: lghi %r4, 0
-; nextln: clfi %r3, 65536
+; nextln: clgfi %r3, 65536
 ; nextln: locgrh %r2, %r4
 ; nextln: br %r14
 ; check: Block 2:


@@ -4,17 +4,18 @@ target x86_64 machinst
 
 function %f(i32, i64 vmctx) -> i64 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
-    gv2 = load.i32 notrap aligned gv0+8
+    gv2 = load.i64 notrap aligned gv0+8
     heap0 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i32
 
 block0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 0x8000
-; check: movl 8(%rsi), %ecx
-; nextln: movq %rdi, %rax
-; nextln: addl $$32768, %eax
+; check: movl %edi, %ecx
+; nextln: movq 8(%rsi), %rdi
+; nextln: movq %rcx, %rax
+; nextln: addq $$32768, %rax
 ; nextln: jnb ; ud2 heap_oob ;
-; nextln: cmpl %ecx, %eax
+; nextln: cmpq %rdi, %rax
 ; nextln: jbe label1; j label2
 
 ; check: Block 1:


@@ -10,7 +10,7 @@ function %heap_addrs(i32, i64, i64 vmctx) {
     gv0 = iadd_imm.i64 gv4, 64
     gv1 = iadd_imm.i64 gv4, 72
     gv2 = iadd_imm.i64 gv4, 80
-    gv3 = load.i32 notrap aligned gv4+88
+    gv3 = load.i64 notrap aligned gv4+88
 
     heap0 = static gv0, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
     heap1 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i32
@@ -38,15 +38,15 @@ block0(v0: i32, v1: i64, v3: i64):
 ; check: v4 = iadd v13, v12
 
     v5 = heap_addr.i64 heap1, v0, 0
-; check: v14 = icmp_imm ugt v0, 0x0001_0000
-; check: brz v14, $(resume_1=$BB)
+; check: v14 = uextend.i64 v0
+; check: v15 = icmp_imm ugt v14, 0x0001_0000
+; check: brz v15, $(resume_1=$BB)
 ; nextln: jump $(trap_1=$BB)
 ; check: $trap_1:
 ; nextln: trap heap_oob
 ; check: $resume_1:
-; check: v15 = uextend.i64 v0
 ; check: v16 = iadd_imm.i64 v3, 64
-; check: v5 = iadd v16, v15
+; check: v5 = iadd v16, v14
 
     v6 = heap_addr.i64 heap2, v1, 0
 ; check: v19 = iconst.i64 0x0001_0000_0000
@@ -70,30 +70,30 @@ block0(v0: i32, v1: i64, v3: i64):
 ; check: v7 = iadd v21, v1
 
     v8 = heap_addr.i64 heap4, v0, 0
-; check: v22 = load.i32 notrap aligned v3+88
-; check: v23 = iadd_imm v22, 0
-; check: v24 = icmp.i32 ugt v0, v23
-; check: brz v24, $(resume_4=$BB)
+; check: v22 = uextend.i64 v0
+; check: v23 = load.i64 notrap aligned v3+88
+; check: v24 = iadd_imm v23, 0
+; check: v25 = icmp ugt v22, v24
+; check: brz v25, $(resume_4=$BB)
 ; nextln: jump $(trap_4=$BB)
 ; check: $trap_4:
 ; nextln: trap heap_oob
 ; check: $resume_4:
-; check: v25 = uextend.i64 v0
 ; check: v26 = iadd_imm.i64 v3, 72
-; check: v8 = iadd v26, v25
+; check: v8 = iadd v26, v22
 
     v9 = heap_addr.i64 heap5, v0, 0
-; check: v27 = load.i32 notrap aligned v3+88
-; check: v28 = iadd_imm v27, 0
-; check: v29 = icmp.i32 ugt v0, v28
-; check: brz v29, $(resume_5=$BB)
+; check: v27 = uextend.i64 v0
+; check: v28 = load.i64 notrap aligned v3+88
+; check: v29 = iadd_imm v28, 0
+; check: v30 = icmp ugt v27, v29
+; check: brz v30, $(resume_5=$BB)
 ; nextln: jump $(trap_5=$BB)
 ; check: $trap_5:
 ; nextln: trap heap_oob
 ; check: $resume_5:
-; check: v30 = uextend.i64 v0
 ; check: v31 = iadd_imm.i64 v3, 72
-; check: v9 = iadd v31, v30
+; check: v9 = iadd v31, v27
 
     v10 = heap_addr.i64 heap6, v1, 0
 ; check: v32 = iadd_imm.i64 v3, 80


@@ -56,7 +56,7 @@ block0(v0: i32, v999: i64):
     ; Boundscheck should be eliminated.
     ; Checks here are assuming that no pipehole opts fold the load offsets.
     ; nextln: $(xoff=$V) = uextend.i64 v0
-    ; nextln: $(hbase=$V) = iadd_imm v999, 64
+    ; check: $(hbase=$V) = iadd_imm v999, 64
     ; nextln: v1 = iadd $hbase, $xoff
     v2 = load.f32 v1+16
     ; nextln: v2 = load.f32 v1+16
@@ -99,6 +99,7 @@ block0(v0: i32, v999: i64):
 ; check: block0(
     v1 = heap_addr.i64 heap0, v0, 0x8000_0000
     ; Boundscheck code
+    ; check: $(xoff=$V) = uextend.i64 v0
     ; check: $(oob=$V) = icmp
     ; nextln: brz $oob, $(ok=$BB)
     ; nextln: jump $(trap_oob=$BB)
@@ -106,8 +107,7 @@ block0(v0: i32, v999: i64):
     ; nextln: trap heap_oob
     ; check: $ok:
     ; Checks here are assuming that no pipehole opts fold the load offsets.
-    ; nextln: $(xoff=$V) = uextend.i64 v0
-    ; nextln: $(hbase=$V) = iadd_imm.i64 v999, 64
+    ; check: $(hbase=$V) = iadd_imm.i64 v999, 64
     ; nextln: v1 = iadd $hbase, $xoff
     v2 = load.f32 v1+0x7fff_ffff
     ; nextln: v2 = load.f32 v1+0x7fff_ffff


@@ -29,7 +29,7 @@ block0(v0: i64):
 function %heap_bound_type(i64 vmctx) {
     gv0 = vmctx
     gv1 = load.i16 notrap aligned gv0
-    heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap index type i32 differs from the type of its bound, i16
+    heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap pointer type i64 differs from the type of its bound, i16
 
 block0(v0: i64):
     return


@@ -2155,6 +2155,9 @@ fn get_heap_addr(
     addr_ty: Type,
     builder: &mut FunctionBuilder,
 ) -> (ir::Value, i32) {
+    // This function will need updates for 64-bit memories
+    debug_assert_eq!(builder.func.dfg.value_type(addr32), I32);
+
     let offset_guard_size: u64 = builder.func.heaps[heap].offset_guard_size.into();
 
     // How exactly the bounds check is performed here and what it's performed


@@ -1154,7 +1154,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
                 let heap_bound = func.create_global_value(ir::GlobalValueData::Load {
                     base: ptr,
                     offset: Offset32::new(current_length_offset),
-                    global_type: self.offsets.type_of_vmmemory_definition_current_length(),
+                    global_type: pointer_type,
                     readonly: false,
                 });
                 (
@@ -1417,7 +1417,8 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
                         .vmctx_vmmemory_definition_current_length(def_index),
                 )
                 .unwrap();
-                pos.ins().load(I32, ir::MemFlags::trusted(), base, offset)
+                pos.ins()
+                    .load(pointer_type, ir::MemFlags::trusted(), base, offset)
             }
             None => {
                 let offset = i32::try_from(self.offsets.vmctx_vmmemory_import_from(index)).unwrap();
@@ -1425,7 +1426,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
                 pos.ins()
                     .load(pointer_type, ir::MemFlags::trusted(), base, offset);
                 pos.ins().load(
-                    I32,
+                    pointer_type,
                     ir::MemFlags::trusted(),
                     vmmemory_ptr,
                     i32::from(self.offsets.vmmemory_definition_current_length()),
@@ -1435,7 +1436,12 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
         let current_length_in_pages = pos
             .ins()
             .udiv_imm(current_length_in_bytes, i64::from(WASM_PAGE_SIZE));
-        Ok(current_length_in_pages)
+        if pointer_type == I32 {
+            Ok(current_length_in_pages)
+        } else {
+            assert_eq!(pointer_type, I64);
+            Ok(pos.ins().ireduce(I32, current_length_in_pages))
+        }
     }
 
     fn translate_memory_copy(


@@ -422,23 +422,11 @@ impl<P: PtrSize> VMOffsets<P> {
         1 * self.pointer_size()
     }
 
-    /// The size of the `current_length` field.
-    #[inline]
-    pub fn size_of_vmmemory_definition_current_length(&self) -> u8 {
-        4
-    }
-
     /// Return the size of `VMMemoryDefinition`.
     #[inline]
     pub fn size_of_vmmemory_definition(&self) -> u8 {
         2 * self.pointer_size()
     }
-
-    /// The type of the `current_length` field.
-    #[inline]
-    pub fn type_of_vmmemory_definition_current_length(&self) -> ir::Type {
-        ir::Type::int(u16::from(self.size_of_vmmemory_definition_current_length()) * 8).unwrap()
-    }
 }
 
 /// Offsets for `VMGlobalImport`.


@@ -626,13 +626,11 @@ impl Instance {
         let src_mem = self.get_memory(src_index);
         let dst_mem = self.get_memory(dst_index);
 
-        if src
-            .checked_add(len)
-            .map_or(true, |n| n > src_mem.current_length)
-            || dst
-                .checked_add(len)
-                .map_or(true, |m| m > dst_mem.current_length)
-        {
+        if src.checked_add(len).map_or(true, |n| {
+            usize::try_from(n).unwrap() > src_mem.current_length
+        }) || dst.checked_add(len).map_or(true, |m| {
+            usize::try_from(m).unwrap() > dst_mem.current_length
+        }) {
             return Err(Trap::wasm(ir::TrapCode::HeapOutOfBounds));
         }
 
@@ -664,10 +662,9 @@ impl Instance {
     ) -> Result<(), Trap> {
         let memory = self.get_memory(memory_index);
 
-        if dst
-            .checked_add(len)
-            .map_or(true, |m| m > memory.current_length)
-        {
+        if dst.checked_add(len).map_or(true, |m| {
+            usize::try_from(m).unwrap() > memory.current_length
+        }) {
             return Err(Trap::wasm(ir::TrapCode::HeapOutOfBounds));
         }
 
@@ -726,10 +723,10 @@ impl Instance {
         if src
             .checked_add(len)
-            .map_or(true, |n| n as usize > data.len())
-            || dst
-                .checked_add(len)
-                .map_or(true, |m| m > memory.current_length)
+            .map_or(true, |n| usize::try_from(n).unwrap() > data.len())
+            || dst.checked_add(len).map_or(true, |m| {
+                usize::try_from(m).unwrap() > memory.current_length
+            })
         {
             return Err(Trap::wasm(ir::TrapCode::HeapOutOfBounds));
         }


@@ -309,7 +309,7 @@ fn check_memory_init_bounds(
         let end = start.checked_add(init.data.len());
 
         match end {
-            Some(end) if end <= memory.current_length as usize => {
+            Some(end) if end <= memory.current_length => {
                 // Initializer is in bounds
             }
             _ => {
@@ -382,9 +382,8 @@ fn initialize_instance(
         MemoryInitialization::Paged { map, out_of_bounds } => {
             for (index, pages) in map {
                 let memory = instance.memory(index);
-                let slice = unsafe {
-                    slice::from_raw_parts_mut(memory.base, memory.current_length as usize)
-                };
+                let slice =
+                    unsafe { slice::from_raw_parts_mut(memory.base, memory.current_length) };
 
                 for (page_index, page) in pages.iter().enumerate() {
                     if let Some(data) = page {


@@ -155,10 +155,6 @@ impl RuntimeLinearMemory for MmapMemory {
             // Linear memory size would exceed the index range.
             return None;
         }
-        // FIXME: https://github.com/bytecodealliance/wasmtime/issues/3022
-        if new_pages == WASM_MAX_PAGES {
-            return None;
-        }
 
         let delta_bytes = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
         let prev_bytes = usize::try_from(prev_pages).unwrap() * WASM_PAGE_SIZE as usize;
@@ -198,8 +194,7 @@
     fn vmmemory(&self) -> VMMemoryDefinition {
         VMMemoryDefinition {
            base: unsafe { self.mmap.alloc.as_mut_ptr().add(self.pre_guard_size) },
-            current_length: u32::try_from(self.mmap.size as usize * WASM_PAGE_SIZE as usize)
-                .unwrap(),
+            current_length: self.mmap.size as usize * WASM_PAGE_SIZE as usize,
         }
     }
 }
@@ -276,13 +271,6 @@ impl Memory {
     }
 
     fn limit_new(plan: &MemoryPlan, limiter: Option<&mut dyn ResourceLimiter>) -> Result<()> {
-        // FIXME: https://github.com/bytecodealliance/wasmtime/issues/3022
-        if plan.memory.minimum == WASM_MAX_PAGES {
-            bail!(
-                "memory minimum size of {} pages exceeds memory limits",
-                plan.memory.minimum
-            );
-        }
         if let Some(limiter) = limiter {
             if !limiter.memory_growing(0, plan.memory.minimum, plan.memory.maximum) {
                 bail!(
@@ -374,10 +362,6 @@
         if new_size > maximum.unwrap_or(WASM_MAX_PAGES) {
             return None;
        }
-        // FIXME: https://github.com/bytecodealliance/wasmtime/issues/3022
-        if new_size == WASM_MAX_PAGES {
-            return None;
-        }
 
         let start = usize::try_from(old_size).unwrap() * WASM_PAGE_SIZE as usize;
         let len = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
@@ -397,7 +381,7 @@
         match self {
             Memory::Static { base, size, .. } => VMMemoryDefinition {
                 base: base.as_ptr() as *mut _,
-                current_length: u32::try_from(*size as usize * WASM_PAGE_SIZE as usize).unwrap(),
+                current_length: *size as usize * WASM_PAGE_SIZE as usize,
             },
             Memory::Dynamic(mem) => mem.vmmemory(),
         }


@@ -207,7 +207,7 @@ pub struct VMMemoryDefinition {
     pub base: *mut u8,
 
     /// The current logical size of this linear memory in bytes.
-    pub current_length: u32,
+    pub current_length: usize,
 }
 
 #[cfg(test)]


@@ -318,7 +318,7 @@ impl Memory {
         unsafe {
             let store = store.into();
             let definition = *store[self.0].definition;
-            slice::from_raw_parts(definition.base, definition.current_length as usize)
+            slice::from_raw_parts(definition.base, definition.current_length)
         }
     }
 
@@ -334,7 +334,7 @@ impl Memory {
         unsafe {
             let store = store.into();
             let definition = *store[self.0].definition;
-            slice::from_raw_parts_mut(definition.base, definition.current_length as usize)
+            slice::from_raw_parts_mut(definition.base, definition.current_length)
         }
     }
 
@@ -395,7 +395,7 @@ impl Memory {
     ///
     /// Panics if this memory doesn't belong to `store`.
     pub fn data_size(&self, store: impl AsContext) -> usize {
-        unsafe { (*store.as_context()[self.0].definition).current_length as usize }
+        unsafe { (*store.as_context()[self.0].definition).current_length }
     }
 
     /// Returns the size, in WebAssembly pages, of this wasm memory.


@@ -3,13 +3,11 @@ use crate::store::{InstanceId, StoreOpaque};
 use crate::trampoline::create_handle;
 use crate::{Limits, MemoryType};
 use anyhow::{anyhow, Result};
+use std::sync::Arc;
 use wasmtime_environ::entity::PrimaryMap;
 use wasmtime_environ::{wasm, MemoryPlan, MemoryStyle, Module, WASM_PAGE_SIZE};
 use wasmtime_runtime::{RuntimeLinearMemory, RuntimeMemoryCreator, VMMemoryDefinition};
-use std::convert::TryFrom;
-use std::sync::Arc;
 
 pub fn create_memory(store: &mut StoreOpaque<'_>, memory: &MemoryType) -> Result<InstanceId> {
     let mut module = Module::new();
@@ -49,8 +47,7 @@ impl RuntimeLinearMemory for LinearMemoryProxy {
     fn vmmemory(&self) -> VMMemoryDefinition {
         VMMemoryDefinition {
             base: self.mem.as_ptr(),
-            current_length: u32::try_from(self.mem.size() as usize * WASM_PAGE_SIZE as usize)
-                .unwrap(),
+            current_length: self.mem.size() as usize * WASM_PAGE_SIZE as usize,
         }
     }
 }


@@ -57,19 +57,27 @@ fn linear_memory_limits() -> Result<()> {
         (module
             (memory 65534)
-            (func (export "foo") (result i32)
+            (func (export "grow") (result i32)
                 i32.const 1
                 memory.grow)
+            (func (export "size") (result i32)
+                memory.size)
         )
     "#;
 
     let module = Module::new(engine, wat)?;
     let mut store = Store::new(engine, ());
     let instance = Instance::new(&mut store, &module, &[])?;
-    let foo = instance.get_typed_func::<(), i32, _>(&mut store, "foo")?;
+    let size = instance.get_typed_func::<(), i32, _>(&mut store, "size")?;
+    let grow = instance.get_typed_func::<(), i32, _>(&mut store, "grow")?;
 
-    assert_eq!(foo.call(&mut store, ())?, 65534);
-    assert_eq!(foo.call(&mut store, ())?, -1);
+    assert_eq!(size.call(&mut store, ())?, 65534);
+    assert_eq!(grow.call(&mut store, ())?, 65534);
+    assert_eq!(size.call(&mut store, ())?, 65535);
+    assert_eq!(grow.call(&mut store, ())?, 65535);
+    assert_eq!(size.call(&mut store, ())?, 65536);
+    assert_eq!(grow.call(&mut store, ())?, -1);
+    assert_eq!(size.call(&mut store, ())?, 65536);
 
     Ok(())
     }
 }