Change VMMemoryDefinition::current_length to usize (#3134)
* Change VMMemoryDefinition::current_length to `usize`

This commit changes the definition of `VMMemoryDefinition::current_length` to `usize` from its previous definition of `u32`. This is a pretty impactful change because it also changes the cranelift semantics of "dynamic" heaps, where the bound global value specifier must now match the pointer type for the platform rather than the index type for the heap.

The motivation for this change is that the `current_length` field (or bound for the heap) is intended to reflect the current size of the heap. This is bound by `usize` on the host platform rather than `u32` or `u64`. The previous choice of `u32` couldn't represent a 4GB memory because we couldn't put a number representing 4GB into the `current_length` field. By using `usize`, which reflects the host's memory allocation, this should better reflect the size of the heap and allows Wasmtime to support a full 4GB heap for a wasm program (instead of 4GB minus one page).

This commit also updates the legalization of the `heap_addr` clif instruction to appropriately cast the address to the platform's pointer type, handling bounds checks along the way. The practical impact for today's targets is that a `uextend` now happens sooner than it did before, but otherwise there is no intended impact of this change. In the future, when 64-bit memories are supported, there will likely need to be fancier logic which handles offsets a bit differently (especially in the case of a 64-bit memory on a 32-bit host).

The clif `filetest` changes should show the differences in codegen, and the Wasmtime changes are largely removing casts here and there.

Closes #3022

* Add tests for memory.size at maximum memory size

* Add a dfg helper method
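For orientation, a minimal sketch (not part of the diff) of the struct shape after this change and of why `u32` could not hold a full 4 GiB. The field names and types match the `pub struct VMMemoryDefinition` hunk near the end of the diff; the `#[repr(C)]` attribute, the doc comments, and the small `main` demo are illustrative assumptions only.

use std::convert::TryFrom;

// Sketch of the updated struct (compare the vmcontext hunk below).
#[allow(dead_code)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// Pointer to the start of the linear memory allocation.
    pub base: *mut u8,
    /// Current logical size of this linear memory in bytes; `usize` reflects the
    /// host's allocation limit, so a full 4 GiB memory is representable on
    /// 64-bit hosts.
    pub current_length: usize,
}

fn main() {
    // 4 GiB in bytes is one past `u32::MAX`, so it cannot be stored in a `u32`...
    let four_gib: u64 = 4 * 1024 * 1024 * 1024;
    assert!(u32::try_from(four_gib).is_err());
    // ...but it fits in `usize` on a 64-bit host, which is what `current_length`
    // now uses.
    if cfg!(target_pointer_width = "64") {
        assert!(usize::try_from(four_gib).is_ok());
    }
}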
@@ -7,8 +7,8 @@ use crate::ir::extfunc::ExtFuncData;
use crate::ir::instructions::{BranchInfo, CallInfo, InstructionData};
use crate::ir::{types, ConstantData, ConstantPool, Immediate};
use crate::ir::{
- Block, FuncRef, Inst, SigRef, Signature, Type, Value, ValueLabelAssignments, ValueList,
- ValueListPool,
+ Block, FuncRef, Inst, SigRef, Signature, SourceLoc, Type, Value, ValueLabelAssignments,
+ ValueList, ValueListPool,
};
use crate::isa::TargetIsa;
use crate::packed_option::ReservedValue;
@@ -153,6 +153,14 @@ impl DataFlowGraph {
self.values_labels = Some(HashMap::new());
}
}
+
+ /// Inserts a `ValueLabelAssignments::Alias` for `to_alias` if debug info
+ /// collection is enabled.
+ pub fn add_value_label_alias(&mut self, to_alias: Value, from: SourceLoc, value: Value) {
+ if let Some(values_labels) = self.values_labels.as_mut() {
+ values_labels.insert(to_alias, ir::ValueLabelAssignments::Alias { from, value });
+ }
+ }
}

/// Resolve value aliases.

@@ -64,8 +64,10 @@ fn dynamic_addr(
let mut pos = FuncCursor::new(func).at_inst(inst);
pos.use_srcloc(inst);

+ let offset = cast_offset_to_pointer_ty(offset, offset_ty, addr_ty, &mut pos);
+
// Start with the bounds check. Trap if `offset + access_size > bound`.
- let bound = pos.ins().global_value(offset_ty, bound_gv);
+ let bound = pos.ins().global_value(addr_ty, bound_gv);
let (cc, lhs, bound) = if access_size == 1 {
// `offset > bound - 1` is the same as `offset >= bound`.
(IntCC::UnsignedGreaterThanOrEqual, offset, bound)
@@ -76,7 +78,7 @@ fn dynamic_addr(
(IntCC::UnsignedGreaterThan, offset, adj_bound)
} else {
// We need an overflow check for the adjusted offset.
- let access_size_val = pos.ins().iconst(offset_ty, access_size as i64);
+ let access_size_val = pos.ins().iconst(addr_ty, access_size as i64);
let (adj_offset, overflow) = pos.ins().iadd_ifcout(offset, access_size_val);
pos.ins().trapif(
isa.unsigned_add_overflow_condition(),
@@ -100,7 +102,6 @@ fn dynamic_addr(
heap,
addr_ty,
offset,
- offset_ty,
pos.func,
spectre_oob_comparison,
);
@@ -111,7 +112,7 @@ fn static_addr(
isa: &dyn TargetIsa,
inst: ir::Inst,
heap: ir::Heap,
- offset: ir::Value,
+ mut offset: ir::Value,
access_size: u32,
bound: u64,
func: &mut ir::Function,
@@ -156,6 +157,7 @@ fn static_addr(
// `bound - access_size >= 4GB` we can omit a bounds check.
let limit = bound - access_size;
let mut spectre_oob_comparison = None;
+ offset = cast_offset_to_pointer_ty(offset, offset_ty, addr_ty, &mut pos);
if offset_ty != ir::types::I32 || limit < 0xffff_ffff {
let (cc, lhs, limit_imm) = if limit & 1 == 1 {
// Prefer testing `offset >= limit - 1` when limit is odd because an even number is
@@ -169,7 +171,7 @@ fn static_addr(
let oob = pos.ins().icmp_imm(cc, lhs, limit_imm);
pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
if isa.flags().enable_heap_access_spectre_mitigation() {
- let limit = pos.ins().iconst(offset_ty, limit_imm);
+ let limit = pos.ins().iconst(addr_ty, limit_imm);
spectre_oob_comparison = Some((cc, lhs, limit));
}
}
@@ -180,20 +182,46 @@ fn static_addr(
heap,
addr_ty,
offset,
- offset_ty,
pos.func,
spectre_oob_comparison,
);
}
+
+ fn cast_offset_to_pointer_ty(
+ offset: ir::Value,
+ offset_ty: ir::Type,
+ addr_ty: ir::Type,
+ pos: &mut FuncCursor,
+ ) -> ir::Value {
+ if offset_ty == addr_ty {
+ return offset;
+ }
+ // Note that using 64-bit heaps on a 32-bit host is not currently supported,
+ // would require at least a bounds check here to ensure that the truncation
+ // from 64-to-32 bits doesn't lose any upper bits. For now though we're
+ // mostly interested in the 32-bit-heaps-on-64-bit-hosts cast.
+ assert!(offset_ty.bits() < addr_ty.bits());
+
+ // Convert `offset` to `addr_ty`.
+ let extended_offset = pos.ins().uextend(addr_ty, offset);
+
+ // Add debug value-label alias so that debuginfo can name the extended
+ // value as the address
+ let loc = pos.srcloc();
+ pos.func
+ .dfg
+ .add_value_label_alias(extended_offset, loc, offset);
+
+ extended_offset
+ }

/// Emit code for the base address computation of a `heap_addr` instruction.
fn compute_addr(
isa: &dyn TargetIsa,
inst: ir::Inst,
heap: ir::Heap,
addr_ty: ir::Type,
- mut offset: ir::Value,
- offset_ty: ir::Type,
+ offset: ir::Value,
func: &mut ir::Function,
// If we are performing Spectre mitigation with conditional selects, the
// values to compare and the condition code that indicates an out-of bounds
@@ -201,24 +229,10 @@ fn compute_addr(
// speculatively safe address (a zero / null pointer) instead.
spectre_oob_comparison: Option<(IntCC, ir::Value, ir::Value)>,
) {
+ debug_assert_eq!(func.dfg.value_type(offset), addr_ty);
let mut pos = FuncCursor::new(func).at_inst(inst);
pos.use_srcloc(inst);

- // Convert `offset` to `addr_ty`.
- if offset_ty != addr_ty {
- let labels_value = offset;
- offset = pos.ins().uextend(addr_ty, offset);
- if let Some(values_labels) = pos.func.dfg.values_labels.as_mut() {
- values_labels.insert(
- offset,
- ir::ValueLabelAssignments::Alias {
- from: pos.func.srclocs[inst],
- value: labels_value,
- },
- );
- }
- }
-
// Add the heap base address base
let base = if isa.flags().enable_pinned_reg() && isa.flags().use_pinned_reg_as_heap_base() {
pos.ins().get_pinned_reg(isa.pointer_type())

@@ -439,14 +439,13 @@ impl<'a> Verifier<'a> {
.nonfatal((heap, format!("invalid bound global value {}", bound_gv)));
}

- let index_type = heap_data.index_type;
let bound_type = self.func.global_values[bound_gv].global_type(isa);
- if index_type != bound_type {
+ if pointer_type != bound_type {
errors.report((
heap,
format!(
- "heap index type {} differs from the type of its bound, {}",
- index_type, bound_type
+ "heap pointer type {} differs from the type of its bound, {}",
+ pointer_type, bound_type
),
));
}

@@ -5,7 +5,7 @@ target aarch64

function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
- gv1 = load.i32 notrap aligned gv0
+ gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32

block0(v0: i64, v1: i32):
@@ -16,13 +16,14 @@ block0(v0: i64, v1: i32):
; check: Block 0:
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
- ; nextln: ldr w2, [x0]
- ; nextln: add w2, w2, #0
- ; nextln: subs wzr, w1, w2
+ ; nextln: mov w2, w1
+ ; nextln: ldr x3, [x0]
+ ; nextln: mov x3, x3
+ ; nextln: subs xzr, x2, x3
; nextln: b.ls label1 ; b label2
; check: Block 1:
; check: add x0, x0, x1, UXTW
- ; nextln: subs wzr, w1, w2
+ ; nextln: subs xzr, x2, x3
; nextln: movz x1, #0
; nextln: csel x0, x1, x0, hi
; nextln: ldp fp, lr, [sp], #16
@@ -42,11 +43,12 @@ block0(v0: i64, v1: i32):
; check: Block 0:
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
- ; nextln: subs wzr, w1, #65536
+ ; nextln: mov w2, w1
+ ; nextln: subs xzr, x2, #65536
; nextln: b.ls label1 ; b label2
; check: Block 1:
; check: add x0, x0, x1, UXTW
- ; nextln: subs wzr, w1, #65536
+ ; nextln: subs xzr, x2, #65536
; nextln: movz x1, #0
; nextln: csel x0, x1, x0, hi
; nextln: ldp fp, lr, [sp], #16

@@ -3,7 +3,7 @@ target s390x

function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
- gv1 = load.i32 notrap aligned gv0
+ gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32

block0(v0: i64, v1: i32):
@@ -12,15 +12,15 @@ block0(v0: i64, v1: i32):
}

; check: Block 0:
- ; check: l %r4, 0(%r2)
- ; nextln: ahi %r4, 0
- ; nextln: clr %r3, %r4
+ ; check: llgfr %r3, %r3
+ ; nextln: lg %r4, 0(%r2)
+ ; nextln: aghi %r4, 0
+ ; nextln: clgr %r3, %r4
; nextln: jgnh label1 ; jg label2
; check: Block 1:
- ; check: llgfr %r5, %r3
- ; nextln: agr %r2, %r5
+ ; check: agr %r2, %r3
; nextln: lghi %r5, 0
- ; nextln: clr %r3, %r4
+ ; nextln: clgr %r3, %r4
; nextln: locgrh %r2, %r5
; nextln: br %r14
; check: Block 2:
@@ -36,13 +36,13 @@ block0(v0: i64, v1: i32):
}

; check: Block 0:
- ; check: clfi %r3, 65536
+ ; check: llgfr %r3, %r3
+ ; nextln: clgfi %r3, 65536
; nextln: jgnh label1 ; jg label2
; check: Block 1:
- ; check: llgfr %r4, %r3
- ; nextln: agr %r2, %r4
+ ; check: agr %r2, %r3
; nextln: lghi %r4, 0
- ; nextln: clfi %r3, 65536
+ ; nextln: clgfi %r3, 65536
; nextln: locgrh %r2, %r4
; nextln: br %r14
; check: Block 2:

@@ -4,19 +4,20 @@ target x86_64 machinst
function %f(i32, i64 vmctx) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
- gv2 = load.i32 notrap aligned gv0+8
+ gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i32

block0(v0: i32, v1: i64):

v2 = heap_addr.i64 heap0, v0, 0x8000
- ; check: movl 8(%rsi), %ecx
- ; nextln: movq %rdi, %rax
- ; nextln: addl $$32768, %eax
+ ; check: movl %edi, %ecx
+ ; nextln: movq 8(%rsi), %rdi
+ ; nextln: movq %rcx, %rax
+ ; nextln: addq $$32768, %rax
; nextln: jnb ; ud2 heap_oob ;
- ; nextln: cmpl %ecx, %eax
+ ; nextln: cmpq %rdi, %rax
; nextln: jbe label1; j label2
; check: Block 1:

return v2
}

@@ -10,7 +10,7 @@ function %heap_addrs(i32, i64, i64 vmctx) {
gv0 = iadd_imm.i64 gv4, 64
gv1 = iadd_imm.i64 gv4, 72
gv2 = iadd_imm.i64 gv4, 80
- gv3 = load.i32 notrap aligned gv4+88
+ gv3 = load.i64 notrap aligned gv4+88

heap0 = static gv0, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
heap1 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i32
@@ -38,15 +38,15 @@ block0(v0: i32, v1: i64, v3: i64):
; check: v4 = iadd v13, v12

v5 = heap_addr.i64 heap1, v0, 0
- ; check: v14 = icmp_imm ugt v0, 0x0001_0000
- ; check: brz v14, $(resume_1=$BB)
+ ; check: v14 = uextend.i64 v0
+ ; check: v15 = icmp_imm ugt v14, 0x0001_0000
+ ; check: brz v15, $(resume_1=$BB)
; nextln: jump $(trap_1=$BB)
; check: $trap_1:
; nextln: trap heap_oob
; check: $resume_1:
- ; check: v15 = uextend.i64 v0
; check: v16 = iadd_imm.i64 v3, 64
- ; check: v5 = iadd v16, v15
+ ; check: v5 = iadd v16, v14

v6 = heap_addr.i64 heap2, v1, 0
; check: v19 = iconst.i64 0x0001_0000_0000
@@ -70,30 +70,30 @@ block0(v0: i32, v1: i64, v3: i64):
; check: v7 = iadd v21, v1

v8 = heap_addr.i64 heap4, v0, 0
- ; check: v22 = load.i32 notrap aligned v3+88
- ; check: v23 = iadd_imm v22, 0
- ; check: v24 = icmp.i32 ugt v0, v23
- ; check: brz v24, $(resume_4=$BB)
+ ; check: v22 = uextend.i64 v0
+ ; check: v23 = load.i64 notrap aligned v3+88
+ ; check: v24 = iadd_imm v23, 0
+ ; check: v25 = icmp ugt v22, v24
+ ; check: brz v25, $(resume_4=$BB)
; nextln: jump $(trap_4=$BB)
; check: $trap_4:
; nextln: trap heap_oob
; check: $resume_4:
- ; check: v25 = uextend.i64 v0
; check: v26 = iadd_imm.i64 v3, 72
- ; check: v8 = iadd v26, v25
+ ; check: v8 = iadd v26, v22

v9 = heap_addr.i64 heap5, v0, 0
- ; check: v27 = load.i32 notrap aligned v3+88
- ; check: v28 = iadd_imm v27, 0
- ; check: v29 = icmp.i32 ugt v0, v28
- ; check: brz v29, $(resume_5=$BB)
+ ; check: v27 = uextend.i64 v0
+ ; check: v28 = load.i64 notrap aligned v3+88
+ ; check: v29 = iadd_imm v28, 0
+ ; check: v30 = icmp ugt v27, v29
+ ; check: brz v30, $(resume_5=$BB)
; nextln: jump $(trap_5=$BB)
; check: $trap_5:
; nextln: trap heap_oob
; check: $resume_5:
- ; check: v30 = uextend.i64 v0
; check: v31 = iadd_imm.i64 v3, 72
- ; check: v9 = iadd v31, v30
+ ; check: v9 = iadd v31, v27

v10 = heap_addr.i64 heap6, v1, 0
; check: v32 = iadd_imm.i64 v3, 80

@@ -56,7 +56,7 @@ block0(v0: i32, v999: i64):
; Boundscheck should be eliminated.
; Checks here are assuming that no pipehole opts fold the load offsets.
; nextln: $(xoff=$V) = uextend.i64 v0
- ; nextln: $(hbase=$V) = iadd_imm v999, 64
+ ; check: $(hbase=$V) = iadd_imm v999, 64
; nextln: v1 = iadd $hbase, $xoff
v2 = load.f32 v1+16
; nextln: v2 = load.f32 v1+16
@@ -99,6 +99,7 @@ block0(v0: i32, v999: i64):
; check: block0(
v1 = heap_addr.i64 heap0, v0, 0x8000_0000
; Boundscheck code
+ ; check: $(xoff=$V) = uextend.i64 v0
; check: $(oob=$V) = icmp
; nextln: brz $oob, $(ok=$BB)
; nextln: jump $(trap_oob=$BB)
@@ -106,8 +107,7 @@ block0(v0: i32, v999: i64):
; nextln: trap heap_oob
; check: $ok:
; Checks here are assuming that no pipehole opts fold the load offsets.
- ; nextln: $(xoff=$V) = uextend.i64 v0
- ; nextln: $(hbase=$V) = iadd_imm.i64 v999, 64
+ ; check: $(hbase=$V) = iadd_imm.i64 v999, 64
; nextln: v1 = iadd $hbase, $xoff
v2 = load.f32 v1+0x7fff_ffff
; nextln: v2 = load.f32 v1+0x7fff_ffff

@@ -29,7 +29,7 @@ block0(v0: i64):
function %heap_bound_type(i64 vmctx) {
gv0 = vmctx
gv1 = load.i16 notrap aligned gv0
- heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap index type i32 differs from the type of its bound, i16
+ heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap pointer type i64 differs from the type of its bound, i16

block0(v0: i64):
return

@@ -2155,6 +2155,9 @@ fn get_heap_addr(
addr_ty: Type,
builder: &mut FunctionBuilder,
) -> (ir::Value, i32) {
+ // This function will need updates for 64-bit memories
+ debug_assert_eq!(builder.func.dfg.value_type(addr32), I32);
+
let offset_guard_size: u64 = builder.func.heaps[heap].offset_guard_size.into();

// How exactly the bounds check is performed here and what it's performed

@@ -1154,7 +1154,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
let heap_bound = func.create_global_value(ir::GlobalValueData::Load {
base: ptr,
offset: Offset32::new(current_length_offset),
- global_type: self.offsets.type_of_vmmemory_definition_current_length(),
+ global_type: pointer_type,
readonly: false,
});
(
@@ -1417,7 +1417,8 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
.vmctx_vmmemory_definition_current_length(def_index),
)
.unwrap();
- pos.ins().load(I32, ir::MemFlags::trusted(), base, offset)
+ pos.ins()
+ .load(pointer_type, ir::MemFlags::trusted(), base, offset)
}
None => {
let offset = i32::try_from(self.offsets.vmctx_vmmemory_import_from(index)).unwrap();
@@ -1425,7 +1426,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
pos.ins()
.load(pointer_type, ir::MemFlags::trusted(), base, offset);
pos.ins().load(
- I32,
+ pointer_type,
ir::MemFlags::trusted(),
vmmemory_ptr,
i32::from(self.offsets.vmmemory_definition_current_length()),
@@ -1435,7 +1436,12 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
let current_length_in_pages = pos
.ins()
.udiv_imm(current_length_in_bytes, i64::from(WASM_PAGE_SIZE));
- Ok(current_length_in_pages)
+ if pointer_type == I32 {
+ Ok(current_length_in_pages)
+ } else {
+ assert_eq!(pointer_type, I64);
+ Ok(pos.ins().ireduce(I32, current_length_in_pages))
+ }
}

fn translate_memory_copy(

@@ -422,23 +422,11 @@ impl<P: PtrSize> VMOffsets<P> {
1 * self.pointer_size()
}

- /// The size of the `current_length` field.
- #[inline]
- pub fn size_of_vmmemory_definition_current_length(&self) -> u8 {
- 4
- }
-
/// Return the size of `VMMemoryDefinition`.
#[inline]
pub fn size_of_vmmemory_definition(&self) -> u8 {
2 * self.pointer_size()
}
-
- /// The type of the `current_length` field.
- #[inline]
- pub fn type_of_vmmemory_definition_current_length(&self) -> ir::Type {
- ir::Type::int(u16::from(self.size_of_vmmemory_definition_current_length()) * 8).unwrap()
- }
}

/// Offsets for `VMGlobalImport`.

@@ -626,13 +626,11 @@ impl Instance {
let src_mem = self.get_memory(src_index);
let dst_mem = self.get_memory(dst_index);

- if src
- .checked_add(len)
- .map_or(true, |n| n > src_mem.current_length)
- || dst
- .checked_add(len)
- .map_or(true, |m| m > dst_mem.current_length)
- {
+ if src.checked_add(len).map_or(true, |n| {
+ usize::try_from(n).unwrap() > src_mem.current_length
+ }) || dst.checked_add(len).map_or(true, |m| {
+ usize::try_from(m).unwrap() > dst_mem.current_length
+ }) {
return Err(Trap::wasm(ir::TrapCode::HeapOutOfBounds));
}

@@ -664,10 +662,9 @@ impl Instance {
) -> Result<(), Trap> {
let memory = self.get_memory(memory_index);

- if dst
- .checked_add(len)
- .map_or(true, |m| m > memory.current_length)
- {
+ if dst.checked_add(len).map_or(true, |m| {
+ usize::try_from(m).unwrap() > memory.current_length
+ }) {
return Err(Trap::wasm(ir::TrapCode::HeapOutOfBounds));
}

@@ -726,10 +723,10 @@ impl Instance {

if src
.checked_add(len)
- .map_or(true, |n| n as usize > data.len())
- || dst
- .checked_add(len)
- .map_or(true, |m| m > memory.current_length)
+ .map_or(true, |n| usize::try_from(n).unwrap() > data.len())
+ || dst.checked_add(len).map_or(true, |m| {
+ usize::try_from(m).unwrap() > memory.current_length
+ })
{
return Err(Trap::wasm(ir::TrapCode::HeapOutOfBounds));
}

@@ -309,7 +309,7 @@ fn check_memory_init_bounds(
let end = start.checked_add(init.data.len());

match end {
- Some(end) if end <= memory.current_length as usize => {
+ Some(end) if end <= memory.current_length => {
// Initializer is in bounds
}
_ => {
@@ -382,9 +382,8 @@ fn initialize_instance(
MemoryInitialization::Paged { map, out_of_bounds } => {
for (index, pages) in map {
let memory = instance.memory(index);
- let slice = unsafe {
- slice::from_raw_parts_mut(memory.base, memory.current_length as usize)
- };
+ let slice =
+ unsafe { slice::from_raw_parts_mut(memory.base, memory.current_length) };

for (page_index, page) in pages.iter().enumerate() {
if let Some(data) = page {

@@ -155,10 +155,6 @@ impl RuntimeLinearMemory for MmapMemory {
// Linear memory size would exceed the index range.
return None;
}
- // FIXME: https://github.com/bytecodealliance/wasmtime/issues/3022
- if new_pages == WASM_MAX_PAGES {
- return None;
- }

let delta_bytes = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
let prev_bytes = usize::try_from(prev_pages).unwrap() * WASM_PAGE_SIZE as usize;
@@ -198,8 +194,7 @@ impl RuntimeLinearMemory for MmapMemory {
fn vmmemory(&self) -> VMMemoryDefinition {
VMMemoryDefinition {
base: unsafe { self.mmap.alloc.as_mut_ptr().add(self.pre_guard_size) },
- current_length: u32::try_from(self.mmap.size as usize * WASM_PAGE_SIZE as usize)
- .unwrap(),
+ current_length: self.mmap.size as usize * WASM_PAGE_SIZE as usize,
}
}
}
@@ -276,13 +271,6 @@ impl Memory {
}

fn limit_new(plan: &MemoryPlan, limiter: Option<&mut dyn ResourceLimiter>) -> Result<()> {
- // FIXME: https://github.com/bytecodealliance/wasmtime/issues/3022
- if plan.memory.minimum == WASM_MAX_PAGES {
- bail!(
- "memory minimum size of {} pages exceeds memory limits",
- plan.memory.minimum
- );
- }
if let Some(limiter) = limiter {
if !limiter.memory_growing(0, plan.memory.minimum, plan.memory.maximum) {
bail!(
@@ -374,10 +362,6 @@ impl Memory {
if new_size > maximum.unwrap_or(WASM_MAX_PAGES) {
return None;
}
- // FIXME: https://github.com/bytecodealliance/wasmtime/issues/3022
- if new_size == WASM_MAX_PAGES {
- return None;
- }

let start = usize::try_from(old_size).unwrap() * WASM_PAGE_SIZE as usize;
let len = usize::try_from(delta).unwrap() * WASM_PAGE_SIZE as usize;
@@ -397,7 +381,7 @@ impl Memory {
match self {
Memory::Static { base, size, .. } => VMMemoryDefinition {
base: base.as_ptr() as *mut _,
- current_length: u32::try_from(*size as usize * WASM_PAGE_SIZE as usize).unwrap(),
+ current_length: *size as usize * WASM_PAGE_SIZE as usize,
},
Memory::Dynamic(mem) => mem.vmmemory(),
}

@@ -207,7 +207,7 @@ pub struct VMMemoryDefinition {
pub base: *mut u8,

/// The current logical size of this linear memory in bytes.
- pub current_length: u32,
+ pub current_length: usize,
}

#[cfg(test)]

@@ -318,7 +318,7 @@ impl Memory {
unsafe {
let store = store.into();
let definition = *store[self.0].definition;
- slice::from_raw_parts(definition.base, definition.current_length as usize)
+ slice::from_raw_parts(definition.base, definition.current_length)
}
}

@@ -334,7 +334,7 @@ impl Memory {
unsafe {
let store = store.into();
let definition = *store[self.0].definition;
- slice::from_raw_parts_mut(definition.base, definition.current_length as usize)
+ slice::from_raw_parts_mut(definition.base, definition.current_length)
}
}

@@ -395,7 +395,7 @@ impl Memory {
///
/// Panics if this memory doesn't belong to `store`.
pub fn data_size(&self, store: impl AsContext) -> usize {
- unsafe { (*store.as_context()[self.0].definition).current_length as usize }
+ unsafe { (*store.as_context()[self.0].definition).current_length }
}

/// Returns the size, in WebAssembly pages, of this wasm memory.

@@ -3,13 +3,11 @@ use crate::store::{InstanceId, StoreOpaque};
use crate::trampoline::create_handle;
use crate::{Limits, MemoryType};
use anyhow::{anyhow, Result};
+ use std::sync::Arc;
use wasmtime_environ::entity::PrimaryMap;
use wasmtime_environ::{wasm, MemoryPlan, MemoryStyle, Module, WASM_PAGE_SIZE};
use wasmtime_runtime::{RuntimeLinearMemory, RuntimeMemoryCreator, VMMemoryDefinition};

- use std::convert::TryFrom;
- use std::sync::Arc;
-
pub fn create_memory(store: &mut StoreOpaque<'_>, memory: &MemoryType) -> Result<InstanceId> {
let mut module = Module::new();

@@ -49,8 +47,7 @@ impl RuntimeLinearMemory for LinearMemoryProxy {
fn vmmemory(&self) -> VMMemoryDefinition {
VMMemoryDefinition {
base: self.mem.as_ptr(),
- current_length: u32::try_from(self.mem.size() as usize * WASM_PAGE_SIZE as usize)
- .unwrap(),
+ current_length: self.mem.size() as usize * WASM_PAGE_SIZE as usize,
}
}
}

@@ -57,19 +57,27 @@ fn linear_memory_limits() -> Result<()> {
(module
(memory 65534)

- (func (export "foo") (result i32)
+ (func (export "grow") (result i32)
i32.const 1
memory.grow)
+ (func (export "size") (result i32)
+ memory.size)
)
"#;
let module = Module::new(engine, wat)?;

let mut store = Store::new(engine, ());
let instance = Instance::new(&mut store, &module, &[])?;
- let foo = instance.get_typed_func::<(), i32, _>(&mut store, "foo")?;
+ let size = instance.get_typed_func::<(), i32, _>(&mut store, "size")?;
+ let grow = instance.get_typed_func::<(), i32, _>(&mut store, "grow")?;

- assert_eq!(foo.call(&mut store, ())?, 65534);
- assert_eq!(foo.call(&mut store, ())?, -1);
+ assert_eq!(size.call(&mut store, ())?, 65534);
+ assert_eq!(grow.call(&mut store, ())?, 65534);
+ assert_eq!(size.call(&mut store, ())?, 65535);
+ assert_eq!(grow.call(&mut store, ())?, 65535);
+ assert_eq!(size.call(&mut store, ())?, 65536);
+ assert_eq!(grow.call(&mut store, ())?, -1);
+ assert_eq!(size.call(&mut store, ())?, 65536);
Ok(())
}
}