Change VMMemoryDefinition::current_length to usize (#3134)
* Change VMMemoryDefinition::current_length to `usize`

This commit changes the definition of `VMMemoryDefinition::current_length` from `u32` to `usize`. This is a fairly impactful change because it also changes the Cranelift semantics of "dynamic" heaps: the bound global value specifier must now match the pointer type for the platform rather than the index type for the heap.

The motivation for this change is that the `current_length` field (the bound of the heap) is intended to reflect the current size of the heap, which is bounded by `usize` on the host platform rather than by `u32` or `u64`. The previous choice of `u32` could not represent a 4GB memory because a number representing 4GB does not fit in the `current_length` field. By using `usize`, which reflects the host's memory allocation, the field better reflects the size of the heap and allows Wasmtime to support a full 4GB heap for a wasm program (instead of 4GB minus one page).

This commit also updates the legalization of the `heap_addr` clif instruction to appropriately cast the address to the platform's pointer type, handling bounds checks along the way. The practical impact for today's targets is that a `uextend` happens sooner than it did before; otherwise there is no intended change in behavior. In the future, when 64-bit memories are supported, fancier logic will likely be needed to handle offsets differently (especially for a 64-bit memory on a 32-bit host). The clif `filetest` changes show the differences in codegen, and the Wasmtime changes are largely the removal of casts here and there.

Closes #3022

* Add tests for memory.size at maximum memory size

* Add a dfg helper method
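For readers skimming the filetest diffs below, here is a minimal Rust sketch of the shape this change gives the runtime struct. The `#[repr(C)]` layout and the `base` pointer field are illustrative assumptions; only the `u32` -> `usize` change to `current_length` comes from this commit.

    // Sketch only (not the exact Wasmtime definition): illustrates why `usize`
    // can represent a full 4 GiB memory while the old `u32` field could not.
    #[repr(C)]
    pub struct VMMemoryDefinition {
        /// Base address of the linear memory (assumed field, for illustration).
        pub base: *mut u8,
        /// Current size of the memory in bytes; previously `u32`, now `usize`.
        pub current_length: usize,
    }

    fn main() {
        // 4 GiB = 0x1_0000_0000 bytes; this does not fit in a u32 (max 0xFFFF_FFFF),
        // but it fits in `usize` on a 64-bit host.
        let four_gib: usize = 0x1_0000_0000;
        let def = VMMemoryDefinition {
            base: std::ptr::null_mut(),
            current_length: four_gib,
        };
        assert!(u32::try_from(def.current_length).is_err());
        println!("current_length = {:#x} bytes", def.current_length);
    }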
@@ -5,7 +5,7 @@ target aarch64

function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32

block0(v0: i64, v1: i32):

@@ -16,13 +16,14 @@ block0(v0: i64, v1: i32):
; check: Block 0:
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr w2, [x0]
; nextln: add w2, w2, #0
; nextln: subs wzr, w1, w2
; nextln: mov w2, w1
; nextln: ldr x3, [x0]
; nextln: mov x3, x3
; nextln: subs xzr, x2, x3
; nextln: b.ls label1 ; b label2
; check: Block 1:
; check: add x0, x0, x1, UXTW
; nextln: subs wzr, w1, w2
; nextln: subs xzr, x2, x3
; nextln: movz x1, #0
; nextln: csel x0, x1, x0, hi
; nextln: ldp fp, lr, [sp], #16

@@ -42,11 +43,12 @@ block0(v0: i64, v1: i32):
; check: Block 0:
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs wzr, w1, #65536
; nextln: mov w2, w1
; nextln: subs xzr, x2, #65536
; nextln: b.ls label1 ; b label2
; check: Block 1:
; check: add x0, x0, x1, UXTW
; nextln: subs wzr, w1, #65536
; nextln: subs xzr, x2, #65536
; nextln: movz x1, #0
; nextln: csel x0, x1, x0, hi
; nextln: ldp fp, lr, [sp], #16
@@ -3,7 +3,7 @@ target s390x

function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32

block0(v0: i64, v1: i32):

@@ -12,15 +12,15 @@ block0(v0: i64, v1: i32):
}

; check: Block 0:
; check: l %r4, 0(%r2)
; nextln: ahi %r4, 0
; nextln: clr %r3, %r4
; check: llgfr %r3, %r3
; nextln: lg %r4, 0(%r2)
; nextln: aghi %r4, 0
; nextln: clgr %r3, %r4
; nextln: jgnh label1 ; jg label2
; check: Block 1:
; check: llgfr %r5, %r3
; nextln: agr %r2, %r5
; check: agr %r2, %r3
; nextln: lghi %r5, 0
; nextln: clr %r3, %r4
; nextln: clgr %r3, %r4
; nextln: locgrh %r2, %r5
; nextln: br %r14
; check: Block 2:

@@ -36,13 +36,13 @@ block0(v0: i64, v1: i32):
}

; check: Block 0:
; check: clfi %r3, 65536
; check: llgfr %r3, %r3
; nextln: clgfi %r3, 65536
; nextln: jgnh label1 ; jg label2
; check: Block 1:
; check: llgfr %r4, %r3
; nextln: agr %r2, %r4
; check: agr %r2, %r3
; nextln: lghi %r4, 0
; nextln: clfi %r3, 65536
; nextln: clgfi %r3, 65536
; nextln: locgrh %r2, %r4
; nextln: br %r14
; check: Block 2:
@@ -4,19 +4,20 @@ target x86_64 machinst

function %f(i32, i64 vmctx) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i32 notrap aligned gv0+8
gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i32

block0(v0: i32, v1: i64):

v2 = heap_addr.i64 heap0, v0, 0x8000
; check: movl 8(%rsi), %ecx
; nextln: movq %rdi, %rax
; nextln: addl $$32768, %eax
; check: movl %edi, %ecx
; nextln: movq 8(%rsi), %rdi
; nextln: movq %rcx, %rax
; nextln: addq $$32768, %rax
; nextln: jnb ; ud2 heap_oob ;
; nextln: cmpl %ecx, %eax
; nextln: cmpq %rdi, %rax
; nextln: jbe label1; j label2
; check: Block 1:

return v2
}
@@ -10,7 +10,7 @@ function %heap_addrs(i32, i64, i64 vmctx) {
gv0 = iadd_imm.i64 gv4, 64
gv1 = iadd_imm.i64 gv4, 72
gv2 = iadd_imm.i64 gv4, 80
gv3 = load.i32 notrap aligned gv4+88
gv3 = load.i64 notrap aligned gv4+88

heap0 = static gv0, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
heap1 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i32

@@ -38,15 +38,15 @@ block0(v0: i32, v1: i64, v3: i64):
; check: v4 = iadd v13, v12

v5 = heap_addr.i64 heap1, v0, 0
; check: v14 = icmp_imm ugt v0, 0x0001_0000
; check: brz v14, $(resume_1=$BB)
; check: v14 = uextend.i64 v0
; check: v15 = icmp_imm ugt v14, 0x0001_0000
; check: brz v15, $(resume_1=$BB)
; nextln: jump $(trap_1=$BB)
; check: $trap_1:
; nextln: trap heap_oob
; check: $resume_1:
; check: v15 = uextend.i64 v0
; check: v16 = iadd_imm.i64 v3, 64
; check: v5 = iadd v16, v15
; check: v5 = iadd v16, v14

v6 = heap_addr.i64 heap2, v1, 0
; check: v19 = iconst.i64 0x0001_0000_0000

@@ -70,30 +70,30 @@ block0(v0: i32, v1: i64, v3: i64):
; check: v7 = iadd v21, v1

v8 = heap_addr.i64 heap4, v0, 0
; check: v22 = load.i32 notrap aligned v3+88
; check: v23 = iadd_imm v22, 0
; check: v24 = icmp.i32 ugt v0, v23
; check: brz v24, $(resume_4=$BB)
; check: v22 = uextend.i64 v0
; check: v23 = load.i64 notrap aligned v3+88
; check: v24 = iadd_imm v23, 0
; check: v25 = icmp ugt v22, v24
; check: brz v25, $(resume_4=$BB)
; nextln: jump $(trap_4=$BB)
; check: $trap_4:
; nextln: trap heap_oob
; check: $resume_4:
; check: v25 = uextend.i64 v0
; check: v26 = iadd_imm.i64 v3, 72
; check: v8 = iadd v26, v25
; check: v8 = iadd v26, v22

v9 = heap_addr.i64 heap5, v0, 0
; check: v27 = load.i32 notrap aligned v3+88
; check: v28 = iadd_imm v27, 0
; check: v29 = icmp.i32 ugt v0, v28
; check: brz v29, $(resume_5=$BB)
; check: v27 = uextend.i64 v0
; check: v28 = load.i64 notrap aligned v3+88
; check: v29 = iadd_imm v28, 0
; check: v30 = icmp ugt v27, v29
; check: brz v30, $(resume_5=$BB)
; nextln: jump $(trap_5=$BB)
; check: $trap_5:
; nextln: trap heap_oob
; check: $resume_5:
; check: v30 = uextend.i64 v0
; check: v31 = iadd_imm.i64 v3, 72
; check: v9 = iadd v31, v30
; check: v9 = iadd v31, v27

v10 = heap_addr.i64 heap6, v1, 0
; check: v32 = iadd_imm.i64 v3, 80
@@ -56,7 +56,7 @@ block0(v0: i32, v999: i64):
; Boundscheck should be eliminated.
; Checks here are assuming that no pipehole opts fold the load offsets.
; nextln: $(xoff=$V) = uextend.i64 v0
; nextln: $(hbase=$V) = iadd_imm v999, 64
; check: $(hbase=$V) = iadd_imm v999, 64
; nextln: v1 = iadd $hbase, $xoff
v2 = load.f32 v1+16
; nextln: v2 = load.f32 v1+16

@@ -99,6 +99,7 @@ block0(v0: i32, v999: i64):
; check: block0(
v1 = heap_addr.i64 heap0, v0, 0x8000_0000
; Boundscheck code
; check: $(xoff=$V) = uextend.i64 v0
; check: $(oob=$V) = icmp
; nextln: brz $oob, $(ok=$BB)
; nextln: jump $(trap_oob=$BB)

@@ -106,8 +107,7 @@ block0(v0: i32, v999: i64):
; nextln: trap heap_oob
; check: $ok:
; Checks here are assuming that no pipehole opts fold the load offsets.
; nextln: $(xoff=$V) = uextend.i64 v0
; nextln: $(hbase=$V) = iadd_imm.i64 v999, 64
; check: $(hbase=$V) = iadd_imm.i64 v999, 64
; nextln: v1 = iadd $hbase, $xoff
v2 = load.f32 v1+0x7fff_ffff
; nextln: v2 = load.f32 v1+0x7fff_ffff
@@ -29,7 +29,7 @@ block0(v0: i64):
function %heap_bound_type(i64 vmctx) {
gv0 = vmctx
gv1 = load.i16 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap index type i32 differs from the type of its bound, i16
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap pointer type i64 differs from the type of its bound, i16

block0(v0: i64):
return