cranelift: Add heap support to the interpreter (#3302)

* cranelift: Add heaps to interpreter

* cranelift: Add RunTest Environment mechanism to `test interpret` (see the vmctx sketch below)

* cranelift: Remove unused `MemoryError`

* cranelift: Add docs for `State::resolve_global_value` (see the resolution sketch below)

* cranelift: Rename heap tests

* cranelift: Refactor heap address resolution (see the bounds-check sketch below)

* Fix typos and clarify docs (thanks @cfallin)
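
For context on the RunTest Environment bullet: a `; heap:` annotation in the tests below tells the run-test environment to allocate a heap and describe it through the struct passed as the function's `vmctx` argument. The sketch below is a minimal illustration of that setup under assumptions: `TestHeapEnv` and `build_env` are invented names, not the filetest runner's actual API, and treating the bound word as a byte length rather than an absolute end address is a guess.

// Sketch of what an annotation like
//     ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
// asks the environment to do: allocate a zeroed heap of `size` bytes, then
// publish its base pointer at vmctx+0 and its bound at vmctx+8.
struct TestHeapEnv {
    heap: Vec<u8>,   // backing storage; kept alive so the base pointer stays valid
    vmctx: [u64; 2], // word 0: heap base pointer, word 1: heap bound (assumed: length in bytes)
}

fn build_env(size: usize) -> TestHeapEnv {
    let heap = vec![0u8; size];
    let base = heap.as_ptr() as u64;
    TestHeapEnv {
        heap,
        vmctx: [base, size as u64],
    }
}

A test such as %static_heap_i64 below would then be invoked with a pointer to `vmctx` as its first argument, so `gv1 = load.i64 notrap aligned gv0+0` resolves to the heap base.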
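For the `State::resolve_global_value` bullet: global values form a chain that the interpreter must walk to compute heap base and bound addresses. Below is a simplified sketch of that resolution using the real `GlobalValueData` variants from `cranelift-codegen`; the helper itself (`resolve_gv`, and its `read_u64` memory-read callback) is hypothetical, not the trait method this commit documents.

use cranelift_codegen::ir::{Function, GlobalValue, GlobalValueData};

fn resolve_gv(
    func: &Function,
    gv: GlobalValue,
    vmctx: u64,
    read_u64: &dyn Fn(u64) -> u64,
) -> u64 {
    match func.global_values[gv] {
        // `gv0 = vmctx` resolves to the vmctx pointer itself.
        GlobalValueData::VMContext => vmctx,
        // `gvN = iadd_imm.i64 base, K` adds a constant to the base's address.
        GlobalValueData::IAddImm { base, offset, .. } => {
            resolve_gv(func, base, vmctx, read_u64).wrapping_add(i64::from(offset) as u64)
        }
        // `gvN = load.i64 notrap aligned base+K` resolves the base address,
        // then reads the pointer stored there.
        GlobalValueData::Load { base, offset, .. } => {
            let addr = resolve_gv(func, base, vmctx, read_u64)
                .wrapping_add(i32::from(offset) as u64);
            read_u64(addr)
        }
        // Symbols etc. are out of scope for this sketch.
        _ => unimplemented!(),
    }
}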
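And for the heap address resolution bullet: an instruction like `heap_addr.i64 heap0, v1, 4` implies a bounds check before a real address is produced. The sketch below illustrates the documented `heap_addr` semantics, not the interpreter's actual refactored code; `HeapOutOfBounds` stands in for whatever trap or error the implementation raises.

fn heap_addr(base: u64, bound: u64, offset: u64, size: u64) -> Result<u64, HeapOutOfBounds> {
    // The access [offset, offset + size) must lie entirely within the bound.
    if offset.checked_add(size).map_or(false, |end| end <= bound) {
        Ok(base.wrapping_add(offset))
    } else {
        Err(HeapOutOfBounds)
    }
}

#[derive(Debug)]
struct HeapOutOfBounds;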
Author: Afonso Bordado
Date: 2022-07-05 17:05:26 +01:00
Committed by: GitHub
Parent: 76a2545a7f
Commit: e91f493ff5
7 changed files with 531 additions and 140 deletions


@@ -1,10 +1,11 @@
+test interpret
 test run
 target x86_64
 target s390x
 target aarch64

-function %static_heap_i64_load_store(i64 vmctx, i64, i32) -> i32 {
+function %static_heap_i64(i64 vmctx, i64, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
@@ -16,13 +17,13 @@ block0(v0: i64, v1: i64, v2: i32):
     return v4
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i64_load_store(0, 1) == 1
-; run: %static_heap_i64_load_store(0, -1) == -1
-; run: %static_heap_i64_load_store(16, 1) == 1
-; run: %static_heap_i64_load_store(16, -1) == -1
+; run: %static_heap_i64(0, 1) == 1
+; run: %static_heap_i64(0, -1) == -1
+; run: %static_heap_i64(16, 1) == 1
+; run: %static_heap_i64(16, -1) == -1

-function %static_heap_i32_load_store(i64 vmctx, i32, i32) -> i32 {
+function %static_heap_i32(i64 vmctx, i32, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i32
@@ -34,13 +35,13 @@ block0(v0: i64, v1: i32, v2: i32):
     return v4
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i32_load_store(0, 1) == 1
-; run: %static_heap_i32_load_store(0, -1) == -1
-; run: %static_heap_i32_load_store(16, 1) == 1
-; run: %static_heap_i32_load_store(16, -1) == -1
+; run: %static_heap_i32(0, 1) == 1
+; run: %static_heap_i32(0, -1) == -1
+; run: %static_heap_i32(16, 1) == 1
+; run: %static_heap_i32(16, -1) == -1

-function %static_heap_i32_load_store_no_min(i64 vmctx, i32, i32) -> i32 {
+function %heap_no_min(i64 vmctx, i32, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0, index_type i32
@@ -52,13 +53,13 @@ block0(v0: i64, v1: i32, v2: i32):
     return v4
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i32_load_store_no_min(0, 1) == 1
-; run: %static_heap_i32_load_store_no_min(0, -1) == -1
-; run: %static_heap_i32_load_store_no_min(16, 1) == 1
-; run: %static_heap_i32_load_store_no_min(16, -1) == -1
+; run: %heap_no_min(0, 1) == 1
+; run: %heap_no_min(0, -1) == -1
+; run: %heap_no_min(16, 1) == 1
+; run: %heap_no_min(16, -1) == -1

-function %dynamic_heap_i64_load_store(i64 vmctx, i64, i32) -> i32 {
+function %dynamic_i64(i64 vmctx, i64, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     gv2 = load.i64 notrap aligned gv0+8
@@ -71,13 +72,13 @@ block0(v0: i64, v1: i64, v2: i32):
     return v4
 }
 ; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %dynamic_heap_i64_load_store(0, 1) == 1
-; run: %dynamic_heap_i64_load_store(0, -1) == -1
-; run: %dynamic_heap_i64_load_store(16, 1) == 1
-; run: %dynamic_heap_i64_load_store(16, -1) == -1
+; run: %dynamic_i64(0, 1) == 1
+; run: %dynamic_i64(0, -1) == -1
+; run: %dynamic_i64(16, 1) == 1
+; run: %dynamic_i64(16, -1) == -1

-function %dynamic_heap_i32_load_store(i64 vmctx, i32, i32) -> i32 {
+function %dynamic_i32(i64 vmctx, i32, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     gv2 = load.i64 notrap aligned gv0+8
@@ -90,13 +91,13 @@ block0(v0: i64, v1: i32, v2: i32):
     return v4
 }
 ; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %dynamic_heap_i32_load_store(0, 1) == 1
-; run: %dynamic_heap_i32_load_store(0, -1) == -1
-; run: %dynamic_heap_i32_load_store(16, 1) == 1
-; run: %dynamic_heap_i32_load_store(16, -1) == -1
+; run: %dynamic_i32(0, 1) == 1
+; run: %dynamic_i32(0, -1) == -1
+; run: %dynamic_i32(16, 1) == 1
+; run: %dynamic_i32(16, -1) == -1

-function %multi_heap_load_store(i64 vmctx, i32, i32) -> i32 {
+function %multi_load_store(i64 vmctx, i32, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     gv2 = load.i64 notrap aligned gv0+16
@@ -125,12 +126,47 @@ block0(v0: i64, v1: i32, v2: i32):
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
 ; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
-; run: %multi_heap_load_store(1, 2) == 3
-; run: %multi_heap_load_store(4, 5) == 9
+; run: %multi_load_store(1, 2) == 3
+; run: %multi_load_store(4, 5) == 9

-function %static_heap_i64_load_store_unaligned(i64 vmctx, i64, i32) -> i32 {
+; Uses multiple heaps, but heap0 refers to the second heap, and heap1 refers to the first heap
+; This is a regression test for the interpreter
+function %out_of_order(i64 vmctx, i32, i32) -> i32 {
+    gv0 = vmctx
+    gv1 = load.i64 notrap aligned gv0+0
+    gv2 = load.i64 notrap aligned gv0+16
+    gv3 = load.i64 notrap aligned gv0+24
+    heap0 = dynamic gv2, bound gv3, offset_guard 0, index_type i32
+    heap1 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
+
+block0(v0: i64, v1: i32, v2: i32):
+    v3 = iconst.i32 0
+    v4 = iconst.i64 0
+
+    ; Store lhs in heap0
+    v5 = heap_addr.i64 heap0, v3, 4
+    store.i32 v1, v5
+
+    ; Store rhs in heap1
+    v6 = heap_addr.i64 heap1, v4, 4
+    store.i32 v2, v6
+
+    v7 = load.i32 v5
+    v8 = load.i32 v6
+    v9 = iadd.i32 v7, v8
+    return v9
+}
+; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
+; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
+; run: %out_of_order(1, 2) == 3
+; run: %out_of_order(4, 5) == 9
+
+function %unaligned_access(i64 vmctx, i64, i32) -> i32 {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned gv0+0
     heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
@@ -142,18 +178,18 @@ block0(v0: i64, v1: i64, v2: i32):
     return v4
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i64_load_store_unaligned(0, 1) == 1
-; run: %static_heap_i64_load_store_unaligned(0, -1) == -1
-; run: %static_heap_i64_load_store_unaligned(1, 1) == 1
-; run: %static_heap_i64_load_store_unaligned(1, -1) == -1
-; run: %static_heap_i64_load_store_unaligned(2, 1) == 1
-; run: %static_heap_i64_load_store_unaligned(2, -1) == -1
-; run: %static_heap_i64_load_store_unaligned(3, 1) == 1
-; run: %static_heap_i64_load_store_unaligned(3, -1) == -1
+; run: %unaligned_access(0, 1) == 1
+; run: %unaligned_access(0, -1) == -1
+; run: %unaligned_access(1, 1) == 1
+; run: %unaligned_access(1, -1) == -1
+; run: %unaligned_access(2, 1) == 1
+; run: %unaligned_access(2, -1) == -1
+; run: %unaligned_access(3, 1) == 1
+; run: %unaligned_access(3, -1) == -1

 ; This stores data in the place of the pointer in the vmctx struct, not in the heap itself.
-function %static_heap_i64_iadd_imm(i64 vmctx, i32) -> i32 {
+function %iadd_imm(i64 vmctx, i32) -> i32 {
     gv0 = vmctx
     gv1 = iadd_imm.i64 gv0, 0
     heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i64
@@ -166,5 +202,5 @@ block0(v0: i64, v1: i32):
     return v4
 }
 ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i64_iadd_imm(1) == 1
-; run: %static_heap_i64_iadd_imm(-1) == -1
+; run: %iadd_imm(1) == 1
+; run: %iadd_imm(-1) == -1