Remove heaps from core Cranelift, push them into cranelift-wasm (#5386)
* cranelift-wasm: translate Wasm loads into lower-level CLIF operations
Rather than using `heap_{load,store,addr}`; a sketch of the lowered form appears after this list.
* cranelift: Remove the `heap_{addr,load,store}` instructions
These are now legalized in the `cranelift-wasm` frontend.
* cranelift: Remove the `ir::Heap` entity from CLIF
* Port basic memory operation tests to .wat filetests
* Remove test for verifying CLIF heaps
* Remove `heap_addr` from replace_branching_instructions_and_cfg_predecessors.clif test
* Remove `heap_addr` from readonly.clif test
* Remove `heap_addr` from `table_addr.clif` test
* Remove `heap_addr` from the simd-fvpromote_low.clif test
* Remove `heap_addr` from simd-fvdemote.clif test
* Remove `heap_addr` from the load-op-store.clif test
* Remove the CLIF heap runtest
* Remove `heap_addr` from the global_value.clif test
* Remove `heap_addr` from fpromote.clif runtests
* Remove `heap_addr` from fdemote.clif runtests
* Remove `heap_addr` from memory.clif parser test
* Remove `heap_addr` from reject_load_readonly.clif test
* Remove `heap_addr` from reject_load_notrap.clif test
* Remove `heap_addr` from load_readonly_notrap.clif test
* Remove `static-heap-without-guard-pages.clif` test
Will be subsumed when we port `make-heap-load-store-tests.sh` to generating
`.wat` tests.
* Remove `static-heap-with-guard-pages.clif` test
Will be subsumed when we port `make-heap-load-store-tests.sh` over to `.wat`
tests.
* Remove more heap tests
These will be subsumed by porting `make-heap-load-store-tests.sh` over to `.wat`
tests.
* Remove `heap_addr` from `simple-alias.clif` test
* Remove `heap_addr` from partial-redundancy.clif test
* Remove `heap_addr` from multiple-blocks.clif test
* Remove `heap_addr` from fence.clif test
* Remove `heap_addr` from extends.clif test
* Remove runtests that rely on heaps
Heaps are not a thing in CLIF or the interpreter anymore
* Add generated load/store `.wat` tests (an illustrative sketch of such a test follows this list)
* Enable memory-related wasm features in `.wat` tests
* Remove CLIF heap from fcmp-mem-bug.clif test
* Add a mode for compiling `.wat` all the way to assembly in filetests
* Also generate WAT to assembly tests in `make-load-store-tests.sh`
* cargo fmt
* Reinstate `f{de,pro}mote.clif` tests without the heap bits
* Remove undefined doc link
* Remove outdated SVG and dot file from docs
* Add docs about `None` returns for base address computation helpers
* Factor out `env.heap_access_spectre_mitigation()` to a local
* Expand docs for `FuncEnvironment::heaps` trait method
* Restore f{de,pro}mote+load clif runtests with stack memory
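As context for the first two items above, here is a rough, hand-written sketch of the kind of CLIF that `cranelift-wasm` can now emit directly for a bounds-checked wasm load, instead of emitting a single `heap_addr` for core Cranelift to legalize later. The vmctx layout, value numbering, and the simplified bounds check are illustrative assumptions, not output copied from this PR; the real lowering also accounts for the access size and offset, guard pages, and Spectre mitigation (`select_spectre_guard`) when enabled.

function %wasm_load_sketch(i64 vmctx, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+0   ; heap base pointer (illustrative vmctx layout)
    gv2 = load.i64 notrap aligned gv0+8   ; heap length in bytes (illustrative vmctx layout)

block0(v0: i64, v1: i32):
    v2 = uextend.i64 v1                   ; zero-extend the 32-bit wasm index
    v3 = global_value.i64 gv2             ; current heap bound
    v4 = icmp uge v2, v3                  ; simplified bounds check (ignores access size/offset)
    trapnz v4, heap_oob                   ; explicit trap replaces heap_addr's implicit check
    v5 = global_value.i64 gv1             ; heap base
    v6 = iadd v5, v2                      ; native address = base + index
    v7 = load.i32 v6
    return v7
}

For static heaps with sufficient guard pages the compare-and-trap can be skipped entirely; that decision now lives in the `cranelift-wasm` frontend rather than in a core-Cranelift legalization of `heap_addr`.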
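The removed CLIF heap runtests are superseded by generated `.wat` tests (and, with the new filetest mode, `.wat`-to-assembly tests). As a hedged illustration only (the real files come from the ported generator script, and its test directives are not reproduced here), such a test amounts to a small module whose loads and stores exercise one memory configuration:

(module
  (memory (export "memory") 1)  ;; one 64 KiB page; the generator varies the memory/guard configuration
  (func (export "store_i32") (param i32 i32)
    (i32.store (local.get 0) (local.get 1)))
  (func (export "load_i32") (param i32) (result i32)
    (i32.load (local.get 0))))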
@@ -71,19 +71,16 @@ block0(v0: f64):


 ;; Tests a fdemote+load combo which some backends may optimize
-function %fdemote_load(i64 vmctx, i64, f64) -> f32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x10, bound 0x10, offset_guard 0x0, index_type i64
+function %fdemote_load(i64, f64) -> f32 {
+    ss0 = explicit_slot 16

-block0(v0: i64, v1: i64, v2: f64):
-    v3 = heap_addr.i64 heap0, v1, 0, 8
+block0(v1: i64, v2: f64):
+    v3 = stack_addr.i64 ss0
     store.f64 v2, v3
     v4 = load.f64 v3
     v5 = fdemote.f32 v4
     return v5
 }
-; heap: static, size=0x10, ptr=vmctx+0, bound=vmctx+8
 ; run: %fdemote_load(0, 0x0.0) == 0x0.0
 ; run: %fdemote_load(1, 0x0.1) == 0x0.1
 ; run: %fdemote_load(2, 0x0.2) == 0x0.2
@@ -79,20 +79,16 @@ block0(v0: f32):


 ;; Tests a fpromote+load combo which some backends may optimize
-function %fpromote_load(i64 vmctx, i64, f32) -> f64 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x10, bound 0x10, offset_guard 0x0, index_type i64
+function %fpromote_load(i64, f32) -> f64 {
+    ss0 = explicit_slot 16

-block0(v0: i64, v1: i64, v2: f32):
-    v3 = heap_addr.i64 heap0, v1, 0, 4
+block0(v1: i64, v2: f32):
+    v3 = stack_addr.i64 ss0
     store.f32 v2, v3
     v4 = load.f32 v3
     v5 = fpromote.f64 v4
     return v5
 }
-
-; heap: static, size=0x10, ptr=vmctx+0, bound=vmctx+8
 ; run: %fpromote_load(0, 0x0.0) == 0x0.0
 ; run: %fpromote_load(1, 0x0.1) == 0x0.1
 ; run: %fpromote_load(2, 0x0.2) == 0x0.2
@@ -1,24 +0,0 @@
-test interpret
-test run
-target x86_64
-target s390x
-target aarch64
-target riscv64
-
-; Store a value in the heap using `heap_addr` and load it using `global_value`
-function %store_load(i64 vmctx, i64, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i64, v2: i32):
-    v3 = heap_addr.i64 heap0, v1, 0, 0
-    store.i32 v2, v3
-
-    v4 = global_value.i64 gv1
-    v5 = load.i32 v4
-    return v5
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %store_load(0, 1) == 1
-; run: %store_load(0, -1) == -1
@@ -1,223 +0,0 @@
-test interpret
-test run
-target x86_64
-target s390x
-target aarch64
-target riscv64
-
-function %static_heap_i64(i64 vmctx, i64, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i64, v2: i32):
-    v3 = heap_addr.i64 heap0, v1, 0, 4
-    store.i32 v2, v3
-    v4 = load.i32 v3
-    return v4
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i64(0, 1) == 1
-; run: %static_heap_i64(0, -1) == -1
-; run: %static_heap_i64(16, 1) == 1
-; run: %static_heap_i64(16, -1) == -1
-
-
-function %static_heap_i32(i64 vmctx, i32, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i32
-
-block0(v0: i64, v1: i32, v2: i32):
-    v3 = heap_addr.i64 heap0, v1, 0, 4
-    store.i32 v2, v3
-    v4 = load.i32 v3
-    return v4
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i32(0, 1) == 1
-; run: %static_heap_i32(0, -1) == -1
-; run: %static_heap_i32(16, 1) == 1
-; run: %static_heap_i32(16, -1) == -1
-
-
-function %heap_no_min(i64 vmctx, i32, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0, index_type i32
-
-block0(v0: i64, v1: i32, v2: i32):
-    v3 = heap_addr.i64 heap0, v1, 0, 4
-    store.i32 v2, v3
-    v4 = load.i32 v3
-    return v4
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %heap_no_min(0, 1) == 1
-; run: %heap_no_min(0, -1) == -1
-; run: %heap_no_min(16, 1) == 1
-; run: %heap_no_min(16, -1) == -1
-
-
-function %dynamic_i64(i64 vmctx, i64, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    gv2 = load.i64 notrap aligned gv0+8
-    heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i64, v2: i32):
-    v3 = heap_addr.i64 heap0, v1, 0, 4
-    store.i32 v2, v3
-    v4 = load.i32 v3
-    return v4
-}
-; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %dynamic_i64(0, 1) == 1
-; run: %dynamic_i64(0, -1) == -1
-; run: %dynamic_i64(16, 1) == 1
-; run: %dynamic_i64(16, -1) == -1
-
-
-function %dynamic_i32(i64 vmctx, i32, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    gv2 = load.i64 notrap aligned gv0+8
-    heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i32
-
-block0(v0: i64, v1: i32, v2: i32):
-    v3 = heap_addr.i64 heap0, v1, 0, 4
-    store.i32 v2, v3
-    v4 = load.i32 v3
-    return v4
-}
-; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %dynamic_i32(0, 1) == 1
-; run: %dynamic_i32(0, -1) == -1
-; run: %dynamic_i32(16, 1) == 1
-; run: %dynamic_i32(16, -1) == -1
-
-
-function %multi_load_store(i64 vmctx, i32, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    gv2 = load.i64 notrap aligned gv0+16
-    gv3 = load.i64 notrap aligned gv0+24
-    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
-    heap1 = dynamic gv2, bound gv3, offset_guard 0, index_type i32
-
-block0(v0: i64, v1: i32, v2: i32):
-    v3 = iconst.i64 0
-    v4 = iconst.i32 0
-
-    ; Store lhs in heap0
-    v5 = heap_addr.i64 heap0, v3, 0, 4
-    store.i32 v1, v5
-
-    ; Store rhs in heap1
-    v6 = heap_addr.i64 heap1, v4, 0, 4
-    store.i32 v2, v6
-
-
-    v7 = load.i32 v5
-    v8 = load.i32 v6
-
-    v9 = iadd.i32 v7, v8
-    return v9
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
-; run: %multi_load_store(1, 2) == 3
-; run: %multi_load_store(4, 5) == 9
-
-
-
-; Uses multiple heaps, but heap0 refers to the second heap, and heap1 refers to the first heap
-; This is a regression test for the interpreter
-function %out_of_order(i64 vmctx, i32, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    gv2 = load.i64 notrap aligned gv0+16
-    gv3 = load.i64 notrap aligned gv0+24
-    heap0 = dynamic gv2, bound gv3, offset_guard 0, index_type i32
-    heap1 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i32, v2: i32):
-    v3 = iconst.i32 0
-    v4 = iconst.i64 0
-
-    ; Store lhs in heap0
-    v5 = heap_addr.i64 heap0, v3, 0, 4
-    store.i32 v1, v5
-
-    ; Store rhs in heap1
-    v6 = heap_addr.i64 heap1, v4, 0, 4
-    store.i32 v2, v6
-
-
-    v7 = load.i32 v5
-    v8 = load.i32 v6
-
-    v9 = iadd.i32 v7, v8
-    return v9
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
-; run: %out_of_order(1, 2) == 3
-; run: %out_of_order(4, 5) == 9
-
-
-function %unaligned_access(i64 vmctx, i64, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i64, v2: i32):
-    v3 = heap_addr.i64 heap0, v1, 0, 4
-    store.i32 v2, v3
-    v4 = load.i32 v3
-    return v4
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %unaligned_access(0, 1) == 1
-; run: %unaligned_access(0, -1) == -1
-; run: %unaligned_access(1, 1) == 1
-; run: %unaligned_access(1, -1) == -1
-; run: %unaligned_access(2, 1) == 1
-; run: %unaligned_access(2, -1) == -1
-; run: %unaligned_access(3, 1) == 1
-; run: %unaligned_access(3, -1) == -1
-
-
-; This stores data in the place of the pointer in the vmctx struct, not in the heap itself.
-function %iadd_imm(i64 vmctx, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = iadd_imm.i64 gv0, 0
-    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i64
-
-block0(v0: i64, v1: i32):
-    v2 = iconst.i64 0
-    v3 = heap_addr.i64 heap0, v2, 0, 4
-    store.i32 v1, v3
-    v4 = load.i32 v3
-    return v4
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %iadd_imm(1) == 1
-; run: %iadd_imm(-1) == -1
-
-function %heap_limit_i64(i64 vmctx, i64, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0, bound 0x8, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i64, v2: i32):
-    v3 = heap_addr.i64 heap0, v1, 0, 4
-    store.i32 v2, v3
-    v4 = load.i32 v3
-    return v4
-}
-; heap: static, size=0x8, ptr=vmctx+0, bound=vmctx+8
-; run: %heap_limit_i64(0, 1) == 1
-; run: %heap_limit_i64(0, -1) == -1
-; run: %heap_limit_i64(4, 1) == 1
-; run: %heap_limit_i64(4, -1) == -1
@@ -1,98 +0,0 @@
-test run
-target x86_64
-target s390x
-target aarch64
-target riscv64
-
-
-function %load_op_store_iadd_i64(i64 vmctx, i64, i64) -> i64 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i64, v2: i64):
-    v3 = heap_addr.i64 heap0, v1, 0, 8
-    v4 = iconst.i64 42
-    store.i64 v4, v3
-    v5 = load.i64 v3
-    v6 = iadd.i64 v5, v2
-    store.i64 v6, v3
-    v7 = load.i64 v3
-    return v7
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i64_load_store(0, 1) == 43
-; run: %static_heap_i64_load_store(0, -1) == 41
-
-function %load_op_store_iadd_i32(i64 vmctx, i64, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i64, v2: i32):
-    v3 = heap_addr.i64 heap0, v1, 0, 4
-    v4 = iconst.i32 42
-    store.i32 v4, v3
-    v5 = load.i32 v3
-    v6 = iadd.i32 v5, v2
-    store.i32 v6, v3
-    v7 = load.i32 v3
-    return v7
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i64_load_store(0, 1) == 43
-; run: %static_heap_i64_load_store(0, -1) == 41
-
-function %load_op_store_iadd_i8(i64 vmctx, i64, i8) -> i8 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i64, v2: i8):
-    v3 = heap_addr.i64 heap0, v1, 0, 4
-    v4 = iconst.i8 42
-    store.i8 v4, v3
-    v5 = load.i8 v3
-    v6 = iadd.i8 v5, v2
-    store.i8 v6, v3
-    v7 = load.i8 v3
-    return v7
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i64_load_store(0, 1) == 43
-; run: %static_heap_i64_load_store(0, -1) == 41
-
-function %load_op_store_iadd_isub_iand_ior_ixor_i64(i64 vmctx, i64, i64) -> i64 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i64, v2: i64):
-    v3 = heap_addr.i64 heap0, v1, 0, 8
-    store.i64 v2, v3
-    v4 = load.i64 v3
-    v5 = iconst.i64 1
-    v6 = iadd.i64 v5, v4
-    store.i64 v6, v3
-    v7 = load.i64 v3
-    v8 = iconst.i64 2
-    v9 = load.i64 v3
-    v10 = isub.i64 v9, v8
-    store.i64 v10, v3
-    v11 = load.i64 v3
-    v12 = iconst.i64 0xf
-    v13 = band.i64 v12, v11
-    store.i64 v13, v3
-    v14 = iconst.i64 0x10
-    v15 = load.i64 v3
-    v16 = bor.i64 v15, v14
-    store.i64 v16, v3
-    v17 = load.i64 v3
-    v18 = iconst.i64 0xff
-    v19 = bxor.i64 v17, v18
-    store.i64 v19, v3
-    v20 = load.i64 v3
-    return v20
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %static_heap_i64_load_store(0, 0x1234) == 236
@@ -1,25 +0,0 @@
-test interpret
-test run
-target x86_64
-target s390x
-target aarch64
-
-function %fvdemote_test(i64 vmctx, i64, f64x2) -> f32x4 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x20, bound 0x20, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i64, v2: f64x2):
-    v3 = heap_addr.i64 heap0, v1, 0, 16
-    store.f64x2 v2, v3
-    v4 = load.f64x2 v3
-    v5 = fvdemote v4
-    return v5
-}
-
-; heap: static, size=0x20, ptr=vmctx+0, bound=vmctx+8
-; run: %fvdemote_test(0, [0x0.0 0x0.0]) == [0x0.0 0x0.0 0x0.0 0x0.0]
-; run: %fvdemote_test(1, [0x0.1 0x0.2]) == [0x0.1 0x0.2 0x0.0 0x0.0]
-; run: %fvdemote_test(2, [0x2.1 0x1.2]) == [0x2.1 0x1.2 0x0.0 0x0.0]
-; run: %fvdemote_test(8, [0x2.1 0x1.2]) == [0x2.1 0x1.2 0x0.0 0x0.0]
-; run: %fvdemote_test(16, [0x2.1 0x1.2]) == [0x2.1 0x1.2 0x0.0 0x0.0]
@@ -1,26 +0,0 @@
-test interpret
-test run
-target x86_64
-target s390x
-target aarch64
-
-
-function %fvpromote_low_test(i64 vmctx, i64, f32x4) -> f64x2 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0+0
-    heap0 = static gv1, min 0x20, bound 0x20, offset_guard 0, index_type i64
-
-block0(v0: i64, v1: i64, v2: f32x4):
-    v3 = heap_addr.i64 heap0, v1, 0, 16
-    store.f32x4 v2, v3
-    v4 = load.f32x4 v3
-    v5 = fvpromote_low v4
-    return v5
-}
-
-; heap: static, size=0x20, ptr=vmctx+0, bound=vmctx+8
-; run: %fvpromote_low_test(0, [0x0.0 0x0.0 0x0.0 0x0.0]) == [0x0.0 0x0.0]
-; run: %fvpromote_low_test(1, [0x0.1 0x0.2 0x0.0 0x0.0]) == [0x0.1 0x0.2]
-; run: %fvpromote_low_test(2, [0x2.1 0x1.2 0x0.0 0x0.0]) == [0x2.1 0x1.2]
-; run: %fvpromote_low_test(5, [0x0.0 0x0.0 0x2.1 0x1.2]) == [0x0.0 0x0.0]
-; run: %fvpromote_low_test(16, [0x0.0 0x0.0 0x2.1 0x1.2]) == [0x0.0 0x0.0]
@@ -1,144 +0,0 @@
-test interpret
-test run
-target x86_64
-target s390x
-target aarch64
-target riscv64
-
-function %set_get_i64(i64 vmctx, i64, i64) -> i64 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0
-    gv2 = load.i64 notrap aligned gv0 +8
-    table0 = dynamic gv1, element_size 8, bound gv2, index_type i64
-
-block0(v0: i64, v1: i64, v2: i64):
-    v3 = table_addr.i64 table0, v1, +0
-    store.i64 v2, v3
-    v4 = load.i64 v3
-    return v4
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %set_get_i64(0, 1) == 1
-; run: %set_get_i64(0, 10) == 10
-; run: %set_get_i64(1, 1) == 1
-; run: %set_get_i64(1, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
-; run: %set_get_i64(10, 1) == 1
-; run: %set_get_i64(10, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
-
-
-function %set_get_i32(i64 vmctx, i64, i32) -> i32 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0
-    gv2 = load.i64 notrap aligned gv0 +8
-    table0 = dynamic gv1, element_size 8, bound gv2, index_type i64
-
-block0(v0: i64, v1: i64, v2: i32):
-    ;; Note here the offset +4
-    v3 = table_addr.i64 table0, v1, +4
-    store.i32 v2, v3
-    v4 = load.i32 v3
-    return v4
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %set_get_i32(0, 1) == 1
-; run: %set_get_i32(0, 10) == 10
-; run: %set_get_i32(1, 1) == 1
-; run: %set_get_i32(1, 0xC0FFEEEE) == 0xC0FFEEEE
-; run: %set_get_i32(10, 1) == 1
-; run: %set_get_i32(10, 0xC0FFEEEE) == 0xC0FFEEEE
-
-
-function %set_get_i8(i64 vmctx, i64, i8) -> i8 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0
-    gv2 = load.i64 notrap aligned gv0 +8
-    table0 = dynamic gv1, element_size 1, bound gv2, index_type i64
-
-block0(v0: i64, v1: i64, v2: i8):
-    v3 = table_addr.i64 table0, v1, +0
-    store.i8 v2, v3
-    v4 = load.i8 v3
-    return v4
-}
-; heap: static, size=2, ptr=vmctx+0, bound=vmctx+8
-; run: %set_get_i8(0, 1) == 1
-; run: %set_get_i8(0, 0xC0) == 0xC0
-; run: %set_get_i8(1, 1) == 1
-; run: %set_get_i8(1, 0xFF) == 0xFF
-
-
-
-function %large_elm_size(i64 vmctx, i64, i64, i8) -> i8 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0
-    gv2 = load.i64 notrap aligned gv0 +8
-    table0 = dynamic gv1, element_size 10240, bound gv2, index_type i64
-
-block0(v0: i64, v1: i64, v2: i64, v3: i8):
-    v4 = table_addr.i64 table0, v1, +0
-    v5 = iadd.i64 v4, v2
-    store.i8 v3, v5
-    v6 = load.i8 v5
-    return v6
-}
-; heap: static, size=0xC800, ptr=vmctx+0, bound=vmctx+8
-; run: %large_elm_size(0, 0, 1) == 1
-; run: %large_elm_size(1, 0, 0xC0) == 0xC0
-; run: %large_elm_size(0, 1, 1) == 1
-; run: %large_elm_size(1, 1, 0xFF) == 0xFF
-; run: %large_elm_size(0, 127, 1) == 1
-; run: %large_elm_size(1, 127, 0xFF) == 0xFF
-; run: %large_elm_size(0, 10239, 1) == 1
-; run: %large_elm_size(1, 10239, 0xBB) == 0xBB
-
-
-; Tests writing a i64 which covers 8 table entries at once
-; Loads the first byte and the last to confirm that the slots were written
-function %multi_elm_write(i64 vmctx, i64, i64) -> i8, i8 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0
-    gv2 = load.i64 notrap aligned gv0 +8
-    table0 = dynamic gv1, element_size 1, bound gv2, index_type i64
-
-block0(v0: i64, v1: i64, v2: i64):
-    v3 = table_addr.i64 table0, v1, +0
-    v4 = table_addr.i64 table0, v1, +7
-    store.i64 v2, v3
-    v5 = load.i8 v3
-    v6 = load.i8 v4
-    return v5, v6
-}
-; heap: static, size=16, ptr=vmctx+0, bound=vmctx+8
-
-;; When writing these test cases keep in mind that s390x is big endian!
-;; We just make sure that the first and last byte are the same to deal with that.
-; run: %multi_elm_write(0, 0xC0FFEEEE_FFEEEEC0) == [0xC0, 0xC0]
-; run: %multi_elm_write(1, 0xAABBCCDD_EEFF00AA) == [0xAA, 0xAA]
-
-
-
-function %heap_table(i64 vmctx, i64, i64, i64) -> i64 {
-    gv0 = vmctx
-    gv1 = load.i64 notrap aligned gv0
-    gv2 = load.i64 notrap aligned gv0 +8
-    heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64
-    table0 = dynamic gv1, element_size 9, bound gv2, index_type i64
-
-block0(v0: i64, v1: i64, v2: i64, v3: i64):
-    ; v1 - heap offset (bytes)
-    ; v2 - table offset (elements)
-    ; v3 - store/load value
-    v4 = heap_addr.i64 heap0, v1, 0, 0
-    v5 = table_addr.i64 table0, v2, +2
-
-    ; Store via heap, load via table
-    store.i64 v3, v4
-    v6 = load.i64 v5
-
-    return v6
-}
-; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
-; run: %heap_table(2, 0, 0xAABBCCDD_EEFF0011) == 0xAABBCCDD_EEFF0011
-; run: %heap_table(11, 1, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
-; run: %heap_table(20, 2, 1) == 1
-; run: %heap_table(29, 3, -10) == -10