* cranelift: Add stack support to the interpreter. We also change the approach for heap loads and stores. Previously we would use the offset as the address into the heap. However, that approach does not allow using the load/store instructions to read/write from both the heap and the stack. This commit changes the addressing mechanism of the interpreter: we now return the real addresses from the addressing instructions (stack_addr/heap_addr), and instead check whether the address passed into the load/store instructions points to an area in the heap or the stack. * cranelift: Add virtual addresses to the cranelift interpreter. Adds a virtual addressing scheme that was discussed as a better alternative to returning real addresses. The virtual addresses are split into 4 regions (stack, heap, tables, and global values), and each address is composed of an `entry` field and an `offset` field. In general the `entry` field corresponds to the instance of the resource (e.g. table5 is entry 5) and the `offset` field is a byte offset inside that entry. There is one exception to this, which is the stack: because there is only one stack, the whole address is an offset field. The number of bits in the entry vs. offset fields varies with the `region` and the address size (32-bit vs. 64-bit). This is done because with 32-bit addresses we would have to compromise on heap size, or have a small number of global values / tables. With 64-bit addresses we do not have to compromise on this, but we still need to support 32-bit addresses. * cranelift: Remove interpreter trap codes. * cranelift: Calculate frame_offset when entering or exiting a frame. * cranelift: Add safe read/write interface to DataValue. * cranelift: DataValue writes full 128-bit slot for booleans. * cranelift: Use DataValue accessors for trampoline.
131 lines
2.7 KiB
Plaintext
; Run these tests both in the interpreter and as compiled code on each target.
test interpret
test run
target x86_64 machinst
target s390x
target aarch64
; Round-trip a single i64 value through one explicit stack slot.
function %stack_simple(i64) -> i64 {
    ss0 = explicit_slot 8

block0(v0: i64):
    stack_store.i64 v0, ss0
    v1 = stack_load.i64 ss0
    return v1
}
; run: %stack_simple(0) == 0
; run: %stack_simple(1) == 1
; run: %stack_simple(-1) == -1
; Round-trip an i64 through a stack slot declared with an explicit offset.
function %slot_offset(i64) -> i64 {
    ss0 = explicit_slot 8, offset 8

block0(v0: i64):
    stack_store.i64 v0, ss0
    v1 = stack_load.i64 ss0
    return v1
}
; run: %slot_offset(0) == 0
; run: %slot_offset(1) == 1
; run: %slot_offset(-1) == -1
; Store/load at a non-zero byte offset (ss0+8) within a 16-byte slot.
function %stack_offset(i64) -> i64 {
    ss0 = explicit_slot 16

block0(v0: i64):
    stack_store.i64 v0, ss0+8
    v1 = stack_load.i64 ss0+8
    return v1
}
; run: %stack_offset(0) == 0
; run: %stack_offset(1) == 1
; run: %stack_offset(-1) == -1
; Store/load an i64 at an unaligned offset (ss0+3) in an 11-byte slot.
function %offset_unaligned(i64) -> i64 {
    ss0 = explicit_slot 11

block0(v0: i64):
    stack_store.i64 v0, ss0+3
    v1 = stack_load.i64 ss0+3
    return v1
}
; run: %offset_unaligned(0) == 0
; run: %offset_unaligned(1) == 1
; run: %offset_unaligned(-1) == -1
; Two independent slots: store into both, read both back, and add the results.
function %multi_slot_stack(i64, i64) -> i64 {
    ss0 = explicit_slot 8
    ss1 = explicit_slot 8

block0(v0: i64, v1: i64):
    stack_store.i64 v0, ss0
    stack_store.i64 v1, ss1
    v2 = stack_load.i64 ss0
    v3 = stack_load.i64 ss1
    v4 = iadd.i64 v2, v3
    return v4
}
; run: %multi_slot_stack(0, 1) == 1
; run: %multi_slot_stack(1, 2) == 3
; A 1-byte slot next to an 8-byte slot: a write to either slot must not
; clobber its neighbor.
function %multi_slot_out_of_bounds_writes(i8, i64) -> i8, i64 {
    ss0 = explicit_slot 1
    ss1 = explicit_slot 8

block0(v0: i8, v1: i64):
    stack_store.i8 v0, ss0
    stack_store.i64 v1, ss1
    v2 = stack_load.i8 ss0
    v3 = stack_load.i64 ss1
    return v2, v3
}
; run: %multi_slot_out_of_bounds_writes(10, 1) == [10, 1]
; run: %multi_slot_out_of_bounds_writes(0, 2) == [0, 2]
; Same neighbor-isolation check as above, but the first slot carries an
; explicit offset.
function %multi_slot_offset_writes(i8, i64) -> i8, i64 {
    ss0 = explicit_slot 8, offset 8
    ss1 = explicit_slot 8

block0(v0: i8, v1: i64):
    stack_store.i8 v0, ss0
    stack_store.i64 v1, ss1
    v2 = stack_load.i8 ss0
    v3 = stack_load.i64 ss1
    return v2, v3
}
; run: %multi_slot_offset_writes(0, 1) == [0, 1]
; run: %multi_slot_offset_writes(1, 2) == [1, 2]
; A slot with a negative declared offset must still be isolated from its
; neighbor.
function %slot_offset_negative(i64, i64) -> i64, i64 {
    ss0 = explicit_slot 8
    ss1 = explicit_slot 8, offset -8

block0(v0: i64, v1: i64):
    stack_store.i64 v0, ss0
    stack_store.i64 v1, ss1
    v2 = stack_load.i64 ss0
    v3 = stack_load.i64 ss1
    return v2, v3
}
; run: %slot_offset_negative(0, 1) == [0, 1]
; run: %slot_offset_negative(2, 3) == [2, 3]
; Exercise a large (1 MiB) slot by writing/reading at its last 8 bytes.
function %huge_slots(i64) -> i64 {
    ss0 = explicit_slot 1048576 ; 1MB Slot

block0(v0: i64):
    stack_store.i64 v0, ss0+1048568 ; Store at 1MB - 8bytes
    v1 = stack_load.i64 ss0+1048568
    return v1
}
; run: %huge_slots(0) == 0
; run: %huge_slots(1) == 1
; run: %huge_slots(-1) == -1