Reorganize the global value kinds. (#490)
* Reorganize the global value kinds. This:
  - renames "deref" global values to "load" and gives it an offset that works like the "load" instruction's does
  - adds an explicit "iadd_imm" global value kind, which replaces the built-in iadd in "vmctx" and "deref" global values
  - also renames "globalsym" to "symbol"
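For orientation, the renames map the old textual forms onto the new ones roughly as in this sketch (distilled from the %load test change further down; the -16 and +32 offsets are simply the ones that test happens to use):

    ; before
    gv1 = vmctx-16
    gv2 = deref(gv1)+32: i64
    gv3 = globalsym %some_gv

    ; after
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0-16
    gv2 = iadd_imm.i64 gv1, 32
    gv3 = symbol %some_gv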
@@ -1,9 +1,10 @@
 test verifier

 function %add_members(i32, i64 vmctx) -> f32 baldrdash {
-    gv0 = vmctx+64
-    gv1 = vmctx+72
-    heap0 = dynamic gv0, min 0x1000, bound gv1, guard 0
+    gv0 = vmctx
+    gv1 = load.i64 notrap aligned gv0+64
+    gv2 = load.i32 notrap aligned gv0+72
+    heap0 = dynamic gv1, min 0x1000, bound gv2, guard 0

 ebb0(v0: i32, v6: i64):
     v1 = heap_addr.i64 heap0, v0, 20

@@ -1,8 +1,9 @@
 test verifier

 function %add_members(i32, i32 vmctx) -> f32 baldrdash {
-    gv0 = vmctx+64
-    heap0 = static gv0, min 0x1000, bound 0x10_0000, guard 0x1000
+    gv0 = vmctx
+    gv1 = load.i32 notrap aligned gv0+64
+    heap0 = static gv1, min 0x1000, bound 0x10_0000, guard 0x1000

 ebb0(v0: i32, v5: i32):
     v1 = heap_addr.i32 heap0, v0, 1

@@ -1,8 +1,9 @@
 test verifier

 function %add_members(i32, i64 vmctx) -> f32 baldrdash {
-    gv0 = vmctx+64
-    heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
+    gv0 = vmctx
+    gv1 = load.i64 notrap aligned gv0+64
+    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000

 ebb0(v0: i32, v5: i64):
     v1 = heap_addr.i64 heap0, v0, 1

@@ -562,58 +562,58 @@ to have access to a *VM context pointer* which is used as the base address.
 Typically, the VM context pointer is passed as a hidden function argument to
 Cranelift functions.

-.. inst:: GV = vmctx+Offset
+Chains of global value expressions are possible, but cycles are not allowed.
+They will be caught by the IR verifier.

-    Declare a global value of the address of a field in the VM context struct.
+.. inst:: GV = vmctx

-    This declares a global value which is a constant offset from the
-    VM context pointer which is passed as a hidden argument to all functions
-    JIT-compiled for the VM.
+    Declare a global value of the address of the VM context struct.

-    Typically, the VM context is a C struct, and the declared global value
-    is the address of a member of the struct.
+    This declares a global value which is the VM context pointer which may
+    be passed as a hidden argument to functions JIT-compiled for a VM.

-    :arg Offset: Byte offset from the VM context pointer to the global
-                 value.
+    Typically, the VM context is a `#[repr(C, packed)]` struct.
+
     :result GV: Global value.

-The address of a global value can also be derived by treating another global
-variable as a struct pointer. This makes it possible to chase pointers into VM
-runtime data structures.
+A global value can also be derived by treating another global variable as a
+struct pointer and loading from one of its fields. This makes it possible to
+chase pointers into VM runtime data structures.

-.. inst:: GV = deref(BaseGV)+Offset
+.. inst:: GV = load.Type BaseGV [Offset]

-    Declare a global value in a struct pointed to by BaseGV.
+    Declare a global value pointed to by BaseGV plus Offset, with type Type.

-    The address of GV can be computed by first loading a pointer from BaseGV
-    and adding Offset to it.
-
-    It is assumed the BaseGV resides in accessible memory with the appropriate
-    alignment for storing a pointer.
-
-    Chains of ``deref`` global values are possible, but cycles are not
-    allowed. They will be caught by the IR verifier.
+    It is assumed the BaseGV plus Offset resides in accessible memory with the
+    appropriate alignment for storing a value with type Type.

     :arg BaseGV: Global value providing the base pointer.
-    :arg Offset: Byte offset added to the loaded value.
+    :arg Offset: Offset added to the base before loading.
     :result GV: Global value.

-.. inst:: GV = [colocated] globalsym name
+.. inst:: GV = iadd_imm BaseGV, Offset

-    Declare a global value at a symbolic address.
+    Declare a global value which has the value of BaseGV offset by Offset.

-    The address of GV is symbolic and will be assigned a relocation, so that
+    :arg BaseGV: Global value providing the base value.
+    :arg Offset: Offset added to the base value.
+
+.. inst:: GV = [colocated] symbol Name
+
+    Declare a symbolic address global value.
+
+    The value of GV is symbolic and will be assigned a relocation, so that
     it can be resolved by a later linking phase.

     If the colocated keyword is present, the symbol's definition will be
     defined along with the current function, such that it can use more
     efficient addressing.

-    :arg name: External name.
+    :arg Name: External name.
     :result GV: Global value.

 .. autoinst:: global_value
-.. autoinst:: globalsym_addr
+.. autoinst:: symbol_value


 Heaps

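To see the documented kinds working together, here is a small hypothetical function preamble (illustrative only; the field offsets and the %runtime_data name are invented for this example):

    function %example(i64 vmctx) -> i64 {
        gv0 = vmctx                           ; hidden VM context pointer
        gv1 = iadd_imm.i64 gv0, 64            ; address of a field at byte offset 64
        gv2 = load.i64 notrap aligned gv0+72  ; pointer loaded from the field at offset 72
        gv3 = symbol %runtime_data            ; symbolic address, resolved at link time

    ebb0(v0: i64):
        v1 = global_value.i64 gv2             ; materialize the loaded pointer as an SSA value
        return v1
    }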
@@ -12,7 +12,7 @@ function %I32() {
     sig0 = ()
     fn0 = %foo()

-    gv0 = globalsym %some_gv
+    gv0 = symbol %some_gv

     ss0 = incoming_arg 8, offset 0
     ss1 = incoming_arg 1024, offset -1024

@@ -365,9 +365,9 @@ ebb0:
     call_indirect sig0, v401() ; bin: stk_ovf ff d6

     ; asm: movl $0, %ecx
-    [-,%rcx] v450 = globalsym_addr.i32 gv0 ; bin: b9 Abs4(%some_gv) 00000000
+    [-,%rcx] v450 = symbol_value.i32 gv0 ; bin: b9 Abs4(%some_gv) 00000000
     ; asm: movl $0, %esi
-    [-,%rsi] v451 = globalsym_addr.i32 gv0 ; bin: be Abs4(%some_gv) 00000000
+    [-,%rsi] v451 = symbol_value.i32 gv0 ; bin: be Abs4(%some_gv) 00000000

     ; Spill / Fill.

@@ -15,8 +15,8 @@ function %I64() {
     fn0 = %foo()
     fn1 = colocated %bar()

-    gv0 = globalsym %some_gv
-    gv1 = globalsym colocated %some_gv
+    gv0 = symbol %some_gv
+    gv1 = symbol colocated %some_gv

     ; Use incoming_arg stack slots because they won't be relocated by the frame
     ; layout.

@@ -66,18 +66,18 @@ ebb0:
     call_indirect sig0, v102() ; bin: stk_ovf 41 ff d2

     ; asm: mov 0x0(%rip), %rcx
-    [-,%rcx] v3 = globalsym_addr.i64 gv0 ; bin: 48 8b 0d GOTPCRel4(%some_gv-4) 00000000
+    [-,%rcx] v3 = symbol_value.i64 gv0 ; bin: 48 8b 0d GOTPCRel4(%some_gv-4) 00000000
     ; asm: mov 0x0(%rip), %rsi
-    [-,%rsi] v4 = globalsym_addr.i64 gv0 ; bin: 48 8b 35 GOTPCRel4(%some_gv-4) 00000000
+    [-,%rsi] v4 = symbol_value.i64 gv0 ; bin: 48 8b 35 GOTPCRel4(%some_gv-4) 00000000
     ; asm: mov 0x0(%rip), %r10
-    [-,%r10] v5 = globalsym_addr.i64 gv0 ; bin: 4c 8b 15 GOTPCRel4(%some_gv-4) 00000000
+    [-,%r10] v5 = symbol_value.i64 gv0 ; bin: 4c 8b 15 GOTPCRel4(%some_gv-4) 00000000

     ; asm: lea 0x0(%rip), %rcx
-    [-,%rcx] v6 = globalsym_addr.i64 gv1 ; bin: 48 8d 0d PCRel4(%some_gv-4) 00000000
+    [-,%rcx] v6 = symbol_value.i64 gv1 ; bin: 48 8d 0d PCRel4(%some_gv-4) 00000000
     ; asm: lea 0x0(%rip), %rsi
-    [-,%rsi] v7 = globalsym_addr.i64 gv1 ; bin: 48 8d 35 PCRel4(%some_gv-4) 00000000
+    [-,%rsi] v7 = symbol_value.i64 gv1 ; bin: 48 8d 35 PCRel4(%some_gv-4) 00000000
     ; asm: lea 0x0(%rip), %r10
-    [-,%r10] v8 = globalsym_addr.i64 gv1 ; bin: 4c 8d 15 PCRel4(%some_gv-4) 00000000
+    [-,%r10] v8 = symbol_value.i64 gv1 ; bin: 4c 8d 15 PCRel4(%some_gv-4) 00000000

     return
 }

@@ -14,7 +14,7 @@ function %I64() {
     fn0 = %foo()
     fn1 = colocated %bar()

-    gv0 = globalsym %some_gv
+    gv0 = symbol %some_gv

     ; Use incoming_arg stack slots because they won't be relocated by the frame
     ; layout.

@@ -518,11 +518,11 @@ ebb0:
     call_indirect sig0, v412() ; bin: stk_ovf 41 ff d2

     ; asm: movabsq $-1, %rcx
-    [-,%rcx] v450 = globalsym_addr.i64 gv0 ; bin: 48 b9 Abs8(%some_gv) 0000000000000000
+    [-,%rcx] v450 = symbol_value.i64 gv0 ; bin: 48 b9 Abs8(%some_gv) 0000000000000000
     ; asm: movabsq $-1, %rsi
-    [-,%rsi] v451 = globalsym_addr.i64 gv0 ; bin: 48 be Abs8(%some_gv) 0000000000000000
+    [-,%rsi] v451 = symbol_value.i64 gv0 ; bin: 48 be Abs8(%some_gv) 0000000000000000
     ; asm: movabsq $-1, %r10
-    [-,%r10] v452 = globalsym_addr.i64 gv0 ; bin: 49 ba Abs8(%some_gv) 0000000000000000
+    [-,%r10] v452 = symbol_value.i64 gv0 ; bin: 49 ba Abs8(%some_gv) 0000000000000000

     ; Spill / Fill.

@@ -4,18 +4,18 @@ target x86_64
 ; Test legalization for various forms of heap addresses.

 function %heap_addrs(i32, i64, i64 vmctx) {
-    gv0 = vmctx+64
-    gv1 = vmctx+72
-    gv2 = vmctx+80
-    gv3 = vmctx+88
-    gv4 = deref(gv3): i32
+    gv4 = vmctx
+    gv0 = iadd_imm.i64 gv4, 64
+    gv1 = iadd_imm.i64 gv4, 72
+    gv2 = iadd_imm.i64 gv4, 80
+    gv3 = load.i32 notrap aligned gv4+88

     heap0 = static gv0, min 0x1_0000, bound 0x1_0000_0000, guard 0x8000_0000, index_type i32
     heap1 = static gv0, guard 0x1000, bound 0x1_0000, index_type i32
     heap2 = static gv0, min 0x1_0000, bound 0x1_0000_0000, guard 0x8000_0000, index_type i64
     heap3 = static gv0, guard 0x1000, bound 0x1_0000, index_type i64
-    heap4 = dynamic gv1, min 0x1_0000, bound gv4, guard 0x8000_0000, index_type i32
-    heap5 = dynamic gv1, bound gv4, guard 0x1000, index_type i32
+    heap4 = dynamic gv1, min 0x1_0000, bound gv3, guard 0x8000_0000, index_type i32
+    heap5 = dynamic gv1, bound gv3, guard 0x1000, index_type i32
     heap6 = dynamic gv1, min 0x1_0000, bound gv2, guard 0x8000_0000, index_type i64
     heap7 = dynamic gv1, bound gv2, guard 0x1000, index_type i64

@@ -23,8 +23,8 @@ function %heap_addrs(i32, i64, i64 vmctx) {
 ; check: heap1 = static gv0, min 0, bound 0x0001_0000, guard 4096, index_type i32
 ; check: heap2 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000, index_type i64
 ; check: heap3 = static gv0, min 0, bound 0x0001_0000, guard 4096, index_type i64
-; check: heap4 = dynamic gv1, min 0x0001_0000, bound gv4, guard 0x8000_0000, index_type i32
-; check: heap5 = dynamic gv1, min 0, bound gv4, guard 4096, index_type i32
+; check: heap4 = dynamic gv1, min 0x0001_0000, bound gv3, guard 0x8000_0000, index_type i32
+; check: heap5 = dynamic gv1, min 0, bound gv3, guard 4096, index_type i32
 ; check: heap6 = dynamic gv1, min 0x0001_0000, bound gv2, guard 0x8000_0000, index_type i64
 ; check: heap7 = dynamic gv1, min 0, bound gv2, guard 4096, index_type i64

@@ -62,9 +62,7 @@ ebb0(v0: i32, v1: i64, v3: i64):
 ; check: v7 = iadd v21, v1

     v8 = heap_addr.i64 heap4, v0, 0
-; check: v27 = iadd_imm.i64 v3, 88
-; check: v28 = load.i32 notrap aligned v27
-; check: v22 = iadd_imm v28, 0
+; check: v22 = load.i32 notrap aligned v3+88
 ; check: v23 = iadd_imm v22, 0
 ; check: v24 = icmp.i32 ugt v0, v23
 ; check: brz v24, ebb4

@@ -75,37 +73,35 @@ ebb0(v0: i32, v1: i64, v3: i64):
 ; check: v8 = iadd v26, v25

     v9 = heap_addr.i64 heap5, v0, 0
-; check: v34 = iadd_imm.i64 v3, 88
-; check: v35 = load.i32 notrap aligned v34
-; check: v29 = iadd_imm v35, 0
-; check: v30 = iadd_imm v29, 0
-; check: v31 = icmp.i32 ugt v0, v30
-; check: brz v31, ebb5
+; check: v27 = load.i32 notrap aligned v3+88
+; check: v28 = iadd_imm v27, 0
+; check: v29 = icmp.i32 ugt v0, v28
+; check: brz v29, ebb5
 ; check: trap heap_oob
 ; check: ebb5:
-; check: v32 = uextend.i64 v0
-; check: v33 = iadd_imm.i64 v3, 72
-; check: v9 = iadd v33, v32
+; check: v30 = uextend.i64 v0
+; check: v31 = iadd_imm.i64 v3, 72
+; check: v9 = iadd v31, v30

     v10 = heap_addr.i64 heap6, v1, 0
+; check: v32 = iadd_imm.i64 v3, 80
+; check: v33 = iadd_imm v32, 0
+; check: v34 = icmp.i64 ugt v1, v33
+; check: brz v34, ebb6
+; check: trap heap_oob
+; check: ebb6:
+; check: v35 = iadd_imm.i64 v3, 72
+; check: v10 = iadd v35, v1
+
+    v11 = heap_addr.i64 heap7, v1, 0
 ; check: v36 = iadd_imm.i64 v3, 80
 ; check: v37 = iadd_imm v36, 0
 ; check: v38 = icmp.i64 ugt v1, v37
-; check: brz v38, ebb6
-; check: trap heap_oob
-; check: ebb6:
-; check: v39 = iadd_imm.i64 v3, 72
-; check: v10 = iadd v39, v1
-
-    v11 = heap_addr.i64 heap7, v1, 0
-; check: v40 = iadd_imm.i64 v3, 80
-; check: v41 = iadd_imm v40, 0
-; check: v42 = icmp.i64 ugt v1, v41
-; check: brz v42, ebb7
+; check: brz v38, ebb7
 ; check: trap heap_oob
 ; check: ebb7:
-; check: v43 = iadd_imm.i64 v3, 72
-; check: v11 = iadd v43, v1
+; check: v39 = iadd_imm.i64 v3, 72
+; check: v11 = iadd v39, v1

     return
 }

@@ -6,7 +6,8 @@ target x86_64
 ; regex: EBB=ebb\d+

 function %vmctx(i64 vmctx) -> i64 {
-    gv1 = vmctx-16
+    gv0 = vmctx
+    gv1 = iadd_imm.i64 gv0, -16

 ebb1(v1: i64):
     v2 = global_value.i64 gv1

@@ -15,28 +16,28 @@ ebb1(v1: i64):
 ; check: return v2
 }

-function %deref(i64 vmctx) -> i64 {
-    gv1 = vmctx-16
-    gv2 = deref(gv1)+32: i64
+function %load(i64 vmctx) -> i64 {
+    gv0 = vmctx
+    gv1 = load.i64 notrap aligned gv0-16
+    gv2 = iadd_imm.i64 gv1, 32

 ebb1(v1: i64):
     v2 = global_value.i64 gv2
-    ; check: $(a1=$V) = iadd_imm v1, -16
-    ; check: $(p1=$V) = load.i64 notrap aligned $a1
+    ; check: $(p1=$V) = load.i64 notrap aligned v1-16
     ; check: v2 = iadd_imm $p1, 32
     return v2
     ; check: return v2
 }

-function %sym() -> i64 {
-    gv0 = globalsym %something
-    gv1 = globalsym u123:456
+function %symbol() -> i64 {
+    gv0 = symbol %something
+    gv1 = symbol u123:456

 ebb1:
     v0 = global_value.i64 gv0
-    ; check: v0 = globalsym_addr.i64 gv0
+    ; check: v0 = symbol_value.i64 gv0
     v1 = global_value.i64 gv1
-    ; check: v1 = globalsym_addr.i64 gv1
+    ; check: v1 = symbol_value.i64 gv1
     v2 = bxor v0, v1
     return v2
 }

@@ -44,8 +45,9 @@ ebb1:
 ; SpiderMonkey VM-style static 4+2 GB heap.
 ; This eliminates bounds checks completely for offsets < 2GB.
 function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash {
-    gv0 = vmctx+64
-    heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
+    gv0 = vmctx
+    gv1 = iadd_imm.i64 gv0, 64
+    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000

 ebb0(v0: i32, v999: i64):
     ; check: ebb0(

@@ -64,8 +66,9 @@ ebb0(v0: i32, v999: i64):
 }

 function %staticheap_static_oob_sm64(i32, i64 vmctx) -> f32 baldrdash {
-    gv0 = vmctx+64
-    heap0 = static gv0, min 0x1000, bound 0x1000_0000, guard 0x8000_0000
+    gv0 = vmctx
+    gv1 = iadd_imm.i64 gv0, 64
+    heap0 = static gv1, min 0x1000, bound 0x1000_0000, guard 0x8000_0000

 ebb0(v0: i32, v999: i64):
     ; Everything after the obviously OOB access should be eliminated, leaving

@@ -87,8 +90,9 @@ ebb0(v0: i32, v999: i64):
 ; SpiderMonkey VM-style static 4+2 GB heap.
 ; Offsets >= 2 GB do require a boundscheck.
 function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash {
-    gv0 = vmctx+64
-    heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
+    gv0 = vmctx
+    gv1 = iadd_imm.i64 gv0, 64
+    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000

 ebb0(v0: i32, v999: i64):
     ; check: ebb0(

@@ -1,27 +0,0 @@
-; Test legalization of tables
-test legalizer
-target x86_64
-
-; regex: V=v\d+
-; regex: EBB=ebb\d+
-
-function %test0(i64 vmctx, i64) -> i64 {
-    gv0 = vmctx+12
-    gv1 = vmctx+14
-    table0 = dynamic gv0, min 20, bound gv1, element_size 4, index_type i64
-
-ebb0(v0: i64, v1: i64):
-    v2 = table_addr.i64 table0, v1, +3
-    return v2
-}
-
-; check: $(bound=$V) = iadd_imm $(input=$V), 14
-; nextln: $(cond=$V) = icmp uge $(limit=$V), $bound
-; nextln: brz $cond, ebb1
-; nextln: trap table_oob
-; nextln:
-; nextln: ebb1:
-; nextln: $(base=$V) = iadd_imm.i64 $(vmctx=$V), 12
-; nextln: $(scaled=$V) = ishl_imm.i64 $(index=$V), 2
-; nextln: $(elem_addr=$V) = iadd $base, $scaled
-; nextln: $(field_addr=$V) = iadd_imm $elem_addr, 3

@@ -4,26 +4,24 @@ target x86_64
 ; Test legalization for various forms of table addresses.

 function %table_addrs(i32, i64, i64 vmctx) {
-    gv0 = vmctx+72
-    gv1 = vmctx+80
-    gv2 = vmctx+88
-    gv3 = deref(gv2): i32
+    gv4 = vmctx
+    gv0 = iadd_imm.i64 gv4, 72
+    gv1 = iadd_imm.i64 gv4, 80
+    gv2 = load.i32 notrap aligned gv4+88

-    table0 = dynamic gv0, min 0x1_0000, bound gv3, element_size 1, index_type i32
-    table1 = dynamic gv0, bound gv3, element_size 16, index_type i32
+    table0 = dynamic gv0, min 0x1_0000, bound gv2, element_size 1, index_type i32
+    table1 = dynamic gv0, bound gv2, element_size 16, index_type i32
     table2 = dynamic gv0, min 0x1_0000, bound gv1, element_size 1, index_type i64
     table3 = dynamic gv0, bound gv1, element_size 16, index_type i64

-; check: table0 = dynamic gv0, min 0x0001_0000, bound gv3, element_size 1, index_type i32
-; check: table1 = dynamic gv0, min 0, bound gv3, element_size 16, index_type i32
+; check: table0 = dynamic gv0, min 0x0001_0000, bound gv2, element_size 1, index_type i32
+; check: table1 = dynamic gv0, min 0, bound gv2, element_size 16, index_type i32
 ; check: table2 = dynamic gv0, min 0x0001_0000, bound gv1, element_size 1, index_type i64
 ; check: table3 = dynamic gv0, min 0, bound gv1, element_size 16, index_type i64

 ebb0(v0: i32, v1: i64, v3: i64):
     v4 = table_addr.i64 table0, v0, +0
-; check: v12 = iadd_imm v3, 88
-; check: v13 = load.i32 notrap aligned v12
-; check: v8 = iadd_imm v13, 0
+; check: v8 = load.i32 notrap aligned v3+88
 ; check: v9 = icmp uge v0, v8
 ; check: brz v9, ebb1
 ; check: trap table_oob

@@ -33,36 +31,34 @@ ebb0(v0: i32, v1: i64, v3: i64):
 ; check: v4 = iadd v11, v10

     v5 = table_addr.i64 table1, v0, +0
-; check: v19 = iadd_imm.i64 v3, 88
-; check: v20 = load.i32 notrap aligned v19
-; check: v14 = iadd_imm v20, 0
-; check: v15 = icmp.i32 uge v0, v14
-; check: brz v15, ebb2
+; check: v12 = load.i32 notrap aligned v3+88
+; check: v13 = icmp.i32 uge v0, v12
+; check: brz v13, ebb2
 ; check: trap table_oob
 ; check: ebb2:
-; check: v16 = uextend.i64 v0
-; check: v17 = iadd_imm.i64 v3, 72
-; check: v18 = ishl_imm v16, 4
-; check: v5 = iadd v17, v18
+; check: v14 = uextend.i64 v0
+; check: v15 = iadd_imm.i64 v3, 72
+; check: v16 = ishl_imm v14, 4
+; check: v5 = iadd v15, v16

     v6 = table_addr.i64 table2, v1, +0
-; check: v21 = iadd_imm.i64 v3, 80
-; check: v22 = icmp.i64 uge v1, v21
-; check: brz v22, ebb3
+; check: v17 = iadd_imm.i64 v3, 80
+; check: v18 = icmp.i64 uge v1, v17
+; check: brz v18, ebb3
 ; check: trap table_oob
 ; check: ebb3:
-; check: v23 = iadd_imm.i64 v3, 72
-; check: v6 = iadd v23, v1
+; check: v19 = iadd_imm.i64 v3, 72
+; check: v6 = iadd v19, v1

     v7 = table_addr.i64 table3, v1, +0
-; check: v24 = iadd_imm.i64 v3, 80
-; check: v25 = icmp.i64 uge v1, v24
-; check: brz v25, ebb4
+; check: v20 = iadd_imm.i64 v3, 80
+; check: v21 = icmp.i64 uge v1, v20
+; check: brz v21, ebb4
 ; check: trap table_oob
 ; check: ebb4:
-; check: v26 = iadd_imm.i64 v3, 72
-; check: v27 = ishl_imm.i64 v1, 4
-; check: v7 = iadd v26, v27
+; check: v22 = iadd_imm.i64 v3, 72
+; check: v23 = ishl_imm.i64 v1, 4
+; check: v7 = iadd v22, v23

     return
 }

@@ -2,23 +2,21 @@ test cat
 test verifier

 function %vmglobal(i64 vmctx) -> i32 {
-    gv3 = vmctx+16
-    ; check: gv3 = vmctx+16
-    gv4 = vmctx+0
-    ; check: gv4 = vmctx
-    ; not: +0
-    gv5 = vmctx -256
-    ; check: gv5 = vmctx-256
+    gv3 = vmctx
+    ; check: gv3 = vmctx
 ebb0(v0: i64):
     v1 = global_value.i32 gv3
     ; check: v1 = global_value.i32 gv3
     return v1
 }

-function %deref(i64 vmctx) -> i32 {
-    gv3 = vmctx+16
-    gv4 = deref(gv3)-32: i32
-    ; check: gv4 = deref(gv3)-32
+function %load_and_add_imm(i64 vmctx) -> i32 {
+    gv2 = vmctx
+    gv3 = load.i32 notrap aligned gv2-72
+    gv4 = iadd_imm.i32 gv3, -32
+    ; check: gv2 = vmctx
+    ; check: gv3 = load.i32 notrap aligned gv2-72
+    ; check: gv4 = iadd_imm.i32 gv3, -32
 ebb0(v0: i64):
     v1 = global_value.i32 gv4
     ; check: v1 = global_value.i32 gv4

@@ -27,20 +25,22 @@ ebb0(v0: i64):

 ; Refer to a global value before it's been declared.
 function %backref(i64 vmctx) -> i32 {
-    gv1 = deref(gv2)-32: i32
-    ; check: gv1 = deref(gv2)-32
-    gv2 = vmctx+16
-    ; check: gv2 = vmctx+16
+    gv0 = iadd_imm.i32 gv1, -32
+    ; check: gv0 = iadd_imm.i32 gv1, -32
+    gv1 = load.i32 notrap aligned gv2
+    ; check: gv1 = load.i32 notrap aligned gv2
+    gv2 = vmctx
+    ; check: gv2 = vmctx
 ebb0(v0: i64):
     v1 = global_value.i32 gv1
     return v1
 }

-function %sym() -> i32 {
-    gv0 = globalsym %something
-    ; check: gv0 = globalsym %something
-    gv1 = globalsym u8:9
-    ; check: gv1 = globalsym u8:9
+function %symbol() -> i32 {
+    gv0 = symbol %something
+    ; check: gv0 = symbol %something
+    gv1 = symbol u8:9
+    ; check: gv1 = symbol u8:9
 ebb0:
     v0 = global_value.i32 gv0
     ; check: v0 = global_value.i32 gv0

@@ -54,7 +54,8 @@ ebb0:
 function %sheap(i32, i64 vmctx) -> i64 {
     heap1 = static gv5, min 0x1_0000, bound 0x1_0000_0000, guard 0x8000_0000
     heap2 = static gv5, guard 0x1000, bound 0x1_0000
-    gv5 = vmctx+64
+    gv4 = vmctx
+    gv5 = iadd_imm.i64 gv4, 64

 ; check: heap1 = static gv5, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
 ; check: heap2 = static gv5, min 0, bound 0x0001_0000, guard 4096

@@ -68,8 +69,9 @@ ebb0(v1: i32, v2: i64):
 function %dheap(i32, i64 vmctx) -> i64 {
     heap1 = dynamic gv5, min 0x1_0000, bound gv6, guard 0x8000_0000
     heap2 = dynamic gv5, bound gv6, guard 0x1000
-    gv5 = vmctx+64
-    gv6 = vmctx+72
+    gv4 = vmctx
+    gv5 = iadd_imm.i64 gv4, 64
+    gv6 = iadd_imm.i64 gv4, 72

 ; check: heap1 = dynamic gv5, min 0x0001_0000, bound gv6, guard 0x8000_0000
 ; check: heap2 = dynamic gv5, min 0, bound gv6, guard 4096

@@ -5,7 +5,8 @@ target x86_64 haswell
 ;
 ; The coalescer creates a virtual register with two interfering values.
 function %pr207(i64 vmctx, i32, i32) -> i32 system_v {
-    gv0 = vmctx-8
+    gv1 = vmctx
+    gv0 = iadd_imm.i64 gv1, -8
     heap0 = static gv0, min 0, bound 0x5000, guard 0x0040_0000
     sig0 = (i64 vmctx, i32, i32) -> i32 system_v
     sig1 = (i64 vmctx, i32, i32, i32) -> i32 system_v

@@ -104,7 +104,8 @@ ebb1(v31: i64):
 }

 function u0:26(i64 vmctx [%r14]) -> i64 [%rax] baldrdash {
-    gv0 = vmctx+48
+    gv1 = vmctx
+    gv0 = iadd_imm.i64 gv1, 48
     sig0 = (i32 [%rdi], i64 [%rsi], i64 vmctx [%r14], i64 sigid [%rbx]) -> i64 [%rax] baldrdash

 ebb0(v0: i64):

@@ -11,7 +11,8 @@ target x86_64 haswell
 ; The problem was the reload pass rewriting EBB arguments on "brnz v9, ebb3(v9)"

 function %pr208(i64 vmctx [%rdi]) system_v {
-    gv0 = vmctx-8
+    gv1 = vmctx
+    gv0 = iadd_imm.i64 gv1, -8
     heap0 = static gv0, min 0, bound 0x5000, guard 0x0040_0000
     sig0 = (i64 vmctx [%rdi]) -> i32 [%rax] system_v
     sig1 = (i64 vmctx [%rdi], i32 [%rsi]) system_v

@@ -1,17 +1,17 @@
 test verifier
 target x86_64

-function %deref_base_type(i64 vmctx) {
-    gv0 = vmctx+0
-    gv1 = deref(gv0): i32
-    gv2 = deref(gv1): i32 ; error: deref base gv1 has type i32, which is not the pointer type i64
+function %load_base_type(i64 vmctx) {
+    gv0 = vmctx
+    gv1 = load.i32 notrap aligned gv0
+    gv2 = load.i32 notrap aligned gv1 ; error: base gv1 has type i32, which is not the pointer type i64

 ebb0(v0: i64):
     return
 }

 function %global_value_wrong_type(i64 vmctx) {
-    gv0 = vmctx+0
+    gv0 = vmctx

 ebb0(v0: i64):
     v1 = global_value.i32 gv0 ; error: global_value instruction with type i32 references global value with type i64

@@ -2,8 +2,8 @@ test verifier
 target x86_64

 function %heap_base_type(i64 vmctx) {
-    gv0 = vmctx+0
-    gv1 = deref(gv0): i32
+    gv0 = vmctx
+    gv1 = load.i32 notrap aligned gv0
     heap0 = static gv1, guard 0x1000, bound 0x1_0000, index_type i32 ; error: heap base has type i32, which is not the pointer type i64

 ebb0(v0: i64):

@@ -11,7 +11,7 @@ ebb0(v0: i64):
 }

 function %invalid_base(i64 vmctx) {
-    gv0 = vmctx+0
+    gv0 = vmctx
     heap0 = dynamic gv1, bound gv0, guard 0x1000, index_type i64 ; error: invalid base global value gv1

 ebb0(v0: i64):

@@ -19,7 +19,7 @@ ebb0(v0: i64):
 }

 function %invalid_bound(i64 vmctx) {
-    gv0 = vmctx+0
+    gv0 = vmctx
     heap0 = dynamic gv0, bound gv1, guard 0x1000, index_type i64 ; error: invalid bound global value gv1

 ebb0(v0: i64):

@@ -27,8 +27,8 @@ ebb0(v0: i64):
 }

 function %heap_bound_type(i64 vmctx) {
-    gv0 = vmctx+0
-    gv1 = deref(gv0): i16
+    gv0 = vmctx
+    gv1 = load.i16 notrap aligned gv0
     heap0 = dynamic gv0, bound gv1, guard 0x1000, index_type i32 ; error: heap index type i32 differs from the type of its bound, i16

 ebb0(v0: i64):

@@ -36,7 +36,7 @@ ebb0(v0: i64):
 }

 function %heap_addr_index_type(i64 vmctx, i64) {
-    gv0 = vmctx+0
+    gv0 = vmctx
     heap0 = static gv0, guard 0x1000, bound 0x1_0000, index_type i32

 ebb0(v0: i64, v1: i64):

@@ -1,15 +1,15 @@
 test verifier

-function %deref_cycle() {
-    gv1 = deref(gv2)-32: i32 ; error: deref cycle: [gv1, gv2]
-    gv2 = deref(gv1): i32
+function %cycle() {
+    gv0 = load.i32 notrap aligned gv1 ; error: global value cycle: [gv0, gv1]
+    gv1 = load.i32 notrap aligned gv0-32

 ebb1:
     return
 }

 function %self_cycle() {
-    gv0 = deref(gv0)-32: i32 ; error: deref cycle: [gv0]
+    gv0 = load.i32 notrap aligned gv0 ; error: global value cycle: [gv0]

 ebb1:
     return

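For contrast with the cycle errors above, a chain of load global values that bottoms out at vmctx is legal, as the documentation notes (a minimal hypothetical sketch assuming a 64-bit pointer type):

    function %chain(i64 vmctx) {
        gv0 = vmctx
        gv1 = load.i64 notrap aligned gv0     ; first pointer hop
        gv2 = load.i64 notrap aligned gv1+8   ; second hop; acyclic, so it verifies

    ebb0(v0: i64):
        return
    }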
@@ -2,8 +2,8 @@ test verifier
 target x86_64

 function %table_base_type(i64 vmctx) {
-    gv0 = vmctx+0
-    gv1 = deref(gv0): i32
+    gv0 = vmctx
+    gv1 = load.i32 notrap aligned gv0
     table0 = dynamic gv1, element_size 1, bound gv1, index_type i32 ; error: table base has type i32, which is not the pointer type i64

 ebb0(v0: i64):

@@ -11,7 +11,7 @@ ebb0(v0: i64):
 }

 function %invalid_base(i64 vmctx) {
-    gv0 = vmctx+0
+    gv0 = vmctx
     table0 = dynamic gv1, bound gv0, element_size 1, index_type i64 ; error: invalid base global value gv1

 ebb0(v0: i64):

@@ -19,7 +19,7 @@ ebb0(v0: i64):
 }

 function %invalid_bound(i64 vmctx) {
-    gv0 = vmctx+0
+    gv0 = vmctx
     table0 = dynamic gv0, bound gv1, element_size 1, index_type i64 ; error: invalid bound global value gv1

 ebb0(v0: i64):

@@ -27,8 +27,8 @@ ebb0(v0: i64):
 }

 function %table_bound_type(i64 vmctx) {
-    gv0 = vmctx+0
-    gv1 = deref(gv0): i16
+    gv0 = vmctx
+    gv1 = load.i16 notrap aligned gv0
     table0 = dynamic gv0, bound gv1, element_size 1, index_type i32 ; error: table index type i32 differs from the type of its bound, i16

 ebb0(v0: i64):

@@ -36,8 +36,8 @@ ebb0(v0: i64):
 }

 function %table_addr_index_type(i64 vmctx, i64) {
-    gv0 = vmctx+0
-    gv1 = deref(gv0): i32
+    gv0 = vmctx
+    gv1 = load.i32 notrap aligned gv0
     table0 = dynamic gv0, element_size 1, bound gv1, index_type i32

 ebb0(v0: i64, v1: i64):

@@ -2,7 +2,7 @@ test verifier

 ; Using a vmctx global value without declaring it first leads to an error.
 function %vmglobal_err(i64) -> i64 {
-    gv4 = vmctx+0 ; error: undeclared vmctx reference
+    gv4 = vmctx ; error: undeclared vmctx reference
 ebb0(v0: i64):
     v1 = global_value.i64 gv4
     return v1

@@ -10,7 +10,7 @@ ebb0(v0: i64):

 ; If it is declared, all is fine.
 function %vmglobal_ok(i64 vmctx) -> i64 {
-    gv4 = vmctx+0
+    gv4 = vmctx
 ebb0(v0: i64):
     v1 = global_value.i64 gv4
     return v1