Reorganize the global value kinds. (#490)

* Reorganize the global value kinds.

This:
 - renames "deref" global values to "load" and gives them an offset that
   works the way the "load" instruction's offset does
 - adds an explicit "iadd_imm" global value kind, which replaces the
   addition that was previously built into "vmctx" and "deref" global values
 - renames "globalsym" to "symbol"

A sketch of how the reorganized kinds fit together is included below.
Dan Gohman authored on 2018-09-04 21:09:04 -07:00; committed by GitHub
parent 59b83912ba, commit ca9da7702e
30 changed files with 467 additions and 320 deletions
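For orientation, here is a minimal sketch of how IR-building code migrates to the reorganized kinds. It uses only the `GlobalValueData` variants and the `create_global_value` API shown in the diffs below; the function itself and the specific offsets are illustrative rather than taken from this commit.

use cranelift_codegen::ir::{self, immediates::{Imm64, Offset32}, types};

// Illustrative only: declare the chains that "vmctx+64" and "deref" globals
// used to express, using the reorganized kinds.
fn declare_globals(func: &mut ir::Function) {
    // Old: VMContext { offset: 64 } -- now a bare vmctx plus an explicit iadd_imm.
    let vmctx = func.create_global_value(ir::GlobalValueData::VMContext);
    let _field_addr = func.create_global_value(ir::GlobalValueData::IAddImm {
        base: vmctx,
        offset: Imm64::new(64),
        global_type: types::I64,
    });
    // Old: Deref { base, offset, memory_type } (load, then add the offset) -- now a
    // Load whose offset is applied to the address before loading, like the `load`
    // instruction; a trailing IAddImm reproduces the old post-load addition.
    let _heap_base = func.create_global_value(ir::GlobalValueData::Load {
        base: vmctx,
        offset: Offset32::new(72),
        global_type: types::I64,
    });
    // Old: Sym { name, colocated } -- now Symbol, with an optional folded offset.
    let _sym = func.create_global_value(ir::GlobalValueData::Symbol {
        name: ir::ExternalName::testcase("some_gv"),
        offset: Imm64::new(0),
        colocated: true,
    });
}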


@@ -1,9 +1,10 @@
test verifier
function %add_members(i32, i64 vmctx) -> f32 baldrdash {
gv0 = vmctx+64
gv1 = vmctx+72
heap0 = dynamic gv0, min 0x1000, bound gv1, guard 0
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+64
gv2 = load.i32 notrap aligned gv0+72
heap0 = dynamic gv1, min 0x1000, bound gv2, guard 0
ebb0(v0: i32, v6: i64):
v1 = heap_addr.i64 heap0, v0, 20


@@ -1,8 +1,9 @@
test verifier
function %add_members(i32, i32 vmctx) -> f32 baldrdash {
gv0 = vmctx+64
heap0 = static gv0, min 0x1000, bound 0x10_0000, guard 0x1000
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0+64
heap0 = static gv1, min 0x1000, bound 0x10_0000, guard 0x1000
ebb0(v0: i32, v5: i32):
v1 = heap_addr.i32 heap0, v0, 1


@@ -1,8 +1,9 @@
test verifier
function %add_members(i32, i64 vmctx) -> f32 baldrdash {
gv0 = vmctx+64
heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+64
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
ebb0(v0: i32, v5: i64):
v1 = heap_addr.i64 heap0, v0, 1


@@ -562,58 +562,58 @@ to have access to a *VM context pointer* which is used as the base address.
Typically, the VM context pointer is passed as a hidden function argument to
Cranelift functions.
.. inst:: GV = vmctx+Offset
Chains of global value expressions are possible, but cycles are not allowed.
They will be caught by the IR verifier.
Declare a global value of the address of a field in the VM context struct.
.. inst:: GV = vmctx
This declares a global value which is a constant offset from the
VM context pointer which is passed as a hidden argument to all functions
JIT-compiled for the VM.
Declare a global value of the address of the VM context struct.
Typically, the VM context is a C struct, and the declared global value
is the address of a member of the struct.
This declares a global value which is the VM context pointer which may
be passed as a hidden argument to functions JIT-compiled for a VM.
Typically, the VM context is a `#[repr(C, packed)]` struct.
:arg Offset: Byte offset from the VM context pointer to the global
value.
:result GV: Global value.
The address of a global value can also be derived by treating another global
variable as a struct pointer. This makes it possible to chase pointers into VM
runtime data structures.
A global value can also be derived by treating another global variable as a
struct pointer and loading from one of its fields. This makes it possible to
chase pointers into VM runtime data structures.
.. inst:: GV = deref(BaseGV)+Offset
.. inst:: GV = load.Type BaseGV [Offset]
Declare a global value in a struct pointed to by BaseGV.
Declare a global value pointed to by BaseGV plus Offset, with type Type.
The address of GV can be computed by first loading a pointer from BaseGV
and adding Offset to it.
It is assumed the BaseGV resides in accessible memory with the appropriate
alignment for storing a pointer.
Chains of ``deref`` global values are possible, but cycles are not
allowed. They will be caught by the IR verifier.
It is assumed the BaseGV plus Offset resides in accessible memory with the
appropriate alignment for storing a value with type Type.
:arg BaseGV: Global value providing the base pointer.
:arg Offset: Byte offset added to the loaded value.
:arg Offset: Offset added to the base before loading.
:result GV: Global value.
.. inst:: GV = [colocated] globalsym name
.. inst:: GV = iadd_imm BaseGV, Offset
Declare a global value at a symbolic address.
Declare a global value which has the value of BaseGV offset by Offset.
The address of GV is symbolic and will be assigned a relocation, so that
:arg BaseGV: Global value providing the base value.
:arg Offset: Offset added to the base value.
.. inst:: GV = [colocated] symbol Name
Declare a symbolic address global value.
The value of GV is symbolic and will be assigned a relocation, so that
it can be resolved by a later linking phase.
If the colocated keyword is present, the symbol's definition will be
defined along with the current function, such that it can use more
efficient addressing.
:arg name: External name.
:arg Name: External name.
:result GV: Global value.
.. autoinst:: global_value
.. autoinst:: globalsym_addr
.. autoinst:: symbol_value
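To make the chain semantics described above concrete, here is a plain-Rust model (an editor's sketch, not Cranelift API or part of this commit) of the run-time value that a vmctx → load → iadd_imm chain denotes; the offsets are illustrative.

// Models these illustrative declarations:
//   gv0 = vmctx
//   gv1 = load.i64 notrap aligned gv0+64   ; a pointer stored in the context struct
//   gv2 = iadd_imm.i64 gv1, 32             ; that pointer plus a constant
unsafe fn eval_gv2(vmctx: *const u8) -> u64 {
    // gv0: the VM context pointer itself.
    let gv0 = vmctx as u64;
    // gv1: load a 64-bit value from accessible, aligned memory at gv0 + 64.
    let gv1 = *((gv0 + 64) as *const u64);
    // gv2: the loaded value offset by a constant.
    gv1.wrapping_add(32)
}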
Heaps


@@ -12,7 +12,7 @@ function %I32() {
sig0 = ()
fn0 = %foo()
gv0 = globalsym %some_gv
gv0 = symbol %some_gv
ss0 = incoming_arg 8, offset 0
ss1 = incoming_arg 1024, offset -1024
@@ -365,9 +365,9 @@ ebb0:
call_indirect sig0, v401() ; bin: stk_ovf ff d6
; asm: movl $0, %ecx
[-,%rcx] v450 = globalsym_addr.i32 gv0 ; bin: b9 Abs4(%some_gv) 00000000
[-,%rcx] v450 = symbol_value.i32 gv0 ; bin: b9 Abs4(%some_gv) 00000000
; asm: movl $0, %esi
[-,%rsi] v451 = globalsym_addr.i32 gv0 ; bin: be Abs4(%some_gv) 00000000
[-,%rsi] v451 = symbol_value.i32 gv0 ; bin: be Abs4(%some_gv) 00000000
; Spill / Fill.


@@ -15,8 +15,8 @@ function %I64() {
fn0 = %foo()
fn1 = colocated %bar()
gv0 = globalsym %some_gv
gv1 = globalsym colocated %some_gv
gv0 = symbol %some_gv
gv1 = symbol colocated %some_gv
; Use incoming_arg stack slots because they won't be relocated by the frame
; layout.
@@ -66,18 +66,18 @@ ebb0:
call_indirect sig0, v102() ; bin: stk_ovf 41 ff d2
; asm: mov 0x0(%rip), %rcx
[-,%rcx] v3 = globalsym_addr.i64 gv0 ; bin: 48 8b 0d GOTPCRel4(%some_gv-4) 00000000
[-,%rcx] v3 = symbol_value.i64 gv0 ; bin: 48 8b 0d GOTPCRel4(%some_gv-4) 00000000
; asm: mov 0x0(%rip), %rsi
[-,%rsi] v4 = globalsym_addr.i64 gv0 ; bin: 48 8b 35 GOTPCRel4(%some_gv-4) 00000000
[-,%rsi] v4 = symbol_value.i64 gv0 ; bin: 48 8b 35 GOTPCRel4(%some_gv-4) 00000000
; asm: mov 0x0(%rip), %r10
[-,%r10] v5 = globalsym_addr.i64 gv0 ; bin: 4c 8b 15 GOTPCRel4(%some_gv-4) 00000000
[-,%r10] v5 = symbol_value.i64 gv0 ; bin: 4c 8b 15 GOTPCRel4(%some_gv-4) 00000000
; asm: lea 0x0(%rip), %rcx
[-,%rcx] v6 = globalsym_addr.i64 gv1 ; bin: 48 8d 0d PCRel4(%some_gv-4) 00000000
[-,%rcx] v6 = symbol_value.i64 gv1 ; bin: 48 8d 0d PCRel4(%some_gv-4) 00000000
; asm: lea 0x0(%rip), %rsi
[-,%rsi] v7 = globalsym_addr.i64 gv1 ; bin: 48 8d 35 PCRel4(%some_gv-4) 00000000
[-,%rsi] v7 = symbol_value.i64 gv1 ; bin: 48 8d 35 PCRel4(%some_gv-4) 00000000
; asm: lea 0x0(%rip), %r10
[-,%r10] v8 = globalsym_addr.i64 gv1 ; bin: 4c 8d 15 PCRel4(%some_gv-4) 00000000
[-,%r10] v8 = symbol_value.i64 gv1 ; bin: 4c 8d 15 PCRel4(%some_gv-4) 00000000
return
}


@@ -14,7 +14,7 @@ function %I64() {
fn0 = %foo()
fn1 = colocated %bar()
gv0 = globalsym %some_gv
gv0 = symbol %some_gv
; Use incoming_arg stack slots because they won't be relocated by the frame
; layout.
@@ -518,11 +518,11 @@ ebb0:
call_indirect sig0, v412() ; bin: stk_ovf 41 ff d2
; asm: movabsq $-1, %rcx
[-,%rcx] v450 = globalsym_addr.i64 gv0 ; bin: 48 b9 Abs8(%some_gv) 0000000000000000
[-,%rcx] v450 = symbol_value.i64 gv0 ; bin: 48 b9 Abs8(%some_gv) 0000000000000000
; asm: movabsq $-1, %rsi
[-,%rsi] v451 = globalsym_addr.i64 gv0 ; bin: 48 be Abs8(%some_gv) 0000000000000000
[-,%rsi] v451 = symbol_value.i64 gv0 ; bin: 48 be Abs8(%some_gv) 0000000000000000
; asm: movabsq $-1, %r10
[-,%r10] v452 = globalsym_addr.i64 gv0 ; bin: 49 ba Abs8(%some_gv) 0000000000000000
[-,%r10] v452 = symbol_value.i64 gv0 ; bin: 49 ba Abs8(%some_gv) 0000000000000000
; Spill / Fill.


@@ -4,18 +4,18 @@ target x86_64
; Test legalization for various forms of heap addresses.
function %heap_addrs(i32, i64, i64 vmctx) {
gv0 = vmctx+64
gv1 = vmctx+72
gv2 = vmctx+80
gv3 = vmctx+88
gv4 = deref(gv3): i32
gv4 = vmctx
gv0 = iadd_imm.i64 gv4, 64
gv1 = iadd_imm.i64 gv4, 72
gv2 = iadd_imm.i64 gv4, 80
gv3 = load.i32 notrap aligned gv4+88
heap0 = static gv0, min 0x1_0000, bound 0x1_0000_0000, guard 0x8000_0000, index_type i32
heap1 = static gv0, guard 0x1000, bound 0x1_0000, index_type i32
heap2 = static gv0, min 0x1_0000, bound 0x1_0000_0000, guard 0x8000_0000, index_type i64
heap3 = static gv0, guard 0x1000, bound 0x1_0000, index_type i64
heap4 = dynamic gv1, min 0x1_0000, bound gv4, guard 0x8000_0000, index_type i32
heap5 = dynamic gv1, bound gv4, guard 0x1000, index_type i32
heap4 = dynamic gv1, min 0x1_0000, bound gv3, guard 0x8000_0000, index_type i32
heap5 = dynamic gv1, bound gv3, guard 0x1000, index_type i32
heap6 = dynamic gv1, min 0x1_0000, bound gv2, guard 0x8000_0000, index_type i64
heap7 = dynamic gv1, bound gv2, guard 0x1000, index_type i64
@@ -23,8 +23,8 @@ function %heap_addrs(i32, i64, i64 vmctx) {
; check: heap1 = static gv0, min 0, bound 0x0001_0000, guard 4096, index_type i32
; check: heap2 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000, index_type i64
; check: heap3 = static gv0, min 0, bound 0x0001_0000, guard 4096, index_type i64
; check: heap4 = dynamic gv1, min 0x0001_0000, bound gv4, guard 0x8000_0000, index_type i32
; check: heap5 = dynamic gv1, min 0, bound gv4, guard 4096, index_type i32
; check: heap4 = dynamic gv1, min 0x0001_0000, bound gv3, guard 0x8000_0000, index_type i32
; check: heap5 = dynamic gv1, min 0, bound gv3, guard 4096, index_type i32
; check: heap6 = dynamic gv1, min 0x0001_0000, bound gv2, guard 0x8000_0000, index_type i64
; check: heap7 = dynamic gv1, min 0, bound gv2, guard 4096, index_type i64
@@ -62,9 +62,7 @@ ebb0(v0: i32, v1: i64, v3: i64):
; check: v7 = iadd v21, v1
v8 = heap_addr.i64 heap4, v0, 0
; check: v27 = iadd_imm.i64 v3, 88
; check: v28 = load.i32 notrap aligned v27
; check: v22 = iadd_imm v28, 0
; check: v22 = load.i32 notrap aligned v3+88
; check: v23 = iadd_imm v22, 0
; check: v24 = icmp.i32 ugt v0, v23
; check: brz v24, ebb4
@@ -75,37 +73,35 @@ ebb0(v0: i32, v1: i64, v3: i64):
; check: v8 = iadd v26, v25
v9 = heap_addr.i64 heap5, v0, 0
; check: v34 = iadd_imm.i64 v3, 88
; check: v35 = load.i32 notrap aligned v34
; check: v29 = iadd_imm v35, 0
; check: v30 = iadd_imm v29, 0
; check: v31 = icmp.i32 ugt v0, v30
; check: brz v31, ebb5
; check: v27 = load.i32 notrap aligned v3+88
; check: v28 = iadd_imm v27, 0
; check: v29 = icmp.i32 ugt v0, v28
; check: brz v29, ebb5
; check: trap heap_oob
; check: ebb5:
; check: v32 = uextend.i64 v0
; check: v33 = iadd_imm.i64 v3, 72
; check: v9 = iadd v33, v32
; check: v30 = uextend.i64 v0
; check: v31 = iadd_imm.i64 v3, 72
; check: v9 = iadd v31, v30
v10 = heap_addr.i64 heap6, v1, 0
; check: v32 = iadd_imm.i64 v3, 80
; check: v33 = iadd_imm v32, 0
; check: v34 = icmp.i64 ugt v1, v33
; check: brz v34, ebb6
; check: trap heap_oob
; check: ebb6:
; check: v35 = iadd_imm.i64 v3, 72
; check: v10 = iadd v35, v1
v11 = heap_addr.i64 heap7, v1, 0
; check: v36 = iadd_imm.i64 v3, 80
; check: v37 = iadd_imm v36, 0
; check: v38 = icmp.i64 ugt v1, v37
; check: brz v38, ebb6
; check: trap heap_oob
; check: ebb6:
; check: v39 = iadd_imm.i64 v3, 72
; check: v10 = iadd v39, v1
v11 = heap_addr.i64 heap7, v1, 0
; check: v40 = iadd_imm.i64 v3, 80
; check: v41 = iadd_imm v40, 0
; check: v42 = icmp.i64 ugt v1, v41
; check: brz v42, ebb7
; check: brz v38, ebb7
; check: trap heap_oob
; check: ebb7:
; check: v43 = iadd_imm.i64 v3, 72
; check: v11 = iadd v43, v1
; check: v39 = iadd_imm.i64 v3, 72
; check: v11 = iadd v39, v1
return
}


@@ -6,7 +6,8 @@ target x86_64
; regex: EBB=ebb\d+
function %vmctx(i64 vmctx) -> i64 {
gv1 = vmctx-16
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, -16
ebb1(v1: i64):
v2 = global_value.i64 gv1
@@ -15,28 +16,28 @@ ebb1(v1: i64):
; check: return v2
}
function %deref(i64 vmctx) -> i64 {
gv1 = vmctx-16
gv2 = deref(gv1)+32: i64
function %load(i64 vmctx) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0-16
gv2 = iadd_imm.i64 gv1, 32
ebb1(v1: i64):
v2 = global_value.i64 gv2
; check: $(a1=$V) = iadd_imm v1, -16
; check: $(p1=$V) = load.i64 notrap aligned $a1
; check: $(p1=$V) = load.i64 notrap aligned v1-16
; check: v2 = iadd_imm $p1, 32
return v2
; check: return v2
}
function %sym() -> i64 {
gv0 = globalsym %something
gv1 = globalsym u123:456
function %symbol() -> i64 {
gv0 = symbol %something
gv1 = symbol u123:456
ebb1:
v0 = global_value.i64 gv0
; check: v0 = globalsym_addr.i64 gv0
; check: v0 = symbol_value.i64 gv0
v1 = global_value.i64 gv1
; check: v1 = globalsym_addr.i64 gv1
; check: v1 = symbol_value.i64 gv1
v2 = bxor v0, v1
return v2
}
@@ -44,8 +45,9 @@ ebb1:
; SpiderMonkey VM-style static 4+2 GB heap.
; This eliminates bounds checks completely for offsets < 2GB.
function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash {
gv0 = vmctx+64
heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 64
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
ebb0(v0: i32, v999: i64):
; check: ebb0(
@@ -64,8 +66,9 @@ ebb0(v0: i32, v999: i64):
}
function %staticheap_static_oob_sm64(i32, i64 vmctx) -> f32 baldrdash {
gv0 = vmctx+64
heap0 = static gv0, min 0x1000, bound 0x1000_0000, guard 0x8000_0000
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 64
heap0 = static gv1, min 0x1000, bound 0x1000_0000, guard 0x8000_0000
ebb0(v0: i32, v999: i64):
; Everything after the obviously OOB access should be eliminated, leaving
@@ -87,8 +90,9 @@ ebb0(v0: i32, v999: i64):
; SpiderMonkey VM-style static 4+2 GB heap.
; Offsets >= 2 GB do require a boundscheck.
function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash {
gv0 = vmctx+64
heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 64
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
ebb0(v0: i32, v999: i64):
; check: ebb0(


@@ -1,27 +0,0 @@
; Test legalization of tables
test legalizer
target x86_64
; regex: V=v\d+
; regex: EBB=ebb\d+
function %test0(i64 vmctx, i64) -> i64 {
gv0 = vmctx+12
gv1 = vmctx+14
table0 = dynamic gv0, min 20, bound gv1, element_size 4, index_type i64
ebb0(v0: i64, v1: i64):
v2 = table_addr.i64 table0, v1, +3
return v2
}
; check: $(bound=$V) = iadd_imm $(input=$V), 14
; nextln: $(cond=$V) = icmp uge $(limit=$V), $bound
; nextln: brz $cond, ebb1
; nextln: trap table_oob
; nextln:
; nextln: ebb1:
; nextln: $(base=$V) = iadd_imm.i64 $(vmctx=$V), 12
; nextln: $(scaled=$V) = ishl_imm.i64 $(index=$V), 2
; nextln: $(elem_addr=$V) = iadd $base, $scaled
; nextln: $(field_addr=$V) = iadd_imm $elem_addr, 3


@@ -4,26 +4,24 @@ target x86_64
; Test legalization for various forms of table addresses.
function %table_addrs(i32, i64, i64 vmctx) {
gv0 = vmctx+72
gv1 = vmctx+80
gv2 = vmctx+88
gv3 = deref(gv2): i32
gv4 = vmctx
gv0 = iadd_imm.i64 gv4, 72
gv1 = iadd_imm.i64 gv4, 80
gv2 = load.i32 notrap aligned gv4+88
table0 = dynamic gv0, min 0x1_0000, bound gv3, element_size 1, index_type i32
table1 = dynamic gv0, bound gv3, element_size 16, index_type i32
table0 = dynamic gv0, min 0x1_0000, bound gv2, element_size 1, index_type i32
table1 = dynamic gv0, bound gv2, element_size 16, index_type i32
table2 = dynamic gv0, min 0x1_0000, bound gv1, element_size 1, index_type i64
table3 = dynamic gv0, bound gv1, element_size 16, index_type i64
; check: table0 = dynamic gv0, min 0x0001_0000, bound gv3, element_size 1, index_type i32
; check: table1 = dynamic gv0, min 0, bound gv3, element_size 16, index_type i32
; check: table0 = dynamic gv0, min 0x0001_0000, bound gv2, element_size 1, index_type i32
; check: table1 = dynamic gv0, min 0, bound gv2, element_size 16, index_type i32
; check: table2 = dynamic gv0, min 0x0001_0000, bound gv1, element_size 1, index_type i64
; check: table3 = dynamic gv0, min 0, bound gv1, element_size 16, index_type i64
ebb0(v0: i32, v1: i64, v3: i64):
v4 = table_addr.i64 table0, v0, +0
; check: v12 = iadd_imm v3, 88
; check: v13 = load.i32 notrap aligned v12
; check: v8 = iadd_imm v13, 0
; check: v8 = load.i32 notrap aligned v3+88
; check: v9 = icmp uge v0, v8
; check: brz v9, ebb1
; check: trap table_oob
@@ -33,36 +31,34 @@ ebb0(v0: i32, v1: i64, v3: i64):
; check: v4 = iadd v11, v10
v5 = table_addr.i64 table1, v0, +0
; check: v19 = iadd_imm.i64 v3, 88
; check: v20 = load.i32 notrap aligned v19
; check: v14 = iadd_imm v20, 0
; check: v15 = icmp.i32 uge v0, v14
; check: brz v15, ebb2
; check: v12 = load.i32 notrap aligned v3+88
; check: v13 = icmp.i32 uge v0, v12
; check: brz v13, ebb2
; check: trap table_oob
; check: ebb2:
; check: v16 = uextend.i64 v0
; check: v17 = iadd_imm.i64 v3, 72
; check: v18 = ishl_imm v16, 4
; check: v5 = iadd v17, v18
; check: v14 = uextend.i64 v0
; check: v15 = iadd_imm.i64 v3, 72
; check: v16 = ishl_imm v14, 4
; check: v5 = iadd v15, v16
v6 = table_addr.i64 table2, v1, +0
; check: v21 = iadd_imm.i64 v3, 80
; check: v22 = icmp.i64 uge v1, v21
; check: brz v22, ebb3
; check: v17 = iadd_imm.i64 v3, 80
; check: v18 = icmp.i64 uge v1, v17
; check: brz v18, ebb3
; check: trap table_oob
; check: ebb3:
; check: v23 = iadd_imm.i64 v3, 72
; check: v6 = iadd v23, v1
; check: v19 = iadd_imm.i64 v3, 72
; check: v6 = iadd v19, v1
v7 = table_addr.i64 table3, v1, +0
; check: v24 = iadd_imm.i64 v3, 80
; check: v25 = icmp.i64 uge v1, v24
; check: brz v25, ebb4
; check: v20 = iadd_imm.i64 v3, 80
; check: v21 = icmp.i64 uge v1, v20
; check: brz v21, ebb4
; check: trap table_oob
; check: ebb4:
; check: v26 = iadd_imm.i64 v3, 72
; check: v27 = ishl_imm.i64 v1, 4
; check: v7 = iadd v26, v27
; check: v22 = iadd_imm.i64 v3, 72
; check: v23 = ishl_imm.i64 v1, 4
; check: v7 = iadd v22, v23
return
}


@@ -2,23 +2,21 @@ test cat
test verifier
function %vmglobal(i64 vmctx) -> i32 {
gv3 = vmctx+16
; check: gv3 = vmctx+16
gv4 = vmctx+0
; check: gv4 = vmctx
; not: +0
gv5 = vmctx -256
; check: gv5 = vmctx-256
gv3 = vmctx
; check: gv3 = vmctx
ebb0(v0: i64):
v1 = global_value.i32 gv3
; check: v1 = global_value.i32 gv3
return v1
}
function %deref(i64 vmctx) -> i32 {
gv3 = vmctx+16
gv4 = deref(gv3)-32: i32
; check: gv4 = deref(gv3)-32
function %load_and_add_imm(i64 vmctx) -> i32 {
gv2 = vmctx
gv3 = load.i32 notrap aligned gv2-72
gv4 = iadd_imm.i32 gv3, -32
; check: gv2 = vmctx
; check: gv3 = load.i32 notrap aligned gv2-72
; check: gv4 = iadd_imm.i32 gv3, -32
ebb0(v0: i64):
v1 = global_value.i32 gv4
; check: v1 = global_value.i32 gv4
@@ -27,20 +25,22 @@ ebb0(v0: i64):
; Refer to a global value before it's been declared.
function %backref(i64 vmctx) -> i32 {
gv1 = deref(gv2)-32: i32
; check: gv1 = deref(gv2)-32
gv2 = vmctx+16
; check: gv2 = vmctx+16
gv0 = iadd_imm.i32 gv1, -32
; check: gv0 = iadd_imm.i32 gv1, -32
gv1 = load.i32 notrap aligned gv2
; check: gv1 = load.i32 notrap aligned gv2
gv2 = vmctx
; check: gv2 = vmctx
ebb0(v0: i64):
v1 = global_value.i32 gv1
return v1
}
function %sym() -> i32 {
gv0 = globalsym %something
; check: gv0 = globalsym %something
gv1 = globalsym u8:9
; check: gv1 = globalsym u8:9
function %symbol() -> i32 {
gv0 = symbol %something
; check: gv0 = symbol %something
gv1 = symbol u8:9
; check: gv1 = symbol u8:9
ebb0:
v0 = global_value.i32 gv0
; check: v0 = global_value.i32 gv0
@@ -54,7 +54,8 @@ ebb0:
function %sheap(i32, i64 vmctx) -> i64 {
heap1 = static gv5, min 0x1_0000, bound 0x1_0000_0000, guard 0x8000_0000
heap2 = static gv5, guard 0x1000, bound 0x1_0000
gv5 = vmctx+64
gv4 = vmctx
gv5 = iadd_imm.i64 gv4, 64
; check: heap1 = static gv5, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
; check: heap2 = static gv5, min 0, bound 0x0001_0000, guard 4096
@@ -68,8 +69,9 @@ ebb0(v1: i32, v2: i64):
function %dheap(i32, i64 vmctx) -> i64 {
heap1 = dynamic gv5, min 0x1_0000, bound gv6, guard 0x8000_0000
heap2 = dynamic gv5, bound gv6, guard 0x1000
gv5 = vmctx+64
gv6 = vmctx+72
gv4 = vmctx
gv5 = iadd_imm.i64 gv4, 64
gv6 = iadd_imm.i64 gv4, 72
; check: heap1 = dynamic gv5, min 0x0001_0000, bound gv6, guard 0x8000_0000
; check: heap2 = dynamic gv5, min 0, bound gv6, guard 4096


@@ -5,7 +5,8 @@ target x86_64 haswell
;
; The coalescer creates a virtual register with two interfering values.
function %pr207(i64 vmctx, i32, i32) -> i32 system_v {
gv0 = vmctx-8
gv1 = vmctx
gv0 = iadd_imm.i64 gv1, -8
heap0 = static gv0, min 0, bound 0x5000, guard 0x0040_0000
sig0 = (i64 vmctx, i32, i32) -> i32 system_v
sig1 = (i64 vmctx, i32, i32, i32) -> i32 system_v


@@ -104,7 +104,8 @@ ebb1(v31: i64):
}
function u0:26(i64 vmctx [%r14]) -> i64 [%rax] baldrdash {
gv0 = vmctx+48
gv1 = vmctx
gv0 = iadd_imm.i64 gv1, 48
sig0 = (i32 [%rdi], i64 [%rsi], i64 vmctx [%r14], i64 sigid [%rbx]) -> i64 [%rax] baldrdash
ebb0(v0: i64):


@@ -11,7 +11,8 @@ target x86_64 haswell
; The problem was the reload pass rewriting EBB arguments on "brnz v9, ebb3(v9)"
function %pr208(i64 vmctx [%rdi]) system_v {
gv0 = vmctx-8
gv1 = vmctx
gv0 = iadd_imm.i64 gv1, -8
heap0 = static gv0, min 0, bound 0x5000, guard 0x0040_0000
sig0 = (i64 vmctx [%rdi]) -> i32 [%rax] system_v
sig1 = (i64 vmctx [%rdi], i32 [%rsi]) system_v


@@ -1,17 +1,17 @@
test verifier
target x86_64
function %deref_base_type(i64 vmctx) {
gv0 = vmctx+0
gv1 = deref(gv0): i32
gv2 = deref(gv1): i32 ; error: deref base gv1 has type i32, which is not the pointer type i64
function %load_base_type(i64 vmctx) {
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0
gv2 = load.i32 notrap aligned gv1 ; error: base gv1 has type i32, which is not the pointer type i64
ebb0(v0: i64):
return
}
function %global_value_wrong_type(i64 vmctx) {
gv0 = vmctx+0
gv0 = vmctx
ebb0(v0: i64):
v1 = global_value.i32 gv0 ; error: global_value instruction with type i32 references global value with type i64


@@ -2,8 +2,8 @@ test verifier
target x86_64
function %heap_base_type(i64 vmctx) {
gv0 = vmctx+0
gv1 = deref(gv0): i32
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0
heap0 = static gv1, guard 0x1000, bound 0x1_0000, index_type i32 ; error: heap base has type i32, which is not the pointer type i64
ebb0(v0: i64):
@@ -11,7 +11,7 @@ ebb0(v0: i64):
}
function %invalid_base(i64 vmctx) {
gv0 = vmctx+0
gv0 = vmctx
heap0 = dynamic gv1, bound gv0, guard 0x1000, index_type i64 ; error: invalid base global value gv1
ebb0(v0: i64):
@@ -19,7 +19,7 @@ ebb0(v0: i64):
}
function %invalid_bound(i64 vmctx) {
gv0 = vmctx+0
gv0 = vmctx
heap0 = dynamic gv0, bound gv1, guard 0x1000, index_type i64 ; error: invalid bound global value gv1
ebb0(v0: i64):
@@ -27,8 +27,8 @@ ebb0(v0: i64):
}
function %heap_bound_type(i64 vmctx) {
gv0 = vmctx+0
gv1 = deref(gv0): i16
gv0 = vmctx
gv1 = load.i16 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, guard 0x1000, index_type i32 ; error: heap index type i32 differs from the type of its bound, i16
ebb0(v0: i64):
@@ -36,7 +36,7 @@ ebb0(v0: i64):
}
function %heap_addr_index_type(i64 vmctx, i64) {
gv0 = vmctx+0
gv0 = vmctx
heap0 = static gv0, guard 0x1000, bound 0x1_0000, index_type i32
ebb0(v0: i64, v1: i64):


@@ -1,15 +1,15 @@
test verifier
function %deref_cycle() {
gv1 = deref(gv2)-32: i32 ; error: deref cycle: [gv1, gv2]
gv2 = deref(gv1): i32
function %cycle() {
gv0 = load.i32 notrap aligned gv1 ; error: global value cycle: [gv0, gv1]
gv1 = load.i32 notrap aligned gv0-32
ebb1:
return
}
function %self_cycle() {
gv0 = deref(gv0)-32: i32 ; error: deref cycle: [gv0]
gv0 = load.i32 notrap aligned gv0 ; error: global value cycle: [gv0]
ebb1:
return


@@ -2,8 +2,8 @@ test verifier
target x86_64
function %table_base_type(i64 vmctx) {
gv0 = vmctx+0
gv1 = deref(gv0): i32
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0
table0 = dynamic gv1, element_size 1, bound gv1, index_type i32 ; error: table base has type i32, which is not the pointer type i64
ebb0(v0: i64):
@@ -11,7 +11,7 @@ ebb0(v0: i64):
}
function %invalid_base(i64 vmctx) {
gv0 = vmctx+0
gv0 = vmctx
table0 = dynamic gv1, bound gv0, element_size 1, index_type i64 ; error: invalid base global value gv1
ebb0(v0: i64):
@@ -19,7 +19,7 @@ ebb0(v0: i64):
}
function %invalid_bound(i64 vmctx) {
gv0 = vmctx+0
gv0 = vmctx
table0 = dynamic gv0, bound gv1, element_size 1, index_type i64 ; error: invalid bound global value gv1
ebb0(v0: i64):
@@ -27,8 +27,8 @@ ebb0(v0: i64):
}
function %table_bound_type(i64 vmctx) {
gv0 = vmctx+0
gv1 = deref(gv0): i16
gv0 = vmctx
gv1 = load.i16 notrap aligned gv0
table0 = dynamic gv0, bound gv1, element_size 1, index_type i32 ; error: table index type i32 differs from the type of its bound, i16
ebb0(v0: i64):
@@ -36,8 +36,8 @@ ebb0(v0: i64):
}
function %table_addr_index_type(i64 vmctx, i64) {
gv0 = vmctx+0
gv1 = deref(gv0): i32
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0
table0 = dynamic gv0, element_size 1, bound gv1, index_type i32
ebb0(v0: i64, v1: i64):


@@ -2,7 +2,7 @@ test verifier
; Using a vmctx global value without declaring it first leads to an error.
function %vmglobal_err(i64) -> i64 {
gv4 = vmctx+0 ; error: undeclared vmctx reference
gv4 = vmctx ; error: undeclared vmctx reference
ebb0(v0: i64):
v1 = global_value.i64 gv4
return v1
@@ -10,7 +10,7 @@ ebb0(v0: i64):
; If it is declared, all is fine.
function %vmglobal_ok(i64 vmctx) -> i64 {
gv4 = vmctx+0
gv4 = vmctx
ebb0(v0: i64):
v1 = global_value.i64 gv4
return v1


@@ -506,9 +506,9 @@ global_value = Instruction(
# A specialized form of global_value instructions that only handles
# symbolic names.
globalsym_addr = Instruction(
'globalsym_addr', r"""
Compute the address of global GV, which is a symbolic name.
symbol_value = Instruction(
'symbol_value', r"""
Compute the value of global GV, which is a symbolic address.
""",
ins=GV, outs=addr)


@@ -428,18 +428,18 @@ X86_64.enc(base.func_addr.i64, *r.got_fnaddr8.rex(0x8b, w=1),
#
# Non-PIC
X86_32.enc(base.globalsym_addr.i32, *r.gvaddr4(0xb8),
X86_32.enc(base.symbol_value.i32, *r.gvaddr4(0xb8),
isap=Not(is_pic))
X86_64.enc(base.globalsym_addr.i64, *r.gvaddr8.rex(0xb8, w=1),
X86_64.enc(base.symbol_value.i64, *r.gvaddr8.rex(0xb8, w=1),
isap=Not(is_pic))
# PIC, colocated
X86_64.enc(base.globalsym_addr.i64, *r.pcrel_gvaddr8.rex(0x8d, w=1),
X86_64.enc(base.symbol_value.i64, *r.pcrel_gvaddr8.rex(0x8d, w=1),
isap=is_pic,
instp=IsColocatedData())
# PIC, non-colocated
X86_64.enc(base.globalsym_addr.i64, *r.got_gvaddr8.rex(0x8b, w=1),
X86_64.enc(base.symbol_value.i64, *r.got_gvaddr8.rex(0x8b, w=1),
isap=is_pic)
#


@@ -1,6 +1,6 @@
//! Global values.
use ir::immediates::Offset32;
use ir::immediates::{Imm64, Offset32};
use ir::{ExternalName, GlobalValue, Type};
use isa::TargetIsa;
use std::fmt;
@@ -8,35 +8,47 @@ use std::fmt;
/// Information about a global value declaration.
#[derive(Clone)]
pub enum GlobalValueData {
/// Value is the address of a field in the VM context struct, a constant offset from the VM
/// context pointer.
VMContext {
/// Offset from the `vmctx` pointer.
offset: Offset32,
},
/// Value is the address of the VM context struct.
VMContext,
/// Value is pointed to by another global value.
///
/// The `base` global value is assumed to contain a pointer. This global value is computed
/// by loading from memory at that pointer value, and then adding an offset. The memory must
/// be accessible, and naturally aligned to hold a value of the type.
Deref {
/// by loading from memory at that pointer value. The memory must be accessible, and
/// naturally aligned to hold a value of the type.
Load {
/// The base pointer global value.
base: GlobalValue,
/// Byte offset to be added to the loaded value.
/// Offset added to the base pointer before doing the load.
offset: Offset32,
/// Type of the loaded value.
memory_type: Type,
global_type: Type,
},
/// Value is identified by a symbolic name. Cranelift itself does not interpret this name;
/// Value is an offset from another global value.
IAddImm {
/// The base pointer global value.
base: GlobalValue,
/// Byte offset to be added to the value.
offset: Imm64,
/// Type of the iadd.
global_type: Type,
},
/// Value is a symbolic address. Cranelift itself does not interpret this name;
/// it's used by embedders to link with other data structures.
Sym {
Symbol {
/// The symbolic name.
name: ExternalName,
/// Offset from the symbol. This can be used instead of IAddImm to represent folding an
/// offset into a symbol.
offset: Imm64,
/// Will this symbol be defined nearby, such that it will always be a certain distance
/// away, after linking? If so, references to it can avoid going through a GOT. Note that
/// symbols meant to be preemptible cannot be colocated.
@@ -45,10 +57,10 @@ pub enum GlobalValueData {
}
impl GlobalValueData {
/// Assume that `self` is an `GlobalValueData::Sym` and return its name.
/// Assume that `self` is a `GlobalValueData::Symbol` and return its name.
pub fn symbol_name(&self) -> &ExternalName {
match *self {
GlobalValueData::Sym { ref name, .. } => name,
GlobalValueData::Symbol { ref name, .. } => name,
_ => panic!("only symbols have names"),
}
}
@@ -56,8 +68,11 @@ impl GlobalValueData {
/// Return the type of this global.
pub fn global_type(&self, isa: &TargetIsa) -> Type {
match *self {
GlobalValueData::VMContext { .. } | GlobalValueData::Sym { .. } => isa.pointer_type(),
GlobalValueData::Deref { memory_type, .. } => memory_type,
GlobalValueData::VMContext { .. } | GlobalValueData::Symbol { .. } => {
isa.pointer_type()
}
GlobalValueData::IAddImm { global_type, .. }
| GlobalValueData::Load { global_type, .. } => global_type,
}
}
}
@@ -65,20 +80,34 @@ impl GlobalValueData {
impl fmt::Display for GlobalValueData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
GlobalValueData::VMContext { offset } => write!(f, "vmctx{}", offset),
GlobalValueData::Deref {
GlobalValueData::VMContext => write!(f, "vmctx"),
GlobalValueData::Load {
base,
offset,
memory_type,
} => write!(f, "deref({}){}: {}", base, offset, memory_type),
GlobalValueData::Sym {
global_type,
} => write!(f, "load.{} notrap aligned {}{}", global_type, base, offset),
GlobalValueData::IAddImm {
global_type,
base,
offset,
} => write!(f, "iadd_imm.{} {}, {}", global_type, base, offset),
GlobalValueData::Symbol {
ref name,
offset,
colocated,
} => {
if colocated {
write!(f, "colocated ")?;
}
write!(f, "globalsym {}", name)
write!(f, "symbol {}", name)?;
let offset_val: i64 = offset.into();
if offset_val > 0 {
write!(f, "+")?;
}
if offset_val != 0 {
write!(f, "{}", offset)?;
}
Ok(())
}
}
}


@@ -214,6 +214,16 @@ impl Offset32 {
pub fn new(x: i32) -> Self {
Offset32(x)
}
/// Create a new `Offset32` representing the signed number `x` if possible.
pub fn try_from_i64(x: i64) -> Option<Self> {
let casted = x as i32;
if casted as i64 == x {
Some(Self::new(casted))
} else {
None
}
}
}
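A quick illustration of the intended behavior (an editor's sketch, not a test from this commit): the conversion succeeds exactly when the 64-bit value fits in an i32.

#[test]
fn offset32_try_from_i64_range() {
    // In range: representable as i32, so an Offset32 is produced.
    assert!(Offset32::try_from_i64(64).is_some());
    assert!(Offset32::try_from_i64(-16).is_some());
    // Out of range: does not fit in i32, so None is returned.
    assert!(Offset32::try_from_i64(i64::from(i32::max_value()) + 1).is_none());
}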
impl Into<i32> for Offset32 {


@@ -28,54 +28,99 @@ pub fn expand_global_value(
};
match func.global_values[gv] {
ir::GlobalValueData::VMContext { offset } => vmctx_addr(inst, func, offset.into()),
ir::GlobalValueData::Deref {
ir::GlobalValueData::VMContext => vmctx_addr(inst, func),
ir::GlobalValueData::IAddImm {
base,
offset,
memory_type,
} => deref_addr(inst, func, base, offset.into(), memory_type, isa),
ir::GlobalValueData::Sym { .. } => globalsym(inst, func, gv, isa),
global_type,
} => iadd_imm_addr(inst, func, base, offset.into(), global_type),
ir::GlobalValueData::Load {
base,
offset,
global_type,
} => load_addr(inst, func, base, offset, global_type, isa),
ir::GlobalValueData::Symbol { .. } => symbol(inst, func, gv, isa),
}
}
/// Expand a `global_value` instruction for a vmctx global.
fn vmctx_addr(inst: ir::Inst, func: &mut ir::Function, offset: i64) {
fn vmctx_addr(inst: ir::Inst, func: &mut ir::Function) {
// Get the value representing the `vmctx` argument.
let vmctx = func
.special_param(ir::ArgumentPurpose::VMContext)
.expect("Missing vmctx parameter");
// Simply replace the `global_value` instruction with an `iadd_imm`, reusing the result value.
func.dfg.replace(inst).iadd_imm(vmctx, offset);
// Replace the `global_value` instruction's value with an alias to the vmctx arg.
let result = func.dfg.first_result(inst);
func.dfg.clear_results(inst);
func.dfg.change_to_alias(result, vmctx);
func.layout.remove_inst(inst);
}
/// Expand a `global_value` instruction for a deref global.
fn deref_addr(
/// Expand a `global_value` instruction for an iadd_imm global.
fn iadd_imm_addr(
inst: ir::Inst,
func: &mut ir::Function,
base: ir::GlobalValue,
offset: i64,
memory_type: ir::Type,
global_type: ir::Type,
) {
let mut pos = FuncCursor::new(func).at_inst(inst);
// Get the value for the lhs. For tidiness, expand VMContext here so that we avoid
// `vmctx_addr` which creates an otherwise unneeded value alias.
let lhs = if let ir::GlobalValueData::VMContext = pos.func.global_values[base] {
pos.func
.special_param(ir::ArgumentPurpose::VMContext)
.expect("Missing vmctx parameter")
} else {
pos.ins().global_value(global_type, base)
};
// Simply replace the `global_value` instruction with an `iadd_imm`, reusing the result value.
pos.func.dfg.replace(inst).iadd_imm(lhs, offset);
}
/// Expand a `global_value` instruction for a load global.
fn load_addr(
inst: ir::Inst,
func: &mut ir::Function,
base: ir::GlobalValue,
offset: ir::immediates::Offset32,
global_type: ir::Type,
isa: &TargetIsa,
) {
// We need to load a pointer from the `base` global value, so insert a new `global_value`
// instruction. This depends on the iterative legalization loop. Note that the IR verifier
// detects any cycles in the `deref` globals.
// detects any cycles in the `load` globals.
let ptr_ty = isa.pointer_type();
let mut pos = FuncCursor::new(func).at_inst(inst);
pos.use_srcloc(inst);
let base_addr = pos.ins().global_value(ptr_ty, base);
// Get the value for the base. For tidiness, expand VMContext here so that we avoid
// `vmctx_addr` which creates an otherwise unneeded value alias.
let base_addr = if let ir::GlobalValueData::VMContext = pos.func.global_values[base] {
pos.func
.special_param(ir::ArgumentPurpose::VMContext)
.expect("Missing vmctx parameter")
} else {
pos.ins().global_value(ptr_ty, base)
};
// Global-value loads are always notrap and aligned.
let mut mflags = ir::MemFlags::new();
// Deref globals are required to be accessible and aligned.
mflags.set_notrap();
mflags.set_aligned();
let loaded = pos.ins().load(memory_type, mflags, base_addr, 0);
pos.func.dfg.replace(inst).iadd_imm(loaded, offset);
// Perform the load.
pos.func
.dfg
.replace(inst)
.load(global_type, mflags, base_addr, offset);
}
/// Expand a `global_value` instruction for a symbolic name global.
fn globalsym(inst: ir::Inst, func: &mut ir::Function, gv: ir::GlobalValue, isa: &TargetIsa) {
fn symbol(inst: ir::Inst, func: &mut ir::Function, gv: ir::GlobalValue, isa: &TargetIsa) {
let ptr_ty = isa.pointer_type();
func.dfg.replace(inst).globalsym_addr(ptr_ty, gv);
func.dfg.replace(inst).symbol_value(ptr_ty, gv);
}


@@ -55,7 +55,7 @@ pub fn is_colocated_func(func_ref: ir::FuncRef, func: &ir::Function) -> bool {
#[allow(dead_code)]
pub fn is_colocated_data(global_value: ir::GlobalValue, func: &ir::Function) -> bool {
match func.global_values[global_value] {
ir::GlobalValueData::Sym { colocated, .. } => colocated,
ir::GlobalValueData::Symbol { colocated, .. } => colocated,
_ => panic!("is_colocated_data only makes sense for data with symbolic addresses"),
}
}


@@ -43,7 +43,7 @@
//!
//! Global values
//!
//! - Detect cycles in deref(base) declarations.
//! - Detect cycles in global values.
//! - Detect use of 'vmctx' global value when no corresponding parameter is defined.
//!
//! TODO:
@@ -336,17 +336,29 @@ impl<'a> Verifier<'a> {
seen.insert(gv);
let mut cur = gv;
while let ir::GlobalValueData::Deref { base, .. } = self.func.global_values[cur] {
loop {
match self.func.global_values[cur] {
ir::GlobalValueData::Load { base, .. }
| ir::GlobalValueData::IAddImm { base, .. } => {
if seen.insert(base).is_some() {
if !cycle_seen {
report!(errors, gv, "deref cycle: {}", DisplayList(seen.as_slice()));
cycle_seen = true; // ensures we don't report the cycle multiple times
report!(
errors,
gv,
"global value cycle: {}",
DisplayList(seen.as_slice())
);
// ensures we don't report the cycle multiple times
cycle_seen = true;
}
continue 'gvs;
}
cur = base;
}
_ => break,
}
}
match self.func.global_values[gv] {
ir::GlobalValueData::VMContext { .. } => {
@@ -358,7 +370,30 @@ impl<'a> Verifier<'a> {
report!(errors, gv, "undeclared vmctx reference {}", gv);
}
}
ir::GlobalValueData::Deref { base, .. } => {
ir::GlobalValueData::IAddImm {
base, global_type, ..
} => {
if !global_type.is_int() {
report!(
errors,
gv,
"iadd_imm global value with non-int type {}",
global_type
);
} else if let Some(isa) = self.isa {
let base_type = self.func.global_values[base].global_type(isa);
if global_type != base_type {
report!(
errors,
gv,
"iadd_imm type {} differs from operand type {}",
global_type,
base_type
);
}
}
}
ir::GlobalValueData::Load { base, .. } => {
if let Some(isa) = self.isa {
let base_type = self.func.global_values[base].global_type(isa);
let pointer_type = isa.pointer_type();
@@ -366,7 +401,7 @@ impl<'a> Verifier<'a> {
report!(
errors,
gv,
"deref base {} has type {}, which is not the pointer type {}",
"base {} has type {}, which is not the pointer type {}",
base,
base_type,
pointer_type


@@ -462,8 +462,9 @@ where
pub fn declare_data_in_func(&self, data: DataId, func: &mut ir::Function) -> ir::GlobalValue {
let decl = &self.contents.data_objects[data].decl;
let colocated = decl.linkage.is_final();
func.create_global_value(ir::GlobalValueData::Sym {
func.create_global_value(ir::GlobalValueData::Symbol {
name: ir::ExternalName::user(1, data.index() as u32),
offset: ir::immediates::Imm64::new(0),
colocated,
})
}


@@ -164,8 +164,9 @@ impl<'a> Context<'a> {
fn add_gv(&mut self, gv: GlobalValue, data: GlobalValueData, loc: Location) -> ParseResult<()> {
self.map.def_gv(gv, loc)?;
while self.function.global_values.next_key().index() <= gv.index() {
self.function.create_global_value(GlobalValueData::Sym {
self.function.create_global_value(GlobalValueData::Symbol {
name: ExternalName::testcase(""),
offset: Imm64::new(0),
colocated: false,
});
}
@@ -590,14 +591,32 @@ impl<'a> Parser<'a> {
// present, it must contain a sign.
fn optional_offset32(&mut self) -> ParseResult<Offset32> {
if let Some(Token::Integer(text)) = self.token() {
if text.starts_with('+') || text.starts_with('-') {
self.consume();
// Lexer just gives us raw text that looks like an integer.
// Parse it as an `Offset32` to check for overflow and other issues.
text.parse().map_err(|e| self.error(e))
} else {
return text.parse().map_err(|e| self.error(e));
}
}
// An offset32 operand can be absent.
Ok(Offset32::new(0))
}
// Match and consume an optional offset32 immediate.
//
// Note that this will match an empty string as an empty offset, and that if an offset is
// present, it must contain a sign.
fn optional_offset_imm64(&mut self) -> ParseResult<Imm64> {
if let Some(Token::Integer(text)) = self.token() {
if text.starts_with('+') || text.starts_with('-') {
self.consume();
// Lexer just gives us raw text that looks like an integer.
// Parse it as an `Imm64` to check for overflow and other issues.
return text.parse().map_err(|e| self.error(e));
}
}
// If no explicit offset is present, the offset is 0.
Ok(Imm64::new(0))
}
// Match and consume an Ieee32 immediate.
@@ -1185,9 +1204,10 @@ impl<'a> Parser<'a> {
// Parse a global value decl.
//
// global-val-decl ::= * GlobalValue(gv) "=" global-val-desc
// global-val-desc ::= "vmctx" offset32
// | "deref" "(" GlobalValue(base) ")" offset32
// | globalsym ["colocated"] name
// global-val-desc ::= "vmctx"
// | "load" "." type "notrap" "aligned" GlobalValue(base) [offset]
// | "iadd_imm" "(" GlobalValue(base) ")" imm64
// | "symbol" ["colocated"] name + imm64
//
fn parse_global_value_decl(&mut self) -> ParseResult<(GlobalValue, GlobalValueData)> {
let gv = self.match_gv("expected global value number: gv«n»")?;
@@ -1195,27 +1215,55 @@ impl<'a> Parser<'a> {
self.match_token(Token::Equal, "expected '=' in global value declaration")?;
let data = match self.match_any_identifier("expected global value kind")? {
"vmctx" => {
let offset = self.optional_offset32()?;
GlobalValueData::VMContext { offset }
}
"deref" => {
self.match_token(Token::LPar, "expected '(' in 'deref' global value decl")?;
"vmctx" => GlobalValueData::VMContext,
"load" => {
self.match_token(
Token::Dot,
"expected '.' followed by type in load global value decl",
)?;
let global_type = self.match_type("expected load type")?;
let flags = self.optional_memflags();
let base = self.match_gv("expected global value: gv«n»")?;
self.match_token(Token::RPar, "expected ')' in 'deref' global value decl")?;
let offset = self.optional_offset32()?;
self.match_token(Token::Colon, "expected ':' in 'deref' global value decl")?;
let memory_type = self.match_type("expected deref type")?;
GlobalValueData::Deref {
let mut expected_flags = MemFlags::new();
expected_flags.set_notrap();
expected_flags.set_aligned();
if flags != expected_flags {
return err!(self.loc, "global-value load must be notrap and aligned");
}
GlobalValueData::Load {
base,
offset,
memory_type,
global_type,
}
}
"globalsym" => {
"iadd_imm" => {
self.match_token(
Token::Dot,
"expected '.' followed by type in iadd_imm global value decl",
)?;
let global_type = self.match_type("expected iadd type")?;
let base = self.match_gv("expected global value: gv«n»")?;
self.match_token(
Token::Comma,
"expected ',' followed by rhs in iadd_imm global value decl",
)?;
let offset = self.match_imm64("expected iadd_imm immediate")?;
GlobalValueData::IAddImm {
base,
offset,
global_type,
}
}
"symbol" => {
let colocated = self.optional(Token::Identifier("colocated"));
let name = self.parse_external_name()?;
GlobalValueData::Sym { name, colocated }
let offset = self.optional_offset_imm64()?;
GlobalValueData::Symbol {
name,
offset,
colocated,
}
}
other => return err!(self.loc, "Unknown global value kind '{}'", other),
};
@@ -2680,8 +2728,8 @@ mod tests {
fn duplicate_gv() {
let ParseError { location, message } = Parser::new(
"function %ebbs() system_v {
gv0 = vmctx+64
gv0 = vmctx+64",
gv0 = vmctx
gv0 = vmctx",
).parse_function(None)
.unwrap_err();
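As an illustration of what the reworked grammar accepts (an editor's sketch in the style of the `duplicate_gv` test above, not part of this commit), each of the new global-value forms parses:

#[test]
fn parse_reorganized_global_values() {
    // vmctx, load, iadd_imm, and symbol declarations all go through the parser.
    Parser::new(
        "function %gvs(i64 vmctx) system_v {
            gv0 = vmctx
            gv1 = load.i64 notrap aligned gv0+64
            gv2 = iadd_imm.i64 gv0, 72
            gv3 = symbol colocated %some_gv
        ebb0(v0: i64):
            return
        }",
    ).parse_function(None)
    .unwrap();
}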


@@ -2,7 +2,7 @@
//! wasm translation.
use cranelift_codegen::cursor::FuncCursor;
use cranelift_codegen::ir::immediates::Imm64;
use cranelift_codegen::ir::immediates::{Imm64, Offset32};
use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{self, InstBuilder};
use cranelift_codegen::settings;
@@ -162,21 +162,26 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
fn make_global(&mut self, func: &mut ir::Function, index: GlobalIndex) -> GlobalVariable {
// Just create a dummy `vmctx` global.
let offset = ((index * 8) as i32 + 8).into();
let gv = func.create_global_value(ir::GlobalValueData::VMContext { offset });
let offset = ((index * 8) as i64 + 8).into();
let vmctx = func.create_global_value(ir::GlobalValueData::VMContext {});
let iadd = func.create_global_value(ir::GlobalValueData::IAddImm {
base: vmctx,
offset,
global_type: self.pointer_type(),
});
GlobalVariable::Memory {
gv,
gv: iadd,
ty: self.mod_info.globals[index].entity.ty,
}
}
fn make_heap(&mut self, func: &mut ir::Function, _index: MemoryIndex) -> ir::Heap {
// Create a static heap whose base address is stored at `vmctx+0`.
let addr = func.create_global_value(ir::GlobalValueData::VMContext { offset: 0.into() });
let gv = func.create_global_value(ir::GlobalValueData::Deref {
let addr = func.create_global_value(ir::GlobalValueData::VMContext);
let gv = func.create_global_value(ir::GlobalValueData::Load {
base: addr,
offset: 0.into(),
memory_type: self.pointer_type(),
offset: Offset32::new(0),
global_type: self.pointer_type(),
});
func.create_heap(ir::HeapData {
@@ -192,19 +197,16 @@ impl<'dummy_environment> FuncEnvironment for DummyFuncEnvironment<'dummy_environ
fn make_table(&mut self, func: &mut ir::Function, _index: TableIndex) -> ir::Table {
// Create a table whose base address is stored at `vmctx+0`.
let base_gv_addr =
func.create_global_value(ir::GlobalValueData::VMContext { offset: 0.into() });
let base_gv = func.create_global_value(ir::GlobalValueData::Deref {
base: base_gv_addr,
offset: 0.into(),
memory_type: self.pointer_type(),
let vmctx = func.create_global_value(ir::GlobalValueData::VMContext);
let base_gv = func.create_global_value(ir::GlobalValueData::Load {
base: vmctx,
offset: Offset32::new(0),
global_type: self.pointer_type(),
});
let bound_gv_addr =
func.create_global_value(ir::GlobalValueData::VMContext { offset: 0.into() });
let bound_gv = func.create_global_value(ir::GlobalValueData::Deref {
base: bound_gv_addr,
offset: 0.into(),
memory_type: self.pointer_type(),
let bound_gv = func.create_global_value(ir::GlobalValueData::Load {
base: vmctx,
offset: Offset32::new(0),
global_type: self.pointer_type(),
});
func.create_table(ir::TableData {