Remove heaps from core Cranelift, push them into cranelift-wasm (#5386)

* cranelift-wasm: translate Wasm loads into lower-level CLIF operations

Rather than using `heap_{load,store,addr}`.
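
For example, an `i32.load` against a 4 GiB static memory with a 2 GiB guard
region used to be translated through `heap_addr`:

  v4 = heap_addr.i64 heap0, v0, 0, 4
  v5 = load.i32 little heap v4

and is now emitted directly as the equivalent address arithmetic (a sketch
based on the filetest expectations updated below; value numbers are
illustrative):

  v4 = uextend.i64 v0           ;; zero-extend the 32-bit Wasm address
  v5 = global_value.i64 gv1     ;; heap base loaded from the vmctx
  v6 = iadd v5, v4              ;; native address = base + index
  v7 = load.i32 little heap v6  ;; the actual memory access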

* cranelift: Remove the `heap_{addr,load,store}` instructions

These are now legalized in the `cranelift-wasm` frontend.

* cranelift: Remove the `ir::Heap` entity from CLIF

* Port basic memory operation tests to .wat filetests

* Remove test for verifying CLIF heaps

* Remove `heap_addr` from replace_branching_instructions_and_cfg_predecessors.clif test

* Remove `heap_addr` from readonly.clif test

* Remove `heap_addr` from `table_addr.clif` test

* Remove `heap_addr` from the simd-fvpromote_low.clif test

* Remove `heap_addr` from simd-fvdemote.clif test

* Remove `heap_addr` from the load-op-store.clif test

* Remove the CLIF heap runtest

* Remove `heap_addr` from the global_value.clif test

* Remove `heap_addr` from fpromote.clif runtests

* Remove `heap_addr` from fdemote.clif runtests

* Remove `heap_addr` from memory.clif parser test

* Remove `heap_addr` from reject_load_readonly.clif test

* Remove `heap_addr` from reject_load_notrap.clif test

* Remove `heap_addr` from load_readonly_notrap.clif test

* Remove `static-heap-without-guard-pages.clif` test

Will be subsumed when we port `make-heap-load-store-tests.sh` over to generating
`.wat` tests.

* Remove `static-heap-with-guard-pages.clif` test

Will be subsumed when we port `make-heap-load-store-tests.sh` over to `.wat`
tests.

* Remove more heap tests

These will be subsumed by porting `make-heap-load-store-tests.sh` over to `.wat`
tests.
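
Each generated test will be a small `.wat` module with its expected CLIF
recorded as trailing comments, along the lines of the load/store `.wat` tests
added later in this change (a sketch; directives, offsets, and value numbers
vary per test):

  ;;! target = "x86_64"

  (module
    (memory 1)
    (func (export "i32.load") (param i32) (result i32)
      local.get 0
      i32.load))

  ;; v3 = uextend.i64 v0
  ;; v4 = global_value.i64 gv1
  ;; v5 = iadd v4, v3
  ;; v6 = load.i32 little heap v5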

* Remove `heap_addr` from `simple-alias.clif` test

* Remove `heap_addr` from partial-redundancy.clif test

* Remove `heap_addr` from multiple-blocks.clif test

* Remove `heap_addr` from fence.clif test

* Remove `heap_addr` from extends.clif test

* Remove runtests that rely on heaps

Heaps are not a thing in CLIF or the interpreter anymore.

* Add generated load/store `.wat` tests

* Enable memory-related wasm features in `.wat` tests

* Remove CLIF heap from fcmp-mem-bug.clif test

* Add a mode for compiling `.wat` all the way to assembly in filetests

* Also generate WAT to assembly tests in `make-load-store-tests.sh`

* cargo fmt

* Reinstate `f{de,pro}mote.clif` tests without the heap bits

* Remove undefined doc link

* Remove outdated SVG and dot file from docs

* Add docs about `None` returns for base address computation helpers

* Factor out `env.heap_access_spectre_mitigation()` to a local

* Expand docs for `FuncEnvironment::heaps` trait method

* Restore f{de,pro}mote+load clif runtests with stack memory

Nick Fitzgerald
2022-12-14 16:26:45 -08:00
committed by GitHub
parent e03d65cca7
commit c0b587ac5f
198 changed files with 2494 additions and 4232 deletions

View File

@@ -8,10 +8,9 @@ target aarch64
function %f0(i64 vmctx, i32) -> i32, i32, i32, i64, i64, i64 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 12, 0
v2 = global_value.i64 gv1
;; Initial load. This will not be reused by anything below, even
;; though it does access the same address.

View File

@@ -8,10 +8,9 @@ target aarch64
function %f0(i64 vmctx, i32) -> i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 12, 0
v2 = global_value.i64 gv1
v3 = load.i32 v2+8
v4 = load.i32 vmctx v0+16

View File

@@ -7,11 +7,9 @@ target aarch64
function %f0(i64 vmctx, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 12, 0
v2 = global_value.i64 gv1
v3 = load.i32 v2+8
brz v2, block1
jump block2

View File

@@ -8,7 +8,6 @@ target aarch64
function %f0(i64 vmctx, i32) -> i32, i32 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
fn0 = %g(i64 vmctx)
block0(v0: i64, v1: i32):
@@ -16,17 +15,17 @@ block0(v0: i64, v1: i32):
jump block2
block1:
v2 = heap_addr.i64 heap0, v1, 68, 0
v2 = global_value.i64 gv1
v3 = load.i32 v2+64
jump block3(v3)
block2:
v4 = heap_addr.i64 heap0, v1, 132, 0
v4 = global_value.i64 gv1
v5 = load.i32 v4+128
jump block3(v5)
block3(v6: i32):
v7 = heap_addr.i64 heap0, v1, 68, 0
v7 = global_value.i64 gv1
v8 = load.i32 v7+64
;; load should survive:
; check: v8 = load.i32 v7+64

View File

@@ -9,14 +9,13 @@ target aarch64
function %f0(i64 vmctx, i32) -> i32, i32, i32, i32 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
fn0 = %g(i64 vmctx)
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 12, 0
v2 = global_value.i64 gv1
v3 = load.i32 v2+8
;; This should reuse the load above.
v4 = heap_addr.i64 heap0, v1, 12, 0
v4 = global_value.i64 gv1
v5 = load.i32 v4+8
; check: v5 -> v3
@@ -38,15 +37,14 @@ block0(v0: i64, v1: i32):
function %f1(i64 vmctx, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap readonly aligned gv0+8
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
fn0 = %g(i64 vmctx)
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 12, 0
v2 = global_value.i64 gv1
store.i32 v1, v2+8
;; This load should pick up the store above.
v3 = heap_addr.i64 heap0, v1, 12, 0
v3 = global_value.i64 gv1
v4 = load.i32 v3+8
; check: v4 -> v1

View File

@@ -1,88 +0,0 @@
test compile precise-output
set unwind_info=false
set enable_heap_access_spectre_mitigation=true
target aarch64
function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; mov w8, w1
; ldr x9, [x0]
; mov x9, x9
; add x10, x0, x1, UXTW
; movz x7, #0
; subs xzr, x8, x9
; csel x0, x7, x10, hi
; csdb
; ret
function %static_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; mov w6, w1
; add x7, x0, x1, UXTW
; movz x5, #0
; subs xzr, x6, #65536
; csel x0, x5, x7, hi
; csdb
; ret
function %dynamic_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; mov w10, w1
; movz x9, #24
; adds x11, x10, x9
; b.lo 8 ; udf
; ldr x12, [x0]
; add x13, x0, x1, UXTW
; add x13, x13, #16
; movz x10, #0
; subs xzr, x11, x12
; csel x0, x10, x13, hi
; csdb
; ret
function %static_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; mov w8, w1
; add x9, x0, x1, UXTW
; add x9, x9, #16
; movz x6, #65512
; movz x10, #0
; subs xzr, x8, x6
; csel x0, x10, x9, hi
; csdb
; ret

View File

@@ -1,86 +0,0 @@
test compile precise-output
set unwind_info=false
target riscv64
function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; uext.w a6,a1
; ld a7,0(a0)
; addi t3,a7,0
; add a7,a0,a6
; ugt a5,a6,t3##ty=i64
; li t3,0
; selectif_spectre_guard a0,t3,a7##test=a5
; ret
function %static_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; uext.w a6,a1
; add a5,a0,a6
; lui a3,16
; ugt a6,a6,a3##ty=i64
; li a7,0
; selectif_spectre_guard a0,a7,a5##test=a6
; ret
function %dynamic_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; uext.w t4,a1
; li a7,24
; add t0,t4,a7
; ult t1,t0,t4##ty=i64
; trap_if t1,heap_oob
; ld t1,0(a0)
; add t2,a0,t4
; addi t2,t2,16
; ugt t4,t0,t1##ty=i64
; li t1,0
; selectif_spectre_guard a0,t1,t2##test=t4
; ret
function %static_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; uext.w a7,a1
; add t3,a0,a7
; addi t3,t3,16
; lui a5,16
; addi a5,a5,4072
; ugt t4,a7,a5##ty=i64
; li t0,0
; selectif_spectre_guard a0,t0,t3##test=t4
; ret

View File

@@ -1,81 +0,0 @@
test compile precise-output
target s390x
function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; llgfr %r4, %r3
; lghi %r3, 0
; ag %r3, 0(%r2)
; agr %r2, %r4
; lghi %r5, 0
; clgr %r4, %r3
; locgrh %r2, %r5
; br %r14
function %static_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 0
return v2
}
; block0:
; llgfr %r4, %r3
; agr %r2, %r4
; lghi %r3, 0
; clgfi %r4, 65536
; locgrh %r2, %r3
; br %r14
function %dynamic_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; llgfr %r5, %r3
; lghi %r4, 24
; algfr %r4, %r3
; jle 6 ; trap
; lg %r3, 0(%r2)
; agrk %r5, %r2, %r5
; aghik %r2, %r5, 16
; lghi %r5, 0
; clgr %r4, %r3
; locgrh %r2, %r5
; br %r14
function %static_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; block0:
; llgfr %r5, %r3
; agr %r2, %r5
; aghi %r2, 16
; lghi %r4, 0
; clgfi %r5, 65512
; locgrh %r2, %r4
; br %r14

View File

@@ -7,7 +7,6 @@ function u0:11335(i64 vmctx, i64, i32, i32, i32, i32, i32, i32, i32, i32) fast {
gv2 = load.i64 notrap aligned gv1
gv3 = vmctx
gv4 = load.i64 notrap aligned readonly gv3+504
heap0 = static gv4, min 0, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
sig0 = (i64 vmctx, i64, i32, i32, i32) -> i32 fast
sig1 = (i64 vmctx, i64, i32, i32, i32) -> i32 fast
sig2 = (i64 vmctx, i64, i32, i32, i32, i32, i32, i32, i32, i32) fast

View File

@@ -1,86 +0,0 @@
test compile precise-output
set enable_heap_access_spectre_mitigation=false
target x86_64
;; Calculate a heap address on a dynamically-allocated memory with Spectre
;; mitigations disabled. This is a 7-instruction sequence with loads, ignoring
;; intermediate `mov`s.
function %f(i32, i64 vmctx) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i32
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0x8000, 0
return v2
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %edi, %eax
; movq %rax, %r10
; addq %r10, $32768, %r10
; jnb ; ud2 heap_oob ;
; movq 8(%rsi), %r11
; cmpq %r11, %r10
; jbe label1; j label2
; block1:
; addq %rax, 0(%rsi), %rax
; addq %rax, $32768, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
; block2:
; ud2 heap_oob
;; For a static memory with no Spectre mitigations, we observe a smaller amount
;; of bounds checking: the offset check (`cmp + jbe + j`) and the offset
;; calculation (`add`)--4 instructions.
function %f(i64 vmctx, i32) -> i64 system_v {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, bound 0x1000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v10 = heap_addr.i64 heap0, v1, 0, 0
return v10
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %eax
; cmpq $4096, %rax
; jbe label1; j label2
; block1:
; addq %rax, 0(%rdi), %rax
; movq %rbp, %rsp
; popq %rbp
; ret
; block2:
; ud2 heap_oob
;; For a static memory with no Spectre mitigations and the "right" size (4GB
;; memory, 2GB guard regions), Cranelift emits no bounds checking, simply
;; `add`--a single instruction.
function %f(i64 vmctx, i32) -> i64 system_v {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v10 = heap_addr.i64 heap0, v1, 0, 0
return v10
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %eax
; addq %rax, 0(%rdi), %rax
; movq %rbp, %rsp
; popq %rbp
; ret

View File

@@ -1,147 +0,0 @@
test compile precise-output
target x86_64
;; Calculate a heap address on a dynamically-allocated memory. Because the
;; Spectre mitigations are on by default (i.e.,
;; `set enable_heap_access_spectre_mitigation=true`), this code not only does
;; the dynamic bounds check (`add + jnb + cmp + jbe + j`) but also re-compares
;; the address to the upper bound (`add + xor + cmp + cmov`)--Cranelift's
;; Spectre mitigation. With loads and ignoring intermediate `mov`s, this amounts
;; to a 10-instruction sequence.
;;
;; And it uses quite a few registers; see this breakdown of what each register
;; generally contains:
;; - %rax holds the passed-in heap offset (argument #1) and ends up holding the
;; final address
;; - %rcx also holds the passed-in heap offset; checked for overflow when added
;; to the `0x8000` immediate
;; - %rsi holds the VM context pointer (argument #2)
;; - %rdi holds the heap limit (computed from argument #2)
;; - %rdx holds the null pointer
function %f(i32, i64 vmctx) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i32
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0x8000, 0
return v2
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %edi, %eax
; movq %rax, %rdi
; addq %rdi, $32768, %rdi
; jnb ; ud2 heap_oob ;
; movq 8(%rsi), %rcx
; addq %rax, 0(%rsi), %rax
; addq %rax, $32768, %rax
; xorq %rsi, %rsi, %rsi
; cmpq %rcx, %rdi
; cmovnbeq %rsi, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;; The heap address calculation for this statically-allocated memory checks that
;; the passed offset (%r11) is within bounds (`cmp + jbe + j`) and then includes
;; the same Spectre mitigation as above. This results in a 7-instruction
;; sequence (ignoring `mov`s).
function %f(i64 vmctx, i32) -> i64 system_v {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, bound 0x1000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v10 = heap_addr.i64 heap0, v1, 0, 0
return v10
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %r9d
; movq %r9, %rax
; addq %rax, 0(%rdi), %rax
; xorq %r8, %r8, %r8
; cmpq $4096, %r9
; cmovnbeq %r8, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
;; When a static memory is the "right" size (4GB memory, 2GB guard regions), the
;; Spectre mitigation is not present. Cranelift relies on the memory permissions
;; and emits no bounds checking, simply `add`--a single instruction.
function %f(i64 vmctx, i32) -> i64 system_v {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v10 = heap_addr.i64 heap0, v1, 0, 0
return v10
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %eax
; addq %rax, 0(%rdi), %rax
; movq %rbp, %rsp
; popq %rbp
; ret
function %dynamic_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %esi
; movq %rsi, %r11
; addq %r11, $24, %r11
; jnb ; ud2 heap_oob ;
; movq %rdi, %rax
; addq %rax, %rsi, %rax
; addq %rax, $16, %rax
; xorq %rsi, %rsi, %rsi
; cmpq 0(%rdi), %r11
; cmovnbeq %rsi, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret
function %static_heap_check_with_offset(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 16, 8
return v2
}
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %r10d
; movq %rdi, %rax
; addq %rax, %r10, %rax
; addq %rax, $16, %rax
; xorq %r9, %r9, %r9
; cmpq $65512, %r10
; cmovnbeq %r9, %rax, %rax
; movq %rbp, %rsp
; popq %rbp
; ret

View File

@@ -1,33 +0,0 @@
test legalizer
set enable_heap_access_spectre_mitigation=true
target aarch64
target x86_64
;; Test that when both (1) dynamic memories and (2) heap access spectre
;; mitigations are enabled, we deduplicate the bounds check between the two.
function %wasm_load(i64 vmctx, i32) -> i32 wasmtime_system_v {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+88
gv2 = load.i64 notrap aligned gv0+80
heap0 = dynamic gv2, min 0, bound gv1, offset_guard 0x8000_0000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 4
v3 = load.i32 little heap v2
return v3
}
; check: block0(v0: i64, v1: i32):
; nextln: v4 = uextend.i64 v1
; nextln: v5 = iconst.i64 4
; nextln: v6 = uadd_overflow_trap v4, v5, heap_oob ; v5 = 4
; nextln: v7 = load.i64 notrap aligned v0+88
; nextln: v8 = load.i64 notrap aligned v0+80
; nextln: v9 = iadd v8, v4
; nextln: v10 = iconst.i64 0
; nextln: v11 = icmp ugt v6, v7
; nextln: v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
; nextln: v2 -> v12
; nextln: v3 = load.i32 little heap v2
; nextln: return v3

View File

@@ -1,22 +0,0 @@
test legalizer
set enable_heap_access_spectre_mitigation=true
target x86_64
;; The offset guard is large enough that we don't need explicit bounds checks.
function %test(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1000, offset_guard 0xffff_ffff, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 4
return v2
}
; check: block0(v0: i64, v1: i32):
; nextln: v3 = uextend.i64 v1
; nextln: v4 = load.i64 notrap aligned v0
; nextln: v5 = iadd v4, v3
; nextln: v2 -> v5
; nextln: return v2

View File

@@ -1,28 +0,0 @@
test legalizer
set enable_heap_access_spectre_mitigation=true
target x86_64
;; The offset guard is not large enough to avoid explicit bounds checks.
;; Additionally, the explicit bounds check gets deduped with the Spectre
;; mitigation.
function %test(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1000, offset_guard 0xffff_0000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0, 4
return v2
}
; check: block0(v0: i64, v1: i32):
; nextln: v3 = uextend.i64 v1
; nextln: v4 = iconst.i64 4092
; nextln: v5 = load.i64 notrap aligned v0
; nextln: v6 = iadd v5, v3
; nextln: v7 = iconst.i64 0
; nextln: v8 = icmp ugt v3, v4 ; v4 = 4092
; nextln: v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
; nextln: v2 -> v9
; nextln: return v2

View File

@@ -9,14 +9,13 @@ target x86_64
function %hoist_load(i32, i64 vmctx) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned readonly gv0
heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i32, v1: i64):
jump block1(v0, v1)
block1(v2: i32, v3: i64):
v4 = iconst.i32 1
v5 = heap_addr.i64 heap0, v4, 0, 4
v5 = global_value.i64 gv1
v6 = load.i32 notrap aligned readonly v5
v7 = iadd v2, v6
brz v2, block3(v2)
@@ -33,11 +32,10 @@ block3(v9: i32):
; sameln: function %hoist_load(i32, i64 vmctx) -> i32 fast {
; nextln: gv0 = vmctx
; nextln: gv1 = load.i64 notrap aligned readonly gv0
; nextln: heap0 = static gv1, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
; nextln:
; nextln: block0(v0: i32, v1: i64):
; nextln: v4 = iconst.i32 1
; nextln: v5 = heap_addr.i64 heap0, v4, 0, 4
; nextln: v5 = global_value.i64 gv1
; nextln: v6 = load.i32 notrap aligned readonly v5
; nextln: jump block1(v0, v1)
; nextln:

View File

@@ -10,11 +10,10 @@ target x86_64
function %hoist_load(i32, i64 vmctx) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned readonly gv0
heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i32, v1: i64):
v4 = iconst.i32 1
v5 = heap_addr.i64 heap0, v4, 0, 4
v5 = global_value.i64 gv1
jump block1(v0, v1)
block1(v2: i32, v3: i64):
@@ -34,11 +33,10 @@ block3(v9: i32):
; sameln: function %hoist_load(i32, i64 vmctx) -> i32 fast {
; nextln: gv0 = vmctx
; nextln: gv1 = load.i64 notrap aligned readonly gv0
; nextln: heap0 = static gv1, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
; nextln:
; nextln: block0(v0: i32, v1: i64):
; nextln: v4 = iconst.i32 1
; nextln: v5 = heap_addr.i64 heap0, v4, 0, 4 ; v4 = 1
; nextln: v5 = global_value.i64 gv1
; nextln: jump block1(v0, v1)
; nextln:
; nextln: block1(v2: i32, v3: i64):

View File

@@ -10,14 +10,13 @@ target x86_64
function %hoist_load(i32, i64 vmctx) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned readonly gv0
heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i32, v1: i64):
jump block1(v0, v1)
block1(v2: i32, v3: i64):
v4 = iconst.i32 1
v5 = heap_addr.i64 heap0, v4, 0, 4
v5 = global_value.i64 gv1
v6 = load.i32 aligned readonly v5
v7 = iadd v2, v6
brz v2, block3(v2)
@@ -34,11 +33,10 @@ block3(v9: i32):
; sameln: function %hoist_load(i32, i64 vmctx) -> i32 fast {
; nextln: gv0 = vmctx
; nextln: gv1 = load.i64 notrap aligned readonly gv0
; nextln: heap0 = static gv1, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
; nextln:
; nextln: block0(v0: i32, v1: i64):
; nextln: v4 = iconst.i32 1
; nextln: v5 = heap_addr.i64 heap0, v4, 0, 4
; nextln: v5 = global_value.i64 gv1
; nextln: jump block1(v0, v1)
; nextln:
; nextln: block1(v2: i32, v3: i64):

View File

@@ -49,34 +49,3 @@ block0:
v2 = bxor v0, v1
return v2
}
; Declare static heaps.
function %sheap(i32, i64 vmctx) -> i64 {
heap1 = static gv5, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000
heap2 = static gv5, offset_guard 0x1000, bound 0x1_0000
gv4 = vmctx
gv5 = iadd_imm.i64 gv4, 64
; check: heap1 = static gv5, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
; check: heap2 = static gv5, min 0, bound 0x0001_0000, offset_guard 4096
block0(v1: i32, v2: i64):
v3 = heap_addr.i64 heap1, v1, 0, 0
; check: v3 = heap_addr.i64 heap1, v1, 0, 0
return v3
}
; Declare dynamic heaps.
function %dheap(i32, i64 vmctx) -> i64 {
heap1 = dynamic gv5, min 0x1_0000, bound gv6, offset_guard 0x8000_0000
heap2 = dynamic gv5, bound gv6, offset_guard 0x1000
gv4 = vmctx
gv5 = iadd_imm.i64 gv4, 64
gv6 = iadd_imm.i64 gv4, 72
; check: heap1 = dynamic gv5, min 0x0001_0000, bound gv6, offset_guard 0x8000_0000
; check: heap2 = dynamic gv5, min 0, bound gv6, offset_guard 4096
block0(v1: i32, v2: i64):
v3 = heap_addr.i64 heap2, v1, 0, 0
; check: v3 = heap_addr.i64 heap2, v1, 0, 0
return v3
}

View File

@@ -71,19 +71,16 @@ block0(v0: f64):
;; Tests a fdemote+load combo which some backends may optimize
function %fdemote_load(i64 vmctx, i64, f64) -> f32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x10, bound 0x10, offset_guard 0x0, index_type i64
function %fdemote_load(i64, f64) -> f32 {
ss0 = explicit_slot 16
block0(v0: i64, v1: i64, v2: f64):
v3 = heap_addr.i64 heap0, v1, 0, 8
block0(v1: i64, v2: f64):
v3 = stack_addr.i64 ss0
store.f64 v2, v3
v4 = load.f64 v3
v5 = fdemote.f32 v4
return v5
}
; heap: static, size=0x10, ptr=vmctx+0, bound=vmctx+8
; run: %fdemote_load(0, 0x0.0) == 0x0.0
; run: %fdemote_load(1, 0x0.1) == 0x0.1
; run: %fdemote_load(2, 0x0.2) == 0x0.2

View File

@@ -79,20 +79,16 @@ block0(v0: f32):
;; Tests a fpromote+load combo which some backends may optimize
function %fpromote_load(i64 vmctx, i64, f32) -> f64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x10, bound 0x10, offset_guard 0x0, index_type i64
function %fpromote_load(i64, f32) -> f64 {
ss0 = explicit_slot 16
block0(v0: i64, v1: i64, v2: f32):
v3 = heap_addr.i64 heap0, v1, 0, 4
block0(v1: i64, v2: f32):
v3 = stack_addr.i64 ss0
store.f32 v2, v3
v4 = load.f32 v3
v5 = fpromote.f64 v4
return v5
}
; heap: static, size=0x10, ptr=vmctx+0, bound=vmctx+8
; run: %fpromote_load(0, 0x0.0) == 0x0.0
; run: %fpromote_load(1, 0x0.1) == 0x0.1
; run: %fpromote_load(2, 0x0.2) == 0x0.2

View File

@@ -1,24 +0,0 @@
test interpret
test run
target x86_64
target s390x
target aarch64
target riscv64
; Store a value in the heap using `heap_addr` and load it using `global_value`
function %store_load(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 0
store.i32 v2, v3
v4 = global_value.i64 gv1
v5 = load.i32 v4
return v5
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %store_load(0, 1) == 1
; run: %store_load(0, -1) == -1

View File

@@ -1,223 +0,0 @@
test interpret
test run
target x86_64
target s390x
target aarch64
target riscv64
function %static_heap_i64(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64(0, 1) == 1
; run: %static_heap_i64(0, -1) == -1
; run: %static_heap_i64(16, 1) == 1
; run: %static_heap_i64(16, -1) == -1
function %static_heap_i32(i64 vmctx, i32, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i32
block0(v0: i64, v1: i32, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i32(0, 1) == 1
; run: %static_heap_i32(0, -1) == -1
; run: %static_heap_i32(16, 1) == 1
; run: %static_heap_i32(16, -1) == -1
function %heap_no_min(i64 vmctx, i32, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0, index_type i32
block0(v0: i64, v1: i32, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %heap_no_min(0, 1) == 1
; run: %heap_no_min(0, -1) == -1
; run: %heap_no_min(16, 1) == 1
; run: %heap_no_min(16, -1) == -1
function %dynamic_i64(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %dynamic_i64(0, 1) == 1
; run: %dynamic_i64(0, -1) == -1
; run: %dynamic_i64(16, 1) == 1
; run: %dynamic_i64(16, -1) == -1
function %dynamic_i32(i64 vmctx, i32, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+8
heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i32
block0(v0: i64, v1: i32, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %dynamic_i32(0, 1) == 1
; run: %dynamic_i32(0, -1) == -1
; run: %dynamic_i32(16, 1) == 1
; run: %dynamic_i32(16, -1) == -1
function %multi_load_store(i64 vmctx, i32, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+16
gv3 = load.i64 notrap aligned gv0+24
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
heap1 = dynamic gv2, bound gv3, offset_guard 0, index_type i32
block0(v0: i64, v1: i32, v2: i32):
v3 = iconst.i64 0
v4 = iconst.i32 0
; Store lhs in heap0
v5 = heap_addr.i64 heap0, v3, 0, 4
store.i32 v1, v5
; Store rhs in heap1
v6 = heap_addr.i64 heap1, v4, 0, 4
store.i32 v2, v6
v7 = load.i32 v5
v8 = load.i32 v6
v9 = iadd.i32 v7, v8
return v9
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
; run: %multi_load_store(1, 2) == 3
; run: %multi_load_store(4, 5) == 9
; Uses multiple heaps, but heap0 refers to the second heap, and heap1 refers to the first heap
; This is a regression test for the interpreter
function %out_of_order(i64 vmctx, i32, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
gv2 = load.i64 notrap aligned gv0+16
gv3 = load.i64 notrap aligned gv0+24
heap0 = dynamic gv2, bound gv3, offset_guard 0, index_type i32
heap1 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i32, v2: i32):
v3 = iconst.i32 0
v4 = iconst.i64 0
; Store lhs in heap0
v5 = heap_addr.i64 heap0, v3, 0, 4
store.i32 v1, v5
; Store rhs in heap1
v6 = heap_addr.i64 heap1, v4, 0, 4
store.i32 v2, v6
v7 = load.i32 v5
v8 = load.i32 v6
v9 = iadd.i32 v7, v8
return v9
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24
; run: %out_of_order(1, 2) == 3
; run: %out_of_order(4, 5) == 9
function %unaligned_access(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %unaligned_access(0, 1) == 1
; run: %unaligned_access(0, -1) == -1
; run: %unaligned_access(1, 1) == 1
; run: %unaligned_access(1, -1) == -1
; run: %unaligned_access(2, 1) == 1
; run: %unaligned_access(2, -1) == -1
; run: %unaligned_access(3, 1) == 1
; run: %unaligned_access(3, -1) == -1
; This stores data in the place of the pointer in the vmctx struct, not in the heap itself.
function %iadd_imm(i64 vmctx, i32) -> i32 {
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i64
block0(v0: i64, v1: i32):
v2 = iconst.i64 0
v3 = heap_addr.i64 heap0, v2, 0, 4
store.i32 v1, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %iadd_imm(1) == 1
; run: %iadd_imm(-1) == -1
function %heap_limit_i64(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0, bound 0x8, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x8, ptr=vmctx+0, bound=vmctx+8
; run: %heap_limit_i64(0, 1) == 1
; run: %heap_limit_i64(0, -1) == -1
; run: %heap_limit_i64(4, 1) == 1
; run: %heap_limit_i64(4, -1) == -1

View File

@@ -1,98 +0,0 @@
test run
target x86_64
target s390x
target aarch64
target riscv64
function %load_op_store_iadd_i64(i64 vmctx, i64, i64) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 8
v4 = iconst.i64 42
store.i64 v4, v3
v5 = load.i64 v3
v6 = iadd.i64 v5, v2
store.i64 v6, v3
v7 = load.i64 v3
return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store(0, 1) == 43
; run: %static_heap_i64_load_store(0, -1) == 41
function %load_op_store_iadd_i32(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i32):
v3 = heap_addr.i64 heap0, v1, 0, 4
v4 = iconst.i32 42
store.i32 v4, v3
v5 = load.i32 v3
v6 = iadd.i32 v5, v2
store.i32 v6, v3
v7 = load.i32 v3
return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store(0, 1) == 43
; run: %static_heap_i64_load_store(0, -1) == 41
function %load_op_store_iadd_i8(i64 vmctx, i64, i8) -> i8 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i8):
v3 = heap_addr.i64 heap0, v1, 0, 4
v4 = iconst.i8 42
store.i8 v4, v3
v5 = load.i8 v3
v6 = iadd.i8 v5, v2
store.i8 v6, v3
v7 = load.i8 v3
return v7
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store(0, 1) == 43
; run: %static_heap_i64_load_store(0, -1) == 41
function %load_op_store_iadd_isub_iand_ior_ixor_i64(i64 vmctx, i64, i64) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 8
store.i64 v2, v3
v4 = load.i64 v3
v5 = iconst.i64 1
v6 = iadd.i64 v5, v4
store.i64 v6, v3
v7 = load.i64 v3
v8 = iconst.i64 2
v9 = load.i64 v3
v10 = isub.i64 v9, v8
store.i64 v10, v3
v11 = load.i64 v3
v12 = iconst.i64 0xf
v13 = band.i64 v12, v11
store.i64 v13, v3
v14 = iconst.i64 0x10
v15 = load.i64 v3
v16 = bor.i64 v15, v14
store.i64 v16, v3
v17 = load.i64 v3
v18 = iconst.i64 0xff
v19 = bxor.i64 v17, v18
store.i64 v19, v3
v20 = load.i64 v3
return v20
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %static_heap_i64_load_store(0, 0x1234) == 236

View File

@@ -1,25 +0,0 @@
test interpret
test run
target x86_64
target s390x
target aarch64
function %fvdemote_test(i64 vmctx, i64, f64x2) -> f32x4 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x20, bound 0x20, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: f64x2):
v3 = heap_addr.i64 heap0, v1, 0, 16
store.f64x2 v2, v3
v4 = load.f64x2 v3
v5 = fvdemote v4
return v5
}
; heap: static, size=0x20, ptr=vmctx+0, bound=vmctx+8
; run: %fvdemote_test(0, [0x0.0 0x0.0]) == [0x0.0 0x0.0 0x0.0 0x0.0]
; run: %fvdemote_test(1, [0x0.1 0x0.2]) == [0x0.1 0x0.2 0x0.0 0x0.0]
; run: %fvdemote_test(2, [0x2.1 0x1.2]) == [0x2.1 0x1.2 0x0.0 0x0.0]
; run: %fvdemote_test(8, [0x2.1 0x1.2]) == [0x2.1 0x1.2 0x0.0 0x0.0]
; run: %fvdemote_test(16, [0x2.1 0x1.2]) == [0x2.1 0x1.2 0x0.0 0x0.0]

View File

@@ -1,26 +0,0 @@
test interpret
test run
target x86_64
target s390x
target aarch64
function %fvpromote_low_test(i64 vmctx, i64, f32x4) -> f64x2 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+0
heap0 = static gv1, min 0x20, bound 0x20, offset_guard 0, index_type i64
block0(v0: i64, v1: i64, v2: f32x4):
v3 = heap_addr.i64 heap0, v1, 0, 16
store.f32x4 v2, v3
v4 = load.f32x4 v3
v5 = fvpromote_low v4
return v5
}
; heap: static, size=0x20, ptr=vmctx+0, bound=vmctx+8
; run: %fvpromote_low_test(0, [0x0.0 0x0.0 0x0.0 0x0.0]) == [0x0.0 0x0.0]
; run: %fvpromote_low_test(1, [0x0.1 0x0.2 0x0.0 0x0.0]) == [0x0.1 0x0.2]
; run: %fvpromote_low_test(2, [0x2.1 0x1.2 0x0.0 0x0.0]) == [0x2.1 0x1.2]
; run: %fvpromote_low_test(5, [0x0.0 0x0.0 0x2.1 0x1.2]) == [0x0.0 0x0.0]
; run: %fvpromote_low_test(16, [0x0.0 0x0.0 0x2.1 0x1.2]) == [0x0.0 0x0.0]

View File

@@ -1,144 +0,0 @@
test interpret
test run
target x86_64
target s390x
target aarch64
target riscv64
function %set_get_i64(i64 vmctx, i64, i64) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
table0 = dynamic gv1, element_size 8, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i64):
v3 = table_addr.i64 table0, v1, +0
store.i64 v2, v3
v4 = load.i64 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %set_get_i64(0, 1) == 1
; run: %set_get_i64(0, 10) == 10
; run: %set_get_i64(1, 1) == 1
; run: %set_get_i64(1, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
; run: %set_get_i64(10, 1) == 1
; run: %set_get_i64(10, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
function %set_get_i32(i64 vmctx, i64, i32) -> i32 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
table0 = dynamic gv1, element_size 8, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i32):
;; Note here the offset +4
v3 = table_addr.i64 table0, v1, +4
store.i32 v2, v3
v4 = load.i32 v3
return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %set_get_i32(0, 1) == 1
; run: %set_get_i32(0, 10) == 10
; run: %set_get_i32(1, 1) == 1
; run: %set_get_i32(1, 0xC0FFEEEE) == 0xC0FFEEEE
; run: %set_get_i32(10, 1) == 1
; run: %set_get_i32(10, 0xC0FFEEEE) == 0xC0FFEEEE
function %set_get_i8(i64 vmctx, i64, i8) -> i8 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
table0 = dynamic gv1, element_size 1, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i8):
v3 = table_addr.i64 table0, v1, +0
store.i8 v2, v3
v4 = load.i8 v3
return v4
}
; heap: static, size=2, ptr=vmctx+0, bound=vmctx+8
; run: %set_get_i8(0, 1) == 1
; run: %set_get_i8(0, 0xC0) == 0xC0
; run: %set_get_i8(1, 1) == 1
; run: %set_get_i8(1, 0xFF) == 0xFF
function %large_elm_size(i64 vmctx, i64, i64, i8) -> i8 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
table0 = dynamic gv1, element_size 10240, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i64, v3: i8):
v4 = table_addr.i64 table0, v1, +0
v5 = iadd.i64 v4, v2
store.i8 v3, v5
v6 = load.i8 v5
return v6
}
; heap: static, size=0xC800, ptr=vmctx+0, bound=vmctx+8
; run: %large_elm_size(0, 0, 1) == 1
; run: %large_elm_size(1, 0, 0xC0) == 0xC0
; run: %large_elm_size(0, 1, 1) == 1
; run: %large_elm_size(1, 1, 0xFF) == 0xFF
; run: %large_elm_size(0, 127, 1) == 1
; run: %large_elm_size(1, 127, 0xFF) == 0xFF
; run: %large_elm_size(0, 10239, 1) == 1
; run: %large_elm_size(1, 10239, 0xBB) == 0xBB
; Tests writing a i64 which covers 8 table entries at once
; Loads the first byte and the last to confirm that the slots were written
function %multi_elm_write(i64 vmctx, i64, i64) -> i8, i8 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
table0 = dynamic gv1, element_size 1, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i64):
v3 = table_addr.i64 table0, v1, +0
v4 = table_addr.i64 table0, v1, +7
store.i64 v2, v3
v5 = load.i8 v3
v6 = load.i8 v4
return v5, v6
}
; heap: static, size=16, ptr=vmctx+0, bound=vmctx+8
;; When writing these test cases keep in mind that s390x is big endian!
;; We just make sure that the first and last byte are the same to deal with that.
; run: %multi_elm_write(0, 0xC0FFEEEE_FFEEEEC0) == [0xC0, 0xC0]
; run: %multi_elm_write(1, 0xAABBCCDD_EEFF00AA) == [0xAA, 0xAA]
function %heap_table(i64 vmctx, i64, i64, i64) -> i64 {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv0 +8
heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64
table0 = dynamic gv1, element_size 9, bound gv2, index_type i64
block0(v0: i64, v1: i64, v2: i64, v3: i64):
; v1 - heap offset (bytes)
; v2 - table offset (elements)
; v3 - store/load value
v4 = heap_addr.i64 heap0, v1, 0, 0
v5 = table_addr.i64 table0, v2, +2
; Store via heap, load via table
store.i64 v3, v4
v6 = load.i64 v5
return v6
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %heap_table(2, 0, 0xAABBCCDD_EEFF0011) == 0xAABBCCDD_EEFF0011
; run: %heap_table(11, 1, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
; run: %heap_table(20, 2, 1) == 1
; run: %heap_table(29, 3, -10) == -10

View File

@@ -6,11 +6,10 @@ target x86_64
function %eliminate_redundant_global_loads(i32, i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned readonly gv0
heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = heap_addr.i64 heap0, v0, 0, 1
v2 = global_value.i64 gv1
v3 = global_value.i64 gv1
v4 = iconst.i32 0
store.i32 notrap aligned v4, v2
@@ -18,7 +17,7 @@ block0(v0: i32, v1: i64):
return
}
; check: v2 = heap_addr.i64 heap0, v0, 0, 1
; check: v2 = global_value.i64 gv1
; check: v3 -> v2
; check: v4 = iconst.i32 0
; check: store notrap aligned v4, v2

View File

@@ -3,12 +3,8 @@ target aarch64
target x86_64
function u0:2(i64 , i64) {
gv1 = load.i64 notrap aligned gv0
heap0 = static gv1
block0(v0: i64, v1: i64):
v16 = iconst.i32 6
v17 = heap_addr.i64 heap0, v16, 0, 1
v18 = load.i32 v17
v18 = load.i32 v0
v19 = iconst.i32 4
v20 = icmp ne v18, v19
v21 = uextend.i32 v20

View File

@@ -1,45 +0,0 @@
test verifier
target x86_64
function %heap_base_type(i64 vmctx) {
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0
heap0 = static gv1, offset_guard 0x1000, bound 0x1_0000, index_type i32 ; error: heap base has type i32, which is not the pointer type i64
block0(v0: i64):
return
}
function %invalid_base(i64 vmctx) {
gv0 = vmctx
heap0 = dynamic gv1, bound gv0, offset_guard 0x1000, index_type i64 ; error: invalid base global value gv1
block0(v0: i64):
return
}
function %invalid_bound(i64 vmctx) {
gv0 = vmctx
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i64 ; error: invalid bound global value gv1
block0(v0: i64):
return
}
function %heap_bound_type(i64 vmctx) {
gv0 = vmctx
gv1 = load.i16 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap pointer type i64 differs from the type of its bound, i16
block0(v0: i64):
return
}
function %heap_addr_index_type(i64 vmctx, i64) {
gv0 = vmctx
heap0 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i32
block0(v0: i64, v1: i64):
v2 = heap_addr.i64 heap0, v1, 0, 0; error: index type i64 differs from heap index type i32
return
}

View File

@@ -27,16 +27,19 @@
;; function u0:0(i32, i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;; heap0 = static gv1, min 0, bound 4096, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0021 v4 = heap_addr.i64 heap0, v0, 0, 4
;; @0021 v5 = load.i32 little heap v4
;; @0026 v6 = heap_addr.i64 heap0, v1, 0, 4
;; @0026 v7 = load.i32 little heap v6
;; @0029 v8 = iadd v5, v7
;; @002a jump block1(v8)
;; @0021 v4 = uextend.i64 v0
;; @0021 v5 = global_value.i64 gv1
;; @0021 v6 = iadd v5, v4
;; @0021 v7 = load.i32 little heap v6
;; @0026 v8 = uextend.i64 v1
;; @0026 v9 = global_value.i64 gv1
;; @0026 v10 = iadd v9, v8
;; @0026 v11 = load.i32 little heap v10
;; @0029 v12 = iadd v7, v11
;; @002a jump block1(v12)
;;
;; block1(v3: i32):
;; @002a return v3
;; }
;; }

View File

@@ -0,0 +1,22 @@
;;! target = "x86_64"
(module
(memory 1)
(func (export "f32.load") (param i32) (result f32)
local.get 0
f32.load))
;; function u0:0(i32, i64 vmctx) -> f32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @002e v3 = uextend.i64 v0
;; @002e v4 = global_value.i64 gv1
;; @002e v5 = iadd v4, v3
;; @002e v6 = load.f32 little heap v5
;; @0031 jump block1(v6)
;;
;; block1(v2: f32):
;; @0031 return v2
;; }

View File

@@ -1,27 +0,0 @@
; Test basic code generation for f32 memory WebAssembly instructions.
test compile
; We only test on 64-bit since the heap_addr instructions and vmctx parameters
; explicitly mention the pointer width.
target aarch64
target x86_64 haswell
function %f32_load(i32, i64 vmctx) -> f32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = load.f32 v2
return v3
}
function %f32_store(f32, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: f32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
store v0, v3
return
}

View File

@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for f32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "f32.store") (param i32 f32)
local.get 0
local.get 1
f32.store))
;; function u0:0(i32, f32, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: f32, v2: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 store little heap v1, v5
;; @0034 jump block1
;;
;; block1:
;; @0034 return
;; }

View File

@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for f64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "f64.load") (param i32) (result f64)
local.get 0
f64.load))
;; function u0:0(i32, i64 vmctx) -> f64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @002e v3 = uextend.i64 v0
;; @002e v4 = global_value.i64 gv1
;; @002e v5 = iadd v4, v3
;; @002e v6 = load.f64 little heap v5
;; @0031 jump block1(v6)
;;
;; block1(v2: f64):
;; @0031 return v2
;; }

View File

@@ -1,27 +0,0 @@
; Test basic code generation for f64 memory WebAssembly instructions.
test compile
; We only test on 64-bit since the heap_addr instructions and vmctx parameters
; explicitly mention the pointer width.
target aarch64
target x86_64 haswell
function %f64_load(i32, i64 vmctx) -> f64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = load.f64 v2
return v3
}
function %f64_store(f64, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: f64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
store v0, v3
return
}

View File

@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for f64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "f64.store") (param i32 f64)
local.get 0
local.get 1
f64.store))
;; function u0:0(i32, f64, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: f64, v2: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 store little heap v1, v5
;; @0034 jump block1
;;
;; block1:
;; @0034 return
;; }

View File

@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.load") (param i32) (result i32)
local.get 0
i32.load))
;; function u0:0(i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @002e v3 = uextend.i64 v0
;; @002e v4 = global_value.i64 gv1
;; @002e v5 = iadd v4, v3
;; @002e v6 = load.i32 little heap v5
;; @0031 jump block1(v6)
;;
;; block1(v2: i32):
;; @0031 return v2
;; }

View File

@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.load16_s") (param i32) (result i32)
local.get 0
i32.load16_s))
;; function u0:0(i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 v6 = sload16.i32 little heap v5
;; @0035 jump block1(v6)
;;
;; block1(v2: i32):
;; @0035 return v2
;; }

View File

@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.load16_u") (param i32) (result i32)
local.get 0
i32.load16_u))
;; function u0:0(i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 v6 = uload16.i32 little heap v5
;; @0035 jump block1(v6)
;;
;; block1(v2: i32):
;; @0035 return v2
;; }

View File

@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.load8_s") (param i32) (result i32)
local.get 0
i32.load8_s))
;; function u0:0(i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 v6 = sload8.i32 little heap v5
;; @0034 jump block1(v6)
;;
;; block1(v2: i32):
;; @0034 return v2
;; }

View File

@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.load8_u") (param i32) (result i32)
local.get 0
i32.load8_u))
;; function u0:0(i32, i64 vmctx) -> i32 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 v6 = uload8.i32 little heap v5
;; @0034 jump block1(v6)
;;
;; block1(v2: i32):
;; @0034 return v2
;; }

View File

@@ -1,87 +0,0 @@
; Test basic code generation for i32 memory WebAssembly instructions.
test compile
; We only test on 64-bit since the heap_addr instructions and vmctx parameters
; explicitly mention the pointer width.
target aarch64
target x86_64 haswell
function %i32_load(i32, i64 vmctx) -> i32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = load.i32 v2
return v3
}
function %i32_store(i32, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
store v0, v3
return
}
function %i32_load8_s(i32, i64 vmctx) -> i32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = sload8.i32 v2
return v3
}
function %i32_load8_u(i32, i64 vmctx) -> i32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = uload8.i32 v2
return v3
}
function %i32_store8(i32, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
istore8 v0, v3
return
}
function %i32_load16_s(i32, i64 vmctx) -> i32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = sload16.i32 v2
return v3
}
function %i32_load16_u(i32, i64 vmctx) -> i32 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = uload16.i32 v2
return v3
}
function %i32_store16(i32, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
istore16 v0, v3
return
}

View File

@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.store") (param i32 i32)
local.get 0
local.get 1
i32.store))
;; function u0:0(i32, i32, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 store little heap v1, v5
;; @0034 jump block1
;;
;; block1:
;; @0034 return
;; }

View File

@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.store16") (param i32 i32)
local.get 0
local.get 1
i32.store16))
;; function u0:0(i32, i32, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0033 v3 = uextend.i64 v0
;; @0033 v4 = global_value.i64 gv1
;; @0033 v5 = iadd v4, v3
;; @0033 istore16 little heap v1, v5
;; @0036 jump block1
;;
;; block1:
;; @0036 return
;; }

View File

@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i32.store8") (param i32 i32)
local.get 0
local.get 1
i32.store8))
;; function u0:0(i32, i32, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 istore8 little heap v1, v5
;; @0035 jump block1
;;
;; block1:
;; @0035 return
;; }

View File

@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i32 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.load") (param i32) (result i64)
local.get 0
i64.load))
;; function u0:0(i32, i64 vmctx) -> i64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @002e v3 = uextend.i64 v0
;; @002e v4 = global_value.i64 gv1
;; @002e v5 = iadd v4, v3
;; @002e v6 = load.i64 little heap v5
;; @0031 jump block1(v6)
;;
;; block1(v2: i64):
;; @0031 return v2
;; }

View File

@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.load16_s") (param i32) (result i64)
local.get 0
i64.load16_s))
;; function u0:0(i32, i64 vmctx) -> i64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 v6 = sload16.i64 little heap v5
;; @0035 jump block1(v6)
;;
;; block1(v2: i64):
;; @0035 return v2
;; }

View File

@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.load16_u") (param i32) (result i64)
local.get 0
i64.load16_u))
;; function u0:0(i32, i64 vmctx) -> i64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 v6 = uload16.i64 little heap v5
;; @0035 jump block1(v6)
;;
;; block1(v2: i64):
;; @0035 return v2
;; }

View File

@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.load8_s") (param i32) (result i64)
local.get 0
i64.load8_s))
;; function u0:0(i32, i64 vmctx) -> i64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 v6 = sload8.i64 little heap v5
;; @0034 jump block1(v6)
;;
;; block1(v2: i64):
;; @0034 return v2
;; }

View File

@@ -0,0 +1,24 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.load8_u") (param i32) (result i64)
local.get 0
i64.load8_u))
;; function u0:0(i32, i64 vmctx) -> i64 fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 v6 = uload8.i64 little heap v5
;; @0034 jump block1(v6)
;;
;; block1(v2: i64):
;; @0034 return v2
;; }

View File

@@ -1,117 +0,0 @@
; Test basic code generation for i32 memory WebAssembly instructions.
test compile
; We only test on 64-bit since the heap_addr instructions and vmctx parameters
; explicitly mention the pointer width.
target aarch64
target x86_64 haswell
function %i64_load(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = load.i64 v2
return v3
}
function %i64_store(i64, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
store v0, v3
return
}
function %i64_load8_s(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = sload8.i64 v2
return v3
}
function %i64_load8_u(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = uload8.i64 v2
return v3
}
function %i64_store8(i64, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
istore8 v0, v3
return
}
function %i64_load16_s(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = sload16.i64 v2
return v3
}
function %i64_load16_u(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = uload16.i64 v2
return v3
}
function %i64_store16(i64, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
istore16 v0, v3
return
}
function %i64_load32_s(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = sload32.i64 v2
return v3
}
function %i64_load32_u(i32, i64 vmctx) -> i64 {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0, 1
v3 = uload32.i64 v2
return v3
}
function %i64_store32(i64, i32, i64 vmctx) {
gv0 = vmctx
heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
block0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 0, 1
istore32 v0, v3
return
}


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.store") (param i32 i64)
local.get 0
local.get 1
i64.store))
;; function u0:0(i32, i64, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64, v2: i64):
;; @0031 v3 = uextend.i64 v0
;; @0031 v4 = global_value.i64 gv1
;; @0031 v5 = iadd v4, v3
;; @0031 store little heap v1, v5
;; @0034 jump block1
;;
;; block1:
;; @0034 return
;; }


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.store16") (param i32 i64)
local.get 0
local.get 1
i64.store16))
;; function u0:0(i32, i64, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64, v2: i64):
;; @0033 v3 = uextend.i64 v0
;; @0033 v4 = global_value.i64 gv1
;; @0033 v5 = iadd v4, v3
;; @0033 istore16 little heap v1, v5
;; @0036 jump block1
;;
;; block1:
;; @0036 return
;; }


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.store32") (param i32 i64)
local.get 0
local.get 1
i64.store32))
;; function u0:0(i32, i64, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64, v2: i64):
;; @0033 v3 = uextend.i64 v0
;; @0033 v4 = global_value.i64 gv1
;; @0033 v5 = iadd v4, v3
;; @0033 istore32 little heap v1, v5
;; @0036 jump block1
;;
;; block1:
;; @0036 return
;; }


@@ -0,0 +1,25 @@
;;! target = "x86_64"
;; Test basic code generation for i64 memory WebAssembly instructions.
(module
(memory 1)
(func (export "i64.store8") (param i32 i64)
local.get 0
local.get 1
i64.store8))
;; function u0:0(i32, i64, i64 vmctx) fast {
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0
;;
;; block0(v0: i32, v1: i64, v2: i64):
;; @0032 v3 = uextend.i64 v0
;; @0032 v4 = global_value.i64 gv1
;; @0032 v5 = iadd v4, v3
;; @0032 istore8 little heap v1, v5
;; @0035 jump block1
;;
;; block1:
;; @0035 return
;; }


@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 store little heap v1, v8
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = iadd_imm v4, -4
;; @0048 v6 = icmp ugt v3, v5
;; @0048 trapnz v6, heap_oob
;; @0048 v7 = global_value.i64 gv2
;; @0048 v8 = iadd v7, v3
;; @0048 v9 = load.i32 little heap v8
;; @004b jump block1(v9)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4100
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 4096
;; @0040 store little heap v1, v9
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4100
;; @0049 v6 = icmp ugt v3, v5
;; @0049 trapnz v6, heap_oob
;; @0049 v7 = global_value.i64 gv2
;; @0049 v8 = iadd v7, v3
;; @0049 v9 = iadd_imm v8, 4096
;; @0049 v10 = load.i32 little heap v9
;; @004d jump block1(v10)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0004
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = icmp ugt v5, v6
;; @0040 trapnz v7, heap_oob
;; @0040 v8 = global_value.i64 gv2
;; @0040 v9 = iadd v8, v3
;; @0040 v10 = iadd_imm v9, 0xffff_0000
;; @0040 store little heap v1, v10
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0004
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = icmp ugt v5, v6
;; @004c trapnz v7, heap_oob
;; @004c v8 = global_value.i64 gv2
;; @004c v9 = iadd v8, v3
;; @004c v10 = iadd_imm v9, 0xffff_0000
;; @004c v11 = load.i32 little heap v10
;; @0053 jump block1(v11)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,15 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = icmp uge v3, v4
;; @0040 trapnz v5, heap_oob
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 istore8 little heap v1, v7
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +62,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0048 v4 = uload8.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = icmp uge v3, v4
;; @0048 trapnz v5, heap_oob
;; @0048 v6 = global_value.i64 gv2
;; @0048 v7 = iadd v6, v3
;; @0048 v8 = uload8.i32 little heap v7
;; @004b jump block1(v8)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4097
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 4096
;; @0040 istore8 little heap v1, v9
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0049 v4 = uload8.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4097
;; @0049 v6 = icmp ugt v3, v5
;; @0049 trapnz v6, heap_oob
;; @0049 v7 = global_value.i64 gv2
;; @0049 v8 = iadd v7, v3
;; @0049 v9 = iadd_imm v8, 4096
;; @0049 v10 = uload8.i32 little heap v9
;; @004d jump block1(v10)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0001
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = icmp ugt v5, v6
;; @0040 trapnz v7, heap_oob
;; @0040 v8 = global_value.i64 gv2
;; @0040 v9 = iadd v8, v3
;; @0040 v10 = iadd_imm v9, 0xffff_0000
;; @0040 istore8 little heap v1, v10
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @004c v4 = uload8.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0001
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = icmp ugt v5, v6
;; @004c trapnz v7, heap_oob
;; @004c v8 = global_value.i64 gv2
;; @004c v9 = iadd v8, v3
;; @004c v10 = iadd_imm v9, 0xffff_0000
;; @004c v11 = uload8.i32 little heap v10
;; @0053 jump block1(v11)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 v8 = iconst.i64 0
;; @0040 v9 = icmp ugt v3, v5
;; @0040 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0040 store little heap v1, v10
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = iadd_imm v4, -4
;; @0048 v6 = global_value.i64 gv2
;; @0048 v7 = iadd v6, v3
;; @0048 v8 = iconst.i64 0
;; @0048 v9 = icmp ugt v3, v5
;; @0048 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0048 v11 = load.i32 little heap v10
;; @004b jump block1(v11)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4100
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 v8 = iadd_imm v7, 4096
;; @0040 v9 = iconst.i64 0
;; @0040 v10 = icmp ugt v3, v5
;; @0040 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0040 store little heap v1, v11
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4100
;; @0049 v6 = global_value.i64 gv2
;; @0049 v7 = iadd v6, v3
;; @0049 v8 = iadd_imm v7, 4096
;; @0049 v9 = iconst.i64 0
;; @0049 v10 = icmp ugt v3, v5
;; @0049 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0049 v12 = load.i32 little heap v11
;; @004d jump block1(v12)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0004
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 0xffff_0000
;; @0040 v10 = iconst.i64 0
;; @0040 v11 = icmp ugt v5, v6
;; @0040 v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @0040 store little heap v1, v12
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +66,20 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0004
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = global_value.i64 gv2
;; @004c v8 = iadd v7, v3
;; @004c v9 = iadd_imm v8, 0xffff_0000
;; @004c v10 = iconst.i64 0
;; @004c v11 = icmp ugt v5, v6
;; @004c v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @004c v13 = load.i32 little heap v12
;; @0053 jump block1(v13)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = global_value.i64 gv2
;; @0040 v6 = iadd v5, v3
;; @0040 v7 = iconst.i64 0
;; @0040 v8 = icmp uge v3, v4
;; @0040 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0040 istore8 little heap v1, v9
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0048 v4 = uload8.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = global_value.i64 gv2
;; @0048 v6 = iadd v5, v3
;; @0048 v7 = iconst.i64 0
;; @0048 v8 = icmp uge v3, v4
;; @0048 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0048 v10 = uload8.i32 little heap v9
;; @004b jump block1(v10)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4097
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 v8 = iadd_imm v7, 4096
;; @0040 v9 = iconst.i64 0
;; @0040 v10 = icmp ugt v3, v5
;; @0040 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0040 istore8 little heap v1, v11
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0049 v4 = uload8.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4097
;; @0049 v6 = global_value.i64 gv2
;; @0049 v7 = iadd v6, v3
;; @0049 v8 = iadd_imm v7, 4096
;; @0049 v9 = iconst.i64 0
;; @0049 v10 = icmp ugt v3, v5
;; @0049 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0049 v12 = uload8.i32 little heap v11
;; @004d jump block1(v12)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0001
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 0xffff_0000
;; @0040 v10 = iconst.i64 0
;; @0040 v11 = icmp ugt v5, v6
;; @0040 v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @0040 istore8 little heap v1, v12
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +66,20 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @004c v4 = uload8.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0001
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = global_value.i64 gv2
;; @004c v8 = iadd v7, v3
;; @004c v9 = iadd_imm v8, 0xffff_0000
;; @004c v10 = iconst.i64 0
;; @004c v11 = icmp ugt v5, v6
;; @004c v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @004c v13 = uload8.i32 little heap v12
;; @0053 jump block1(v13)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 store little heap v1, v8
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = iadd_imm v4, -4
;; @0048 v6 = icmp ugt v3, v5
;; @0048 trapnz v6, heap_oob
;; @0048 v7 = global_value.i64 gv2
;; @0048 v8 = iadd v7, v3
;; @0048 v9 = load.i32 little heap v8
;; @004b jump block1(v9)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4100
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 4096
;; @0040 store little heap v1, v9
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4100
;; @0049 v6 = icmp ugt v3, v5
;; @0049 trapnz v6, heap_oob
;; @0049 v7 = global_value.i64 gv2
;; @0049 v8 = iadd v7, v3
;; @0049 v9 = iadd_imm v8, 4096
;; @0049 v10 = load.i32 little heap v9
;; @004d jump block1(v10)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0004
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = icmp ugt v5, v6
;; @0040 trapnz v7, heap_oob
;; @0040 v8 = global_value.i64 gv2
;; @0040 v9 = iadd v8, v3
;; @0040 v10 = iadd_imm v9, 0xffff_0000
;; @0040 store little heap v1, v10
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0004
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = icmp ugt v5, v6
;; @004c trapnz v7, heap_oob
;; @004c v8 = global_value.i64 gv2
;; @004c v9 = iadd v8, v3
;; @004c v10 = iadd_imm v9, 0xffff_0000
;; @004c v11 = load.i32 little heap v10
;; @0053 jump block1(v11)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,15 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = icmp uge v3, v4
;; @0040 trapnz v5, heap_oob
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 istore8 little heap v1, v7
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +62,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0048 v4 = uload8.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = icmp uge v3, v4
;; @0048 trapnz v5, heap_oob
;; @0048 v6 = global_value.i64 gv2
;; @0048 v7 = iadd v6, v3
;; @0048 v8 = uload8.i32 little heap v7
;; @004b jump block1(v8)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4097
;; @0040 v6 = icmp ugt v3, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 4096
;; @0040 istore8 little heap v1, v9
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0049 v4 = uload8.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4097
;; @0049 v6 = icmp ugt v3, v5
;; @0049 trapnz v6, heap_oob
;; @0049 v7 = global_value.i64 gv2
;; @0049 v8 = iadd v7, v3
;; @0049 v9 = iadd_imm v8, 4096
;; @0049 v10 = uload8.i32 little heap v9
;; @004d jump block1(v10)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0001
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = icmp ugt v5, v6
;; @0040 trapnz v7, heap_oob
;; @0040 v8 = global_value.i64 gv2
;; @0040 v9 = iadd v8, v3
;; @0040 v10 = iadd_imm v9, 0xffff_0000
;; @0040 istore8 little heap v1, v10
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @004c v4 = uload8.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0001
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = icmp ugt v5, v6
;; @004c trapnz v7, heap_oob
;; @004c v8 = global_value.i64 gv2
;; @004c v9 = iadd v8, v3
;; @004c v10 = iadd_imm v9, 0xffff_0000
;; @004c v11 = uload8.i32 little heap v10
;; @0053 jump block1(v11)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 v8 = iconst.i64 0
;; @0040 v9 = icmp ugt v3, v5
;; @0040 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0040 store little heap v1, v10
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = iadd_imm v4, -4
;; @0048 v6 = global_value.i64 gv2
;; @0048 v7 = iadd v6, v3
;; @0048 v8 = iconst.i64 0
;; @0048 v9 = icmp ugt v3, v5
;; @0048 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0048 v11 = load.i32 little heap v10
;; @004b jump block1(v11)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4100
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 v8 = iadd_imm v7, 4096
;; @0040 v9 = iconst.i64 0
;; @0040 v10 = icmp ugt v3, v5
;; @0040 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0040 store little heap v1, v11
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4100
;; @0049 v6 = global_value.i64 gv2
;; @0049 v7 = iadd v6, v3
;; @0049 v8 = iadd_imm v7, 4096
;; @0049 v9 = iconst.i64 0
;; @0049 v10 = icmp ugt v3, v5
;; @0049 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0049 v12 = load.i32 little heap v11
;; @004d jump block1(v12)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0004
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 0xffff_0000
;; @0040 v10 = iconst.i64 0
;; @0040 v11 = icmp ugt v5, v6
;; @0040 v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @0040 store little heap v1, v12
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +66,20 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0004
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0004
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = global_value.i64 gv2
;; @004c v8 = iadd v7, v3
;; @004c v9 = iadd_imm v8, 0xffff_0000
;; @004c v10 = iconst.i64 0
;; @004c v11 = icmp ugt v5, v6
;; @004c v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @004c v13 = load.i32 little heap v12
;; @0053 jump block1(v13)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = global_value.i64 gv2
;; @0040 v6 = iadd v5, v3
;; @0040 v7 = iconst.i64 0
;; @0040 v8 = icmp uge v3, v4
;; @0040 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0040 istore8 little heap v1, v9
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0048 v4 = uload8.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = uextend.i64 v0
;; @0048 v4 = global_value.i64 gv1
;; @0048 v5 = global_value.i64 gv2
;; @0048 v6 = iadd v5, v3
;; @0048 v7 = iconst.i64 0
;; @0048 v8 = icmp uge v3, v4
;; @0048 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0048 v10 = uload8.i32 little heap v9
;; @004b jump block1(v10)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = global_value.i64 gv1
;; @0040 v5 = iadd_imm v4, -4097
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v3
;; @0040 v8 = iadd_imm v7, 4096
;; @0040 v9 = iconst.i64 0
;; @0040 v10 = icmp ugt v3, v5
;; @0040 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0040 istore8 little heap v1, v11
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0049 v4 = uload8.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = uextend.i64 v0
;; @0049 v4 = global_value.i64 gv1
;; @0049 v5 = iadd_imm v4, -4097
;; @0049 v6 = global_value.i64 gv2
;; @0049 v7 = iadd v6, v3
;; @0049 v8 = iadd_imm v7, 4096
;; @0049 v9 = iconst.i64 0
;; @0049 v10 = icmp ugt v3, v5
;; @0049 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0049 v12 = uload8.i32 little heap v11
;; @004d jump block1(v12)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = uextend.i64 v0
;; @0040 v4 = iconst.i64 0xffff_0001
;; @0040 v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @0040 v6 = global_value.i64 gv1
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v3
;; @0040 v9 = iadd_imm v8, 0xffff_0000
;; @0040 v10 = iconst.i64 0
;; @0040 v11 = icmp ugt v5, v6
;; @0040 v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @0040 istore8 little heap v1, v12
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +66,20 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i32
;;
;; block0(v0: i32, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @004c v4 = uload8.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = uextend.i64 v0
;; @004c v4 = iconst.i64 0xffff_0001
;; @004c v5 = uadd_overflow_trap v3, v4, heap_oob ; v4 = 0xffff_0001
;; @004c v6 = global_value.i64 gv1
;; @004c v7 = global_value.i64 gv2
;; @004c v8 = iadd v7, v3
;; @004c v9 = iadd_imm v8, 0xffff_0000
;; @004c v10 = iconst.i64 0
;; @004c v11 = icmp ugt v5, v6
;; @004c v12 = select_spectre_guard v11, v10, v9 ; v10 = 0
;; @004c v13 = uload8.i32 little heap v12
;; @0053 jump block1(v13)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,15 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = iadd_imm v3, -4
;; @0040 v5 = icmp ugt v0, v4
;; @0040 trapnz v5, heap_oob
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v0
;; @0040 store little heap v1, v7
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +62,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = global_value.i64 gv1
;; @0048 v4 = iadd_imm v3, -4
;; @0048 v5 = icmp ugt v0, v4
;; @0048 trapnz v5, heap_oob
;; @0048 v6 = global_value.i64 gv2
;; @0048 v7 = iadd v6, v0
;; @0048 v8 = load.i32 little heap v7
;; @004b jump block1(v8)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = iadd_imm v3, -4100
;; @0040 v5 = icmp ugt v0, v4
;; @0040 trapnz v5, heap_oob
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v0
;; @0040 v8 = iadd_imm v7, 4096
;; @0040 store little heap v1, v8
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = global_value.i64 gv1
;; @0049 v4 = iadd_imm v3, -4100
;; @0049 v5 = icmp ugt v0, v4
;; @0049 trapnz v5, heap_oob
;; @0049 v6 = global_value.i64 gv2
;; @0049 v7 = iadd v6, v0
;; @0049 v8 = iadd_imm v7, 4096
;; @0049 v9 = load.i32 little heap v8
;; @004d jump block1(v9)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = iconst.i64 0xffff_0004
;; @0040 v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0004
;; @0040 v5 = global_value.i64 gv1
;; @0040 v6 = icmp ugt v4, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v0
;; @0040 v9 = iadd_imm v8, 0xffff_0000
;; @0040 store little heap v1, v9
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = iconst.i64 0xffff_0004
;; @004c v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0004
;; @004c v5 = global_value.i64 gv1
;; @004c v6 = icmp ugt v4, v5
;; @004c trapnz v6, heap_oob
;; @004c v7 = global_value.i64 gv2
;; @004c v8 = iadd v7, v0
;; @004c v9 = iadd_imm v8, 0xffff_0000
;; @004c v10 = load.i32 little heap v9
;; @0053 jump block1(v10)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,14 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = icmp uge v0, v3
;; @0040 trapnz v4, heap_oob
;; @0040 v5 = global_value.i64 gv2
;; @0040 v6 = iadd v5, v0
;; @0040 istore8 little heap v1, v6
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +61,15 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0048 v4 = uload8.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = global_value.i64 gv1
;; @0048 v4 = icmp uge v0, v3
;; @0048 trapnz v4, heap_oob
;; @0048 v5 = global_value.i64 gv2
;; @0048 v6 = iadd v5, v0
;; @0048 v7 = uload8.i32 little heap v6
;; @004b jump block1(v7)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = iadd_imm v3, -4097
;; @0040 v5 = icmp ugt v0, v4
;; @0040 trapnz v5, heap_oob
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v0
;; @0040 v8 = iadd_imm v7, 4096
;; @0040 istore8 little heap v1, v8
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0049 v4 = uload8.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = global_value.i64 gv1
;; @0049 v4 = iadd_imm v3, -4097
;; @0049 v5 = icmp ugt v0, v4
;; @0049 trapnz v5, heap_oob
;; @0049 v6 = global_value.i64 gv2
;; @0049 v7 = iadd v6, v0
;; @0049 v8 = iadd_imm v7, 4096
;; @0049 v9 = uload8.i32 little heap v8
;; @004d jump block1(v9)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = iconst.i64 0xffff_0001
;; @0040 v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0001
;; @0040 v5 = global_value.i64 gv1
;; @0040 v6 = icmp ugt v4, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v0
;; @0040 v9 = iadd_imm v8, 0xffff_0000
;; @0040 istore8 little heap v1, v9
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @004c v4 = uload8.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = iconst.i64 0xffff_0001
;; @004c v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0001
;; @004c v5 = global_value.i64 gv1
;; @004c v6 = icmp ugt v4, v5
;; @004c trapnz v6, heap_oob
;; @004c v7 = global_value.i64 gv2
;; @004c v8 = iadd v7, v0
;; @004c v9 = iadd_imm v8, 0xffff_0000
;; @004c v10 = uload8.i32 little heap v9
;; @0053 jump block1(v10)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = iadd_imm v3, -4
;; @0040 v5 = global_value.i64 gv2
;; @0040 v6 = iadd v5, v0
;; @0040 v7 = iconst.i64 0
;; @0040 v8 = icmp ugt v0, v4
;; @0040 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0040 store little heap v1, v9
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = global_value.i64 gv1
;; @0048 v4 = iadd_imm v3, -4
;; @0048 v5 = global_value.i64 gv2
;; @0048 v6 = iadd v5, v0
;; @0048 v7 = iconst.i64 0
;; @0048 v8 = icmp ugt v0, v4
;; @0048 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0048 v10 = load.i32 little heap v9
;; @004b jump block1(v10)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = iadd_imm v3, -4100
;; @0040 v5 = global_value.i64 gv2
;; @0040 v6 = iadd v5, v0
;; @0040 v7 = iadd_imm v6, 4096
;; @0040 v8 = iconst.i64 0
;; @0040 v9 = icmp ugt v0, v4
;; @0040 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0040 store little heap v1, v10
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = global_value.i64 gv1
;; @0049 v4 = iadd_imm v3, -4100
;; @0049 v5 = global_value.i64 gv2
;; @0049 v6 = iadd v5, v0
;; @0049 v7 = iadd_imm v6, 4096
;; @0049 v8 = iconst.i64 0
;; @0049 v9 = icmp ugt v0, v4
;; @0049 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0049 v11 = load.i32 little heap v10
;; @004d jump block1(v11)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = iconst.i64 0xffff_0004
;; @0040 v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0004
;; @0040 v5 = global_value.i64 gv1
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v0
;; @0040 v8 = iadd_imm v7, 0xffff_0000
;; @0040 v9 = iconst.i64 0
;; @0040 v10 = icmp ugt v4, v5
;; @0040 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0040 store little heap v1, v11
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = iconst.i64 0xffff_0004
;; @004c v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0004
;; @004c v5 = global_value.i64 gv1
;; @004c v6 = global_value.i64 gv2
;; @004c v7 = iadd v6, v0
;; @004c v8 = iadd_imm v7, 0xffff_0000
;; @004c v9 = iconst.i64 0
;; @004c v10 = icmp ugt v4, v5
;; @004c v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @004c v12 = load.i32 little heap v11
;; @0053 jump block1(v12)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,15 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = global_value.i64 gv2
;; @0040 v5 = iadd v4, v0
;; @0040 v6 = iconst.i64 0
;; @0040 v7 = icmp uge v0, v3
;; @0040 v8 = select_spectre_guard v7, v6, v5 ; v6 = 0
;; @0040 istore8 little heap v1, v8
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +62,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0048 v4 = uload8.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = global_value.i64 gv1
;; @0048 v4 = global_value.i64 gv2
;; @0048 v5 = iadd v4, v0
;; @0048 v6 = iconst.i64 0
;; @0048 v7 = icmp uge v0, v3
;; @0048 v8 = select_spectre_guard v7, v6, v5 ; v6 = 0
;; @0048 v9 = uload8.i32 little heap v8
;; @004b jump block1(v9)
;;
;; block1(v2: i32):
;; @004b return v2


@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = iadd_imm v3, -4097
;; @0040 v5 = global_value.i64 gv2
;; @0040 v6 = iadd v5, v0
;; @0040 v7 = iadd_imm v6, 4096
;; @0040 v8 = iconst.i64 0
;; @0040 v9 = icmp ugt v0, v4
;; @0040 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0040 istore8 little heap v1, v10
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0049 v4 = uload8.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = global_value.i64 gv1
;; @0049 v4 = iadd_imm v3, -4097
;; @0049 v5 = global_value.i64 gv2
;; @0049 v6 = iadd v5, v0
;; @0049 v7 = iadd_imm v6, 4096
;; @0049 v8 = iconst.i64 0
;; @0049 v9 = icmp ugt v0, v4
;; @0049 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0049 v11 = uload8.i32 little heap v10
;; @004d jump block1(v11)
;;
;; block1(v2: i32):
;; @004d return v2


@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = iconst.i64 0xffff_0001
;; @0040 v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0001
;; @0040 v5 = global_value.i64 gv1
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v0
;; @0040 v8 = iadd_imm v7, 0xffff_0000
;; @0040 v9 = iconst.i64 0
;; @0040 v10 = icmp ugt v4, v5
;; @0040 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0040 istore8 little heap v1, v11
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @004c v4 = uload8.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = iconst.i64 0xffff_0001
;; @004c v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0001
;; @004c v5 = global_value.i64 gv1
;; @004c v6 = global_value.i64 gv2
;; @004c v7 = iadd v6, v0
;; @004c v8 = iadd_imm v7, 0xffff_0000
;; @004c v9 = iconst.i64 0
;; @004c v10 = icmp ugt v4, v5
;; @004c v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @004c v12 = uload8.i32 little heap v11
;; @0053 jump block1(v12)
;;
;; block1(v2: i32):
;; @0053 return v2


@@ -43,11 +43,15 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = iadd_imm v3, -4
;; @0040 v5 = icmp ugt v0, v4
;; @0040 trapnz v5, heap_oob
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v0
;; @0040 store little heap v1, v7
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +62,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = global_value.i64 gv1
;; @0048 v4 = iadd_imm v3, -4
;; @0048 v5 = icmp ugt v0, v4
;; @0048 trapnz v5, heap_oob
;; @0048 v6 = global_value.i64 gv2
;; @0048 v7 = iadd v6, v0
;; @0048 v8 = load.i32 little heap v7
;; @004b jump block1(v8)
;;
;; block1(v2: i32):
;; @004b return v2

View File

@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = iadd_imm v3, -4100
;; @0040 v5 = icmp ugt v0, v4
;; @0040 trapnz v5, heap_oob
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v0
;; @0040 v8 = iadd_imm v7, 4096
;; @0040 store little heap v1, v8
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = global_value.i64 gv1
;; @0049 v4 = iadd_imm v3, -4100
;; @0049 v5 = icmp ugt v0, v4
;; @0049 trapnz v5, heap_oob
;; @0049 v6 = global_value.i64 gv2
;; @0049 v7 = iadd v6, v0
;; @0049 v8 = iadd_imm v7, 4096
;; @0049 v9 = load.i32 little heap v8
;; @004d jump block1(v9)
;;
;; block1(v2: i32):
;; @004d return v2

View File

@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = iconst.i64 0xffff_0004
;; @0040 v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0004
;; @0040 v5 = global_value.i64 gv1
;; @0040 v6 = icmp ugt v4, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v0
;; @0040 v9 = iadd_imm v8, 0xffff_0000
;; @0040 store little heap v1, v9
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = iconst.i64 0xffff_0004
;; @004c v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0004
;; @004c v5 = global_value.i64 gv1
;; @004c v6 = icmp ugt v4, v5
;; @004c trapnz v6, heap_oob
;; @004c v7 = global_value.i64 gv2
;; @004c v8 = iadd v7, v0
;; @004c v9 = iadd_imm v8, 0xffff_0000
;; @004c v10 = load.i32 little heap v9
;; @0053 jump block1(v10)
;;
;; block1(v2: i32):
;; @0053 return v2

View File

@@ -43,11 +43,14 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = icmp uge v0, v3
;; @0040 trapnz v4, heap_oob
;; @0040 v5 = global_value.i64 gv2
;; @0040 v6 = iadd v5, v0
;; @0040 istore8 little heap v1, v6
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +61,15 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 1
;; @0048 v4 = uload8.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = global_value.i64 gv1
;; @0048 v4 = icmp uge v0, v3
;; @0048 trapnz v4, heap_oob
;; @0048 v5 = global_value.i64 gv2
;; @0048 v6 = iadd v5, v0
;; @0048 v7 = uload8.i32 little heap v6
;; @004b jump block1(v7)
;;
;; block1(v2: i32):
;; @004b return v2

View File

@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = iadd_imm v3, -4097
;; @0040 v5 = icmp ugt v0, v4
;; @0040 trapnz v5, heap_oob
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v0
;; @0040 v8 = iadd_imm v7, 4096
;; @0040 istore8 little heap v1, v8
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 1
;; @0049 v4 = uload8.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = global_value.i64 gv1
;; @0049 v4 = iadd_imm v3, -4097
;; @0049 v5 = icmp ugt v0, v4
;; @0049 trapnz v5, heap_oob
;; @0049 v6 = global_value.i64 gv2
;; @0049 v7 = iadd v6, v0
;; @0049 v8 = iadd_imm v7, 4096
;; @0049 v9 = uload8.i32 little heap v8
;; @004d jump block1(v9)
;;
;; block1(v2: i32):
;; @004d return v2

View File

@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @0040 istore8 little heap v1, v3
;; @0040 v3 = iconst.i64 0xffff_0001
;; @0040 v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0001
;; @0040 v5 = global_value.i64 gv1
;; @0040 v6 = icmp ugt v4, v5
;; @0040 trapnz v6, heap_oob
;; @0040 v7 = global_value.i64 gv2
;; @0040 v8 = iadd v7, v0
;; @0040 v9 = iadd_imm v8, 0xffff_0000
;; @0040 istore8 little heap v1, v9
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 1
;; @004c v4 = uload8.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = iconst.i64 0xffff_0001
;; @004c v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0001
;; @004c v5 = global_value.i64 gv1
;; @004c v6 = icmp ugt v4, v5
;; @004c trapnz v6, heap_oob
;; @004c v7 = global_value.i64 gv2
;; @004c v8 = iadd v7, v0
;; @004c v9 = iadd_imm v8, 0xffff_0000
;; @004c v10 = uload8.i32 little heap v9
;; @0053 jump block1(v10)
;;
;; block1(v2: i32):
;; @0053 return v2

View File

@@ -43,11 +43,16 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = iadd_imm v3, -4
;; @0040 v5 = global_value.i64 gv2
;; @0040 v6 = iadd v5, v0
;; @0040 v7 = iconst.i64 0
;; @0040 v8 = icmp ugt v0, v4
;; @0040 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0040 store little heap v1, v9
;; @0043 jump block1
;;
;; block1:
@@ -58,12 +63,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0048 v3 = heap_addr.i64 heap0, v0, 0, 4
;; @0048 v4 = load.i32 little heap v3
;; @004b jump block1(v4)
;; @0048 v3 = global_value.i64 gv1
;; @0048 v4 = iadd_imm v3, -4
;; @0048 v5 = global_value.i64 gv2
;; @0048 v6 = iadd v5, v0
;; @0048 v7 = iconst.i64 0
;; @0048 v8 = icmp ugt v0, v4
;; @0048 v9 = select_spectre_guard v8, v7, v6 ; v7 = 0
;; @0048 v10 = load.i32 little heap v9
;; @004b jump block1(v10)
;;
;; block1(v2: i32):
;; @004b return v2

View File

@@ -43,11 +43,17 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = global_value.i64 gv1
;; @0040 v4 = iadd_imm v3, -4100
;; @0040 v5 = global_value.i64 gv2
;; @0040 v6 = iadd v5, v0
;; @0040 v7 = iadd_imm v6, 4096
;; @0040 v8 = iconst.i64 0
;; @0040 v9 = icmp ugt v0, v4
;; @0040 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0040 store little heap v1, v10
;; @0044 jump block1
;;
;; block1:
@@ -58,12 +64,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @0049 v3 = heap_addr.i64 heap0, v0, 4096, 4
;; @0049 v4 = load.i32 little heap v3
;; @004d jump block1(v4)
;; @0049 v3 = global_value.i64 gv1
;; @0049 v4 = iadd_imm v3, -4100
;; @0049 v5 = global_value.i64 gv2
;; @0049 v6 = iadd v5, v0
;; @0049 v7 = iadd_imm v6, 4096
;; @0049 v8 = iconst.i64 0
;; @0049 v9 = icmp ugt v0, v4
;; @0049 v10 = select_spectre_guard v9, v8, v7 ; v8 = 0
;; @0049 v11 = load.i32 little heap v10
;; @004d jump block1(v11)
;;
;; block1(v2: i32):
;; @004d return v2

View File

@@ -43,11 +43,18 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i32, v2: i64):
;; @0040 v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @0040 store little heap v1, v3
;; @0040 v3 = iconst.i64 0xffff_0004
;; @0040 v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0004
;; @0040 v5 = global_value.i64 gv1
;; @0040 v6 = global_value.i64 gv2
;; @0040 v7 = iadd v6, v0
;; @0040 v8 = iadd_imm v7, 0xffff_0000
;; @0040 v9 = iconst.i64 0
;; @0040 v10 = icmp ugt v4, v5
;; @0040 v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @0040 store little heap v1, v11
;; @0047 jump block1
;;
;; block1:
@@ -58,12 +65,19 @@
;; gv0 = vmctx
;; gv1 = load.i64 notrap aligned readonly gv0+8
;; gv2 = load.i64 notrap aligned readonly gv0
;; heap0 = dynamic gv2, min 0x0001_0000, bound gv1, offset_guard 0xffff_ffff, index_type i64
;;
;; block0(v0: i64, v1: i64):
;; @004c v3 = heap_addr.i64 heap0, v0, 0xffff_0000, 4
;; @004c v4 = load.i32 little heap v3
;; @0053 jump block1(v4)
;; @004c v3 = iconst.i64 0xffff_0004
;; @004c v4 = uadd_overflow_trap v0, v3, heap_oob ; v3 = 0xffff_0004
;; @004c v5 = global_value.i64 gv1
;; @004c v6 = global_value.i64 gv2
;; @004c v7 = iadd v6, v0
;; @004c v8 = iadd_imm v7, 0xffff_0000
;; @004c v9 = iconst.i64 0
;; @004c v10 = icmp ugt v4, v5
;; @004c v11 = select_spectre_guard v10, v9, v8 ; v9 = 0
;; @004c v12 = load.i32 little heap v11
;; @0053 jump block1(v12)
;;
;; block1(v2: i32):
;; @0053 return v2

Some files were not shown because too many files have changed in this diff