Support heaps with no offset-guard pages.

Also, say "offset-guard pages" rather than just "guard pages" to describe the
region of a heap which is never accessible and which exists to support
optimizations for heap accesses with offsets.

In addition, introduce a `Uimm64` immediate type and make all heap fields use
`Uimm64` instead of `Imm64`, since they really are unsigned.
Dan Gohman
2018-11-29 04:53:30 -08:00
parent 93696a80bb
commit a20c852148
27 changed files with 302 additions and 172 deletions
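
In the textual IR, the rename shows up in the heap declarations below: every `guard N` field becomes `offset_guard N`. A heap with no offset-guard pages would then simply declare a zero-sized guard region. Here is a minimal sketch of such a heap (not taken from this diff; the function name and sizes are made up, and it assumes a zero size is written as `offset_guard 0`). Without the guard region, an access through `heap_addr` can no longer rely on unmapped pages to catch index-plus-offset overruns, so it presumably needs an explicit bounds check instead:

function %no_offset_guard(i32, i64 vmctx) -> i32 {
    gv0 = vmctx
    heap0 = static gv0, min 0x1_0000, bound 0x1_0000, offset_guard 0, index_type i32
ebb0(v0: i32, v1: i64):
    v2 = heap_addr.i64 heap0, v0, 4   ; 4-byte access, must fit entirely below the bound
    v3 = load.i32 v2
    return v3
}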

View File

@@ -10,23 +10,23 @@ function %heap_addrs(i32, i64, i64 vmctx) {
gv2 = iadd_imm.i64 gv4, 80
gv3 = load.i32 notrap aligned gv4+88
-heap0 = static gv0, min 0x1_0000, bound 0x1_0000_0000, guard 0x8000_0000, index_type i32
-heap1 = static gv0, guard 0x1000, bound 0x1_0000, index_type i32
-heap2 = static gv0, min 0x1_0000, bound 0x1_0000_0000, guard 0x8000_0000, index_type i64
-heap3 = static gv0, guard 0x1000, bound 0x1_0000, index_type i64
-heap4 = dynamic gv1, min 0x1_0000, bound gv3, guard 0x8000_0000, index_type i32
-heap5 = dynamic gv1, bound gv3, guard 0x1000, index_type i32
-heap6 = dynamic gv1, min 0x1_0000, bound gv2, guard 0x8000_0000, index_type i64
-heap7 = dynamic gv1, bound gv2, guard 0x1000, index_type i64
+heap0 = static gv0, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
+heap1 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i32
+heap2 = static gv0, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i64
+heap3 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i64
+heap4 = dynamic gv1, min 0x1_0000, bound gv3, offset_guard 0x8000_0000, index_type i32
+heap5 = dynamic gv1, bound gv3, offset_guard 0x1000, index_type i32
+heap6 = dynamic gv1, min 0x1_0000, bound gv2, offset_guard 0x8000_0000, index_type i64
+heap7 = dynamic gv1, bound gv2, offset_guard 0x1000, index_type i64
-; check: heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000, index_type i32
-; check: heap1 = static gv0, min 0, bound 0x0001_0000, guard 4096, index_type i32
-; check: heap2 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000, index_type i64
-; check: heap3 = static gv0, min 0, bound 0x0001_0000, guard 4096, index_type i64
-; check: heap4 = dynamic gv1, min 0x0001_0000, bound gv3, guard 0x8000_0000, index_type i32
-; check: heap5 = dynamic gv1, min 0, bound gv3, guard 4096, index_type i32
-; check: heap6 = dynamic gv1, min 0x0001_0000, bound gv2, guard 0x8000_0000, index_type i64
-; check: heap7 = dynamic gv1, min 0, bound gv2, guard 4096, index_type i64
+; check: heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i32
+; check: heap1 = static gv0, min 0, bound 0x0001_0000, offset_guard 4096, index_type i32
+; check: heap2 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000, index_type i64
+; check: heap3 = static gv0, min 0, bound 0x0001_0000, offset_guard 4096, index_type i64
+; check: heap4 = dynamic gv1, min 0x0001_0000, bound gv3, offset_guard 0x8000_0000, index_type i32
+; check: heap5 = dynamic gv1, min 0, bound gv3, offset_guard 4096, index_type i32
+; check: heap6 = dynamic gv1, min 0x0001_0000, bound gv2, offset_guard 0x8000_0000, index_type i64
+; check: heap7 = dynamic gv1, min 0, bound gv2, offset_guard 4096, index_type i64
ebb0(v0: i32, v1: i64, v3: i64):
; The fast-path; 32-bit index, static heap with a sufficient bound, no bounds check needed!
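
The fast-path comment refers to heaps shaped like `heap0`: the bound is 2^32 bytes and the offset-guard region is 2^31 bytes, so a 32-bit index plus any small constant offset lands either inside the heap or inside the always-inaccessible offset-guard pages, and no bounds check is needed. As a rough sketch of the idea (illustrative only; the value names are made up and this is not the legalized output of this test), such a `heap_addr` can reduce to a zero-extend and an add:

v10 = heap_addr.i64 heap0, v0, 1
; effectively becomes something like:
v11 = uextend.i64 v0           ; a 32-bit index cannot escape bound + offset_guard
v12 = global_value.i64 gv0     ; heap base
v13 = iadd v12, v11            ; no compare, no trap; v13 replaces v10

Heaps with a small or zero offset-guard region do not get this shortcut and keep an explicit compare-and-trap bounds check.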

View File

@@ -47,7 +47,7 @@ ebb1:
function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash {
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 64
-heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
+heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v999: i64):
; check: ebb0(
@@ -68,7 +68,7 @@ ebb0(v0: i32, v999: i64):
function %staticheap_static_oob_sm64(i32, i64 vmctx) -> f32 baldrdash {
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 64
-heap0 = static gv1, min 0x1000, bound 0x1000_0000, guard 0x8000_0000
+heap0 = static gv1, min 0x1000, bound 0x1000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v999: i64):
; Everything after the obviously OOB access should be eliminated, leaving
@@ -92,7 +92,7 @@ ebb0(v0: i32, v999: i64):
function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash {
gv0 = vmctx
gv1 = iadd_imm.i64 gv0, 64
-heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000
+heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v999: i64):
; check: ebb0(

View File

@@ -52,13 +52,13 @@ ebb0:
; Declare static heaps.
function %sheap(i32, i64 vmctx) -> i64 {
-heap1 = static gv5, min 0x1_0000, bound 0x1_0000_0000, guard 0x8000_0000
-heap2 = static gv5, guard 0x1000, bound 0x1_0000
+heap1 = static gv5, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000
+heap2 = static gv5, offset_guard 0x1000, bound 0x1_0000
gv4 = vmctx
gv5 = iadd_imm.i64 gv4, 64
-; check: heap1 = static gv5, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
-; check: heap2 = static gv5, min 0, bound 0x0001_0000, guard 4096
+; check: heap1 = static gv5, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
+; check: heap2 = static gv5, min 0, bound 0x0001_0000, offset_guard 4096
ebb0(v1: i32, v2: i64):
v3 = heap_addr.i64 heap1, v1, 0
; check: v3 = heap_addr.i64 heap1, v1, 0
@@ -67,14 +67,14 @@ ebb0(v1: i32, v2: i64):
; Declare dynamic heaps.
function %dheap(i32, i64 vmctx) -> i64 {
-heap1 = dynamic gv5, min 0x1_0000, bound gv6, guard 0x8000_0000
-heap2 = dynamic gv5, bound gv6, guard 0x1000
+heap1 = dynamic gv5, min 0x1_0000, bound gv6, offset_guard 0x8000_0000
+heap2 = dynamic gv5, bound gv6, offset_guard 0x1000
gv4 = vmctx
gv5 = iadd_imm.i64 gv4, 64
gv6 = iadd_imm.i64 gv4, 72
-; check: heap1 = dynamic gv5, min 0x0001_0000, bound gv6, guard 0x8000_0000
-; check: heap2 = dynamic gv5, min 0, bound gv6, guard 4096
+; check: heap1 = dynamic gv5, min 0x0001_0000, bound gv6, offset_guard 0x8000_0000
+; check: heap2 = dynamic gv5, min 0, bound gv6, offset_guard 4096
ebb0(v1: i32, v2: i64):
v3 = heap_addr.i64 heap2, v1, 0
; check: v3 = heap_addr.i64 heap2, v1, 0

View File

@@ -3,7 +3,7 @@ target x86_64 haswell
function %value_aliases(i32, f32, i64 vmctx) baldrdash {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: f32, v2: i64):
v3 = iconst.i32 0

View File

@@ -7,7 +7,7 @@ target x86_64 haswell
function %pr207(i64 vmctx, i32, i32) -> i32 system_v {
gv1 = vmctx
gv0 = iadd_imm.i64 gv1, -8
-heap0 = static gv0, min 0, bound 0x5000, guard 0x0040_0000
+heap0 = static gv0, min 0, bound 0x5000, offset_guard 0x0040_0000
sig0 = (i64 vmctx, i32, i32) -> i32 system_v
sig1 = (i64 vmctx, i32, i32, i32) -> i32 system_v
sig2 = (i64 vmctx, i32, i32, i32) -> i32 system_v
@@ -1036,7 +1036,7 @@ ebb92(v767: i32):
; Same problem from musl.wasm.
function %musl(f64 [%xmm0], i64 vmctx [%rdi]) -> f64 [%xmm0] system_v {
gv0 = vmctx
-heap0 = static gv0, min 0, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0, bound 0x0001_0000_0000, offset_guard 0x8000_0000
sig0 = (f64 [%xmm0], i32 [%rdi], i64 vmctx [%rsi]) -> f64 [%xmm0] system_v
fn0 = u0:517 sig0

View File

@@ -3,7 +3,7 @@ target x86_64 haswell
function %pr227(i32 [%rdi], i32 [%rsi], i32 [%rdx], i32 [%rcx], i64 vmctx [%r8]) system_v {
gv0 = vmctx
-heap0 = static gv0, min 0, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i32, v2: i32, v3: i32, v4: i64):
[RexOp1pu_id#b8] v5 = iconst.i32 0

View File

@@ -13,7 +13,7 @@ target x86_64 haswell
function %pr208(i64 vmctx [%rdi]) system_v {
gv1 = vmctx
gv0 = iadd_imm.i64 gv1, -8
-heap0 = static gv0, min 0, bound 0x5000, guard 0x0040_0000
+heap0 = static gv0, min 0, bound 0x5000, offset_guard 0x0040_0000
sig0 = (i64 vmctx [%rdi]) -> i32 [%rax] system_v
sig1 = (i64 vmctx [%rdi], i32 [%rsi]) system_v
fn0 = u0:1 sig0

View File

@@ -5,7 +5,7 @@ target x86_64
function %eliminate_redundant_global_loads(i32, i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned readonly gv0
-heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, guard 0x8000_0000, index_type i32
+heap0 = static gv1, min 0x1_0000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i32
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1

View File

@@ -4,7 +4,7 @@ target x86_64
function %heap_base_type(i64 vmctx) {
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0
-heap0 = static gv1, guard 0x1000, bound 0x1_0000, index_type i32 ; error: heap base has type i32, which is not the pointer type i64
+heap0 = static gv1, offset_guard 0x1000, bound 0x1_0000, index_type i32 ; error: heap base has type i32, which is not the pointer type i64
ebb0(v0: i64):
return
@@ -12,7 +12,7 @@ ebb0(v0: i64):
function %invalid_base(i64 vmctx) {
gv0 = vmctx
-heap0 = dynamic gv1, bound gv0, guard 0x1000, index_type i64 ; error: invalid base global value gv1
+heap0 = dynamic gv1, bound gv0, offset_guard 0x1000, index_type i64 ; error: invalid base global value gv1
ebb0(v0: i64):
return
@@ -20,7 +20,7 @@ ebb0(v0: i64):
function %invalid_bound(i64 vmctx) {
gv0 = vmctx
-heap0 = dynamic gv0, bound gv1, guard 0x1000, index_type i64 ; error: invalid bound global value gv1
+heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i64 ; error: invalid bound global value gv1
ebb0(v0: i64):
return
@@ -29,7 +29,7 @@ ebb0(v0: i64):
function %heap_bound_type(i64 vmctx) {
gv0 = vmctx
gv1 = load.i16 notrap aligned gv0
-heap0 = dynamic gv0, bound gv1, guard 0x1000, index_type i32 ; error: heap index type i32 differs from the type of its bound, i16
+heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32 ; error: heap index type i32 differs from the type of its bound, i16
ebb0(v0: i64):
return
@@ -37,7 +37,7 @@ ebb0(v0: i64):
function %heap_addr_index_type(i64 vmctx, i64) {
gv0 = vmctx
-heap0 = static gv0, guard 0x1000, bound 0x1_0000, index_type i32
+heap0 = static gv0, offset_guard 0x1000, bound 0x1_0000, index_type i32
ebb0(v0: i64, v1: i64):
v2 = heap_addr.i64 heap0, v1, 0; error: index type i64 differs from heap index type i32
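
The errors above each exercise one heap invariant: the base must be an existing global value of the target's pointer type (`i64` here), a dynamic bound must be an existing global value whose type matches the heap's `index_type`, and the index operand of `heap_addr` must also have that `index_type`. For contrast, a dynamic heap that satisfies all of these might be declared like the following sketch (not part of the test file; the function name and the `gv0+8` bound slot are made up for illustration):

function %valid_dynamic_heap(i64 vmctx, i64) {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0+8    ; bound has the same type as index_type
    heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i64
ebb0(v0: i64, v1: i64):
    v2 = heap_addr.i64 heap0, v1, 0
    return
}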

View File

@@ -7,7 +7,7 @@ target x86_64 haswell
function %f32_load(i32, i64 vmctx) -> f32 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -17,7 +17,7 @@ ebb0(v0: i32, v1: i64):
function %f32_store(f32, i32, i64 vmctx) {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: f32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 1

View File

@@ -7,7 +7,7 @@ target x86_64 haswell
function %f64_load(i32, i64 vmctx) -> f64 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -17,7 +17,7 @@ ebb0(v0: i32, v1: i64):
function %f64_store(f64, i32, i64 vmctx) {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: f64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 1

View File

@@ -7,7 +7,7 @@ target x86_64 haswell
function %i32_load(i32, i64 vmctx) -> i32 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -17,7 +17,7 @@ ebb0(v0: i32, v1: i64):
function %i32_store(i32, i32, i64 vmctx) {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 1
@@ -27,7 +27,7 @@ ebb0(v0: i32, v1: i32, v2: i64):
function %i32_load8_s(i32, i64 vmctx) -> i32 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -37,7 +37,7 @@ ebb0(v0: i32, v1: i64):
function %i32_load8_u(i32, i64 vmctx) -> i32 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -47,7 +47,7 @@ ebb0(v0: i32, v1: i64):
function %i32_store8(i32, i32, i64 vmctx) {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 1
@@ -57,7 +57,7 @@ ebb0(v0: i32, v1: i32, v2: i64):
function %i32_load16_s(i32, i64 vmctx) -> i32 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -67,7 +67,7 @@ ebb0(v0: i32, v1: i64):
function %i32_load16_u(i32, i64 vmctx) -> i32 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -77,7 +77,7 @@ ebb0(v0: i32, v1: i64):
function %i32_store16(i32, i32, i64 vmctx) {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 1

View File

@@ -7,7 +7,7 @@ target x86_64 haswell
function %i64_load(i32, i64 vmctx) -> i64 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -17,7 +17,7 @@ ebb0(v0: i32, v1: i64):
function %i64_store(i64, i32, i64 vmctx) {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 1
@@ -27,7 +27,7 @@ ebb0(v0: i64, v1: i32, v2: i64):
function %i64_load8_s(i32, i64 vmctx) -> i64 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -37,7 +37,7 @@ ebb0(v0: i32, v1: i64):
function %i64_load8_u(i32, i64 vmctx) -> i64 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -47,7 +47,7 @@ ebb0(v0: i32, v1: i64):
function %i64_store8(i64, i32, i64 vmctx) {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 1
@@ -57,7 +57,7 @@ ebb0(v0: i64, v1: i32, v2: i64):
function %i64_load16_s(i32, i64 vmctx) -> i64 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -67,7 +67,7 @@ ebb0(v0: i32, v1: i64):
function %i64_load16_u(i32, i64 vmctx) -> i64 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -77,7 +77,7 @@ ebb0(v0: i32, v1: i64):
function %i64_store16(i64, i32, i64 vmctx) {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 1
@@ -87,7 +87,7 @@ ebb0(v0: i64, v1: i32, v2: i64):
function %i64_load32_s(i32, i64 vmctx) -> i64 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -97,7 +97,7 @@ ebb0(v0: i32, v1: i64):
function %i64_load32_u(i32, i64 vmctx) -> i64 {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 1
@@ -107,7 +107,7 @@ ebb0(v0: i32, v1: i64):
function %i64_store32(i64, i32, i64 vmctx) {
gv0 = vmctx
-heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
ebb0(v0: i64, v1: i32, v2: i64):
v3 = heap_addr.i64 heap0, v1, 1