Support heaps with no offset-guard pages.
Also, say "guard-offset pages" rather than just "guard pages" to describe the region of a heap which is never accessible and which exists to support optimizations for heap accesses with offsets. And, introduce a `Uimm64` immediate type, and make all heap fields use `Uimm64` instead of `Imm64` since they really are unsigned.
@@ -7,7 +7,7 @@ target x86_64 haswell
 
 function %f32_load(i32, i64 vmctx) -> f32 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -17,7 +17,7 @@ ebb0(v0: i32, v1: i64):
 
 function %f32_store(f32, i32, i64 vmctx) {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: f32, v1: i32, v2: i64):
     v3 = heap_addr.i64 heap0, v1, 1

@@ -7,7 +7,7 @@ target x86_64 haswell
 
 function %f64_load(i32, i64 vmctx) -> f64 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -17,7 +17,7 @@ ebb0(v0: i32, v1: i64):
 
 function %f64_store(f64, i32, i64 vmctx) {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: f64, v1: i32, v2: i64):
     v3 = heap_addr.i64 heap0, v1, 1

@@ -7,7 +7,7 @@ target x86_64 haswell
 
 function %i32_load(i32, i64 vmctx) -> i32 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -17,7 +17,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i32_store(i32, i32, i64 vmctx) {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i32, v2: i64):
     v3 = heap_addr.i64 heap0, v1, 1
@@ -27,7 +27,7 @@ ebb0(v0: i32, v1: i32, v2: i64):
 
 function %i32_load8_s(i32, i64 vmctx) -> i32 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -37,7 +37,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i32_load8_u(i32, i64 vmctx) -> i32 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -47,7 +47,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i32_store8(i32, i32, i64 vmctx) {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i32, v2: i64):
     v3 = heap_addr.i64 heap0, v1, 1
@@ -57,7 +57,7 @@ ebb0(v0: i32, v1: i32, v2: i64):
 
 function %i32_load16_s(i32, i64 vmctx) -> i32 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -67,7 +67,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i32_load16_u(i32, i64 vmctx) -> i32 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -77,7 +77,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i32_store16(i32, i32, i64 vmctx) {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i32, v2: i64):
     v3 = heap_addr.i64 heap0, v1, 1

@@ -7,7 +7,7 @@ target x86_64 haswell
 
 function %i64_load(i32, i64 vmctx) -> i64 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -17,7 +17,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i64_store(i64, i32, i64 vmctx) {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i64, v1: i32, v2: i64):
     v3 = heap_addr.i64 heap0, v1, 1
@@ -27,7 +27,7 @@ ebb0(v0: i64, v1: i32, v2: i64):
 
 function %i64_load8_s(i32, i64 vmctx) -> i64 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -37,7 +37,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i64_load8_u(i32, i64 vmctx) -> i64 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -47,7 +47,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i64_store8(i64, i32, i64 vmctx) {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i64, v1: i32, v2: i64):
     v3 = heap_addr.i64 heap0, v1, 1
@@ -57,7 +57,7 @@ ebb0(v0: i64, v1: i32, v2: i64):
 
 function %i64_load16_s(i32, i64 vmctx) -> i64 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -67,7 +67,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i64_load16_u(i32, i64 vmctx) -> i64 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -77,7 +77,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i64_store16(i64, i32, i64 vmctx) {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i64, v1: i32, v2: i64):
     v3 = heap_addr.i64 heap0, v1, 1
@@ -87,7 +87,7 @@ ebb0(v0: i64, v1: i32, v2: i64):
 
 function %i64_load32_s(i32, i64 vmctx) -> i64 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -97,7 +97,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i64_load32_u(i32, i64 vmctx) -> i64 {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i32, v1: i64):
     v2 = heap_addr.i64 heap0, v0, 1
@@ -107,7 +107,7 @@ ebb0(v0: i32, v1: i64):
 
 function %i64_store32(i64, i32, i64 vmctx) {
     gv0 = vmctx
-    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, guard 0x8000_0000
+    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000
 
 ebb0(v0: i64, v1: i32, v2: i64):
     v3 = heap_addr.i64 heap0, v1, 1