; Also, say "guard-offset pages" rather than just "guard pages" to describe
; the region of a heap which is never accessible and which exists to support
; optimizations for heap accesses with offsets. And, introduce a `Uimm64`
; immediate type, and make all heap fields use `Uimm64` instead of `Imm64`
; since they really are unsigned.
; Test the legalization of memory objects.
test legalizer
target x86_64

; regex: V=v\d+
; regex: EBB=ebb\d+
; A global value that is a constant offset from the VM context pointer
; should legalize to a single iadd_imm on the vmctx argument.
function %vmctx(i64 vmctx) -> i64 {
    gv0 = vmctx
    gv1 = iadd_imm.i64 gv0, -16

ebb1(v1: i64):
    v2 = global_value.i64 gv1
    ; check: v2 = iadd_imm v1, -16
    return v2
    ; check: return v2
}
; A global value loaded through the VM context (deref chain) should
; legalize to an explicit load followed by an iadd_imm.
function %load(i64 vmctx) -> i64 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0-16
    gv2 = iadd_imm.i64 gv1, 32

ebb1(v1: i64):
    v2 = global_value.i64 gv2
    ; check: $(p1=$V) = load.i64 notrap aligned v1-16
    ; check: v2 = iadd_imm $p1, 32
    return v2
    ; check: return v2
}
; Symbolic global values (named and numbered) legalize to symbol_value.
function %symbol() -> i64 {
    gv0 = symbol %something
    gv1 = symbol u123:456

ebb1:
    v0 = global_value.i64 gv0
    ; check: v0 = symbol_value.i64 gv0
    v1 = global_value.i64 gv1
    ; check: v1 = symbol_value.i64 gv1
    v2 = bxor v0, v1
    return v2
}
; SpiderMonkey VM-style static 4+2 GB heap.
; This eliminates bounds checks completely for offsets < 2GB.
function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash {
    gv0 = vmctx
    gv1 = iadd_imm.i64 gv0, 64
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000

ebb0(v0: i32, v999: i64):
    ; check: ebb0(
    v1 = heap_addr.i64 heap0, v0, 1
    ; Boundscheck should be eliminated.
    ; Checks here are assuming that no pipehole opts fold the load offsets.
    ; nextln: $(xoff=$V) = uextend.i64 v0
    ; nextln: $(hbase=$V) = iadd_imm v999, 64
    ; nextln: v1 = iadd $hbase, $xoff
    v2 = load.f32 v1+16
    ; nextln: v2 = load.f32 v1+16
    v3 = load.f32 v1+20
    ; nextln: v3 = load.f32 v1+20
    v4 = fadd v2, v3
    return v4
}
; A statically-known out-of-bounds heap_addr access should legalize to an
; unconditional trap, with the dead tail of the Ebb split off.
function %staticheap_static_oob_sm64(i32, i64 vmctx) -> f32 baldrdash {
    gv0 = vmctx
    gv1 = iadd_imm.i64 gv0, 64
    heap0 = static gv1, min 0x1000, bound 0x1000_0000, offset_guard 0x8000_0000

ebb0(v0: i32, v999: i64):
    ; Everything after the obviously OOB access should be eliminated, leaving
    ; the `trap heap_oob` instruction as the terminator of the Ebb and moving
    ; the remainder of the instructions into an inaccessible Ebb.
    ; check: ebb0(
    ; nextln: trap heap_oob
    ; check: ebb1:
    ; nextln: v1 = iconst.i64 0
    ; nextln: v2 = load.f32 v1+16
    ; nextln: return v2
    ; nextln: }
    v1 = heap_addr.i64 heap0, v0, 0x1000_0001
    v2 = load.f32 v1+16
    return v2
}
; SpiderMonkey VM-style static 4+2 GB heap.
; Offsets >= 2 GB do require a boundscheck.
function %staticheap_sm64(i32, i64 vmctx) -> f32 baldrdash {
    gv0 = vmctx
    gv1 = iadd_imm.i64 gv0, 64
    heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000

ebb0(v0: i32, v999: i64):
    ; check: ebb0(
    v1 = heap_addr.i64 heap0, v0, 0x8000_0000
    ; Boundscheck code
    ; check: $(oob=$V) = icmp
    ; nextln: brz $oob, $(ok=$EBB)
    ; nextln: trap heap_oob
    ; check: $ok:
    ; Checks here are assuming that no pipehole opts fold the load offsets.
    ; nextln: $(xoff=$V) = uextend.i64 v0
    ; nextln: $(hbase=$V) = iadd_imm.i64 v999, 64
    ; nextln: v1 = iadd $hbase, $xoff
    v2 = load.f32 v1+0x7fff_ffff
    ; nextln: v2 = load.f32 v1+0x7fff_ffff
    return v2
}