; Test the legalization of memory objects.
test legalizer
set is_64bit
isa intel

; regex: V=v\d+
; regex: EBB=ebb\d+

; A global value relative to the VM context pointer legalizes to a single
; immediate add on the vmctx argument.
function %vmctx(i64 vmctx) -> i64 {
    gv1 = vmctx-16

ebb1(v1: i64):
    v2 = global_addr.i64 gv1
    ; check: $v2 = iadd_imm $v1, -16
    return v2
    ; check: return $v2
}

; A deref global value adds a load between the two address computations:
; vmctx offset, pointer load, then the deref offset.
function %deref(i64 vmctx) -> i64 {
    gv1 = vmctx-16
    gv2 = deref(gv1)+32

ebb1(v1: i64):
    v2 = global_addr.i64 gv2
    ; check: $(a1=$V) = iadd_imm $v1, -16
    ; check: $(p1=$V) = load.i64 $a1
    ; check: $v2 = iadd_imm $p1, 32
    return v2
    ; check: return $v2
}

; SpiderMonkey VM-style static 4+2 GB heap.
; This eliminates bounds checks completely for offsets < 2GB.
function %staticheap_sm64(i32, i64 vmctx) -> f32 spiderwasm {
    gv0 = vmctx+64
    heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000

ebb0(v0: i32, v999: i64):
    ; check: $ebb0(
    v1 = heap_addr.i64 heap0, v0, 1
    ; Boundscheck should be eliminated.
    ; Checks here are assuming that no peephole opts fold the load offsets.
    ; nextln: $(xoff=$V) = uextend.i64 $v0
    ; nextln: $(haddr=$V) = iadd_imm $v999, 64
    ; nextln: $(hbase=$V) = load.i64 $haddr
    ; nextln: $v1 = iadd $hbase, $xoff
    v2 = load.f32 v1+16
    ; nextln: $v2 = load.f32 $v1+16
    v3 = load.f32 v1+20
    ; nextln: $v3 = load.f32 $v1+20
    v4 = fadd v2, v3
    return v4
}

; SpiderMonkey VM-style static 4+2 GB heap.
; Offsets >= 2 GB do require a boundscheck.
function %staticheap_sm64(i32, i64 vmctx) -> f32 spiderwasm {
    gv0 = vmctx+64
    heap0 = static gv0, min 0x1000, bound 0x1_0000_0000, guard 0x8000_0000

ebb0(v0: i32, v999: i64):
    ; check: $ebb0(
    v1 = heap_addr.i64 heap0, v0, 0x8000_0000
    ; Boundscheck code
    ; check: $(oob=$V) = icmp
    ; nextln: brz $oob, $(ok=$EBB)
    ; nextln: trap
    ; check: $ok:
    ; Checks here are assuming that no peephole opts fold the load offsets.
    ; nextln: $(xoff=$V) = uextend.i64 $v0
    ; nextln: $(haddr=$V) = iadd_imm.i64 $v999, 64
    ; nextln: $(hbase=$V) = load.i64 $haddr
    ; nextln: $v1 = iadd $hbase, $xoff
    v2 = load.f32 v1+0x7fff_ffff
    ; nextln: $v2 = load.f32 $v1+0x7fff_ffff
    return v2
}