; Test basic code generation for WebAssembly i64 memory instructions.
test compile

; We only test on 64-bit since the heap_addr instructions and vmctx parameters
; explicitly mention the pointer width.
target aarch64
target x86_64 haswell

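; i64.load: full-width 64-bit load.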
function %i64_load(i32, i64 vmctx) -> i64 {
    gv0 = vmctx
    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

block0(v0: i32, v1: i64):
    v2 = heap_addr.i64 heap0, v0, 1
    v3 = load.i64 v2
    return v3
}

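; i64.store: full-width 64-bit store.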
function %i64_store(i64, i32, i64 vmctx) {
    gv0 = vmctx
    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

block0(v0: i64, v1: i32, v2: i64):
    v3 = heap_addr.i64 heap0, v1, 1
    store v0, v3
    return
}

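; i64.load8_s: 8-bit load, sign-extended to 64 bits.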
function %i64_load8_s(i32, i64 vmctx) -> i64 {
    gv0 = vmctx
    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

block0(v0: i32, v1: i64):
    v2 = heap_addr.i64 heap0, v0, 1
    v3 = sload8.i64 v2
    return v3
}

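; i64.load8_u: 8-bit load, zero-extended to 64 bits.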
function %i64_load8_u(i32, i64 vmctx) -> i64 {
    gv0 = vmctx
    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

block0(v0: i32, v1: i64):
    v2 = heap_addr.i64 heap0, v0, 1
    v3 = uload8.i64 v2
    return v3
}

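; i64.store8: store of the low 8 bits.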
function %i64_store8(i64, i32, i64 vmctx) {
    gv0 = vmctx
    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

block0(v0: i64, v1: i32, v2: i64):
    v3 = heap_addr.i64 heap0, v1, 1
    istore8 v0, v3
    return
}

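; i64.load16_s: 16-bit load, sign-extended to 64 bits.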
function %i64_load16_s(i32, i64 vmctx) -> i64 {
    gv0 = vmctx
    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

block0(v0: i32, v1: i64):
    v2 = heap_addr.i64 heap0, v0, 1
    v3 = sload16.i64 v2
    return v3
}

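; i64.load16_u: 16-bit load, zero-extended to 64 bits.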
function %i64_load16_u(i32, i64 vmctx) -> i64 {
    gv0 = vmctx
    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

block0(v0: i32, v1: i64):
    v2 = heap_addr.i64 heap0, v0, 1
    v3 = uload16.i64 v2
    return v3
}

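; i64.store16: store of the low 16 bits.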
function %i64_store16(i64, i32, i64 vmctx) {
    gv0 = vmctx
    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

block0(v0: i64, v1: i32, v2: i64):
    v3 = heap_addr.i64 heap0, v1, 1
    istore16 v0, v3
    return
}

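; i64.load32_s: 32-bit load, sign-extended to 64 bits.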
function %i64_load32_s(i32, i64 vmctx) -> i64 {
    gv0 = vmctx
    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

block0(v0: i32, v1: i64):
    v2 = heap_addr.i64 heap0, v0, 1
    v3 = sload32.i64 v2
    return v3
}

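; i64.load32_u: 32-bit load, zero-extended to 64 bits.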
function %i64_load32_u(i32, i64 vmctx) -> i64 {
    gv0 = vmctx
    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

block0(v0: i32, v1: i64):
    v2 = heap_addr.i64 heap0, v0, 1
    v3 = uload32.i64 v2
    return v3
}

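; i64.store32: store of the low 32 bits.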
function %i64_store32(i64, i32, i64 vmctx) {
    gv0 = vmctx
    heap0 = static gv0, min 0x0001_0000, bound 0x0001_0000_0000, offset_guard 0x8000_0000

block0(v0: i64, v1: i32, v2: i64):
    v3 = heap_addr.i64 heap0, v1, 1
    istore32 v0, v3
    return
}