Update the spec test suite submodule (#5970)

* Update the spec test suite submodule

Additionally delete the local copies of the relaxed-simd test suite since
they're now incorporated upstream.

Closes #5914

* Remove page guards in QEMU emulation

Otherwise `(memory 0 0)` was being compiled as a static memory with huge
guard regions, which is exactly what we're trying to avoid under QEMU.
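
For reference, the guard-related settings touched here boil down to the
following engine configuration. This is a hedged sketch rather than code from
this commit: the helper name is illustrative, but the `wasmtime::Config`
methods are the ones used in the test runner diff below.

    use wasmtime::{Config, Engine};

    // Build an engine whose linear memories avoid large virtual-memory
    // reservations, which are slow or unsupported under QEMU emulation.
    fn qemu_friendly_engine() -> anyhow::Result<Engine> {
        let mut cfg = Config::new();
        // Force all memories to be dynamic rather than statically reserved.
        cfg.static_memory_maximum_size(0);
        // Don't reserve extra space for growth, and disable guard pages
        // entirely so `(memory 0 0)` doesn't get huge guard regions.
        cfg.dynamic_memory_reserved_for_growth(0);
        cfg.static_memory_guard_size(0);
        cfg.dynamic_memory_guard_size(0);
        Ok(Engine::new(&cfg)?)
    }
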
Alex Crichton
2023-03-10 10:50:20 -06:00
committed by GitHub
parent e64fb6ab39
commit 7650d857fa
10 changed files with 15 additions and 772 deletions

View File

@@ -30,12 +30,6 @@ fn main() -> anyhow::Result<()> {
test_directory_module(out, "tests/misc_testsuite/threads", strategy)?;
test_directory_module(out, "tests/misc_testsuite/memory64", strategy)?;
test_directory_module(out, "tests/misc_testsuite/component-model", strategy)?;
// NB: these are copied from upstream and updated to wasmtime's
// current version of `wast`. This local copy should go away when
// all of Wasmtime's tooling is updated and the upstream
// `testsuite` module is additionally updated.
test_directory_module(out, "tests/misc_testsuite/relaxed-simd", strategy)?;
Ok(())
})?;
@@ -51,6 +45,11 @@ fn main() -> anyhow::Result<()> {
strategy,
)?;
test_directory_module(out, "tests/spec_testsuite/proposals/threads", strategy)?;
test_directory_module(
out,
"tests/spec_testsuite/proposals/relaxed-simd",
strategy,
)?;
} else {
println!(
"cargo:warning=The spec testsuite is disabled. To enable, run `git submodule \
@@ -180,6 +179,12 @@ fn write_testsuite_tests(
/// Ignore tests that aren't supported yet.
fn ignore(testsuite: &str, testname: &str, strategy: &str) -> bool {
assert_eq!(strategy, "Cranelift");
// This is an empty file right now which the `wast` crate doesn't parse
if testname.contains("memory_copy1") {
return true;
}
match env::var("CARGO_CFG_TARGET_ARCH").unwrap().as_str() {
"s390x" => {
// FIXME: These tests fail under qemu due to a qemu bug.

View File

@@ -85,6 +85,8 @@ fn run_wast(wast: &str, strategy: Strategy, pooling: bool) -> anyhow::Result<()>
cfg.static_memory_maximum_size(0);
}
cfg.dynamic_memory_reserved_for_growth(0);
cfg.static_memory_guard_size(0);
cfg.dynamic_memory_guard_size(0);
}
let _pooling_lock = if pooling {
@@ -101,7 +103,7 @@ fn run_wast(wast: &str, strategy: Strategy, pooling: bool) -> anyhow::Result<()>
// fails to grow, the values here will need to be adjusted.
let mut pool = PoolingAllocationConfig::default();
pool.instance_count(450)
.instance_memories(2)
.instance_memories(if multi_memory { 9 } else { 1 })
.instance_tables(4)
.instance_memory_pages(805);
cfg.allocation_strategy(InstanceAllocationStrategy::Pooling(pool));

View File

@@ -1,26 +0,0 @@
;; Tests for i16x8.relaxed_q15mulr_s.
(module
(func (export "i16x8.relaxed_q15mulr_s") (param v128 v128) (result v128) (i16x8.relaxed_q15mulr_s (local.get 0) (local.get 1)))
(func (export "i16x8.relaxed_q15mulr_s_cmp") (param v128 v128) (result v128)
(i16x8.eq
(i16x8.relaxed_q15mulr_s (local.get 0) (local.get 1))
(i16x8.relaxed_q15mulr_s (local.get 0) (local.get 1))))
)
;; INT16_MIN = -32768
(assert_return (invoke "i16x8.relaxed_q15mulr_s"
(v128.const i16x8 -32768 -32767 32767 0 0 0 0 0)
(v128.const i16x8 -32768 -32768 32767 0 0 0 0 0))
;; overflows, return either INT16_MIN or INT16_MAX
(either (v128.const i16x8 -32768 32767 32766 0 0 0 0 0)
(v128.const i16x8 32767 32767 32766 0 0 0 0 0)))
;; Check that multiple calls to the relaxed instruction with the same inputs return the same results.
(assert_return (invoke "i16x8.relaxed_q15mulr_s_cmp"
(v128.const i16x8 -32768 -32767 32767 0 0 0 0 0)
(v128.const i16x8 -32768 -32768 32767 0 0 0 0 0))
;; overflows, return either INT16_MIN or INT16_MAX
(v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))

View File

@@ -1,123 +0,0 @@
;; Tests for i32x4.relaxed_trunc_f32x4_s, i32x4.relaxed_trunc_f32x4_u, i32x4.relaxed_trunc_f64x2_s_zero, and i32x4.relaxed_trunc_f64x2_u_zero.
(module
(func (export "i32x4.relaxed_trunc_f32x4_s") (param v128) (result v128) (i32x4.relaxed_trunc_f32x4_s (local.get 0)))
(func (export "i32x4.relaxed_trunc_f32x4_u") (param v128) (result v128) (i32x4.relaxed_trunc_f32x4_u (local.get 0)))
(func (export "i32x4.relaxed_trunc_f64x2_s_zero") (param v128) (result v128) (i32x4.relaxed_trunc_f64x2_s_zero (local.get 0)))
(func (export "i32x4.relaxed_trunc_f64x2_u_zero") (param v128) (result v128) (i32x4.relaxed_trunc_f64x2_u_zero (local.get 0)))
(func (export "i32x4.relaxed_trunc_f32x4_s_cmp") (param v128) (result v128)
(i32x4.eq
(i32x4.relaxed_trunc_f32x4_s (local.get 0))
(i32x4.relaxed_trunc_f32x4_s (local.get 0))))
(func (export "i32x4.relaxed_trunc_f32x4_u_cmp") (param v128) (result v128)
(i32x4.eq
(i32x4.relaxed_trunc_f32x4_u (local.get 0))
(i32x4.relaxed_trunc_f32x4_u (local.get 0))))
(func (export "i32x4.relaxed_trunc_f64x2_s_zero_cmp") (param v128) (result v128)
(i32x4.eq
(i32x4.relaxed_trunc_f64x2_s_zero (local.get 0))
(i32x4.relaxed_trunc_f64x2_s_zero (local.get 0))))
(func (export "i32x4.relaxed_trunc_f64x2_u_zero_cmp") (param v128) (result v128)
(i32x4.eq
(i32x4.relaxed_trunc_f64x2_u_zero (local.get 0))
(i32x4.relaxed_trunc_f64x2_u_zero (local.get 0))))
)
;; Test some edge cases around min/max to ensure that the instruction either
;; saturates correctly or returns INT_MIN.
;;
;; Note, though, that INT_MAX itself is not tested. The value for INT_MAX is
;; 2147483647 but that is not representable in a `f32` since it requires 31 bits
;; when a f32 has only 24 bits available. This means that the closest integers
;; to INT_MAX which can be represented are 2147483520 and 2147483648, meaning
;; that the INT_MAX test case cannot be tested.
(assert_return (invoke "i32x4.relaxed_trunc_f32x4_s"
;; INT32_MIN <INT32_MIN >INT32_MAX
(v128.const f32x4 -2147483648.0 -2147483904.0 2.0 2147483904.0))
;; out of range -> saturate or INT32_MIN
(either (v128.const i32x4 -2147483648 -2147483648 2 2147483647)
(v128.const i32x4 -2147483648 -2147483648 2 -2147483648)))
(assert_return (invoke "i32x4.relaxed_trunc_f32x4_s"
(v128.const f32x4 nan -nan nan:0x444444 -nan:0x444444))
;; nans -> 0 or INT32_MIN
(either (v128.const i32x4 0 0 0 0)
(v128.const i32x4 0x80000000 0x80000000 0x80000000 0x80000000)))
(assert_return (invoke "i32x4.relaxed_trunc_f32x4_u"
;; UINT32_MIN UINT32_MIN-1 <UINT32_MAX UINT32_MAX+1
(v128.const f32x4 0 -1.0 4294967040.0 4294967296.0))
;; out of range -> saturate or UINT32_MAX
(either (v128.const i32x4 0 0 4294967040 0xffffffff)
(v128.const i32x4 0 0xffffffff 4294967040 0xffffffff)))
(assert_return (invoke "i32x4.relaxed_trunc_f32x4_u"
(v128.const f32x4 nan -nan nan:0x444444 -nan:0x444444))
;; nans -> 0 or UINT32_MAX
(either (v128.const i32x4 0 0 0 0)
(v128.const i32x4 0xffffffff 0xffffffff 0xffffffff 0xffffffff)))
(assert_return (invoke "i32x4.relaxed_trunc_f64x2_s_zero"
(v128.const f64x2 -2147483904.0 2147483904.0))
;; out of range -> saturate or INT32_MIN
(either (v128.const i32x4 -2147483648 2147483647 0 0)
(v128.const i32x4 -2147483648 -2147483648 0 0)))
(assert_return (invoke "i32x4.relaxed_trunc_f64x2_s_zero"
(v128.const f64x2 nan -nan))
(either (v128.const i32x4 0 0 0 0)
(v128.const i32x4 0x80000000 0x80000000 0 0)))
(assert_return (invoke "i32x4.relaxed_trunc_f64x2_u_zero"
(v128.const f64x2 -1.0 4294967296.0))
;; out of range -> saturate or UINT32_MAX
(either (v128.const i32x4 0 0xffffffff 0 0)
(v128.const i32x4 0xffffffff 0xffffffff 0 0)))
(assert_return (invoke "i32x4.relaxed_trunc_f64x2_u_zero"
(v128.const f64x2 nan -nan))
(either (v128.const i32x4 0 0 0 0)
(v128.const i32x4 0 0 0xffffffff 0xffffffff)))
;; Check that multiple calls to the relaxed instruction with the same inputs return the same results.
(assert_return (invoke "i32x4.relaxed_trunc_f32x4_s_cmp"
;; INT32_MIN <INT32_MIN INT32_MAX >INT32_MAX
(v128.const f32x4 -2147483648.0 -2147483904.0 2147483647.0 2147483904.0))
;; out of range -> saturate or INT32_MIN
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "i32x4.relaxed_trunc_f32x4_s_cmp"
(v128.const f32x4 nan -nan nan:0x444444 -nan:0x444444))
;; nans -> 0 or INT32_MIN
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "i32x4.relaxed_trunc_f32x4_u_cmp"
;; UINT32_MIN UINT32_MIN-1 <UINT32_MAX UINT32_MAX+1
(v128.const f32x4 0 -1.0 4294967040.0 4294967296.0))
;; out of range -> saturate or UINT32_MAX
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "i32x4.relaxed_trunc_f32x4_u_cmp"
(v128.const f32x4 nan -nan nan:0x444444 -nan:0x444444))
;; nans -> 0 or UINT32_MAX
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "i32x4.relaxed_trunc_f64x2_s_zero_cmp"
(v128.const f64x2 -2147483904.0 2147483904.0))
;; out of range -> saturate or INT32_MIN
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "i32x4.relaxed_trunc_f64x2_s_zero_cmp"
(v128.const f64x2 nan -nan))
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "i32x4.relaxed_trunc_f64x2_u_zero_cmp"
(v128.const f64x2 -1.0 4294967296.0))
;; out of range -> saturate or UINT32_MAX
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "i32x4.relaxed_trunc_f64x2_u_zero_cmp"
(v128.const f64x2 nan -nan))
(v128.const i32x4 -1 -1 -1 -1))

View File

@@ -1,44 +0,0 @@
;; Tests for relaxed i8x16 swizzle.
(module
(func (export "i8x16.relaxed_swizzle") (param v128 v128) (result v128) (i8x16.relaxed_swizzle (local.get 0) (local.get 1)))
(func (export "i8x16.relaxed_swizzle_cmp") (param v128 v128) (result v128)
(i8x16.eq
(i8x16.relaxed_swizzle (local.get 0) (local.get 1))
(i8x16.relaxed_swizzle (local.get 0) (local.get 1))))
)
(assert_return (invoke "i8x16.relaxed_swizzle"
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15))
(either (v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)))
;; out of range, returns 0 or the lane at index modulo 16 if < 128
(assert_return (invoke "i8x16.relaxed_swizzle"
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
(v128.const i8x16 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31))
(either (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)))
;; out of range, returns 0 if >= 128
(assert_return (invoke "i8x16.relaxed_swizzle"
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
(v128.const i8x16 128 129 130 131 132 133 134 135 248 249 250 251 252 253 254 255))
(either (v128.const i8x16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)))
;; Check that multiple calls to the relaxed instruction with the same inputs return the same results.
;; out of range, returns 0 or the lane at index modulo 16 if < 128
(assert_return (invoke "i8x16.relaxed_swizzle_cmp"
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
(v128.const i8x16 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31))
(v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1))
;; out of range, returns 0 if >= 128
(assert_return (invoke "i8x16.relaxed_swizzle_cmp"
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
(v128.const i8x16 128 129 130 131 132 133 134 135 248 249 250 251 252 253 254 255))
(v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1))

View File

@@ -1,106 +0,0 @@
;; Tests for relaxed dot products.
(module
(func (export "i16x8.relaxed_dot_i8x16_i7x16_s") (param v128 v128) (result v128) (i16x8.relaxed_dot_i8x16_i7x16_s (local.get 0) (local.get 1)))
(func (export "i32x4.relaxed_dot_i8x16_i7x16_add_s") (param v128 v128 v128) (result v128) (i32x4.relaxed_dot_i8x16_i7x16_add_s (local.get 0) (local.get 1) (local.get 2)))
(func (export "i16x8.relaxed_dot_i8x16_i7x16_s_cmp") (param v128 v128) (result v128)
(i16x8.eq
(i16x8.relaxed_dot_i8x16_i7x16_s (local.get 0) (local.get 1))
(i16x8.relaxed_dot_i8x16_i7x16_s (local.get 0) (local.get 1))))
(func (export "i32x4.relaxed_dot_i8x16_i7x16_add_s_cmp") (param v128 v128 v128) (result v128)
(i16x8.eq
(i32x4.relaxed_dot_i8x16_i7x16_add_s (local.get 0) (local.get 1) (local.get 2))
(i32x4.relaxed_dot_i8x16_i7x16_add_s (local.get 0) (local.get 1) (local.get 2))))
)
;; Simple values to ensure things are functional.
(assert_return (invoke "i16x8.relaxed_dot_i8x16_i7x16_s"
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15))
(v128.const i16x8 1 13 41 85 145 221 313 421))
;; Test max and min i8 values;
(assert_return (invoke "i16x8.relaxed_dot_i8x16_i7x16_s"
(v128.const i8x16 -128 -128 127 127 0 0 0 0 0 0 0 0 0 0 0 0)
(v128.const i8x16 127 127 127 127 0 0 0 0 0 0 0 0 0 0 0 0))
(v128.const i16x8 -32512 32258 0 0 0 0 0 0))
;; signed * unsigned : -128 * 129 * 2 = -33,024 saturated to -32,768
;; signed * signed : -128 * -127 * 2 = 32,512
;; unsigned * unsigned : 128 * 129 * 2 = 33,024
(assert_return (invoke "i16x8.relaxed_dot_i8x16_i7x16_s"
(v128.const i8x16 -128 -128 0 0 0 0 0 0 0 0 0 0 0 0 0 0)
(v128.const i8x16 -127 -127 0 0 0 0 0 0 0 0 0 0 0 0 0 0))
(either
(v128.const i16x8 -32768 0 0 0 0 0 0 0)
(v128.const i16x8 32512 0 0 0 0 0 0 0)
(v128.const i16x8 33024 0 0 0 0 0 0 0)))
;; Simple values to ensure things are functional.
(assert_return (invoke "i32x4.relaxed_dot_i8x16_i7x16_add_s"
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
(v128.const i8x16 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15)
(v128.const i32x4 0 1 2 3))
;; intermediate result is [14, 126, 366, 734]
(v128.const i32x4 14 127 368 737))
;; Test max and min i8 values;
(assert_return (invoke "i32x4.relaxed_dot_i8x16_i7x16_add_s"
(v128.const i8x16 -128 -128 -128 -128 127 127 127 127 0 0 0 0 0 0 0 0)
(v128.const i8x16 127 127 127 127 127 127 127 127 0 0 0 0 0 0 0 0)
(v128.const i32x4 1 2 3 4))
;; intermediate result is [-65024, 64516, 0, 0]
(v128.const i32x4 -65023 64518 3 4))
;; signed * unsigned : -128 * 129 * 4 = -66,048 (+ 1) VPDPBUSD AVX2-VNNI or AVX512-VNNI
;; signed * unsigned with intermediate saturation :
;; (-128 * 129) + (-128 * 129) = -33024 saturated to -32768 (PMADDUBSW)
;; -32768 + -32768 = -65536 (+ 1)
;; signed * signed : -128 * -127 * 4 = 65,024 (+ 1)
;; unsigned * unsigned : 128 * 129 * 4 = 66,048 (+ 1)
(assert_return (invoke "i32x4.relaxed_dot_i8x16_i7x16_add_s"
(v128.const i8x16 -128 -128 -128 -128 0 0 0 0 0 0 0 0 0 0 0 0)
(v128.const i8x16 -127 -127 -127 -127 0 0 0 0 0 0 0 0 0 0 0 0)
(v128.const i32x4 1 2 3 4))
(either
(v128.const i32x4 -66047 2 3 4)
(v128.const i32x4 -65535 2 3 4)
(v128.const i32x4 65025 2 3 4)
(v128.const i32x4 66049 2 3 4)))
;; Check that multiple calls to the relaxed instruction with the same inputs return the same results.
;; Test max and min i8 values;
(assert_return (invoke "i16x8.relaxed_dot_i8x16_i7x16_s_cmp"
(v128.const i8x16 -128 -128 127 127 0 0 0 0 0 0 0 0 0 0 0 0)
(v128.const i8x16 127 127 127 127 0 0 0 0 0 0 0 0 0 0 0 0))
(v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
;; Test max and min i8 values;
(assert_return (invoke "i32x4.relaxed_dot_i8x16_i7x16_add_s_cmp"
(v128.const i8x16 -128 -128 -128 -128 127 127 127 127 0 0 0 0 0 0 0 0)
(v128.const i8x16 127 127 127 127 127 127 127 127 0 0 0 0 0 0 0 0)
(v128.const i32x4 1 2 3 4))
;; intermediate result is [-65024, 64516, 0, 0]
(v128.const i32x4 -1 -1 -1 -1))
;; signed * unsigned : -128 * 129 * 2 = -33,024 saturated to -32,768
;; signed * signed : -128 * -127 * 2 = 32,512
;; unsigned * unsigned : 128 * 129 * 2 = 33,024
(assert_return (invoke "i16x8.relaxed_dot_i8x16_i7x16_s_cmp"
(v128.const i8x16 -128 -128 0 0 0 0 0 0 0 0 0 0 0 0 0 0)
(v128.const i8x16 -127 -127 0 0 0 0 0 0 0 0 0 0 0 0 0 0))
(v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
;; signed * unsigned : -128 * 129 * 4 = -66,048 (+ 1) VPDPBUSD AVX2-VNNI or AVX512-VNNI
;; signed * unsigned with intermediate saturation :
;; (-128 * 129) + (-128 * 129) = -33024 saturated to -32768 (PMADDUBSW)
;; -32768 + -32768 = -65536 (+ 1)
;; signed * signed : -128 * -127 * 4 = 65,024 (+ 1)
;; unsigned * unsigned : 128 * 129 * 4 = 66,048 (+ 1)
(assert_return (invoke "i32x4.relaxed_dot_i8x16_i7x16_add_s_cmp"
(v128.const i8x16 -128 -128 -128 -128 0 0 0 0 0 0 0 0 0 0 0 0)
(v128.const i8x16 -127 -127 -127 -127 0 0 0 0 0 0 0 0 0 0 0 0)
(v128.const i32x4 1 2 3 4))
(v128.const i32x4 -1 -1 -1 -1))

View File

@@ -1,92 +0,0 @@
;; Tests for i8x16.relaxed_laneselect, i16x8.relaxed_laneselect, i32x4.relaxed_laneselect, and i64x2.relaxed_laneselect.
(module
(func (export "i8x16.relaxed_laneselect") (param v128 v128 v128) (result v128) (i8x16.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2)))
(func (export "i16x8.relaxed_laneselect") (param v128 v128 v128) (result v128) (i16x8.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2)))
(func (export "i32x4.relaxed_laneselect") (param v128 v128 v128) (result v128) (i32x4.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2)))
(func (export "i64x2.relaxed_laneselect") (param v128 v128 v128) (result v128) (i64x2.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2)))
(func (export "i8x16.relaxed_laneselect_cmp") (param v128 v128 v128) (result v128)
(i8x16.eq
(i8x16.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2))
(i8x16.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2))))
(func (export "i16x8.relaxed_laneselect_cmp") (param v128 v128 v128) (result v128)
(i16x8.eq
(i16x8.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2))
(i16x8.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2))))
(func (export "i32x4.relaxed_laneselect_cmp") (param v128 v128 v128) (result v128)
(i32x4.eq
(i32x4.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2))
(i32x4.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2))))
(func (export "i64x2.relaxed_laneselect_cmp") (param v128 v128 v128) (result v128)
(i64x2.eq
(i64x2.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2))
(i64x2.relaxed_laneselect (local.get 0) (local.get 1) (local.get 2))))
)
(assert_return (invoke "i8x16.relaxed_laneselect"
(v128.const i8x16 0 1 0x12 0x12 4 5 6 7 8 9 10 11 12 13 14 15)
(v128.const i8x16 16 17 0x34 0x34 20 21 22 23 24 25 26 27 28 29 30 31)
(v128.const i8x16 0xff 0 0xf0 0x0f 0 0 0 0 0 0 0 0 0 0 0 0))
(either (v128.const i8x16 0 17 0x14 0x32 20 21 22 23 24 25 26 27 28 29 30 31)
(v128.const i8x16 0 17 0x12 0x34 20 21 22 23 24 25 26 27 28 29 30 31)))
(assert_return (invoke "i16x8.relaxed_laneselect"
(v128.const i16x8 0 1 0x1234 0x1234 4 5 6 7)
(v128.const i16x8 8 9 0x5678 0x5678 12 13 14 15)
(v128.const i16x8 0xffff 0 0xff00 0x00ff 0 0 0 0))
(either (v128.const i16x8 0 9 0x1278 0x5634 12 13 14 15)
(v128.const i16x8 0 9 0x1234 0x5678 12 13 14 15)))
(assert_return (invoke "i32x4.relaxed_laneselect"
(v128.const i32x4 0 1 0x12341234 0x12341234)
(v128.const i32x4 4 5 0x56785678 0x56785678)
(v128.const i32x4 0xffffffff 0 0xffff0000 0x0000ffff))
(either (v128.const i32x4 0 5 0x12345678 0x56781234)
(v128.const i32x4 0 5 0x12341234 0x56785678)))
(assert_return (invoke "i64x2.relaxed_laneselect"
(v128.const i64x2 0 1)
(v128.const i64x2 2 3)
(v128.const i64x2 0xffffffffffffffff 0))
(either (v128.const i64x2 0 3)
(v128.const i64x2 0 3)))
(assert_return (invoke "i64x2.relaxed_laneselect"
(v128.const i64x2 0x1234123412341234 0x1234123412341234)
(v128.const i64x2 0x5678567856785678 0x5678567856785678)
(v128.const i64x2 0xffffffff00000000 0x00000000ffffffff))
(either (v128.const i64x2 0x1234123456785678 0x5678567812341234)
(v128.const i64x2 0x1234123412341234 0x5678567856785678)))
;; Check that multiple calls to the relaxed instruction with the same inputs return the same results.
(assert_return (invoke "i8x16.relaxed_laneselect_cmp"
(v128.const i8x16 0 1 0x12 0x12 4 5 6 7 8 9 10 11 12 13 14 15)
(v128.const i8x16 16 17 0x34 0x34 20 21 22 23 24 25 26 27 28 29 30 31)
(v128.const i8x16 0xff 0 0xf0 0x0f 0 0 0 0 0 0 0 0 0 0 0 0))
(v128.const i8x16 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1))
(assert_return (invoke "i16x8.relaxed_laneselect_cmp"
(v128.const i16x8 0 1 0x1234 0x1234 4 5 6 7)
(v128.const i16x8 8 9 0x5678 0x5678 12 13 14 15)
(v128.const i16x8 0xffff 0 0xff00 0x00ff 0 0 0 0))
(v128.const i16x8 -1 -1 -1 -1 -1 -1 -1 -1))
(assert_return (invoke "i32x4.relaxed_laneselect_cmp"
(v128.const i32x4 0 1 0x12341234 0x12341234)
(v128.const i32x4 4 5 0x56785678 0x56785678)
(v128.const i32x4 0xffffffff 0 0xffff0000 0x0000ffff))
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "i64x2.relaxed_laneselect_cmp"
(v128.const i64x2 0 1)
(v128.const i64x2 2 3)
(v128.const i64x2 0xffffffffffffffff 0))
(v128.const i64x2 -1 -1))
(assert_return (invoke "i64x2.relaxed_laneselect_cmp"
(v128.const i64x2 0x1234123412341234 0x1234123412341234)
(v128.const i64x2 0x5678567856785678 0x5678567856785678)
(v128.const i64x2 0xffffffff00000000 0x00000000ffffffff))
(v128.const i64x2 -1 -1))

View File

@@ -1,190 +0,0 @@
;; Tests for f32x4.relaxed_madd, f32x4.relaxed_nmadd, f64x2.relaxed_madd, and f64x2.relaxed_nmadd.
(module
(func (export "f32x4.relaxed_madd") (param v128 v128 v128) (result v128) (f32x4.relaxed_madd (local.get 0) (local.get 1) (local.get 2)))
(func (export "f32x4.relaxed_nmadd") (param v128 v128 v128) (result v128) (f32x4.relaxed_nmadd (local.get 0) (local.get 1) (local.get 2)))
(func (export "f64x2.relaxed_nmadd") (param v128 v128 v128) (result v128) (f64x2.relaxed_nmadd (local.get 0) (local.get 1) (local.get 2)))
(func (export "f64x2.relaxed_madd") (param v128 v128 v128) (result v128) (f64x2.relaxed_madd (local.get 0) (local.get 1) (local.get 2)))
(func (export "f32x4.relaxed_madd_cmp") (param v128 v128 v128) (result v128)
(f32x4.eq
(f32x4.relaxed_madd (local.get 0) (local.get 1) (local.get 2))
(f32x4.relaxed_madd (local.get 0) (local.get 1) (local.get 2))))
(func (export "f32x4.relaxed_nmadd_cmp") (param v128 v128 v128) (result v128)
(f32x4.eq
(f32x4.relaxed_nmadd (local.get 0) (local.get 1) (local.get 2))
(f32x4.relaxed_nmadd (local.get 0) (local.get 1) (local.get 2))))
(func (export "f64x2.relaxed_nmadd_cmp") (param v128 v128 v128) (result v128)
(f64x2.eq
(f64x2.relaxed_nmadd (local.get 0) (local.get 1) (local.get 2))
(f64x2.relaxed_nmadd (local.get 0) (local.get 1) (local.get 2))))
(func (export "f64x2.relaxed_madd_cmp") (param v128 v128 v128) (result v128)
(f64x2.eq
(f64x2.relaxed_madd (local.get 0) (local.get 1) (local.get 2))
(f64x2.relaxed_madd (local.get 0) (local.get 1) (local.get 2))))
)
;; FLT_MAX == 0x1.fffffep+127
;; FLT_MAX * 2 - FLT_MAX ==
;; FLT_MAX (if fma)
;; inf (if no fma)
;; from https://www.vinc17.net/software/fma-tests.c
(assert_return (invoke "f32x4.relaxed_madd"
(v128.const f32x4 0x1.fffffep+127 0x1.fffffep+127 0x1.fffffep+127 0x1.fffffep+127 )
(v128.const f32x4 2.0 2.0 2.0 2.0)
(v128.const f32x4 -0x1.fffffep+127 -0x1.fffffep+127 -0x1.fffffep+127 -0x1.fffffep+127))
(either (v128.const f32x4 0x1.fffffep+127 0x1.fffffep+127 0x1.fffffep+127 0x1.fffffep+127)
(v128.const f32x4 inf inf inf inf)))
;; Special values for float:
;; x = 0x1.000004p+0 (1 + 2^-22)
;; y = 0x1.0002p+0 (1 + 2^-15)
;; z = -(1.0 + 0x0.0002p+0 + 0x0.000004p+0)
;; = -0x1.000204p+0
;; x.y = 1.0 + 0x0.0002p+0 + 0x0.000004p+0 + 0x1p-37 (round bit)
;; x.y+z = 0 (2 roundings)
;; fma(x, y, z) = (0x1p-37) 2^-37
;; from https://accurate-algorithms.readthedocs.io/en/latest/ch09appendix.html#test-system-information
(assert_return (invoke "f32x4.relaxed_madd"
(v128.const f32x4 0x1.000004p+0 0x1.000004p+0 0x1.000004p+0 0x1.000004p+0)
(v128.const f32x4 0x1.0002p+0 0x1.0002p+0 0x1.0002p+0 0x1.0002p+0)
(v128.const f32x4 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0))
(either (v128.const f32x4 0x1p-37 0x1p-37 0x1p-37 0x1p-37)
(v128.const f32x4 0 0 0 0)))
;; fnma tests with negated x, same answers are expected.
(assert_return (invoke "f32x4.relaxed_nmadd"
(v128.const f32x4 -0x1.000004p+0 -0x1.000004p+0 -0x1.000004p+0 -0x1.000004p+0)
(v128.const f32x4 0x1.0002p+0 0x1.0002p+0 0x1.0002p+0 0x1.0002p+0)
(v128.const f32x4 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0))
(either (v128.const f32x4 0x1p-37 0x1p-37 0x1p-37 0x1p-37)
(v128.const f32x4 0 0 0 0)))
;; fnma tests with negated y, same answers are expected.
(assert_return (invoke "f32x4.relaxed_nmadd"
(v128.const f32x4 0x1.000004p+0 0x1.000004p+0 0x1.000004p+0 0x1.000004p+0)
(v128.const f32x4 -0x1.0002p+0 -0x1.0002p+0 -0x1.0002p+0 -0x1.0002p+0)
(v128.const f32x4 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0))
(either (v128.const f32x4 0x1p-37 0x1p-37 0x1p-37 0x1p-37)
(v128.const f32x4 0 0 0 0)))
;; DBL_MAX = 0x1.fffffffffffffp+1023
;; DBL_MAX * 2 - DBL_MAX ==
;; DBL_MAX (if fma)
;; inf (if no fma)
;; from https://www.vinc17.net/software/fma-tests.c
(assert_return (invoke "f64x2.relaxed_madd"
(v128.const f64x2 0x1.fffffffffffffp+1023 0x1.fffffffffffffp+1023)
(v128.const f64x2 2.0 2.0)
(v128.const f64x2 -0x1.fffffffffffffp+1023 -0x1.fffffffffffffp+1023))
(either (v128.const f64x2 0x1.fffffffffffffp+1023 0x1.fffffffffffffp+1023)
(v128.const f64x2 inf inf)))
;; Special values for double:
;; x = 0x1.00000004p+0 (1 + 2^-30)
;; y = 0x1.000002p+0 (1 + 2^-23)
;; z = -(1.0 + 0x0.000002p+0 + 0x0.00000004p+0)
;; = -0x1.00000204p+0
;; x.y = 1.0 + 0x0.000002p+0 + 0x0.00000004p+0 + 0x1p-53 (round bit)
;; x.y+z = 0 (2 roundings)
;; fma(x, y, z) = 0x1p-53
;; from https://accurate-algorithms.readthedocs.io/en/latest/ch09appendix.html#test-system-information
(assert_return (invoke "f64x2.relaxed_madd"
(v128.const f64x2 0x1.00000004p+0 0x1.00000004p+0)
(v128.const f64x2 0x1.000002p+0 0x1.000002p+0)
(v128.const f64x2 -0x1.00000204p+0 -0x1.00000204p+0))
(either (v128.const f64x2 0x1p-53 0x1p-53)
(v128.const f64x2 0 0)))
;; fnma tests with negated x, same answers are expected.
(assert_return (invoke "f64x2.relaxed_nmadd"
(v128.const f64x2 -0x1.00000004p+0 -0x1.00000004p+0)
(v128.const f64x2 0x1.000002p+0 0x1.000002p+0)
(v128.const f64x2 -0x1.00000204p+0 -0x1.00000204p+0))
(either (v128.const f64x2 0x1p-53 0x1p-53)
(v128.const f64x2 0 0)))
;; fnma tests with negated y, same answers are expected.
(assert_return (invoke "f64x2.relaxed_nmadd"
(v128.const f64x2 0x1.00000004p+0 0x1.00000004p+0)
(v128.const f64x2 -0x1.000002p+0 -0x1.000002p+0)
(v128.const f64x2 -0x1.00000204p+0 -0x1.00000204p+0))
(either (v128.const f64x2 0x1p-53 0x1p-53)
(v128.const f64x2 0 0)))
;; Check that multiple calls to the relaxed instruction with the same inputs return the same results.
;; FLT_MAX == 0x1.fffffep+127
;; FLT_MAX * 2 - FLT_MAX ==
;; FLT_MAX (if fma)
;; inf (if no fma)
;; from https://www.vinc17.net/software/fma-tests.c
(assert_return (invoke "f32x4.relaxed_madd_cmp"
(v128.const f32x4 0x1.fffffep+127 0x1.fffffep+127 0x1.fffffep+127 0x1.fffffep+127 )
(v128.const f32x4 2.0 2.0 2.0 2.0)
(v128.const f32x4 -0x1.fffffep+127 -0x1.fffffep+127 -0x1.fffffep+127 -0x1.fffffep+127))
(v128.const i32x4 -1 -1 -1 -1))
;; Special values for float:
;; x = 0x1.000004p+0 (1 + 2^-22)
;; y = 0x1.0002p+0 (1 + 2^-15)
;; z = -(1.0 + 0x0.0002p+0 + 0x0.000004p+0)
;; = -0x1.000204p+0
;; x.y = 1.0 + 0x0.0002p+0 + 0x0.000004p+0 + 0x1p-37 (round bit)
;; x.y+z = 0 (2 roundings)
;; fma(x, y, z) = (0x1p-37) 2^-37
;; from https://accurate-algorithms.readthedocs.io/en/latest/ch09appendix.html#test-system-information
(assert_return (invoke "f32x4.relaxed_madd_cmp"
(v128.const f32x4 0x1.000004p+0 0x1.000004p+0 0x1.000004p+0 0x1.000004p+0)
(v128.const f32x4 0x1.0002p+0 0x1.0002p+0 0x1.0002p+0 0x1.0002p+0)
(v128.const f32x4 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0))
(v128.const i32x4 -1 -1 -1 -1))
;; fnma tests with negated x, same answers are expected.
(assert_return (invoke "f32x4.relaxed_nmadd_cmp"
(v128.const f32x4 -0x1.000004p+0 -0x1.000004p+0 -0x1.000004p+0 -0x1.000004p+0)
(v128.const f32x4 0x1.0002p+0 0x1.0002p+0 0x1.0002p+0 0x1.0002p+0)
(v128.const f32x4 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0))
(v128.const i32x4 -1 -1 -1 -1))
;; fnma tests with negated y, same answers are expected.
(assert_return (invoke "f32x4.relaxed_nmadd_cmp"
(v128.const f32x4 0x1.000004p+0 0x1.000004p+0 0x1.000004p+0 0x1.000004p+0)
(v128.const f32x4 -0x1.0002p+0 -0x1.0002p+0 -0x1.0002p+0 -0x1.0002p+0)
(v128.const f32x4 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0 -0x1.000204p+0))
(v128.const i32x4 -1 -1 -1 -1))
;; DBL_MAX = 0x1.fffffffffffffp+1023
;; DBL_MAX * 2 - DBL_MAX ==
;; DBL_MAX (if fma)
;; inf (if no fma)
;; from https://www.vinc17.net/software/fma-tests.c
(assert_return (invoke "f64x2.relaxed_madd_cmp"
(v128.const f64x2 0x1.fffffffffffffp+1023 0x1.fffffffffffffp+1023)
(v128.const f64x2 2.0 2.0)
(v128.const f64x2 -0x1.fffffffffffffp+1023 -0x1.fffffffffffffp+1023))
(v128.const i64x2 -1 -1))
;; Special values for double:
;; x = 0x1.00000004p+0 (1 + 2^-30)
;; y = 0x1.000002p+0 (1 + 2^-23)
;; z = -(1.0 + 0x0.000002p+0 + 0x0.00000004p+0)
;; = -0x1.00000204p+0
;; x.y = 1.0 + 0x0.000002p+0 + 0x0.00000004p+0 + 0x1p-53 (round bit)
;; x.y+z = 0 (2 roundings)
;; fma(x, y, z) = 0x1p-53
;; from https://accurate-algorithms.readthedocs.io/en/latest/ch09appendix.html#test-system-information
(assert_return (invoke "f64x2.relaxed_madd_cmp"
(v128.const f64x2 0x1.00000004p+0 0x1.00000004p+0)
(v128.const f64x2 0x1.000002p+0 0x1.000002p+0)
(v128.const f64x2 -0x1.00000204p+0 -0x1.00000204p+0))
(v128.const i64x2 -1 -1))
;; fnma tests with negated x, same answers are expected.
(assert_return (invoke "f64x2.relaxed_nmadd_cmp"
(v128.const f64x2 -0x1.00000004p+0 -0x1.00000004p+0)
(v128.const f64x2 0x1.000002p+0 0x1.000002p+0)
(v128.const f64x2 -0x1.00000204p+0 -0x1.00000204p+0))
(v128.const i64x2 -1 -1))
;; fnma tests with negated y, same answers are expected.
(assert_return (invoke "f64x2.relaxed_nmadd_cmp"
(v128.const f64x2 0x1.00000004p+0 0x1.00000004p+0)
(v128.const f64x2 -0x1.000002p+0 -0x1.000002p+0)
(v128.const f64x2 -0x1.00000204p+0 -0x1.00000204p+0))
(v128.const i64x2 -1 -1))

View File

@@ -1,183 +0,0 @@
;; Tests for f32x4.relaxed_min, f32x4.relaxed_max, f64x2.relaxed_min, and f64x2.relaxed_max.
(module
(func (export "f32x4.relaxed_min") (param v128 v128) (result v128) (f32x4.relaxed_min (local.get 0) (local.get 1)))
(func (export "f32x4.relaxed_max") (param v128 v128) (result v128) (f32x4.relaxed_max (local.get 0) (local.get 1)))
(func (export "f64x2.relaxed_min") (param v128 v128) (result v128) (f64x2.relaxed_min (local.get 0) (local.get 1)))
(func (export "f64x2.relaxed_max") (param v128 v128) (result v128) (f64x2.relaxed_max (local.get 0) (local.get 1)))
(func (export "f32x4.relaxed_min_cmp") (param v128 v128) (result v128)
(i32x4.eq
(f32x4.relaxed_min (local.get 0) (local.get 1))
(f32x4.relaxed_min (local.get 0) (local.get 1))))
(func (export "f32x4.relaxed_max_cmp") (param v128 v128) (result v128)
(i32x4.eq
(f32x4.relaxed_max (local.get 0) (local.get 1))
(f32x4.relaxed_max (local.get 0) (local.get 1))))
(func (export "f64x2.relaxed_min_cmp") (param v128 v128) (result v128)
(i64x2.eq
(f64x2.relaxed_min (local.get 0) (local.get 1))
(f64x2.relaxed_min (local.get 0) (local.get 1))))
(func (export "f64x2.relaxed_max_cmp") (param v128 v128) (result v128)
(i64x2.eq
(f64x2.relaxed_max (local.get 0) (local.get 1))
(f64x2.relaxed_max (local.get 0) (local.get 1))))
)
(assert_return (invoke "f32x4.relaxed_min"
(v128.const f32x4 -nan nan 0 0)
(v128.const f32x4 0 0 -nan nan))
(either (v128.const f32x4 nan:canonical nan:canonical nan:canonical nan:canonical)
(v128.const f32x4 nan:canonical nan:canonical 0 0)
(v128.const f32x4 0 0 nan:canonical nan:canonical)
(v128.const f32x4 0 0 0 0)))
(assert_return (invoke "f32x4.relaxed_min"
(v128.const f32x4 +0.0 -0.0 +0.0 -0.0)
(v128.const f32x4 -0.0 +0.0 +0.0 -0.0))
(either (v128.const f32x4 -0.0 -0.0 +0.0 -0.0)
(v128.const f32x4 +0.0 -0.0 +0.0 -0.0)
(v128.const f32x4 -0.0 +0.0 +0.0 -0.0)
(v128.const f32x4 -0.0 -0.0 +0.0 -0.0)))
(assert_return (invoke "f32x4.relaxed_max"
(v128.const f32x4 -nan nan 0 0)
(v128.const f32x4 0 0 -nan nan))
(either (v128.const f32x4 nan:canonical nan:canonical nan:canonical nan:canonical)
(v128.const f32x4 nan:canonical nan:canonical 0 0)
(v128.const f32x4 0 0 nan:canonical nan:canonical)
(v128.const f32x4 0 0 0 0)))
(assert_return (invoke "f32x4.relaxed_max"
(v128.const f32x4 +0.0 -0.0 +0.0 -0.0)
(v128.const f32x4 -0.0 +0.0 +0.0 -0.0))
(either (v128.const f32x4 +0.0 +0.0 +0.0 -0.0)
(v128.const f32x4 +0.0 -0.0 +0.0 -0.0)
(v128.const f32x4 -0.0 +0.0 +0.0 -0.0)
(v128.const f32x4 -0.0 -0.0 +0.0 -0.0)))
(assert_return (invoke "f64x2.relaxed_min"
(v128.const f64x2 -nan nan)
(v128.const f64x2 0 0))
(either (v128.const f64x2 nan:canonical nan:canonical)
(v128.const f64x2 nan:canonical nan:canonical)
(v128.const f64x2 0 0)
(v128.const f64x2 0 0)))
(assert_return (invoke "f64x2.relaxed_min"
(v128.const f64x2 0 0)
(v128.const f64x2 -nan nan))
(either (v128.const f64x2 nan:canonical nan:canonical)
(v128.const f64x2 0 0)
(v128.const f64x2 nan:canonical nan:canonical)
(v128.const f64x2 0 0)))
(assert_return (invoke "f64x2.relaxed_min"
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 -0.0 +0.0))
(either (v128.const f64x2 -0.0 -0.0)
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 -0.0 +0.0)
(v128.const f64x2 -0.0 -0.0)))
(assert_return (invoke "f64x2.relaxed_min"
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 +0.0 -0.0))
(either (v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 +0.0 -0.0)))
(assert_return (invoke "f64x2.relaxed_max"
(v128.const f64x2 -nan nan)
(v128.const f64x2 0 0))
(either (v128.const f64x2 nan:canonical nan:canonical)
(v128.const f64x2 nan:canonical nan:canonical)
(v128.const f64x2 0 0)
(v128.const f64x2 0 0)))
(assert_return (invoke "f64x2.relaxed_max"
(v128.const f64x2 0 0)
(v128.const f64x2 -nan nan))
(either (v128.const f64x2 nan:canonical nan:canonical)
(v128.const f64x2 0 0)
(v128.const f64x2 nan:canonical nan:canonical)
(v128.const f64x2 0 0)))
(assert_return (invoke "f64x2.relaxed_max"
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 -0.0 +0.0))
(either (v128.const f64x2 +0.0 +0.0)
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 -0.0 +0.0)
(v128.const f64x2 -0.0 -0.0)))
(assert_return (invoke "f64x2.relaxed_max"
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 +0.0 -0.0))
(either (v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 +0.0 -0.0)))
;; Check that multiple calls to the relaxed instruction with the same inputs return the same results.
(assert_return (invoke "f32x4.relaxed_min_cmp"
(v128.const f32x4 -nan nan 0 0)
(v128.const f32x4 0 0 -nan nan))
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "f32x4.relaxed_min_cmp"
(v128.const f32x4 +0.0 -0.0 +0.0 -0.0)
(v128.const f32x4 -0.0 +0.0 +0.0 -0.0))
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "f32x4.relaxed_max_cmp"
(v128.const f32x4 -nan nan 0 0)
(v128.const f32x4 0 0 -nan nan))
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "f32x4.relaxed_max_cmp"
(v128.const f32x4 +0.0 -0.0 +0.0 -0.0)
(v128.const f32x4 -0.0 +0.0 +0.0 -0.0))
(v128.const i32x4 -1 -1 -1 -1))
(assert_return (invoke "f64x2.relaxed_min_cmp"
(v128.const f64x2 -nan nan)
(v128.const f64x2 0 0))
(v128.const i64x2 -1 -1))
(assert_return (invoke "f64x2.relaxed_min_cmp"
(v128.const f64x2 0 0)
(v128.const f64x2 -nan nan))
(v128.const i64x2 -1 -1))
(assert_return (invoke "f64x2.relaxed_min_cmp"
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 -0.0 +0.0))
(v128.const i64x2 -1 -1))
(assert_return (invoke "f64x2.relaxed_min_cmp"
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 +0.0 -0.0))
(v128.const i64x2 -1 -1))
(assert_return (invoke "f64x2.relaxed_max_cmp"
(v128.const f64x2 -nan nan)
(v128.const f64x2 0 0))
(v128.const i64x2 -1 -1))
(assert_return (invoke "f64x2.relaxed_max_cmp"
(v128.const f64x2 0 0)
(v128.const f64x2 -nan nan))
(v128.const i64x2 -1 -1))
(assert_return (invoke "f64x2.relaxed_max_cmp"
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 -0.0 +0.0))
(v128.const i64x2 -1 -1))
(assert_return (invoke "f64x2.relaxed_max_cmp"
(v128.const f64x2 +0.0 -0.0)
(v128.const f64x2 +0.0 -0.0))
(v128.const i64x2 -1 -1))