Convert scalar_to_vector to ISLE (AArch64) (#4401)
* Convert `scalar_to_vector` to ISLE (AArch64)

  Converted the existing implementation of `scalar_to_vector` for AArch64 to ISLE.

  Copyright (c) 2022 Arm Limited

* Add support for floats and fix FpuExtend

  - Added rules to cover `f32 -> f32x4` and `f64 -> f64x2` for `scalar_to_vector`.
  - Added tests for `scalar_to_vector` on floats.
  - Corrected an invalid instruction emitted by `FpuExtend` on 64-bit values.

  Copyright (c) 2022 Arm Limited
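For context, the ISLE conversion expresses this lowering as pattern-matching rules rather than handwritten Rust. A minimal sketch of the integer and float cases is shown below; the constructor names `mov_to_fpu` and `fpu_extend` are illustrative assumptions, not a verbatim copy of the backend's helpers:

;; Sketch only: helper names are assumed, not copied from the sources.
;; Integer scalars are moved into the low lane of a vector register,
;; which zeroes the remaining lanes.
(rule (lower (has_type $I32X4 (scalar_to_vector x)))
      (mov_to_fpu x (ScalarSize.Size32)))
(rule (lower (has_type $I64X2 (scalar_to_vector x)))
      (mov_to_fpu x (ScalarSize.Size64)))

;; Float scalars already live in a vector register, so a size-preserving
;; FpuExtend (an `fmov` into the destination register) is enough.
(rule (lower (has_type $F32X4 (scalar_to_vector x)))
      (fpu_extend x (ScalarSize.Size32)))
(rule (lower (has_type $F64X2 (scalar_to_vector x)))
      (fpu_extend x (ScalarSize.Size64)))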
@@ -10,9 +10,9 @@ block0:
 }
 
 ; block0:
-; movz x2, #1
-; movk x2, #1, LSL #48
-; fmov d0, x2
+; movz x1, #1
+; movk x1, #1, LSL #48
+; fmov d0, x1
 ; ret
 
 function %f2() -> i32x4 {
@@ -23,7 +23,31 @@ block0:
 }
 
 ; block0:
-; movz x2, #42679
-; fmov s0, w2
+; movz x1, #42679
+; fmov s0, w1
 ; ret
 
+function %f3() -> f32x4 {
+block0:
+    v0 = f32const 0x1.0
+    v1 = scalar_to_vector.f32x4 v0
+    return v1
+}
+
+; block0:
+; fmov s1, #1
+; fmov s0, s1
+; ret
+
+function %f4() -> f64x2 {
+block0:
+    v0 = f64const 0x1.0
+    v1 = scalar_to_vector.f64x2 v0
+    return v1
+}
+
+; block0:
+; fmov d1, #1
+; fmov d0, d1
+; ret
@@ -0,0 +1,19 @@
+test run
+target aarch64
+; i8 and i16 are invalid source sizes for x86_64
+
+function %scalartovector_i8(i8) -> i8x16 {
+block0(v0: i8):
+    v1 = scalar_to_vector.i8x16 v0
+    return v1
+}
+; run: %scalartovector_i8(1) == [1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
+; run: %scalartovector_i8(255) == [255 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
+
+function %scalartovector_i16(i16) -> i16x8 {
+block0(v0: i16):
+    v1 = scalar_to_vector.i16x8 v0
+    return v1
+}
+; run: %scalartovector_i16(1) == [1 0 0 0 0 0 0 0]
+; run: %scalartovector_i16(65535) == [65535 0 0 0 0 0 0 0]
@@ -0,0 +1,42 @@
+test run
+target aarch64
+set enable_simd
+target x86_64 has_sse3 has_ssse3 has_sse41
+
+function %scalartovector_i32(i32) -> i32x4 {
+block0(v0: i32):
+    v1 = scalar_to_vector.i32x4 v0
+    return v1
+}
+; run: %scalartovector_i32(1) == [1 0 0 0]
+; run: %scalartovector_i32(4294967295) == [4294967295 0 0 0]
+
+function %scalartovector_i64(i64) -> i64x2 {
+block0(v0: i64):
+    v1 = scalar_to_vector.i64x2 v0
+    return v1
+}
+; run: %scalartovector_i64(1) == [1 0]
+; run: %scalartovector_i64(18446744073709551615) == [18446744073709551615 0]
+
+function %scalartovector_f32(f32) -> f32x4 {
+block0(v0: f32):
+    v1 = scalar_to_vector.f32x4 v0
+    return v1
+}
+; run: %scalartovector_f32(0x1.0) == [0x1.0 0x0.0 0x0.0 0x0.0]
+; run: %scalartovector_f32(0x0.1) == [0x0.1 0x0.0 0x0.0 0x0.0]
+; run: %scalartovector_f32(NaN) == [NaN 0x0.0 0x0.0 0x0.0]
+; run: %scalartovector_f32(-0x0.0) == [-0x0.0 0x0.0 0x0.0 0x0.0]
+; run: %scalartovector_f32(0x0.0) == [0x0.0 0x0.0 0x0.0 0x0.0]
+
+function %scalartovector_f64(f64) -> f64x2 {
+block0(v0: f64):
+    v1 = scalar_to_vector.f64x2 v0
+    return v1
+}
+; run: %scalartovector_f64(0x1.0) == [0x1.0 0x0.0]
+; run: %scalartovector_f64(0x0.1) == [0x0.1 0x0.0]
+; run: %scalartovector_f64(NaN) == [NaN 0x0.0]
+; run: %scalartovector_f64(-0x0.0) == [-0x0.0 0x0.0]
+; run: %scalartovector_f64(0x0.0) == [0x0.0 0x0.0]