AArch64: port misc ops to ISLE. (#4796)

* Add some precise-output compile tests for aarch64.

* AArch64: port misc ops to ISLE.

- get_pinned_reg / set_pinned_reg
- bitcast
- stack_addr
- extractlane
- insertlane
- vhigh_bits
- iadd_ifcout
- fcvt_low_from_sint

Author: Chris Fallin
Date: 2022-08-29 12:56:39 -07:00
Committed by: GitHub
Parent: 6368c6b188
Commit: a6eb24bd4f
18 changed files with 1362 additions and 662 deletions
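
For reference, "porting to ISLE" here means expressing these lowerings as declarative pattern-matching rules in the backend's ISLE files instead of hand-written Rust. A minimal sketch of the shape such rules take is shown below, using bitcast as the example (compare the first test file that follows); the constructor names and argument lists (mov_to_fpu, mov_from_vec, the size arguments) are illustrative assumptions, not the exact rules added by this commit.

;; Sketch only: the constructor names/signatures below are assumptions.
;; bitcast.f32 of an i32 value: move the GPR into an FP register
;; (corresponds to `fmov s0, w0` in the tests below).
(rule (lower (has_type $F32 (bitcast x @ (value_type $I32))))
      (mov_to_fpu x (ScalarSize.Size32)))

;; bitcast.i32 of an f32 value: read lane 0 of the vector register
;; back into a GPR (corresponds to `mov w0, v0.s[0]`).
(rule (lower (has_type $I32 (bitcast x @ (value_type $F32))))
      (mov_from_vec x 0 (VectorSize.Size32x4)))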

@@ -0,0 +1,43 @@
test compile precise-output
target aarch64
function %f1(f32) -> i32 {
block0(v0: f32):
v1 = bitcast.i32 v0
return v1
}
; block0:
; mov w0, v0.s[0]
; ret
function %f2(i32) -> f32 {
block0(v0: i32):
v1 = bitcast.f32 v0
return v1
}
; block0:
; fmov s0, w0
; ret
function %f3(f64) -> i64 {
block0(v0: f64):
v1 = bitcast.i64 v0
return v1
}
; block0:
; mov x0, v0.d[0]
; ret
function %f4(i64) -> f64 {
block0(v0: i64):
v1 = bitcast.f64 v0
return v1
}
; block0:
; fmov d0, x0
; ret

@@ -15,9 +15,9 @@ block0:
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x0, sp
; mov x1, sp
; movz x2, #1
; str x2, [x0]
; str x2, [x1]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
@@ -36,9 +36,9 @@ block0:
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x0, sp
; mov x1, sp
; movz x2, #1
; str x2, [x0]
; str x2, [x1]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret

@@ -0,0 +1,461 @@
test compile precise-output
target aarch64
function %f1(i8) -> f32 {
block0(v0: i8):
v1 = fcvt_from_sint.f32 v0
return v1
}
; block0:
; sxtb w3, w0
; scvtf s0, w3
; ret
function %f2(i16) -> f32 {
block0(v0: i16):
v1 = fcvt_from_sint.f32 v0
return v1
}
; block0:
; sxth w3, w0
; scvtf s0, w3
; ret
function %f3(i32) -> f32 {
block0(v0: i32):
v1 = fcvt_from_sint.f32 v0
return v1
}
; block0:
; scvtf s0, w0
; ret
function %f4(i64) -> f32 {
block0(v0: i64):
v1 = fcvt_from_sint.f32 v0
return v1
}
; block0:
; scvtf s0, x0
; ret
function %f5(i8) -> f64 {
block0(v0: i8):
v1 = fcvt_from_sint.f64 v0
return v1
}
; block0:
; sxtb w3, w0
; scvtf d0, w3
; ret
function %f6(i16) -> f64 {
block0(v0: i16):
v1 = fcvt_from_sint.f64 v0
return v1
}
; block0:
; sxth w3, w0
; scvtf d0, w3
; ret
function %f7(i32) -> f64 {
block0(v0: i32):
v1 = fcvt_from_sint.f64 v0
return v1
}
; block0:
; scvtf d0, w0
; ret
function %f8(i64) -> f64 {
block0(v0: i64):
v1 = fcvt_from_sint.f64 v0
return v1
}
; block0:
; scvtf d0, x0
; ret
function %f9(i32x4) -> f64x2 {
block0(v0: i32x4):
v1 = fcvt_low_from_sint.f64x2 v0
return v1
}
; block0:
; sxtl v3.2d, v0.2s
; scvtf v0.2d, v3.2d
; ret
function %f10(i8, i16, i32, i64) -> f32 {
block0(v0: i8, v1: i16, v2: i32, v3: i64):
v4 = fcvt_from_uint.f32 v0
v5 = fcvt_from_uint.f32 v1
v6 = fcvt_from_uint.f32 v2
v7 = fcvt_from_uint.f32 v3
v8 = fadd.f32 v4, v5
v9 = fadd.f32 v8, v6
v10 = fadd.f32 v9, v7
return v10
}
; block0:
; uxtb w0, w0
; ucvtf s26, w0
; uxth w0, w1
; ucvtf s27, w0
; ucvtf s25, w2
; ucvtf s28, x3
; fadd s26, s26, s27
; fadd s25, s26, s25
; fadd s0, s25, s28
; ret
function %f11(i32x4) -> f64x2 {
block0(v0: i32x4):
v1 = uwiden_low v0
v2 = fcvt_from_uint.f64x2 v1
return v2
}
; block0:
; uxtl v4.2d, v0.2s
; ucvtf v0.2d, v4.2d
; ret
function %f12(i32x4) -> f32x4 {
block0(v0: i32x4):
v1 = fcvt_from_uint.f32x4 v0
return v1
}
; block0:
; ucvtf v0.4s, v0.4s
; ret
function %f13(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_uint.i32 v0
return v1
}
; block0:
; fcmp s0, s0
; b.vc 8 ; udf
; fmov s5, #-1
; fcmp s0, s5
; b.gt 8 ; udf
; movz x10, #20352, LSL #16
; fmov s18, w10
; fcmp s0, s18
; b.lt 8 ; udf
; fcvtzu w0, s0
; ret
function %f14(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_uint.i64 v0
return v1
}
; block0:
; fcmp s0, s0
; b.vc 8 ; udf
; fmov s5, #-1
; fcmp s0, s5
; b.gt 8 ; udf
; movz x10, #24448, LSL #16
; fmov s18, w10
; fcmp s0, s18
; b.lt 8 ; udf
; fcvtzu x0, s0
; ret
function %f15(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_uint.i32 v0
return v1
}
; block0:
; fcmp d0, d0
; b.vc 8 ; udf
; fmov d5, #-1
; fcmp d0, d5
; b.gt 8 ; udf
; movz x10, #16880, LSL #48
; fmov d18, x10
; fcmp d0, d18
; b.lt 8 ; udf
; fcvtzu w0, d0
; ret
function %f16(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_uint.i64 v0
return v1
}
; block0:
; fcmp d0, d0
; b.vc 8 ; udf
; fmov d5, #-1
; fcmp d0, d5
; b.gt 8 ; udf
; movz x10, #17392, LSL #48
; fmov d18, x10
; fcmp d0, d18
; b.lt 8 ; udf
; fcvtzu x0, d0
; ret
function %f17(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_uint_sat.i32 v0
return v1
}
; block0:
; movz x4, #20352, LSL #16
; fmov s4, w4
; fmin s7, s0, s4
; movi v17.2s, #0
; fmax s19, s7, s17
; fcmp s0, s0
; fcsel s22, s17, s19, ne
; fcvtzu w0, s22
; ret
function %f18(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_uint_sat.i64 v0
return v1
}
; block0:
; movz x4, #24448, LSL #16
; fmov s4, w4
; fmin s7, s0, s4
; movi v17.2s, #0
; fmax s19, s7, s17
; fcmp s0, s0
; fcsel s22, s17, s19, ne
; fcvtzu x0, s22
; ret
function %f19(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_uint_sat.i32 v0
return v1
}
; block0:
; ldr d3, pc+8 ; b 12 ; data.f64 4294967295
; fmin d5, d0, d3
; movi v7.2s, #0
; fmax d17, d5, d7
; fcmp d0, d0
; fcsel d20, d7, d17, ne
; fcvtzu w0, d20
; ret
function %f20(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_uint_sat.i64 v0
return v1
}
; block0:
; movz x4, #17392, LSL #48
; fmov d4, x4
; fmin d7, d0, d4
; movi v17.2s, #0
; fmax d19, d7, d17
; fcmp d0, d0
; fcsel d22, d17, d19, ne
; fcvtzu x0, d22
; ret
function %f21(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_sint.i32 v0
return v1
}
; block0:
; fcmp s0, s0
; b.vc 8 ; udf
; movz x6, #52992, LSL #16
; fmov s6, w6
; fcmp s0, s6
; b.ge 8 ; udf
; movz x12, #20224, LSL #16
; fmov s20, w12
; fcmp s0, s20
; b.lt 8 ; udf
; fcvtzs w0, s0
; ret
function %f22(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_sint.i64 v0
return v1
}
; block0:
; fcmp s0, s0
; b.vc 8 ; udf
; movz x6, #57088, LSL #16
; fmov s6, w6
; fcmp s0, s6
; b.ge 8 ; udf
; movz x12, #24320, LSL #16
; fmov s20, w12
; fcmp s0, s20
; b.lt 8 ; udf
; fcvtzs x0, s0
; ret
function %f23(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_sint.i32 v0
return v1
}
; block0:
; fcmp d0, d0
; b.vc 8 ; udf
; ldr d5, pc+8 ; b 12 ; data.f64 -2147483649
; fcmp d0, d5
; b.gt 8 ; udf
; movz x10, #16864, LSL #48
; fmov d18, x10
; fcmp d0, d18
; b.lt 8 ; udf
; fcvtzs w0, d0
; ret
function %f24(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_sint.i64 v0
return v1
}
; block0:
; fcmp d0, d0
; b.vc 8 ; udf
; movz x6, #50144, LSL #48
; fmov d6, x6
; fcmp d0, d6
; b.ge 8 ; udf
; movz x12, #17376, LSL #48
; fmov d20, x12
; fcmp d0, d20
; b.lt 8 ; udf
; fcvtzs x0, d0
; ret
function %f25(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_sint_sat.i32 v0
return v1
}
; block0:
; movz x4, #20224, LSL #16
; fmov s4, w4
; fmin s7, s0, s4
; movz x10, #52992, LSL #16
; fmov s18, w10
; fmax s21, s7, s18
; movi v23.16b, #0
; fcmp s0, s0
; fcsel s26, s23, s21, ne
; fcvtzs w0, s26
; ret
function %f26(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_sint_sat.i64 v0
return v1
}
; block0:
; movz x4, #24320, LSL #16
; fmov s4, w4
; fmin s7, s0, s4
; movz x10, #57088, LSL #16
; fmov s18, w10
; fmax s21, s7, s18
; movi v23.16b, #0
; fcmp s0, s0
; fcsel s26, s23, s21, ne
; fcvtzs x0, s26
; ret
function %f27(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_sint_sat.i32 v0
return v1
}
; block0:
; ldr d3, pc+8 ; b 12 ; data.f64 2147483647
; fmin d5, d0, d3
; movz x8, #49632, LSL #48
; fmov d16, x8
; fmax d19, d5, d16
; movi v21.16b, #0
; fcmp d0, d0
; fcsel d24, d21, d19, ne
; fcvtzs w0, d24
; ret
function %f28(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_sint_sat.i64 v0
return v1
}
; block0:
; movz x4, #17376, LSL #48
; fmov d4, x4
; fmin d7, d0, d4
; movz x10, #50144, LSL #48
; fmov d18, x10
; fmax d21, d7, d18
; movi v23.16b, #0
; fcmp d0, d0
; fcsel d26, d23, d21, ne
; fcvtzs x0, d26
; ret
function %f29(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = fcvt_to_uint_sat.i32x4 v0
return v1
}
; block0:
; fcvtzu v0.4s, v0.4s
; ret
function %f30(f32x4) -> i32x4 {
block0(v0: f32x4):
v1 = fcvt_to_sint_sat.i32x4 v0
return v1
}
; block0:
; fcvtzs v0.4s, v0.4s
; ret

@@ -71,11 +71,11 @@ block3(v7: r64, v8: r64):
; str x0, [sp, #8]
; ldr x2, 8 ; b 12 ; data TestCase(%f) + 0
; blr x2
; mov x8, sp
; mov x4, sp
; ldr x11, [sp, #8]
; str x11, [x8]
; and w6, w0, #1
; cbz x6, label1 ; b label3
; str x11, [x4]
; and w5, w0, #1
; cbz x5, label1 ; b label3
; block1:
; b label2
; block2:
@@ -89,8 +89,8 @@ block3(v7: r64, v8: r64):
; ldr x1, [sp, #16]
; b label5
; block5:
; mov x3, sp
; ldr x2, [x3]
; mov x6, sp
; ldr x2, [x6]
; add sp, sp, #32
; ldp fp, lr, [sp], #16
; ret

@@ -0,0 +1,213 @@
test compile precise-output
set enable_simd
target aarch64
function %band_f32x4(f32x4, f32x4) -> f32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = band v0, v1
return v2
}
; block0:
; and v0.16b, v0.16b, v1.16b
; ret
function %band_f64x2(f64x2, f64x2) -> f64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = band v0, v1
return v2
}
; block0:
; and v0.16b, v0.16b, v1.16b
; ret
function %band_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = band v0, v1
return v2
}
; block0:
; and v0.16b, v0.16b, v1.16b
; ret
function %bor_f32x4(f32x4, f32x4) -> f32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = bor v0, v1
return v2
}
; block0:
; orr v0.16b, v0.16b, v1.16b
; ret
function %bor_f64x2(f64x2, f64x2) -> f64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = bor v0, v1
return v2
}
; block0:
; orr v0.16b, v0.16b, v1.16b
; ret
function %bor_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = bor v0, v1
return v2
}
; block0:
; orr v0.16b, v0.16b, v1.16b
; ret
function %bxor_f32x4(f32x4, f32x4) -> f32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = bxor v0, v1
return v2
}
; block0:
; eor v0.16b, v0.16b, v1.16b
; ret
function %bxor_f64x2(f64x2, f64x2) -> f64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = bxor v0, v1
return v2
}
; block0:
; eor v0.16b, v0.16b, v1.16b
; ret
function %bxor_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = bxor v0, v1
return v2
}
; block0:
; eor v0.16b, v0.16b, v1.16b
; ret
function %bitselect_i16x8() -> i16x8 {
block0:
v0 = vconst.i16x8 [0 0 0 0 0 0 0 0]
v1 = vconst.i16x8 [0 0 0 0 0 0 0 0]
v2 = vconst.i16x8 [0 0 0 0 0 0 0 0]
v3 = bitselect v0, v1, v2
return v3
}
; block0:
; movi v0.16b, #0
; movi v4.16b, #0
; movi v5.16b, #0
; bsl v0.16b, v4.16b, v5.16b
; ret
function %vselect_i16x8(b16x8, i16x8, i16x8) -> i16x8 {
block0(v0: b16x8, v1: i16x8, v2: i16x8):
v3 = vselect v0, v1, v2
return v3
}
; block0:
; bsl v0.16b, v1.16b, v2.16b
; ret
function %vselect_f32x4(b32x4, f32x4, f32x4) -> f32x4 {
block0(v0: b32x4, v1: f32x4, v2: f32x4):
v3 = vselect v0, v1, v2
return v3
}
; block0:
; bsl v0.16b, v1.16b, v2.16b
; ret
function %vselect_f64x2(b64x2, f64x2, f64x2) -> f64x2 {
block0(v0: b64x2, v1: f64x2, v2: f64x2):
v3 = vselect v0, v1, v2
return v3
}
; block0:
; bsl v0.16b, v1.16b, v2.16b
; ret
function %ishl_i8x16(i32) -> i8x16 {
block0(v0: i32):
v1 = vconst.i8x16 [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
v2 = ishl v1, v0
return v2
}
; block0:
; ldr q6, pc+8 ; b 20 ; data.f128 0x0f0e0d0c0b0a09080706050403020100
; and w4, w0, #7
; dup v7.16b, w4
; sshl v0.16b, v6.16b, v7.16b
; ret
function %ushr_i8x16_imm() -> i8x16 {
block0:
v0 = iconst.i32 1
v1 = vconst.i8x16 [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
v2 = ushr v1, v0
return v2
}
; block0:
; ldr q6, pc+8 ; b 20 ; data.f128 0x0f0e0d0c0b0a09080706050403020100
; movz x2, #1
; and w4, w2, #7
; sub x6, xzr, x4
; dup v16.16b, w6
; ushl v0.16b, v6.16b, v16.16b
; ret
function %sshr_i8x16(i32) -> i8x16 {
block0(v0: i32):
v1 = vconst.i8x16 [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
v2 = sshr v1, v0
return v2
}
; block0:
; ldr q7, pc+8 ; b 20 ; data.f128 0x0f0e0d0c0b0a09080706050403020100
; and w4, w0, #7
; sub x6, xzr, x4
; dup v16.16b, w6
; sshl v0.16b, v7.16b, v16.16b
; ret
function %sshr_i8x16_imm(i8x16, i32) -> i8x16 {
block0(v0: i8x16, v1: i32):
v2 = sshr_imm v0, 3
return v2
}
; block0:
; movz x5, #3
; and w7, w5, #7
; sub x9, xzr, x7
; dup v19.16b, w9
; sshl v0.16b, v0.16b, v19.16b
; ret
function %sshr_i64x2(i64x2, i32) -> i64x2 {
block0(v0: i64x2, v1: i32):
v2 = sshr v0, v1
return v2
}
; block0:
; and w5, w0, #63
; sub x7, xzr, x5
; dup v17.2d, x7
; sshl v0.2d, v0.2d, v17.2d
; ret

@@ -0,0 +1,45 @@
test compile precise-output
set enable_simd
target aarch64
function %icmp_ne_32x4(i32x4, i32x4) -> b32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp ne v0, v1
return v2
}
; block0:
; cmeq v0.4s, v0.4s, v1.4s
; mvn v0.16b, v0.16b
; ret
function %icmp_ugt_i32x4(i32x4, i32x4) -> b32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp ugt v0, v1
return v2
}
; block0:
; cmhi v0.4s, v0.4s, v1.4s
; ret
function %icmp_sge_i16x8(i16x8, i16x8) -> b16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp sge v0, v1
return v2
}
; block0:
; cmge v0.8h, v0.8h, v1.8h
; ret
function %icmp_uge_i8x16(i8x16, i8x16) -> b8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp uge v0, v1
return v2
}
; block0:
; cmhs v0.16b, v0.16b, v1.16b
; ret

@@ -0,0 +1,124 @@
test compile precise-output
set enable_simd
target aarch64
;; shuffle
function %shuffle_different_ssa_values() -> i8x16 {
block0:
v0 = vconst.i8x16 0x00
v1 = vconst.i8x16 0x01
v2 = shuffle v0, v1, 0x11000000000000000000000000000000 ;; pick the second lane of v1, the rest use the first lane of v0
return v2
}
; block0:
; movi v30.16b, #0
; movz x5, #1
; fmov s31, w5
; ldr q4, pc+8 ; b 20 ; data.f128 0x11000000000000000000000000000000
; tbl v0.16b, { v30.16b, v31.16b }, v4.16b
; ret
function %shuffle_same_ssa_value() -> i8x16 {
block0:
v1 = vconst.i8x16 0x01
v2 = shuffle v1, v1, 0x13000000000000000000000000000000 ;; pick the fourth lane of v1 and the rest from the first lane of v1
return v2
}
; block0:
; movz x4, #1
; fmov s30, w4
; ldr q3, pc+8 ; b 20 ; data.f128 0x13000000000000000000000000000000
; mov v31.16b, v30.16b
; tbl v0.16b, { v30.16b, v31.16b }, v3.16b
; ret
function %swizzle() -> i8x16 {
block0:
v0 = vconst.i8x16 [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
v1 = vconst.i8x16 [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15]
v2 = swizzle.i8x16 v0, v1
return v2
}
; block0:
; ldr q3, pc+8 ; b 20 ; data.f128 0x0f0e0d0c0b0a09080706050403020100
; ldr q4, pc+8 ; b 20 ; data.f128 0x0f0e0d0c0b0a09080706050403020100
; tbl v0.16b, { v3.16b }, v4.16b
; ret
function %splat_i8(i8) -> i8x16 {
block0(v0: i8):
v1 = splat.i8x16 v0
return v1
}
; block0:
; dup v0.16b, w0
; ret
function %splat_b16() -> b16x8 {
block0:
v0 = bconst.b16 true
v1 = splat.b16x8 v0
return v1
}
; block0:
; movi v0.16b, #255
; ret
function %splat_i32(i32) -> i32x4 {
block0(v0: i32):
v1 = splat.i32x4 v0
return v1
}
; block0:
; dup v0.4s, w0
; ret
function %splat_f64(f64) -> f64x2 {
block0(v0: f64):
v1 = splat.f64x2 v0
return v1
}
; block0:
; dup v0.2d, v0.d[0]
; ret
function %load32_zero_coalesced(i64) -> i32x4 {
block0(v0: i64):
v1 = load.i32 v0
v2 = scalar_to_vector.i32x4 v1
return v2
}
; block0:
; ldr w2, [x0]
; fmov s0, w2
; ret
function %load32_zero_int(i32) -> i32x4 {
block0(v0: i32):
v1 = scalar_to_vector.i32x4 v0
return v1
}
; block0:
; fmov s0, w0
; ret
function %load32_zero_float(f32) -> f32x4 {
block0(v0: f32):
v1 = scalar_to_vector.f32x4 v0
return v1
}
; block0:
; fmov s0, s0
; ret

@@ -0,0 +1,40 @@
test compile precise-output
set enable_simd
target aarch64
function %bnot_b32x4(b32x4) -> b32x4 {
block0(v0: b32x4):
v1 = bnot v0
return v1
}
; block0:
; mvn v0.16b, v0.16b
; ret
function %vany_true_b32x4(b32x4) -> b1 {
block0(v0: b32x4):
v1 = vany_true v0
return v1
}
; block0:
; umaxp v3.4s, v0.4s, v0.4s
; mov x5, v3.d[0]
; subs xzr, x5, #0
; csetm x0, ne
; ret
function %vall_true_i64x2(i64x2) -> b1 {
block0(v0: i64x2):
v1 = vall_true v0
return v1
}
; block0:
; cmeq v3.2d, v0.2d, #0
; addp v5.2d, v3.2d, v3.2d
; fcmp d5, d5
; cset x0, eq
; ret

@@ -1,8 +1,6 @@
test compile precise-output
set unwind_info=false
target aarch64
function %fn1(i8x16) -> i16x8 {
block0(v0: i8x16):
v1 = swiden_low v0
@@ -15,19 +13,7 @@ block0(v0: i8x16):
; saddlp v0.8h, v0.16b
; ret
function %fn2(i8x16) -> i16x8 {
block0(v0: i8x16):
v1 = uwiden_low v0
v2 = uwiden_high v0
v3 = iadd_pairwise v1, v2
return v3
}
; block0:
; uaddlp v0.8h, v0.16b
; ret
function %fn3(i16x8) -> i32x4 {
function %fn2(i16x8) -> i32x4 {
block0(v0: i16x8):
v1 = swiden_low v0
v2 = swiden_high v0
@@ -39,6 +25,18 @@ block0(v0: i16x8):
; saddlp v0.4s, v0.8h
; ret
function %fn3(i8x16) -> i16x8 {
block0(v0: i8x16):
v1 = uwiden_low v0
v2 = uwiden_high v0
v3 = iadd_pairwise v1, v2
return v3
}
; block0:
; uaddlp v0.8h, v0.16b
; ret
function %fn4(i16x8) -> i32x4 {
block0(v0: i16x8):
v1 = uwiden_low v0
@@ -51,169 +49,3 @@ block0(v0: i16x8):
; uaddlp v0.4s, v0.8h
; ret
function %fn5(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
v2 = swiden_low v0
v3 = swiden_high v1
v4 = iadd_pairwise v2, v3
return v4
}
; block0:
; sxtl v7.8h, v0.8b
; sxtl2 v16.8h, v1.16b
; addp v0.8h, v7.8h, v16.8h
; ret
function %fn6(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
v2 = uwiden_low v0
v3 = uwiden_high v1
v4 = iadd_pairwise v2, v3
return v4
}
; block0:
; uxtl v7.8h, v0.8b
; uxtl2 v16.8h, v1.16b
; addp v0.8h, v7.8h, v16.8h
; ret
function %fn7(i8x16) -> i16x8 {
block0(v0: i8x16):
v1 = uwiden_low v0
v2 = swiden_high v0
v3 = iadd_pairwise v1, v2
return v3
}
; block0:
; uxtl v5.8h, v0.8b
; sxtl2 v6.8h, v0.16b
; addp v0.8h, v5.8h, v6.8h
; ret
function %fn8(i8x16) -> i16x8 {
block0(v0: i8x16):
v1 = swiden_low v0
v2 = uwiden_high v0
v3 = iadd_pairwise v1, v2
return v3
}
; block0:
; sxtl v5.8h, v0.8b
; uxtl2 v6.8h, v0.16b
; addp v0.8h, v5.8h, v6.8h
; ret
function %fn9(i8x8, i8x8) -> i8x8 {
block0(v0: i8x8, v1: i8x8):
v2 = iadd_pairwise v0, v1
return v2
}
; block0:
; addp v0.8b, v0.8b, v1.8b
; ret
function %fn10(i8x16, i8x16) -> i8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = iadd_pairwise v0, v1
return v2
}
; block0:
; addp v0.16b, v0.16b, v1.16b
; ret
function %fn11(i16x4, i16x4) -> i16x4 {
block0(v0: i16x4, v1: i16x4):
v2 = iadd_pairwise v0, v1
return v2
}
; block0:
; addp v0.4h, v0.4h, v1.4h
; ret
function %fn12(i16x8, i16x8) -> i16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = iadd_pairwise v0, v1
return v2
}
; block0:
; addp v0.8h, v0.8h, v1.8h
; ret
function %fn14(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = iadd_pairwise v0, v1
return v2
}
; block0:
; addp v0.4s, v0.4s, v1.4s
; ret
function %fn15(i8x8, i8x8) -> i16x4 {
block0(v0: i8x8, v1: i8x8):
v2 = swiden_low v0
v3 = swiden_high v1
v4 = iadd_pairwise v2, v3
return v4
}
; block0:
; sxtl v16.8h, v0.8b
; mov s7, v1.s[1]
; sxtl v17.8h, v7.8b
; addp v0.4h, v16.4h, v17.4h
; ret
function %fn16(i8x8, i8x8) -> i16x4 {
block0(v0: i8x8, v1: i8x8):
v2 = uwiden_low v0
v3 = uwiden_high v1
v4 = iadd_pairwise v2, v3
return v4
}
; block0:
; uxtl v16.8h, v0.8b
; mov s7, v1.s[1]
; uxtl v17.8h, v7.8b
; addp v0.4h, v16.4h, v17.4h
; ret
function %fn17(i8x8) -> i16x4 {
block0(v0: i8x8):
v1 = uwiden_low v0
v2 = swiden_high v0
v3 = iadd_pairwise v1, v2
return v3
}
; block0:
; uxtl v6.8h, v0.8b
; mov s5, v0.s[1]
; sxtl v7.8h, v5.8b
; addp v0.4h, v6.4h, v7.4h
; ret
function %fn18(i8x8) -> i16x4 {
block0(v0: i8x8):
v1 = swiden_low v0
v2 = uwiden_high v0
v3 = iadd_pairwise v1, v2
return v3
}
; block0:
; sxtl v6.8h, v0.8b
; mov s5, v0.s[1]
; uxtl v7.8h, v5.8b
; addp v0.4h, v6.4h, v7.4h
; ret

@@ -53,8 +53,8 @@ block0:
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x0, sp
; ldr x0, [x0]
; mov x2, sp
; ldr x0, [x2]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
@@ -74,8 +74,8 @@ block0:
; movk w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x0, sp
; ldr x0, [x0]
; mov x2, sp
; ldr x0, [x2]
; movz w16, #34480
; movk w16, #1, LSL #16
; add sp, sp, x16, UXTX
@@ -442,8 +442,8 @@ block0(v0: i128):
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x4, sp
; stp x0, x1, [x4]
; mov x5, sp
; stp x0, x1, [x5]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
@@ -461,8 +461,8 @@ block0(v0: i128):
; mov fp, sp
; sub sp, sp, #32
; block0:
; add x4, sp, #32
; stp x0, x1, [x4]
; add x5, sp, #32
; stp x0, x1, [x5]
; add sp, sp, #32
; ldp fp, lr, [sp], #16
; ret
@@ -482,8 +482,8 @@ block0(v0: i128):
; movk w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x4, sp
; stp x0, x1, [x4]
; mov x5, sp
; stp x0, x1, [x5]
; movz w16, #34480
; movk w16, #1, LSL #16
; add sp, sp, x16, UXTX
@@ -502,8 +502,8 @@ block0:
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x0, sp
; ldp x0, x1, [x0]
; mov x5, sp
; ldp x0, x1, [x5]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
@@ -521,8 +521,8 @@ block0:
; mov fp, sp
; sub sp, sp, #32
; block0:
; add x0, sp, #32
; ldp x0, x1, [x0]
; add x5, sp, #32
; ldp x0, x1, [x5]
; add sp, sp, #32
; ldp fp, lr, [sp], #16
; ret
@@ -542,8 +542,8 @@ block0:
; movk w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x0, sp
; ldp x0, x1, [x0]
; mov x5, sp
; ldp x0, x1, [x5]
; movz w16, #34480
; movk w16, #1, LSL #16
; add sp, sp, x16, UXTX

@@ -1,8 +1,7 @@
test compile precise-output
set unwind_info=false
target aarch64
function %f() {
function %trap() {
block0:
trap user0
}
@@ -10,26 +9,14 @@ block0:
; block0:
; udf #0xc11f
function %g(i64) {
block0(v0: i64):
v1 = iconst.i64 42
v2 = ifcmp v0, v1
trapif eq v2, user0
function %trap_iadd_ifcout(i64, i64) {
block0(v0: i64, v1: i64):
v2, v3 = iadd_ifcout v0, v1
trapif of v3, user0
return
}
; block0:
; subs xzr, x0, #42
; b.ne 8 ; udf
; ret
function %h() {
block0:
debugtrap
return
}
; block0:
; brk #0
; b.vc 8 ; udf
; ret

@@ -0,0 +1,85 @@
test compile precise-output
target aarch64
function %f1(i8x16) -> i8 {
block0(v0: i8x16):
v1 = vhigh_bits.i8 v0
return v1
}
; block0:
; sshr v3.16b, v0.16b, #7
; movz x6, #513
; movk x6, #2052, LSL #16
; movk x6, #8208, LSL #32
; movk x6, #32832, LSL #48
; dup v17.2d, x6
; and v20.16b, v3.16b, v17.16b
; ext v22.16b, v20.16b, v20.16b, #8
; zip1 v24.16b, v20.16b, v22.16b
; addv h26, v24.8h
; umov w0, v26.h[0]
; ret
function %f2(i8x16) -> i16 {
block0(v0: i8x16):
v1 = vhigh_bits.i16 v0
return v1
}
; block0:
; sshr v3.16b, v0.16b, #7
; movz x6, #513
; movk x6, #2052, LSL #16
; movk x6, #8208, LSL #32
; movk x6, #32832, LSL #48
; dup v17.2d, x6
; and v20.16b, v3.16b, v17.16b
; ext v22.16b, v20.16b, v20.16b, #8
; zip1 v24.16b, v20.16b, v22.16b
; addv h26, v24.8h
; umov w0, v26.h[0]
; ret
function %f3(i16x8) -> i8 {
block0(v0: i16x8):
v1 = vhigh_bits.i8 v0
return v1
}
; block0:
; sshr v3.8h, v0.8h, #15
; ldr q5, pc+8 ; b 20 ; data.f128 0x00800040002000100008000400020001
; and v7.16b, v3.16b, v5.16b
; addv h17, v7.8h
; umov w0, v17.h[0]
; ret
function %f4(i32x4) -> i8 {
block0(v0: i32x4):
v1 = vhigh_bits.i8 v0
return v1
}
; block0:
; sshr v3.4s, v0.4s, #31
; ldr q5, pc+8 ; b 20 ; data.f128 0x00000008000000040000000200000001
; and v7.16b, v3.16b, v5.16b
; addv s17, v7.4s
; mov w0, v17.s[0]
; ret
function %f5(i64x2) -> i8 {
block0(v0: i64x2):
v1 = vhigh_bits.i8 v0
return v1
}
; block0:
; mov x3, v0.d[1]
; mov x5, v0.d[0]
; lsr x7, x3, #63
; lsr x9, x5, #63
; add x0, x9, x7, LSL 1
; ret