aarch64: Avoid a copy in the extract_vector lowering (#6252)

* Avoid a move in the aarch64 lowering of extract_vector

* Update tests
Trevor Elliott
2023-04-20 11:24:12 -07:00
committed by GitHub
parent b667f5fa5b
commit 7ad2fe32c9
5 changed files with 195 additions and 195 deletions


@@ -97,7 +97,7 @@
;;; Rules for `extract_vector` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (extract_vector x 0))
-      (value_reg (fpu_move_128 (put_in_reg x))))
+      (value_reg (put_in_reg x)))
;;;; Rules for `swiden_high` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
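The rule change above is the whole optimization: `extract_vector` at index 0 selects the 128-bit subvector that already occupies the source register, so the lowering can forward that register instead of routing it through an `fpu_move_128` copy. Restated on its own below; the rule text is taken from the hunk above, while the comments are an informal explanation rather than part of the source:

;; extract_vector with a zero index is a no-op at the register level on
;; aarch64: the low 128 bits are the whole vector register, so the input
;; register can be returned as the result directly, with no copy emitted.
(rule (lower (extract_vector x 0))
      (value_reg (put_in_reg x)))

Dropping the copy means one fewer virtual register per use of the pattern, which is why the expected register numbers in the filetests below all shift down by one (v3 becomes v2, v5 becomes v4, and so on).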


@@ -16,16 +16,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; sqxtn v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; sqxtn v0.8b, v2.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[0]
-; sqxtn v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[0]
+; sqxtn v0.8b, v2.8h
; ret
function %snarrow_i16x8(i16) -> i8x16 {
@@ -43,16 +43,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v5.8h, w0
-; sqxtn v0.8b, v5.8h
-; sqxtn2 v0.16b, v0.16b, v5.8h
+; dup v4.8h, w0
+; sqxtn v0.8b, v4.8h
+; sqxtn2 v0.16b, v0.16b, v4.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.8h, w0
-; sqxtn v0.8b, v5.8h
-; sqxtn2 v0.16b, v5.8h
+; dup v4.8h, w0
+; sqxtn v0.8b, v4.8h
+; sqxtn2 v0.16b, v4.8h
; ret
function %snarrow_i32x2(i32) -> i16x4 {
@@ -70,16 +70,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; sqxtn v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; sqxtn v0.4h, v2.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[0]
-; sqxtn v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[0]
+; sqxtn v0.4h, v2.4s
; ret
function %snarrow_i32x4(i32) -> i16x8 {
@@ -97,16 +97,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v5.4s, w0
-; sqxtn v0.4h, v5.4s
-; sqxtn2 v0.8h, v0.8h, v5.4s
+; dup v4.4s, w0
+; sqxtn v0.4h, v4.4s
+; sqxtn2 v0.8h, v0.8h, v4.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.4s, w0
-; sqxtn v0.4h, v5.4s
-; sqxtn2 v0.8h, v5.4s
+; dup v4.4s, w0
+; sqxtn v0.4h, v4.4s
+; sqxtn2 v0.8h, v4.4s
; ret
function %snarrow_i64x2(i64) -> i32x4 {
@@ -124,16 +124,16 @@ block0(v0: i64):
; VCode:
; block0:
-; dup v5.2d, x0
-; sqxtn v0.2s, v5.2d
-; sqxtn2 v0.4s, v0.4s, v5.2d
+; dup v4.2d, x0
+; sqxtn v0.2s, v4.2d
+; sqxtn2 v0.4s, v0.4s, v4.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.2d, x0
-; sqxtn v0.2s, v5.2d
-; sqxtn2 v0.4s, v5.2d
+; dup v4.2d, x0
+; sqxtn v0.2s, v4.2d
+; sqxtn2 v0.4s, v4.2d
; ret
function %unarrow_i16x4(i16) -> i8x8 {
@@ -151,16 +151,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; sqxtun v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; sqxtun v0.8b, v2.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[0]
-; sqxtun v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[0]
+; sqxtun v0.8b, v2.8h
; ret
function %unarrow_i16x8(i16) -> i8x16 {
@@ -178,16 +178,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v5.8h, w0
-; sqxtun v0.8b, v5.8h
-; sqxtun2 v0.16b, v0.16b, v5.8h
+; dup v4.8h, w0
+; sqxtun v0.8b, v4.8h
+; sqxtun2 v0.16b, v0.16b, v4.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.8h, w0
-; sqxtun v0.8b, v5.8h
-; sqxtun2 v0.16b, v5.8h
+; dup v4.8h, w0
+; sqxtun v0.8b, v4.8h
+; sqxtun2 v0.16b, v4.8h
; ret
function %unarrow_i32x2(i32) -> i16x4 {
@@ -205,16 +205,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; sqxtun v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; sqxtun v0.4h, v2.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[0]
-; sqxtun v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[0]
+; sqxtun v0.4h, v2.4s
; ret
function %unarrow_i32x4(i32) -> i16x8 {
@@ -232,16 +232,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v5.4s, w0
-; sqxtun v0.4h, v5.4s
-; sqxtun2 v0.8h, v0.8h, v5.4s
+; dup v4.4s, w0
+; sqxtun v0.4h, v4.4s
+; sqxtun2 v0.8h, v0.8h, v4.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.4s, w0
-; sqxtun v0.4h, v5.4s
-; sqxtun2 v0.8h, v5.4s
+; dup v4.4s, w0
+; sqxtun v0.4h, v4.4s
+; sqxtun2 v0.8h, v4.4s
; ret
function %unarrow_i64x2(i64) -> i32x4 {
@@ -259,16 +259,16 @@ block0(v0: i64):
; VCode:
; block0:
-; dup v5.2d, x0
-; sqxtun v0.2s, v5.2d
-; sqxtun2 v0.4s, v0.4s, v5.2d
+; dup v4.2d, x0
+; sqxtun v0.2s, v4.2d
+; sqxtun2 v0.4s, v0.4s, v4.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.2d, x0
-; sqxtun v0.2s, v5.2d
-; sqxtun2 v0.4s, v5.2d
+; dup v4.2d, x0
+; sqxtun v0.2s, v4.2d
+; sqxtun2 v0.4s, v4.2d
; ret
function %uunarrow_i16x4(i16) -> i8x8 {
@@ -286,16 +286,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; uqxtn v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; uqxtn v0.8b, v2.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[0]
-; uqxtn v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[0]
+; uqxtn v0.8b, v2.8h
; ret
function %uunarrow_i16x8(i16) -> i8x16 {
@@ -313,16 +313,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v5.8h, w0
-; uqxtn v0.8b, v5.8h
-; uqxtn2 v0.16b, v0.16b, v5.8h
+; dup v4.8h, w0
+; uqxtn v0.8b, v4.8h
+; uqxtn2 v0.16b, v0.16b, v4.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.8h, w0
-; uqxtn v0.8b, v5.8h
-; uqxtn2 v0.16b, v5.8h
+; dup v4.8h, w0
+; uqxtn v0.8b, v4.8h
+; uqxtn2 v0.16b, v4.8h
; ret
function %uunarrow_i32x2(i32) -> i16x4 {
@@ -340,16 +340,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; uqxtn v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; uqxtn v0.4h, v2.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[0]
-; uqxtn v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[0]
+; uqxtn v0.4h, v2.4s
; ret
function %uunarrow_i32x4(i32) -> i16x8 {
@@ -367,16 +367,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v5.4s, w0
-; uqxtn v0.4h, v5.4s
-; uqxtn2 v0.8h, v0.8h, v5.4s
+; dup v4.4s, w0
+; uqxtn v0.4h, v4.4s
+; uqxtn2 v0.8h, v0.8h, v4.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.4s, w0
-; uqxtn v0.4h, v5.4s
-; uqxtn2 v0.8h, v5.4s
+; dup v4.4s, w0
+; uqxtn v0.4h, v4.4s
+; uqxtn2 v0.8h, v4.4s
; ret
function %uunarrow_i64x2(i64) -> i32x4 {
@@ -394,15 +394,15 @@ block0(v0: i64):
; VCode:
; block0:
-; dup v5.2d, x0
-; uqxtn v0.2s, v5.2d
-; uqxtn2 v0.4s, v0.4s, v5.2d
+; dup v4.2d, x0
+; uqxtn v0.2s, v4.2d
+; uqxtn2 v0.4s, v0.4s, v4.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.2d, x0
-; uqxtn v0.2s, v5.2d
-; uqxtn2 v0.4s, v5.2d
+; dup v4.2d, x0
+; uqxtn v0.2s, v4.2d
+; uqxtn2 v0.4s, v4.2d
; ret


@@ -15,16 +15,16 @@ block0(v0: i8, v1: i8):
; VCode:
; block0:
-; dup v6.16b, w0
-; dup v7.16b, w1
-; add v0.16b, v6.16b, v7.16b
+; dup v5.16b, w0
+; dup v6.16b, w1
+; add v0.16b, v5.16b, v6.16b
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.16b, w0
-; dup v7.16b, w1
-; add v0.16b, v6.16b, v7.16b
+; dup v5.16b, w0
+; dup v6.16b, w1
+; add v0.16b, v5.16b, v6.16b
; ret
function %i16x8_splat_add(i16, i16) -> i16x8 {
@@ -41,16 +41,16 @@ block0(v0: i16, v1: i16):
; VCode:
; block0:
-; dup v6.8h, w0
-; dup v7.8h, w1
-; add v0.8h, v6.8h, v7.8h
+; dup v5.8h, w0
+; dup v6.8h, w1
+; add v0.8h, v5.8h, v6.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.8h, w0
-; dup v7.8h, w1
-; add v0.8h, v6.8h, v7.8h
+; dup v5.8h, w0
+; dup v6.8h, w1
+; add v0.8h, v5.8h, v6.8h
; ret
function %i32x4_splat_mul(i32, i32) -> i32x4 {
@@ -67,16 +67,16 @@ block0(v0: i32, v1: i32):
; VCode:
; block0:
-; dup v6.4s, w0
-; dup v7.4s, w1
-; mul v0.4s, v6.4s, v7.4s
+; dup v5.4s, w0
+; dup v6.4s, w1
+; mul v0.4s, v5.4s, v6.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.4s, w0
-; dup v7.4s, w1
-; mul v0.4s, v6.4s, v7.4s
+; dup v5.4s, w0
+; dup v6.4s, w1
+; mul v0.4s, v5.4s, v6.4s
; ret
function %i64x2_splat_sub(i64, i64) -> i64x2 {
@@ -93,16 +93,16 @@ block0(v0: i64, v1: i64):
; VCode:
; block0:
-; dup v6.2d, x0
-; dup v7.2d, x1
-; sub v0.2d, v6.2d, v7.2d
+; dup v5.2d, x0
+; dup v6.2d, x1
+; sub v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, x0
-; dup v7.2d, x1
-; sub v0.2d, v6.2d, v7.2d
+; dup v5.2d, x0
+; dup v6.2d, x1
+; sub v0.2d, v5.2d, v6.2d
; ret
function %f32x4_splat_add(f32, f32) -> f32x4 {
@@ -119,16 +119,16 @@ block0(v0: f32, v1: f32):
; VCode:
; block0:
-; dup v6.4s, v0.s[0]
-; dup v7.4s, v1.s[0]
-; fadd v0.4s, v6.4s, v7.4s
+; dup v5.4s, v0.s[0]
+; dup v6.4s, v1.s[0]
+; fadd v0.4s, v5.4s, v6.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.4s, v0.s[0]
-; dup v7.4s, v1.s[0]
-; fadd v0.4s, v6.4s, v7.4s
+; dup v5.4s, v0.s[0]
+; dup v6.4s, v1.s[0]
+; fadd v0.4s, v5.4s, v6.4s
; ret
function %f64x2_splat_sub(f64, f64) -> f64x2 {
@@ -145,16 +145,16 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fsub v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fsub v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fsub v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fsub v0.2d, v5.2d, v6.2d
; ret
function %f64x2_splat_mul(f64, f64) -> f64x2 {
@@ -171,16 +171,16 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmul v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmul v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmul v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmul v0.2d, v5.2d, v6.2d
; ret
function %f64x2_splat_div(f64, f64) -> f64x2 {
@@ -197,16 +197,16 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fdiv v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fdiv v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fdiv v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fdiv v0.2d, v5.2d, v6.2d
; ret
function %f64x2_splat_min(f64, f64) -> f64x2 {
@@ -223,16 +223,16 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmin v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmin v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmin v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmin v0.2d, v5.2d, v6.2d
; ret
function %f64x2_splat_max(f64, f64) -> f64x2 {
@@ -249,16 +249,16 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmax v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmax v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmax v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmax v0.2d, v5.2d, v6.2d
; ret
function %f64x2_splat_min_pseudo(f64, f64) -> f64x2 {
@@ -275,18 +275,18 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v7.2d, v0.d[0]
-; dup v16.2d, v1.d[0]
-; fcmgt v0.2d, v7.2d, v16.2d
-; bsl v0.16b, v0.16b, v16.16b, v7.16b
+; dup v6.2d, v0.d[0]
+; dup v7.2d, v1.d[0]
+; fcmgt v0.2d, v6.2d, v7.2d
+; bsl v0.16b, v0.16b, v7.16b, v6.16b
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v7.2d, v0.d[0]
-; dup v16.2d, v1.d[0]
-; fcmgt v0.2d, v7.2d, v16.2d
-; bsl v0.16b, v16.16b, v7.16b
+; dup v6.2d, v0.d[0]
+; dup v7.2d, v1.d[0]
+; fcmgt v0.2d, v6.2d, v7.2d
+; bsl v0.16b, v7.16b, v6.16b
; ret
function %f64x2_splat_max_pseudo(f64, f64) -> f64x2 {
@@ -303,17 +303,17 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v7.2d, v0.d[0]
-; dup v16.2d, v1.d[0]
-; fcmgt v0.2d, v16.2d, v7.2d
-; bsl v0.16b, v0.16b, v16.16b, v7.16b
+; dup v6.2d, v0.d[0]
+; dup v7.2d, v1.d[0]
+; fcmgt v0.2d, v7.2d, v6.2d
+; bsl v0.16b, v0.16b, v7.16b, v6.16b
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v7.2d, v0.d[0]
-; dup v16.2d, v1.d[0]
-; fcmgt v0.2d, v16.2d, v7.2d
-; bsl v0.16b, v16.16b, v7.16b
+; dup v6.2d, v0.d[0]
+; dup v7.2d, v1.d[0]
+; fcmgt v0.2d, v7.2d, v6.2d
+; bsl v0.16b, v7.16b, v6.16b
; ret


@@ -16,14 +16,14 @@ block0(v0: i8):
; VCode:
; block0:
-; dup v4.16b, w0
-; sxtl2 v0.8h, v4.16b
+; dup v3.16b, w0
+; sxtl2 v0.8h, v3.16b
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.16b, w0
-; sshll2 v0.8h, v4.16b, #0
+; dup v3.16b, w0
+; sshll2 v0.8h, v3.16b, #0
; ret
function %swidenhigh_i16x8(i16) -> i32x4 {
@@ -41,14 +41,14 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v4.8h, w0
-; sxtl2 v0.4s, v4.8h
+; dup v3.8h, w0
+; sxtl2 v0.4s, v3.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.8h, w0
-; sshll2 v0.4s, v4.8h, #0
+; dup v3.8h, w0
+; sshll2 v0.4s, v3.8h, #0
; ret
function %swidenhigh_i32x4(i32) -> i64x2 {
@@ -66,14 +66,14 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v4.4s, w0
-; sxtl2 v0.2d, v4.4s
+; dup v3.4s, w0
+; sxtl2 v0.2d, v3.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.4s, w0
-; sshll2 v0.2d, v4.4s, #0
+; dup v3.4s, w0
+; sshll2 v0.2d, v3.4s, #0
; ret
function %swidenlow_i8x16(i8) -> i16x8 {
@@ -91,14 +91,14 @@ block0(v0: i8):
; VCode:
; block0:
-; dup v4.16b, w0
-; sxtl v0.8h, v4.8b
+; dup v3.16b, w0
+; sxtl v0.8h, v3.8b
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.16b, w0
-; sshll v0.8h, v4.8b, #0
+; dup v3.16b, w0
+; sshll v0.8h, v3.8b, #0
; ret
function %swidenlow_i16x8(i16) -> i32x4 {
@@ -116,14 +116,14 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v4.8h, w0
-; sxtl v0.4s, v4.4h
+; dup v3.8h, w0
+; sxtl v0.4s, v3.4h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.8h, w0
-; sshll v0.4s, v4.4h, #0
+; dup v3.8h, w0
+; sshll v0.4s, v3.4h, #0
; ret
function %swidenlow_i32x4(i32) -> i64x2 {
@@ -141,13 +141,13 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v4.4s, w0
-; sxtl v0.2d, v4.2s
+; dup v3.4s, w0
+; sxtl v0.2d, v3.2s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.4s, w0
-; sshll v0.2d, v4.2s, #0
+; dup v3.4s, w0
+; sshll v0.2d, v3.2s, #0
; ret


@@ -123,8 +123,8 @@ block0:
; mov fp, sp
; sub sp, sp, #16
; block0:
-; mov x2, sp
-; ldr q0, [x2]
+; mov x1, sp
+; ldr q0, [x1]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
@@ -135,8 +135,8 @@ block0:
; mov x29, sp
; sub sp, sp, #0x10
; block1: ; offset 0xc
-; mov x2, sp
-; ldr q0, [x2]
+; mov x1, sp
+; ldr q0, [x1]
; add sp, sp, #0x10
; ldp x29, x30, [sp], #0x10
; ret