aarch64: Avoid a copy in the extract_vector lowering (#6252)

* Avoid a move in the aarch64 lowering of extract_vector

* Update tests
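
For reference, a sketch of the lowering rule before and after, assembled from
the ISLE hunk below. The rule text is copied from the diff; only the comments
are added. The change drops the fpu_move_128 copy of the 128-bit input: for
sub-vector index 0 the input register already holds the desired value, so it
is forwarded as-is.

  ;; Before: copy the value into a fresh 128-bit vector register.
  (rule (lower (extract_vector x 0))
        (value_reg (fpu_move_128 (put_in_reg x))))

  ;; After: forward the input register directly.
  (rule (lower (extract_vector x 0))
        (value_reg (put_in_reg x)))

In the regenerated test expectations below the instruction sequences are
unchanged; the visible effect is that temporaries are renumbered one register
lower, suggesting the copy was already being coalesced away by the register
allocator in these cases.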

Author:     Trevor Elliott
Date:       2023-04-20 11:24:12 -07:00
Committer:  GitHub
Parent:     b667f5fa5b
Commit:     7ad2fe32c9

5 changed files with 195 additions and 195 deletions


@@ -97,7 +97,7 @@
;;; Rules for `extract_vector` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(rule (lower (extract_vector x 0))
-      (value_reg (fpu_move_128 (put_in_reg x))))
+      (value_reg (put_in_reg x)))
;;;; Rules for `swiden_high` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;


@@ -16,16 +16,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; sqxtn v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; sqxtn v0.8b, v2.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[0]
-; sqxtn v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[0]
+; sqxtn v0.8b, v2.8h
; ret
function %snarrow_i16x8(i16) -> i8x16 {
@@ -43,16 +43,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v5.8h, w0
-; sqxtn v0.8b, v5.8h
-; sqxtn2 v0.16b, v0.16b, v5.8h
+; dup v4.8h, w0
+; sqxtn v0.8b, v4.8h
+; sqxtn2 v0.16b, v0.16b, v4.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.8h, w0
-; sqxtn v0.8b, v5.8h
-; sqxtn2 v0.16b, v5.8h
+; dup v4.8h, w0
+; sqxtn v0.8b, v4.8h
+; sqxtn2 v0.16b, v4.8h
; ret
function %snarrow_i32x2(i32) -> i16x4 {
@@ -70,16 +70,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; sqxtn v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; sqxtn v0.4h, v2.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[0]
-; sqxtn v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[0]
+; sqxtn v0.4h, v2.4s
; ret
function %snarrow_i32x4(i32) -> i16x8 {
@@ -97,16 +97,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v5.4s, w0
-; sqxtn v0.4h, v5.4s
-; sqxtn2 v0.8h, v0.8h, v5.4s
+; dup v4.4s, w0
+; sqxtn v0.4h, v4.4s
+; sqxtn2 v0.8h, v0.8h, v4.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.4s, w0
-; sqxtn v0.4h, v5.4s
-; sqxtn2 v0.8h, v5.4s
+; dup v4.4s, w0
+; sqxtn v0.4h, v4.4s
+; sqxtn2 v0.8h, v4.4s
; ret
function %snarrow_i64x2(i64) -> i32x4 {
@@ -124,16 +124,16 @@ block0(v0: i64):
; VCode:
; block0:
-; dup v5.2d, x0
-; sqxtn v0.2s, v5.2d
-; sqxtn2 v0.4s, v0.4s, v5.2d
+; dup v4.2d, x0
+; sqxtn v0.2s, v4.2d
+; sqxtn2 v0.4s, v0.4s, v4.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.2d, x0
-; sqxtn v0.2s, v5.2d
-; sqxtn2 v0.4s, v5.2d
+; dup v4.2d, x0
+; sqxtn v0.2s, v4.2d
+; sqxtn2 v0.4s, v4.2d
; ret
function %unarrow_i16x4(i16) -> i8x8 {
@@ -151,16 +151,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; sqxtun v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; sqxtun v0.8b, v2.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[0]
-; sqxtun v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[0]
+; sqxtun v0.8b, v2.8h
; ret
function %unarrow_i16x8(i16) -> i8x16 {
@@ -178,16 +178,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v5.8h, w0
-; sqxtun v0.8b, v5.8h
-; sqxtun2 v0.16b, v0.16b, v5.8h
+; dup v4.8h, w0
+; sqxtun v0.8b, v4.8h
+; sqxtun2 v0.16b, v0.16b, v4.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.8h, w0
-; sqxtun v0.8b, v5.8h
-; sqxtun2 v0.16b, v5.8h
+; dup v4.8h, w0
+; sqxtun v0.8b, v4.8h
+; sqxtun2 v0.16b, v4.8h
; ret
function %unarrow_i32x2(i32) -> i16x4 {
@@ -205,16 +205,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; sqxtun v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; sqxtun v0.4h, v2.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[0]
-; sqxtun v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[0]
+; sqxtun v0.4h, v2.4s
; ret
function %unarrow_i32x4(i32) -> i16x8 {
@@ -232,16 +232,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v5.4s, w0
-; sqxtun v0.4h, v5.4s
-; sqxtun2 v0.8h, v0.8h, v5.4s
+; dup v4.4s, w0
+; sqxtun v0.4h, v4.4s
+; sqxtun2 v0.8h, v0.8h, v4.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.4s, w0
-; sqxtun v0.4h, v5.4s
-; sqxtun2 v0.8h, v5.4s
+; dup v4.4s, w0
+; sqxtun v0.4h, v4.4s
+; sqxtun2 v0.8h, v4.4s
; ret
function %unarrow_i64x2(i64) -> i32x4 {
@@ -259,16 +259,16 @@ block0(v0: i64):
; VCode:
; block0:
-; dup v5.2d, x0
-; sqxtun v0.2s, v5.2d
-; sqxtun2 v0.4s, v0.4s, v5.2d
+; dup v4.2d, x0
+; sqxtun v0.2s, v4.2d
+; sqxtun2 v0.4s, v0.4s, v4.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.2d, x0
-; sqxtun v0.2s, v5.2d
-; sqxtun2 v0.4s, v5.2d
+; dup v4.2d, x0
+; sqxtun v0.2s, v4.2d
+; sqxtun2 v0.4s, v4.2d
; ret
function %uunarrow_i16x4(i16) -> i8x8 {
@@ -286,16 +286,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; uqxtn v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; uqxtn v0.8b, v2.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.4h, w0
-; mov v3.d[1], v3.d[0]
-; uqxtn v0.8b, v3.8h
+; dup v2.4h, w0
+; mov v2.d[1], v2.d[0]
+; uqxtn v0.8b, v2.8h
; ret
function %uunarrow_i16x8(i16) -> i8x16 {
@@ -313,16 +313,16 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v5.8h, w0
-; uqxtn v0.8b, v5.8h
-; uqxtn2 v0.16b, v0.16b, v5.8h
+; dup v4.8h, w0
+; uqxtn v0.8b, v4.8h
+; uqxtn2 v0.16b, v0.16b, v4.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.8h, w0
-; uqxtn v0.8b, v5.8h
-; uqxtn2 v0.16b, v5.8h
+; dup v4.8h, w0
+; uqxtn v0.8b, v4.8h
+; uqxtn2 v0.16b, v4.8h
; ret
function %uunarrow_i32x2(i32) -> i16x4 {
@@ -340,16 +340,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[1], v3.d[0]
-; uqxtn v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[1], v2.d[0]
+; uqxtn v0.4h, v2.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v3.2s, w0
-; mov v3.d[1], v3.d[0]
-; uqxtn v0.4h, v3.4s
+; dup v2.2s, w0
+; mov v2.d[1], v2.d[0]
+; uqxtn v0.4h, v2.4s
; ret
function %uunarrow_i32x4(i32) -> i16x8 {
@@ -367,16 +367,16 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v5.4s, w0
-; uqxtn v0.4h, v5.4s
-; uqxtn2 v0.8h, v0.8h, v5.4s
+; dup v4.4s, w0
+; uqxtn v0.4h, v4.4s
+; uqxtn2 v0.8h, v0.8h, v4.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.4s, w0
-; uqxtn v0.4h, v5.4s
-; uqxtn2 v0.8h, v5.4s
+; dup v4.4s, w0
+; uqxtn v0.4h, v4.4s
+; uqxtn2 v0.8h, v4.4s
; ret
function %uunarrow_i64x2(i64) -> i32x4 {
@@ -394,15 +394,15 @@ block0(v0: i64):
; VCode:
; block0:
-; dup v5.2d, x0
-; uqxtn v0.2s, v5.2d
-; uqxtn2 v0.4s, v0.4s, v5.2d
+; dup v4.2d, x0
+; uqxtn v0.2s, v4.2d
+; uqxtn2 v0.4s, v0.4s, v4.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v5.2d, x0
-; uqxtn v0.2s, v5.2d
-; uqxtn2 v0.4s, v5.2d
+; dup v4.2d, x0
+; uqxtn v0.2s, v4.2d
+; uqxtn2 v0.4s, v4.2d
; ret


@@ -15,16 +15,16 @@ block0(v0: i8, v1: i8):
; VCode:
; block0:
-; dup v6.16b, w0
-; dup v7.16b, w1
-; add v0.16b, v6.16b, v7.16b
+; dup v5.16b, w0
+; dup v6.16b, w1
+; add v0.16b, v5.16b, v6.16b
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.16b, w0
-; dup v7.16b, w1
-; add v0.16b, v6.16b, v7.16b
+; dup v5.16b, w0
+; dup v6.16b, w1
+; add v0.16b, v5.16b, v6.16b
; ret
function %i16x8_splat_add(i16, i16) -> i16x8 {
@@ -41,16 +41,16 @@ block0(v0: i16, v1: i16):
; VCode:
; block0:
-; dup v6.8h, w0
-; dup v7.8h, w1
-; add v0.8h, v6.8h, v7.8h
+; dup v5.8h, w0
+; dup v6.8h, w1
+; add v0.8h, v5.8h, v6.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.8h, w0
-; dup v7.8h, w1
-; add v0.8h, v6.8h, v7.8h
+; dup v5.8h, w0
+; dup v6.8h, w1
+; add v0.8h, v5.8h, v6.8h
; ret
function %i32x4_splat_mul(i32, i32) -> i32x4 {
@@ -67,16 +67,16 @@ block0(v0: i32, v1: i32):
; VCode:
; block0:
-; dup v6.4s, w0
-; dup v7.4s, w1
-; mul v0.4s, v6.4s, v7.4s
+; dup v5.4s, w0
+; dup v6.4s, w1
+; mul v0.4s, v5.4s, v6.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.4s, w0
-; dup v7.4s, w1
-; mul v0.4s, v6.4s, v7.4s
+; dup v5.4s, w0
+; dup v6.4s, w1
+; mul v0.4s, v5.4s, v6.4s
; ret
function %i64x2_splat_sub(i64, i64) -> i64x2 {
@@ -93,16 +93,16 @@ block0(v0: i64, v1: i64):
; VCode:
; block0:
-; dup v6.2d, x0
-; dup v7.2d, x1
-; sub v0.2d, v6.2d, v7.2d
+; dup v5.2d, x0
+; dup v6.2d, x1
+; sub v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, x0
-; dup v7.2d, x1
-; sub v0.2d, v6.2d, v7.2d
+; dup v5.2d, x0
+; dup v6.2d, x1
+; sub v0.2d, v5.2d, v6.2d
; ret
function %f32x4_splat_add(f32, f32) -> f32x4 {
@@ -119,16 +119,16 @@ block0(v0: f32, v1: f32):
; VCode:
; block0:
-; dup v6.4s, v0.s[0]
-; dup v7.4s, v1.s[0]
-; fadd v0.4s, v6.4s, v7.4s
+; dup v5.4s, v0.s[0]
+; dup v6.4s, v1.s[0]
+; fadd v0.4s, v5.4s, v6.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.4s, v0.s[0]
-; dup v7.4s, v1.s[0]
-; fadd v0.4s, v6.4s, v7.4s
+; dup v5.4s, v0.s[0]
+; dup v6.4s, v1.s[0]
+; fadd v0.4s, v5.4s, v6.4s
; ret
function %f64x2_splat_sub(f64, f64) -> f64x2 {
@@ -145,16 +145,16 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fsub v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fsub v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fsub v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fsub v0.2d, v5.2d, v6.2d
; ret
function %f64x2_splat_mul(f64, f64) -> f64x2 {
@@ -171,16 +171,16 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmul v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmul v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmul v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmul v0.2d, v5.2d, v6.2d
; ret
function %f64x2_splat_div(f64, f64) -> f64x2 {
@@ -197,16 +197,16 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fdiv v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fdiv v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fdiv v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fdiv v0.2d, v5.2d, v6.2d
; ret
function %f64x2_splat_min(f64, f64) -> f64x2 {
@@ -223,16 +223,16 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmin v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmin v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmin v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmin v0.2d, v5.2d, v6.2d
; ret
function %f64x2_splat_max(f64, f64) -> f64x2 {
@@ -249,16 +249,16 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmax v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmax v0.2d, v5.2d, v6.2d
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v6.2d, v0.d[0]
-; dup v7.2d, v1.d[0]
-; fmax v0.2d, v6.2d, v7.2d
+; dup v5.2d, v0.d[0]
+; dup v6.2d, v1.d[0]
+; fmax v0.2d, v5.2d, v6.2d
; ret
function %f64x2_splat_min_pseudo(f64, f64) -> f64x2 {
@@ -275,18 +275,18 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v7.2d, v0.d[0]
-; dup v16.2d, v1.d[0]
-; fcmgt v0.2d, v7.2d, v16.2d
-; bsl v0.16b, v0.16b, v16.16b, v7.16b
+; dup v6.2d, v0.d[0]
+; dup v7.2d, v1.d[0]
+; fcmgt v0.2d, v6.2d, v7.2d
+; bsl v0.16b, v0.16b, v7.16b, v6.16b
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v7.2d, v0.d[0]
-; dup v16.2d, v1.d[0]
-; fcmgt v0.2d, v7.2d, v16.2d
-; bsl v0.16b, v16.16b, v7.16b
+; dup v6.2d, v0.d[0]
+; dup v7.2d, v1.d[0]
+; fcmgt v0.2d, v6.2d, v7.2d
+; bsl v0.16b, v7.16b, v6.16b
; ret
function %f64x2_splat_max_pseudo(f64, f64) -> f64x2 {
@@ -303,17 +303,17 @@ block0(v0: f64, v1: f64):
; VCode:
; block0:
-; dup v7.2d, v0.d[0]
-; dup v16.2d, v1.d[0]
-; fcmgt v0.2d, v16.2d, v7.2d
-; bsl v0.16b, v0.16b, v16.16b, v7.16b
+; dup v6.2d, v0.d[0]
+; dup v7.2d, v1.d[0]
+; fcmgt v0.2d, v7.2d, v6.2d
+; bsl v0.16b, v0.16b, v7.16b, v6.16b
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v7.2d, v0.d[0]
-; dup v16.2d, v1.d[0]
-; fcmgt v0.2d, v16.2d, v7.2d
-; bsl v0.16b, v16.16b, v7.16b
+; dup v6.2d, v0.d[0]
+; dup v7.2d, v1.d[0]
+; fcmgt v0.2d, v7.2d, v6.2d
+; bsl v0.16b, v7.16b, v6.16b
; ret


@@ -16,14 +16,14 @@ block0(v0: i8):
; VCode:
; block0:
-; dup v4.16b, w0
-; sxtl2 v0.8h, v4.16b
+; dup v3.16b, w0
+; sxtl2 v0.8h, v3.16b
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.16b, w0
-; sshll2 v0.8h, v4.16b, #0
+; dup v3.16b, w0
+; sshll2 v0.8h, v3.16b, #0
; ret
function %swidenhigh_i16x8(i16) -> i32x4 {
@@ -41,14 +41,14 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v4.8h, w0
-; sxtl2 v0.4s, v4.8h
+; dup v3.8h, w0
+; sxtl2 v0.4s, v3.8h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.8h, w0
-; sshll2 v0.4s, v4.8h, #0
+; dup v3.8h, w0
+; sshll2 v0.4s, v3.8h, #0
; ret
function %swidenhigh_i32x4(i32) -> i64x2 {
@@ -66,14 +66,14 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v4.4s, w0
-; sxtl2 v0.2d, v4.4s
+; dup v3.4s, w0
+; sxtl2 v0.2d, v3.4s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.4s, w0
-; sshll2 v0.2d, v4.4s, #0
+; dup v3.4s, w0
+; sshll2 v0.2d, v3.4s, #0
; ret
function %swidenlow_i8x16(i8) -> i16x8 {
@@ -91,14 +91,14 @@ block0(v0: i8):
; VCode:
; block0:
-; dup v4.16b, w0
-; sxtl v0.8h, v4.8b
+; dup v3.16b, w0
+; sxtl v0.8h, v3.8b
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.16b, w0
-; sshll v0.8h, v4.8b, #0
+; dup v3.16b, w0
+; sshll v0.8h, v3.8b, #0
; ret
function %swidenlow_i16x8(i16) -> i32x4 {
@@ -116,14 +116,14 @@ block0(v0: i16):
; VCode:
; block0:
-; dup v4.8h, w0
-; sxtl v0.4s, v4.4h
+; dup v3.8h, w0
+; sxtl v0.4s, v3.4h
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.8h, w0
-; sshll v0.4s, v4.4h, #0
+; dup v3.8h, w0
+; sshll v0.4s, v3.4h, #0
; ret
function %swidenlow_i32x4(i32) -> i64x2 {
@@ -141,13 +141,13 @@ block0(v0: i32):
; VCode:
; block0:
-; dup v4.4s, w0
-; sxtl v0.2d, v4.2s
+; dup v3.4s, w0
+; sxtl v0.2d, v3.2s
; ret
;
; Disassembled:
; block0: ; offset 0x0
-; dup v4.4s, w0
-; sshll v0.2d, v4.2s, #0
+; dup v3.4s, w0
+; sshll v0.2d, v3.2s, #0
; ret


@@ -123,8 +123,8 @@ block0:
; mov fp, sp
; sub sp, sp, #16
; block0:
-; mov x2, sp
-; ldr q0, [x2]
+; mov x1, sp
+; ldr q0, [x1]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
@@ -135,8 +135,8 @@ block0:
; mov x29, sp
; sub sp, sp, #0x10
; block1: ; offset 0xc
-; mov x2, sp
-; ldr q0, [x2]
+; mov x1, sp
+; ldr q0, [x1]
; add sp, sp, #0x10
; ldp x29, x30, [sp], #0x10
; ret