Cranelift: use regalloc2 constraints on caller side of ABI code. (#4892)
* Cranelift: use regalloc2 constraints on caller side of ABI code. This PR updates the shared ABI code and backends to use register-operand constraints rather than explicit pinned-vreg moves for register arguments and return values. The s390x backend was not updated, because it has its own implementation of ABI code. Ideally we could converge back to the code shared by x64 and aarch64 (which didn't exist when s390x ported calls to ISLE, so the current situation is understandable, to be clear!). I'll leave this for future work. This PR exposed several places where regalloc2 needed to be a bit more flexible with constraints; it requires regalloc2#74 to be merged and pulled in. * Update to regalloc2 0.3.3. In addition to version bump, this required removing two asserts as `SpillSlot`s no longer carry their class (so we can't assert that they have the correct class). * Review comments. * Filetest updates. * Add cargo-vet audit for regalloc2 0.3.2 -> 0.3.3 upgrade. * Update to regalloc2 0.4.0.
This commit is contained in:
@@ -69,9 +69,10 @@ block0(v0: i64, v1: i64, v2: i64):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; add x0, x0, x2
|
||||
; add x0, x0, x1
|
||||
; ldr w0, [x0, #48]
|
||||
; mov x6, x0
|
||||
; add x6, x6, x2
|
||||
; add x6, x6, x1
|
||||
; ldr w0, [x6, #48]
|
||||
; ret
|
||||
|
||||
function %f10(i64, i64, i64) -> i32 {
|
||||
@@ -232,11 +233,11 @@ block0(v0: i64):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov x6, x0
|
||||
; ldp x7, x1, [x6]
|
||||
; mov x11, x7
|
||||
; stp x11, x1, [x0]
|
||||
; mov x0, x7
|
||||
; mov x8, x0
|
||||
; mov x6, x8
|
||||
; ldp x0, x1, [x6]
|
||||
; mov x7, x8
|
||||
; stp x0, x1, [x7]
|
||||
; ret
|
||||
|
||||
function %i128_imm_offset(i64) -> i128 {
|
||||
@@ -247,11 +248,11 @@ block0(v0: i64):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov x6, x0
|
||||
; ldp x7, x1, [x6, #16]
|
||||
; mov x11, x7
|
||||
; stp x11, x1, [x0, #16]
|
||||
; mov x0, x7
|
||||
; mov x8, x0
|
||||
; mov x6, x8
|
||||
; ldp x0, x1, [x6, #16]
|
||||
; mov x7, x8
|
||||
; stp x0, x1, [x7, #16]
|
||||
; ret
|
||||
|
||||
function %i128_imm_offset_large(i64) -> i128 {
|
||||
@@ -262,11 +263,11 @@ block0(v0: i64):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov x6, x0
|
||||
; ldp x7, x1, [x6, #504]
|
||||
; mov x11, x7
|
||||
; stp x11, x1, [x0, #504]
|
||||
; mov x0, x7
|
||||
; mov x8, x0
|
||||
; mov x6, x8
|
||||
; ldp x0, x1, [x6, #504]
|
||||
; mov x7, x8
|
||||
; stp x0, x1, [x7, #504]
|
||||
; ret
|
||||
|
||||
function %i128_imm_offset_negative_large(i64) -> i128 {
|
||||
@@ -277,11 +278,11 @@ block0(v0: i64):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov x6, x0
|
||||
; ldp x7, x1, [x6, #-512]
|
||||
; mov x11, x7
|
||||
; stp x11, x1, [x0, #-512]
|
||||
; mov x0, x7
|
||||
; mov x8, x0
|
||||
; mov x6, x8
|
||||
; ldp x0, x1, [x6, #-512]
|
||||
; mov x7, x8
|
||||
; stp x0, x1, [x7, #-512]
|
||||
; ret
|
||||
|
||||
function %i128_add_offset(i64) -> i128 {
|
||||
@@ -293,11 +294,11 @@ block0(v0: i64):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov x6, x0
|
||||
; ldp x7, x1, [x6, #32]
|
||||
; mov x11, x7
|
||||
; stp x11, x1, [x0, #32]
|
||||
; mov x0, x7
|
||||
; mov x8, x0
|
||||
; mov x6, x8
|
||||
; ldp x0, x1, [x6, #32]
|
||||
; mov x7, x8
|
||||
; stp x0, x1, [x7, #32]
|
||||
; ret
|
||||
|
||||
function %i128_32bit_sextend_simple(i32) -> i128 {
|
||||
@@ -327,13 +328,13 @@ block0(v0: i64, v1: i32):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov x7, x0
|
||||
; mov x11, x0
|
||||
; mov x7, x11
|
||||
; add x7, x7, x1, SXTW
|
||||
; ldp x9, x10, [x7, #24]
|
||||
; add x0, x0, x1, SXTW
|
||||
; mov x14, x9
|
||||
; ldp x0, x10, [x7, #24]
|
||||
; mov x9, x11
|
||||
; add x9, x9, x1, SXTW
|
||||
; mov x1, x10
|
||||
; stp x14, x1, [x0, #24]
|
||||
; mov x0, x9
|
||||
; stp x0, x1, [x9, #24]
|
||||
; ret
|
||||
|
||||
|
||||
@@ -109,3 +109,4 @@ block0(v0: i64):
|
||||
; blr x4
|
||||
; ldp fp, lr, [sp], #16
|
||||
; ret
|
||||
|
||||
|
||||
@@ -79,7 +79,7 @@ block0(v0: i8):
|
||||
; stp fp, lr, [sp, #-16]!
|
||||
; mov fp, sp
|
||||
; block0:
|
||||
; mov x15, x0
|
||||
; mov x8, x0
|
||||
; sub sp, sp, #16
|
||||
; virtual_sp_offset_adjust 16
|
||||
; movz x0, #42
|
||||
@@ -90,9 +90,9 @@ block0(v0: i8):
|
||||
; movz x5, #42
|
||||
; movz x6, #42
|
||||
; movz x7, #42
|
||||
; strb w15, [sp]
|
||||
; ldr x14, 8 ; b 12 ; data TestCase(%g) + 0
|
||||
; blr x14
|
||||
; strb w8, [sp]
|
||||
; ldr x8, 8 ; b 12 ; data TestCase(%g) + 0
|
||||
; blr x8
|
||||
; add sp, sp, #16
|
||||
; virtual_sp_offset_adjust -16
|
||||
; ldp fp, lr, [sp], #16
|
||||
@@ -105,7 +105,7 @@ block0(v0: i8):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov x15, x0
|
||||
; mov x8, x0
|
||||
; mov x13, x1
|
||||
; movz x0, #42
|
||||
; movz x1, #42
|
||||
@@ -115,7 +115,8 @@ block0(v0: i8):
|
||||
; movz x5, #42
|
||||
; movz x6, #42
|
||||
; movz x7, #42
|
||||
; strb w15, [x13]
|
||||
; mov x11, x8
|
||||
; strb w11, [x13]
|
||||
; ret
|
||||
|
||||
function %f8() {
|
||||
@@ -140,26 +141,26 @@ block0:
|
||||
; mov fp, sp
|
||||
; sub sp, sp, #48
|
||||
; block0:
|
||||
; ldr x8, 8 ; b 12 ; data TestCase(%g0) + 0
|
||||
; blr x8
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g0) + 0
|
||||
; blr x9
|
||||
; str q0, [sp, #32]
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g1) + 0
|
||||
; blr x9
|
||||
; str q0, [sp, #16]
|
||||
; ldr x10, 8 ; b 12 ; data TestCase(%g1) + 0
|
||||
; blr x10
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g1) + 0
|
||||
; blr x9
|
||||
; str q0, [sp]
|
||||
; ldr x12, 8 ; b 12 ; data TestCase(%g2) + 0
|
||||
; blr x12
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g2) + 0
|
||||
; blr x9
|
||||
; ldr x10, 8 ; b 12 ; data TestCase(%g3) + 0
|
||||
; ldr q0, [sp, #32]
|
||||
; ldr x14, 8 ; b 12 ; data TestCase(%g3) + 0
|
||||
; blr x14
|
||||
; blr x10
|
||||
; ldr x11, 8 ; b 12 ; data TestCase(%g4) + 0
|
||||
; ldr q0, [sp, #16]
|
||||
; ldr x0, 8 ; b 12 ; data TestCase(%g4) + 0
|
||||
; blr x0
|
||||
; blr x11
|
||||
; ldr x12, 8 ; b 12 ; data TestCase(%g4) + 0
|
||||
; ldr q0, [sp]
|
||||
; ldr x2, 8 ; b 12 ; data TestCase(%g4) + 0
|
||||
; blr x2
|
||||
; blr x12
|
||||
; add sp, sp, #48
|
||||
; ldp fp, lr, [sp], #16
|
||||
; ret
|
||||
@@ -184,26 +185,26 @@ block0:
|
||||
; mov fp, sp
|
||||
; sub sp, sp, #48
|
||||
; block0:
|
||||
; ldr x8, 8 ; b 12 ; data TestCase(%g0) + 0
|
||||
; blr x8
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g0) + 0
|
||||
; blr x9
|
||||
; str q0, [sp, #32]
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g0) + 0
|
||||
; blr x9
|
||||
; str q0, [sp, #16]
|
||||
; ldr x10, 8 ; b 12 ; data TestCase(%g0) + 0
|
||||
; blr x10
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g0) + 0
|
||||
; blr x9
|
||||
; str q0, [sp]
|
||||
; ldr x12, 8 ; b 12 ; data TestCase(%g1) + 0
|
||||
; blr x12
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g1) + 0
|
||||
; blr x9
|
||||
; ldr x10, 8 ; b 12 ; data TestCase(%g2) + 0
|
||||
; ldr q0, [sp, #32]
|
||||
; ldr x14, 8 ; b 12 ; data TestCase(%g2) + 0
|
||||
; blr x14
|
||||
; blr x10
|
||||
; ldr x11, 8 ; b 12 ; data TestCase(%g2) + 0
|
||||
; ldr q0, [sp, #16]
|
||||
; ldr x0, 8 ; b 12 ; data TestCase(%g2) + 0
|
||||
; blr x0
|
||||
; blr x11
|
||||
; ldr x12, 8 ; b 12 ; data TestCase(%g2) + 0
|
||||
; ldr q0, [sp]
|
||||
; ldr x2, 8 ; b 12 ; data TestCase(%g2) + 0
|
||||
; blr x2
|
||||
; blr x12
|
||||
; add sp, sp, #48
|
||||
; ldp fp, lr, [sp], #16
|
||||
; ret
|
||||
@@ -232,26 +233,26 @@ block0:
|
||||
; mov fp, sp
|
||||
; sub sp, sp, #48
|
||||
; block0:
|
||||
; ldr x8, 8 ; b 12 ; data TestCase(%g0) + 0
|
||||
; blr x8
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g0) + 0
|
||||
; blr x9
|
||||
; str q0, [sp, #32]
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g1) + 0
|
||||
; blr x9
|
||||
; str q0, [sp, #16]
|
||||
; ldr x10, 8 ; b 12 ; data TestCase(%g2) + 0
|
||||
; blr x10
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g2) + 0
|
||||
; blr x9
|
||||
; str q0, [sp]
|
||||
; ldr x12, 8 ; b 12 ; data TestCase(%g3) + 0
|
||||
; blr x12
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%g3) + 0
|
||||
; blr x9
|
||||
; ldr x10, 8 ; b 12 ; data TestCase(%g4) + 0
|
||||
; ldr q0, [sp, #32]
|
||||
; ldr x14, 8 ; b 12 ; data TestCase(%g4) + 0
|
||||
; blr x14
|
||||
; blr x10
|
||||
; ldr x11, 8 ; b 12 ; data TestCase(%g5) + 0
|
||||
; ldr q0, [sp, #16]
|
||||
; ldr x0, 8 ; b 12 ; data TestCase(%g5) + 0
|
||||
; blr x0
|
||||
; blr x11
|
||||
; ldr x12, 8 ; b 12 ; data TestCase(%g6) + 0
|
||||
; ldr q0, [sp]
|
||||
; ldr x2, 8 ; b 12 ; data TestCase(%g6) + 0
|
||||
; blr x2
|
||||
; blr x12
|
||||
; add sp, sp, #48
|
||||
; ldp fp, lr, [sp], #16
|
||||
; ret
|
||||
@@ -279,12 +280,11 @@ block0(v0: i64):
|
||||
; stp fp, lr, [sp, #-16]!
|
||||
; mov fp, sp
|
||||
; block0:
|
||||
; mov x7, x0
|
||||
; mov x1, x0
|
||||
; movz x0, #42
|
||||
; movz x2, #42
|
||||
; mov x1, x7
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%f11) + 0
|
||||
; blr x9
|
||||
; ldr x7, 8 ; b 12 ; data TestCase(%f11) + 0
|
||||
; blr x7
|
||||
; ldp fp, lr, [sp], #16
|
||||
; ret
|
||||
|
||||
@@ -311,12 +311,11 @@ block0(v0: i64):
|
||||
; stp fp, lr, [sp, #-16]!
|
||||
; mov fp, sp
|
||||
; block0:
|
||||
; mov x7, x0
|
||||
; mov x2, x0
|
||||
; movz x3, #42
|
||||
; movz x0, #42
|
||||
; mov x2, x7
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%f12) + 0
|
||||
; blr x9
|
||||
; ldr x7, 8 ; b 12 ; data TestCase(%f12) + 0
|
||||
; blr x7
|
||||
; ldp fp, lr, [sp], #16
|
||||
; ret
|
||||
|
||||
@@ -343,12 +342,11 @@ block0(v0: i64):
|
||||
; stp fp, lr, [sp, #-16]!
|
||||
; mov fp, sp
|
||||
; block0:
|
||||
; mov x7, x0
|
||||
; mov x1, x0
|
||||
; movz x2, #42
|
||||
; movz x0, #42
|
||||
; mov x1, x7
|
||||
; ldr x9, 8 ; b 12 ; data TestCase(%f13) + 0
|
||||
; blr x9
|
||||
; ldr x7, 8 ; b 12 ; data TestCase(%f13) + 0
|
||||
; blr x7
|
||||
; ldp fp, lr, [sp], #16
|
||||
; ret
|
||||
|
||||
@@ -376,20 +374,19 @@ block0(v0: i128, v1: i64):
|
||||
; stp fp, lr, [sp, #-16]!
|
||||
; mov fp, sp
|
||||
; block0:
|
||||
; mov x11, x2
|
||||
; mov x6, x2
|
||||
; sub sp, sp, #16
|
||||
; virtual_sp_offset_adjust 16
|
||||
; mov x10, x0
|
||||
; mov x12, x1
|
||||
; mov x2, x10
|
||||
; mov x3, x12
|
||||
; mov x4, x10
|
||||
; mov x5, x12
|
||||
; mov x6, x11
|
||||
; str x10, [sp]
|
||||
; str x12, [sp, #8]
|
||||
; ldr x7, 8 ; b 12 ; data TestCase(%f14) + 0
|
||||
; blr x7
|
||||
; str x0, [sp]
|
||||
; mov x4, x0
|
||||
; str x1, [sp, #8]
|
||||
; mov x5, x1
|
||||
; ldr x12, 8 ; b 12 ; data TestCase(%f14) + 0
|
||||
; mov x0, x4
|
||||
; mov x2, x4
|
||||
; mov x1, x5
|
||||
; mov x3, x5
|
||||
; blr x12
|
||||
; add sp, sp, #16
|
||||
; virtual_sp_offset_adjust -16
|
||||
; ldp fp, lr, [sp], #16
|
||||
@@ -419,20 +416,19 @@ block0(v0: i128, v1: i64):
|
||||
; stp fp, lr, [sp, #-16]!
|
||||
; mov fp, sp
|
||||
; block0:
|
||||
; mov x11, x2
|
||||
; mov x6, x2
|
||||
; sub sp, sp, #16
|
||||
; virtual_sp_offset_adjust 16
|
||||
; mov x10, x0
|
||||
; mov x12, x1
|
||||
; mov x2, x10
|
||||
; mov x3, x12
|
||||
; mov x4, x10
|
||||
; mov x5, x12
|
||||
; mov x6, x11
|
||||
; str x10, [sp]
|
||||
; str x12, [sp, #8]
|
||||
; ldr x7, 8 ; b 12 ; data TestCase(%f15) + 0
|
||||
; blr x7
|
||||
; str x0, [sp]
|
||||
; mov x4, x0
|
||||
; str x1, [sp, #8]
|
||||
; mov x5, x1
|
||||
; ldr x12, 8 ; b 12 ; data TestCase(%f15) + 0
|
||||
; mov x0, x4
|
||||
; mov x2, x4
|
||||
; mov x1, x5
|
||||
; mov x3, x5
|
||||
; blr x12
|
||||
; add sp, sp, #16
|
||||
; virtual_sp_offset_adjust -16
|
||||
; ldp fp, lr, [sp], #16
|
||||
@@ -496,8 +492,8 @@ block0(v0: i64):
|
||||
; str x24, [sp, #-16]!
|
||||
; block0:
|
||||
; mov x24, x8
|
||||
; ldr x5, 8 ; b 12 ; data TestCase(%g) + 0
|
||||
; blr x5
|
||||
; ldr x4, 8 ; b 12 ; data TestCase(%g) + 0
|
||||
; blr x4
|
||||
; mov x8, x24
|
||||
; ldr x24, [sp], #16
|
||||
; ldp fp, lr, [sp], #16
|
||||
|
||||
@@ -918,8 +918,9 @@ block0(v0: f32x4, v1: f32x4, v2: f32x4):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; fmla v2.4s, v2.4s, v0.4s, v1.4s
|
||||
; mov v0.16b, v2.16b
|
||||
; mov v5.16b, v2.16b
|
||||
; fmla v5.4s, v5.4s, v0.4s, v1.4s
|
||||
; mov v0.16b, v5.16b
|
||||
; ret
|
||||
|
||||
function %f79(f32x2, f32x2, f32x2) -> f32x2 {
|
||||
@@ -929,8 +930,9 @@ block0(v0: f32x2, v1: f32x2, v2: f32x2):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; fmla v2.2s, v2.2s, v0.2s, v1.2s
|
||||
; mov v0.16b, v2.16b
|
||||
; mov v5.16b, v2.16b
|
||||
; fmla v5.2s, v5.2s, v0.2s, v1.2s
|
||||
; mov v0.16b, v5.16b
|
||||
; ret
|
||||
|
||||
function %f80(f64x2, f64x2, f64x2) -> f64x2 {
|
||||
@@ -940,8 +942,9 @@ block0(v0: f64x2, v1: f64x2, v2: f64x2):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; fmla v2.2d, v2.2d, v0.2d, v1.2d
|
||||
; mov v0.16b, v2.16b
|
||||
; mov v5.16b, v2.16b
|
||||
; fmla v5.2d, v5.2d, v0.2d, v1.2d
|
||||
; mov v0.16b, v5.16b
|
||||
; ret
|
||||
|
||||
function %f81(f32x2, f32x2) -> f32x2 {
|
||||
|
||||
@@ -67,15 +67,15 @@ block3(v7: r64, v8: r64):
|
||||
; mov fp, sp
|
||||
; sub sp, sp, #32
|
||||
; block0:
|
||||
; str x1, [sp, #16]
|
||||
; str x0, [sp, #8]
|
||||
; ldr x1, 8 ; b 12 ; data TestCase(%f) + 0
|
||||
; blr x1
|
||||
; mov x3, sp
|
||||
; str x1, [sp, #16]
|
||||
; ldr x3, 8 ; b 12 ; data TestCase(%f) + 0
|
||||
; blr x3
|
||||
; mov x2, sp
|
||||
; ldr x9, [sp, #8]
|
||||
; str x9, [x3]
|
||||
; and w4, w0, #1
|
||||
; cbz x4, label1 ; b label3
|
||||
; str x9, [x2]
|
||||
; and w3, w0, #1
|
||||
; cbz x3, label1 ; b label3
|
||||
; block1:
|
||||
; b label2
|
||||
; block2:
|
||||
@@ -89,8 +89,8 @@ block3(v7: r64, v8: r64):
|
||||
; ldr x1, [sp, #16]
|
||||
; b label5
|
||||
; block5:
|
||||
; mov x5, sp
|
||||
; ldr x2, [x5]
|
||||
; mov x4, sp
|
||||
; ldr x2, [x4]
|
||||
; add sp, sp, #32
|
||||
; ldp fp, lr, [sp], #16
|
||||
; ret
|
||||
|
||||
@@ -9,8 +9,9 @@ block0(v0: i16x4, v1: i16x4):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov v0.d[1], v0.d[1], v1.d[0]
|
||||
; sqxtn v0.8b, v0.8h
|
||||
; mov v4.16b, v0.16b
|
||||
; mov v4.d[1], v4.d[1], v1.d[0]
|
||||
; sqxtn v0.8b, v4.8h
|
||||
; ret
|
||||
|
||||
function %snarrow_i16x8(i16x8, i16x8) -> i8x16 {
|
||||
@@ -31,8 +32,9 @@ block0(v0: i32x2, v1: i32x2):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov v0.d[1], v0.d[1], v1.d[0]
|
||||
; sqxtn v0.4h, v0.4s
|
||||
; mov v4.16b, v0.16b
|
||||
; mov v4.d[1], v4.d[1], v1.d[0]
|
||||
; sqxtn v0.4h, v4.4s
|
||||
; ret
|
||||
|
||||
function %snarrow_i32x4(i32x4, i32x4) -> i16x8 {
|
||||
@@ -64,8 +66,9 @@ block0(v0: i16x4, v1: i16x4):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov v0.d[1], v0.d[1], v1.d[0]
|
||||
; sqxtun v0.8b, v0.8h
|
||||
; mov v4.16b, v0.16b
|
||||
; mov v4.d[1], v4.d[1], v1.d[0]
|
||||
; sqxtun v0.8b, v4.8h
|
||||
; ret
|
||||
|
||||
function %unarrow_i16x8(i16x8, i16x8) -> i8x16 {
|
||||
@@ -86,8 +89,9 @@ block0(v0: i32x2, v1: i32x2):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov v0.d[1], v0.d[1], v1.d[0]
|
||||
; sqxtun v0.4h, v0.4s
|
||||
; mov v4.16b, v0.16b
|
||||
; mov v4.d[1], v4.d[1], v1.d[0]
|
||||
; sqxtun v0.4h, v4.4s
|
||||
; ret
|
||||
|
||||
function %unarrow_i32x4(i32x4, i32x4) -> i16x8 {
|
||||
@@ -119,8 +123,9 @@ block0(v0: i16x4, v1: i16x4):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov v0.d[1], v0.d[1], v1.d[0]
|
||||
; uqxtn v0.8b, v0.8h
|
||||
; mov v4.16b, v0.16b
|
||||
; mov v4.d[1], v4.d[1], v1.d[0]
|
||||
; uqxtn v0.8b, v4.8h
|
||||
; ret
|
||||
|
||||
function %uunarrow_i16x8(i16x8, i16x8) -> i8x16 {
|
||||
@@ -141,8 +146,9 @@ block0(v0: i32x2, v1: i32x2):
|
||||
}
|
||||
|
||||
; block0:
|
||||
; mov v0.d[1], v0.d[1], v1.d[0]
|
||||
; uqxtn v0.4h, v0.4s
|
||||
; mov v4.16b, v0.16b
|
||||
; mov v4.d[1], v4.d[1], v1.d[0]
|
||||
; uqxtn v0.4h, v4.4s
|
||||
; ret
|
||||
|
||||
function %uunarrow_i32x4(i32x4, i32x4) -> i16x8 {
|
||||
|
||||
@@ -20,8 +20,9 @@ block0(v0: i32):
|
||||
; block0:
|
||||
; mov x25, x0
|
||||
; elf_tls_get_addr x0, userextname0
|
||||
; mov x7, x25
|
||||
; mov x1, x0
|
||||
; mov x0, x25
|
||||
; mov x0, x7
|
||||
; ldp d8, d9, [sp], #16
|
||||
; ldp d10, d11, [sp], #16
|
||||
; ldp d12, d13, [sp], #16
|
||||
|
||||
Reference in New Issue
Block a user