Switch Cranelift over to regalloc2. (#3989)

This PR switches Cranelift over to the new register allocator, regalloc2.

See [this document](https://gist.github.com/cfallin/08553421a91f150254fe878f67301801)
for a summary of the design changes. This switchover has implications for
core VCode/MachInst types and the lowering pass.
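
To give a flavor of the lowering-side change, here is a minimal sketch against
regalloc2's public `Operand`/`VReg` API (the `AluRRR` type below is
hypothetical, standing in for a real MachInst; Cranelift's actual glue lives in
the VCode layer): an instruction now describes each register operand, with its
constraint, to the allocator, rather than (roughly) mutating a use/def
collector as under the old interface.

```
// A minimal sketch, assuming only regalloc2's public API; `AluRRR` is a
// hypothetical stand-in for a machine instruction, not Cranelift's type.
use regalloc2::{Operand, RegClass, VReg};

/// Hypothetical three-register ALU instruction over virtual registers.
struct AluRRR {
    dst: VReg,
    src1: VReg,
    src2: VReg,
}

impl AluRRR {
    /// Report operand constraints: one def and two uses, all in the integer
    /// register class, with the register choice left to the allocator.
    fn collect_operands(&self, out: &mut Vec<Operand>) {
        out.push(Operand::reg_def(self.dst));
        out.push(Operand::reg_use(self.src1));
        out.push(Operand::reg_use(self.src2));
    }
}

fn main() {
    let v0 = VReg::new(0, RegClass::Int);
    let v1 = VReg::new(1, RegClass::Int);
    let v2 = VReg::new(2, RegClass::Int);
    let add = AluRRR { dst: v2, src1: v0, src2: v1 };
    let mut ops = Vec::new();
    add.collect_operands(&mut ops);
    println!("{:?}", ops); // debug-print the collected constraints
}
```

Fixed-register and reuse constraints are expressed the same way (e.g.
`Operand::reg_fixed_use`, `Operand::reg_reuse_def`), which lets regalloc2
absorb constraint handling that lowering previously did by hand.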

Overall, this change improves both compile time and the runtime performance of
generated code, as reported in #3942:

```
Benchmark       Compilation (wallclock)     Execution (wallclock)
blake3-scalar   25% faster                  28% faster
blake3-simd     no diff                     no diff
meshoptimizer   19% faster                  17% faster
pulldown-cmark  17% faster                  no diff
bz2             15% faster                  no diff
SpiderMonkey,   21% faster                  2% faster
  fib(30)
clang.wasm      42% faster                  N/A
```
Commit a0318f36f0 (parent bfae6384aa), authored by Chris Fallin on
2022-04-14 10:28:21 -07:00 and committed by GitHub.
181 changed files with 16887 additions and 21587 deletions.

[file diff: aarch64 addressing-mode load/store tests]

@@ -10,14 +10,9 @@ block0(v0: i64, v1: i32):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldr w0, [x0, w1, SXTW]
; Inst 1: ret
; }}
; block0:
; ldr w0, [x0, w1, SXTW]
; ret
function %f6(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
@@ -27,14 +22,9 @@ block0(v0: i64, v1: i32):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldr w0, [x0, w1, SXTW]
; Inst 1: ret
; }}
; block0:
; ldr w0, [x0, w1, SXTW]
; ret
function %f7(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -45,15 +35,10 @@ block0(v0: i32, v1: i32):
return v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: mov w0, w0
; Inst 1: ldr w0, [x0, w1, UXTW]
; Inst 2: ret
; }}
; block0:
; mov w6, w0
; ldr w0, [x6, w1, UXTW]
; ret
function %f8(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
@@ -66,17 +51,12 @@ block0(v0: i64, v1: i32):
return v7
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: add x2, x0, #68
; Inst 1: add x0, x2, x0
; Inst 2: add x0, x0, x1, SXTW
; Inst 3: ldr w0, [x0, w1, SXTW]
; Inst 4: ret
; }}
; block0:
; add x6, x0, #68
; add x6, x6, x0
; add x6, x6, x1, SXTW
; ldr w0, [x6, w1, SXTW]
; ret
function %f9(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -88,16 +68,11 @@ block0(v0: i64, v1: i64, v2: i64):
return v7
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: add x0, x0, x2
; Inst 1: add x0, x0, x1
; Inst 2: ldur w0, [x0, #48]
; Inst 3: ret
; }}
; block0:
; add x0, x0, x2
; add x0, x0, x1
; ldr w0, [x0, #48]
; ret
function %f10(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -109,17 +84,12 @@ block0(v0: i64, v1: i64, v2: i64):
return v7
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: movz x3, #4100
; Inst 1: add x1, x3, x1
; Inst 2: add x1, x1, x2
; Inst 3: ldr w0, [x1, x0]
; Inst 4: ret
; }}
; block0:
; movz x8, #4100
; add x8, x8, x1
; add x8, x8, x2
; ldr w0, [x8, x0]
; ret
function %f10() -> i32 {
block0:
@@ -128,15 +98,10 @@ block0:
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #1234
; Inst 1: ldr w0, [x0]
; Inst 2: ret
; }}
; block0:
; movz x2, #1234
; ldr w0, [x2]
; ret
function %f11(i64) -> i32 {
block0(v0: i64):
@@ -146,15 +111,10 @@ block0(v0: i64):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: add x0, x0, #8388608
; Inst 1: ldr w0, [x0]
; Inst 2: ret
; }}
; block0:
; add x4, x0, #8388608
; ldr w0, [x4]
; ret
function %f12(i64) -> i32 {
block0(v0: i64):
@@ -164,15 +124,10 @@ block0(v0: i64):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sub x0, x0, #4
; Inst 1: ldr w0, [x0]
; Inst 2: ret
; }}
; block0:
; sub x4, x0, #4
; ldr w0, [x4]
; ret
function %f13(i64) -> i32 {
block0(v0: i64):
@@ -182,17 +137,12 @@ block0(v0: i64):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: movz w1, #51712
; Inst 1: movk w1, #15258, LSL #16
; Inst 2: add x0, x1, x0
; Inst 3: ldr w0, [x0]
; Inst 4: ret
; }}
; block0:
; movz w4, #51712
; movk w4, #15258, LSL #16
; add x4, x4, x0
; ldr w0, [x4]
; ret
function %f14(i32) -> i32 {
block0(v0: i32):
@@ -201,15 +151,10 @@ block0(v0: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxtw x0, w0
; Inst 1: ldr w0, [x0]
; Inst 2: ret
; }}
; block0:
; sxtw x4, w0
; ldr w0, [x4]
; ret
function %f15(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -220,15 +165,10 @@ block0(v0: i32, v1: i32):
return v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxtw x0, w0
; Inst 1: ldr w0, [x0, w1, SXTW]
; Inst 2: ret
; }}
; block0:
; sxtw x6, w0
; ldr w0, [x6, w1, SXTW]
; ret
function %f18(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -238,15 +178,10 @@ block0(v0: i64, v1: i64, v2: i64):
return v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movn w0, #4097
; Inst 1: ldrsh x0, [x0]
; Inst 2: ret
; }}
; block0:
; movn w8, #4097
; ldrsh x0, [x8]
; ret
function %f19(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -256,15 +191,10 @@ block0(v0: i64, v1: i64, v2: i64):
return v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #4098
; Inst 1: ldrsh x0, [x0]
; Inst 2: ret
; }}
; block0:
; movz x8, #4098
; ldrsh x0, [x8]
; ret
function %f20(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -274,16 +204,11 @@ block0(v0: i64, v1: i64, v2: i64):
return v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: movn w0, #4097
; Inst 1: sxtw x0, w0
; Inst 2: ldrsh x0, [x0]
; Inst 3: ret
; }}
; block0:
; movn w8, #4097
; sxtw x10, w8
; ldrsh x0, [x10]
; ret
function %f21(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -293,16 +218,11 @@ block0(v0: i64, v1: i64, v2: i64):
return v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: movz x0, #4098
; Inst 1: sxtw x0, w0
; Inst 2: ldrsh x0, [x0]
; Inst 3: ret
; }}
; block0:
; movz x8, #4098
; sxtw x10, w8
; ldrsh x0, [x10]
; ret
function %i128(i64) -> i128 {
block0(v0: i64):
@@ -311,17 +231,13 @@ block0(v0: i64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov x1, x0
; Inst 1: ldp x2, x1, [x1]
; Inst 2: stp x2, x1, [x0]
; Inst 3: mov x0, x2
; Inst 4: ret
; }}
; block0:
; mov x8, x0
; ldp x3, x1, [x8]
; mov x11, x3
; stp x11, x1, [x0]
; mov x0, x3
; ret
function %i128_imm_offset(i64) -> i128 {
block0(v0: i64):
@@ -330,17 +246,13 @@ block0(v0: i64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov x1, x0
; Inst 1: ldp x2, x1, [x1, #16]
; Inst 2: stp x2, x1, [x0, #16]
; Inst 3: mov x0, x2
; Inst 4: ret
; }}
; block0:
; mov x8, x0
; ldp x3, x1, [x8, #16]
; mov x11, x3
; stp x11, x1, [x0, #16]
; mov x0, x3
; ret
function %i128_imm_offset_large(i64) -> i128 {
block0(v0: i64):
@@ -349,17 +261,13 @@ block0(v0: i64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov x1, x0
; Inst 1: ldp x2, x1, [x1, #504]
; Inst 2: stp x2, x1, [x0, #504]
; Inst 3: mov x0, x2
; Inst 4: ret
; }}
; block0:
; mov x8, x0
; ldp x3, x1, [x8, #504]
; mov x11, x3
; stp x11, x1, [x0, #504]
; mov x0, x3
; ret
function %i128_imm_offset_negative_large(i64) -> i128 {
block0(v0: i64):
@@ -368,17 +276,13 @@ block0(v0: i64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov x1, x0
; Inst 1: ldp x2, x1, [x1, #-512]
; Inst 2: stp x2, x1, [x0, #-512]
; Inst 3: mov x0, x2
; Inst 4: ret
; }}
; block0:
; mov x8, x0
; ldp x3, x1, [x8, #-512]
; mov x11, x3
; stp x11, x1, [x0, #-512]
; mov x0, x3
; ret
function %i128_add_offset(i64) -> i128 {
block0(v0: i64):
@@ -388,17 +292,13 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov x1, x0
; Inst 1: ldp x2, x1, [x1, #32]
; Inst 2: stp x2, x1, [x0, #32]
; Inst 3: mov x0, x2
; Inst 4: ret
; }}
; block0:
; mov x8, x0
; ldp x3, x1, [x8, #32]
; mov x11, x3
; stp x11, x1, [x0, #32]
; mov x0, x3
; ret
function %i128_32bit_sextend_simple(i32) -> i128 {
block0(v0: i32):
@@ -408,18 +308,13 @@ block0(v0: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: sxtw x1, w0
; Inst 1: ldp x2, x1, [x1]
; Inst 2: sxtw x0, w0
; Inst 3: stp x2, x1, [x0]
; Inst 4: mov x0, x2
; Inst 5: ret
; }}
; block0:
; sxtw x8, w0
; ldp x4, x1, [x8]
; sxtw x9, w0
; mov x0, x4
; stp x0, x1, [x9]
; ret
function %i128_32bit_sextend(i64, i32) -> i128 {
block0(v0: i64, v1: i32):
@@ -431,18 +326,14 @@ block0(v0: i64, v1: i32):
return v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: mov x2, x0
; Inst 1: add x2, x2, x1, SXTW
; Inst 2: ldp x3, x2, [x2, #24]
; Inst 3: add x0, x0, x1, SXTW
; Inst 4: stp x3, x2, [x0, #24]
; Inst 5: mov x0, x3
; Inst 6: mov x1, x2
; Inst 7: ret
; }}
; block0:
; mov x10, x0
; add x10, x10, x1, SXTW
; ldp x6, x7, [x10, #24]
; add x0, x0, x1, SXTW
; mov x15, x6
; mov x1, x7
; stp x15, x1, [x0, #24]
; mov x0, x6
; ret

[file diff: aarch64 integer arithmetic tests]

@@ -8,14 +8,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x0, x1
; Inst 1: ret
; }}
; block0:
; add x0, x0, x1
; ret
function %f2(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -23,14 +18,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sub x0, x0, x1
; Inst 1: ret
; }}
; block0:
; sub x0, x0, x1
; ret
function %f3(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -38,14 +28,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: madd x0, x0, x1, xzr
; Inst 1: ret
; }}
; block0:
; madd x0, x0, x1, xzr
; ret
function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -53,14 +38,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umulh x0, x0, x1
; Inst 1: ret
; }}
; block0:
; umulh x0, x0, x1
; ret
function %f5(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -68,14 +48,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smulh x0, x0, x1
; Inst 1: ret
; }}
; block0:
; smulh x0, x0, x1
; ret
function %f6(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -83,18 +58,13 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: cbnz x1, 8 ; udf
; Inst 1: adds xzr, x1, #1
; Inst 2: ccmp x0, #1, #nzcv, eq
; Inst 3: b.vc 8 ; udf
; Inst 4: sdiv x0, x0, x1
; Inst 5: ret
; }}
; block0:
; cbnz x1, 8 ; udf
; adds xzr, x1, #1
; ccmp x0, #1, #nzcv, eq
; b.vc 8 ; udf
; sdiv x0, x0, x1
; ret
function %f7(i64) -> i64 {
block0(v0: i64):
@@ -103,15 +73,10 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: orr x1, xzr, #2
; Inst 1: sdiv x0, x0, x1
; Inst 2: ret
; }}
; block0:
; orr x3, xzr, #2
; sdiv x0, x0, x3
; ret
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -119,15 +84,10 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: cbnz x1, 8 ; udf
; Inst 1: udiv x0, x0, x1
; Inst 2: ret
; }}
; block0:
; cbnz x1, 8 ; udf
; udiv x0, x0, x1
; ret
function %f9(i64) -> i64 {
block0(v0: i64):
@@ -136,15 +96,10 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: orr x1, xzr, #2
; Inst 1: udiv x0, x0, x1
; Inst 2: ret
; }}
; block0:
; orr x3, xzr, #2
; udiv x0, x0, x3
; ret
function %f10(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -152,16 +107,11 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: cbnz x1, 8 ; udf
; Inst 1: sdiv x2, x0, x1
; Inst 2: msub x0, x2, x1, x0
; Inst 3: ret
; }}
; block0:
; cbnz x1, 8 ; udf
; sdiv x6, x0, x1
; msub x0, x6, x1, x0
; ret
function %f11(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -169,16 +119,11 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: cbnz x1, 8 ; udf
; Inst 1: udiv x2, x0, x1
; Inst 2: msub x0, x2, x1, x0
; Inst 3: ret
; }}
; block0:
; cbnz x1, 8 ; udf
; udiv x6, x0, x1
; msub x0, x6, x1, x0
; ret
function %f12(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -186,20 +131,15 @@ block0(v0: i32, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: sxtw x0, w0
; Inst 1: sxtw x1, w1
; Inst 2: cbnz x1, 8 ; udf
; Inst 3: adds wzr, w1, #1
; Inst 4: ccmp w0, #1, #nzcv, eq
; Inst 5: b.vc 8 ; udf
; Inst 6: sdiv x0, x0, x1
; Inst 7: ret
; }}
; block0:
; sxtw x5, w0
; sxtw x7, w1
; cbnz x7, 8 ; udf
; adds wzr, w7, #1
; ccmp w5, #1, #nzcv, eq
; b.vc 8 ; udf
; sdiv x0, x5, x7
; ret
function %f13(i32) -> i32 {
block0(v0: i32):
@@ -208,16 +148,11 @@ block0(v0: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: sxtw x0, w0
; Inst 1: orr x1, xzr, #2
; Inst 2: sdiv x0, x0, x1
; Inst 3: ret
; }}
; block0:
; sxtw x3, w0
; orr x5, xzr, #2
; sdiv x0, x3, x5
; ret
function %f14(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -225,17 +160,12 @@ block0(v0: i32, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov w0, w0
; Inst 1: mov w1, w1
; Inst 2: cbnz x1, 8 ; udf
; Inst 3: udiv x0, x0, x1
; Inst 4: ret
; }}
; block0:
; mov w5, w0
; mov w7, w1
; cbnz x7, 8 ; udf
; udiv x0, x5, x7
; ret
function %f15(i32) -> i32 {
block0(v0: i32):
@@ -244,16 +174,11 @@ block0(v0: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: mov w0, w0
; Inst 1: orr x1, xzr, #2
; Inst 2: udiv x0, x0, x1
; Inst 3: ret
; }}
; block0:
; mov w3, w0
; orr x5, xzr, #2
; udiv x0, x3, x5
; ret
function %f16(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -261,18 +186,13 @@ block0(v0: i32, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: sxtw x0, w0
; Inst 1: sxtw x1, w1
; Inst 2: cbnz x1, 8 ; udf
; Inst 3: sdiv x2, x0, x1
; Inst 4: msub x0, x2, x1, x0
; Inst 5: ret
; }}
; block0:
; sxtw x5, w0
; sxtw x7, w1
; cbnz x7, 8 ; udf
; sdiv x10, x5, x7
; msub x0, x10, x7, x5
; ret
function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -280,18 +200,13 @@ block0(v0: i32, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: mov w0, w0
; Inst 1: mov w1, w1
; Inst 2: cbnz x1, 8 ; udf
; Inst 3: udiv x2, x0, x1
; Inst 4: msub x0, x2, x1, x0
; Inst 5: ret
; }}
; block0:
; mov w5, w0
; mov w7, w1
; cbnz x7, 8 ; udf
; udiv x10, x5, x7
; msub x0, x10, x7, x5
; ret
function %f18(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -299,14 +214,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: and x0, x0, x1
; Inst 1: ret
; }}
; block0:
; and x0, x0, x1
; ret
function %f19(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -314,14 +224,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: orr x0, x0, x1
; Inst 1: ret
; }}
; block0:
; orr x0, x0, x1
; ret
function %f20(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -329,14 +234,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: eor x0, x0, x1
; Inst 1: ret
; }}
; block0:
; eor x0, x0, x1
; ret
function %f21(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -344,14 +244,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: bic x0, x0, x1
; Inst 1: ret
; }}
; block0:
; bic x0, x0, x1
; ret
function %f22(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -359,14 +254,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: orn x0, x0, x1
; Inst 1: ret
; }}
; block0:
; orn x0, x0, x1
; ret
function %f23(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -374,14 +264,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: eon x0, x0, x1
; Inst 1: ret
; }}
; block0:
; eon x0, x0, x1
; ret
function %f24(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -389,14 +274,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: orn x0, xzr, x0
; Inst 1: ret
; }}
; block0:
; orn x0, xzr, x0
; ret
function %f25(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -406,14 +286,9 @@ block0(v0: i32, v1: i32):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sub w0, w1, w0, LSL 21
; Inst 1: ret
; }}
; block0:
; sub w0, w1, w0, LSL 21
; ret
function %f26(i32) -> i32 {
block0(v0: i32):
@@ -422,14 +297,9 @@ block0(v0: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sub w0, w0, #1
; Inst 1: ret
; }}
; block0:
; sub w0, w0, #1
; ret
function %f27(i32) -> i32 {
block0(v0: i32):
@@ -438,14 +308,9 @@ block0(v0: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, #1
; Inst 1: ret
; }}
; block0:
; add w0, w0, #1
; ret
function %f28(i64) -> i64 {
block0(v0: i64):
@@ -454,14 +319,9 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x0, #1
; Inst 1: ret
; }}
; block0:
; add x0, x0, #1
; ret
function %f29(i64) -> i64 {
block0(v0: i64):
@@ -470,15 +330,10 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #1
; Inst 1: sub x0, xzr, x0
; Inst 2: ret
; }}
; block0:
; movz x3, #1
; sub x0, xzr, x3
; ret
function %f30(i8x16) -> i8x16 {
block0(v0: i8x16):
@@ -487,17 +342,12 @@ block0(v0: i8x16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: movz x0, #1
; Inst 1: sub w0, wzr, w0
; Inst 2: dup v1.16b, w0
; Inst 3: ushl v0.16b, v0.16b, v1.16b
; Inst 4: ret
; }}
; block0:
; movz x3, #1
; sub w5, wzr, w3
; dup v7.16b, w5
; ushl v0.16b, v0.16b, v7.16b
; ret
function %add_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
@@ -505,15 +355,10 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: adds x0, x0, x2
; Inst 1: adc x1, x1, x3
; Inst 2: ret
; }}
; block0:
; adds x0, x0, x2
; adc x1, x1, x3
; ret
function %sub_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
@@ -521,15 +366,10 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs x0, x0, x2
; Inst 1: sbc x1, x1, x3
; Inst 2: ret
; }}
; block0:
; subs x0, x0, x2
; sbc x1, x1, x3
; ret
function %mul_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
@@ -537,17 +377,12 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: umulh x4, x0, x2
; Inst 1: madd x3, x0, x3, x4
; Inst 2: madd x1, x1, x2, x3
; Inst 3: madd x0, x0, x2, xzr
; Inst 4: ret
; }}
; block0:
; umulh x10, x0, x2
; madd x12, x0, x3, x10
; madd x1, x1, x2, x12
; madd x0, x0, x2, xzr
; ret
function %add_mul_1(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
@@ -556,14 +391,9 @@ block0(v0: i32, v1: i32, v2: i32):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: madd w0, w1, w2, w0
; Inst 1: ret
; }}
; block0:
; madd w0, w1, w2, w0
; ret
function %add_mul_2(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
@@ -572,14 +402,9 @@ block0(v0: i32, v1: i32, v2: i32):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: madd w0, w1, w2, w0
; Inst 1: ret
; }}
; block0:
; madd w0, w1, w2, w0
; ret
function %srem_const (i64) -> i64 {
block0(v0: i64):
@@ -588,16 +413,11 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: orr x1, xzr, #2
; Inst 1: sdiv x2, x0, x1
; Inst 2: msub x0, x2, x1, x0
; Inst 3: ret
; }}
; block0:
; orr x3, xzr, #2
; sdiv x5, x0, x3
; msub x0, x5, x3, x0
; ret
function %urem_const (i64) -> i64 {
block0(v0: i64):
@@ -606,16 +426,11 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: orr x1, xzr, #2
; Inst 1: udiv x2, x0, x1
; Inst 2: msub x0, x2, x1, x0
; Inst 3: ret
; }}
; block0:
; orr x3, xzr, #2
; udiv x5, x0, x3
; msub x0, x5, x3, x0
; ret
function %sdiv_minus_one(i64) -> i64 {
block0(v0: i64):
@@ -624,16 +439,11 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: movn x1, #0
; Inst 1: adds xzr, x1, #1
; Inst 2: ccmp x0, #1, #nzcv, eq
; Inst 3: b.vc 8 ; udf
; Inst 4: sdiv x0, x0, x1
; Inst 5: ret
; }}
; block0:
; movn x3, #0
; adds xzr, x3, #1
; ccmp x0, #1, #nzcv, eq
; b.vc 8 ; udf
; sdiv x0, x0, x3
; ret

[file diff: aarch64 atomic read-modify-write tests]

@@ -7,14 +7,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldaddal x1, x0, [x0]
; Inst 1: ret
; }}
; block0:
; ldaddal x1, x4, [x0]
; ret
function %atomic_rmw_add_i32(i64, i32) {
block0(v0: i64, v1: i32):
@@ -22,14 +17,9 @@ block0(v0: i64, v1: i32):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldaddal w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldaddal w1, w4, [x0]
; ret
function %atomic_rmw_add_i16(i64, i16) {
block0(v0: i64, v1: i16):
@@ -37,14 +27,9 @@ block0(v0: i64, v1: i16):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldaddalh w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldaddalh w1, w4, [x0]
; ret
function %atomic_rmw_add_i8(i64, i8) {
block0(v0: i64, v1: i8):
@@ -52,14 +37,9 @@ block0(v0: i64, v1: i8):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldaddalb w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldaddalb w1, w4, [x0]
; ret
function %atomic_rmw_and_i64(i64, i64) {
block0(v0: i64, v1: i64):
@@ -67,14 +47,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldclral x1, x0, [x0]
; Inst 1: ret
; }}
; block0:
; ldclral x1, x4, [x0]
; ret
function %atomic_rmw_and_i32(i64, i32) {
block0(v0: i64, v1: i32):
@@ -82,14 +57,9 @@ block0(v0: i64, v1: i32):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldclral w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldclral w1, w4, [x0]
; ret
function %atomic_rmw_and_i16(i64, i16) {
block0(v0: i64, v1: i16):
@@ -97,14 +67,9 @@ block0(v0: i64, v1: i16):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldclralh w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldclralh w1, w4, [x0]
; ret
function %atomic_rmw_and_i8(i64, i8) {
block0(v0: i64, v1: i8):
@@ -112,14 +77,9 @@ block0(v0: i64, v1: i8):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldclralb w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldclralb w1, w4, [x0]
; ret
function %atomic_rmw_nand_i64(i64, i64) {
block0(v0: i64, v1: i64):
@@ -127,25 +87,21 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 13)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: str x28, [sp, #-16]!
; Inst 3: stp x26, x27, [sp, #-16]!
; Inst 4: stp x24, x25, [sp, #-16]!
; Inst 5: mov x25, x0
; Inst 6: mov x26, x1
; Inst 7: 1: ldaxr x27, [x25]; and x28, x27, x26; mvn x28, x28; stlxr w24, x28, [x25]; cbnz w24, 1b
; Inst 8: ldp x24, x25, [sp], #16
; Inst 9: ldp x26, x27, [sp], #16
; Inst 10: ldr x28, [sp], #16
; Inst 11: ldp fp, lr, [sp], #16
; Inst 12: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x4, x1
; mov x26, x4
; 1: ldaxr x27, [x25]; and x28, x27, x26; mvn x28, x28; stlxr w24, x28, [x25]; cbnz w24, 1b
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
function %atomic_rmw_nand_i32(i64, i32) {
block0(v0: i64, v1: i32):
@@ -153,25 +109,21 @@ block0(v0: i64, v1: i32):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 13)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: str x28, [sp, #-16]!
; Inst 3: stp x26, x27, [sp, #-16]!
; Inst 4: stp x24, x25, [sp, #-16]!
; Inst 5: mov x25, x0
; Inst 6: mov x26, x1
; Inst 7: 1: ldaxr w27, [x25]; and w28, w27, w26; mvn w28, w28; stlxr w24, w28, [x25]; cbnz w24, 1b
; Inst 8: ldp x24, x25, [sp], #16
; Inst 9: ldp x26, x27, [sp], #16
; Inst 10: ldr x28, [sp], #16
; Inst 11: ldp fp, lr, [sp], #16
; Inst 12: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x4, x1
; mov x26, x4
; 1: ldaxr w27, [x25]; and w28, w27, w26; mvn w28, w28; stlxr w24, w28, [x25]; cbnz w24, 1b
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
function %atomic_rmw_nand_i16(i64, i16) {
block0(v0: i64, v1: i16):
@@ -179,25 +131,21 @@ block0(v0: i64, v1: i16):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 13)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: str x28, [sp, #-16]!
; Inst 3: stp x26, x27, [sp, #-16]!
; Inst 4: stp x24, x25, [sp, #-16]!
; Inst 5: mov x25, x0
; Inst 6: mov x26, x1
; Inst 7: 1: ldaxrh w27, [x25]; and w28, w27, w26; mvn w28, w28; stlxrh w24, w28, [x25]; cbnz w24, 1b
; Inst 8: ldp x24, x25, [sp], #16
; Inst 9: ldp x26, x27, [sp], #16
; Inst 10: ldr x28, [sp], #16
; Inst 11: ldp fp, lr, [sp], #16
; Inst 12: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x4, x1
; mov x26, x4
; 1: ldaxrh w27, [x25]; and w28, w27, w26; mvn w28, w28; stlxrh w24, w28, [x25]; cbnz w24, 1b
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
function %atomic_rmw_nand_i8(i64, i8) {
block0(v0: i64, v1: i8):
@@ -205,25 +153,21 @@ block0(v0: i64, v1: i8):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 13)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: str x28, [sp, #-16]!
; Inst 3: stp x26, x27, [sp, #-16]!
; Inst 4: stp x24, x25, [sp, #-16]!
; Inst 5: mov x25, x0
; Inst 6: mov x26, x1
; Inst 7: 1: ldaxrb w27, [x25]; and w28, w27, w26; mvn w28, w28; stlxrb w24, w28, [x25]; cbnz w24, 1b
; Inst 8: ldp x24, x25, [sp], #16
; Inst 9: ldp x26, x27, [sp], #16
; Inst 10: ldr x28, [sp], #16
; Inst 11: ldp fp, lr, [sp], #16
; Inst 12: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x26, x27, [sp, #-16]!
; stp x24, x25, [sp, #-16]!
; block0:
; mov x25, x0
; mov x4, x1
; mov x26, x4
; 1: ldaxrb w27, [x25]; and w28, w27, w26; mvn w28, w28; stlxrb w24, w28, [x25]; cbnz w24, 1b
; ldp x24, x25, [sp], #16
; ldp x26, x27, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
function %atomic_rmw_or_i64(i64, i64) {
block0(v0: i64, v1: i64):
@@ -231,14 +175,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsetal x1, x0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsetal x1, x4, [x0]
; ret
function %atomic_rmw_or_i32(i64, i32) {
block0(v0: i64, v1: i32):
@@ -246,14 +185,9 @@ block0(v0: i64, v1: i32):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsetal w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsetal w1, w4, [x0]
; ret
function %atomic_rmw_or_i16(i64, i16) {
block0(v0: i64, v1: i16):
@@ -261,14 +195,9 @@ block0(v0: i64, v1: i16):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsetalh w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsetalh w1, w4, [x0]
; ret
function %atomic_rmw_or_i8(i64, i8) {
block0(v0: i64, v1: i8):
@@ -276,14 +205,9 @@ block0(v0: i64, v1: i8):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsetalb w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsetalb w1, w4, [x0]
; ret
function %atomic_rmw_xor_i64(i64, i64) {
block0(v0: i64, v1: i64):
@@ -291,14 +215,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldeoral x1, x0, [x0]
; Inst 1: ret
; }}
; block0:
; ldeoral x1, x4, [x0]
; ret
function %atomic_rmw_xor_i32(i64, i32) {
block0(v0: i64, v1: i32):
@@ -306,14 +225,9 @@ block0(v0: i64, v1: i32):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldeoral w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldeoral w1, w4, [x0]
; ret
function %atomic_rmw_xor_i16(i64, i16) {
block0(v0: i64, v1: i16):
@@ -321,14 +235,9 @@ block0(v0: i64, v1: i16):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldeoralh w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldeoralh w1, w4, [x0]
; ret
function %atomic_rmw_xor_i8(i64, i8) {
block0(v0: i64, v1: i8):
@@ -336,14 +245,9 @@ block0(v0: i64, v1: i8):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldeoralb w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldeoralb w1, w4, [x0]
; ret
function %atomic_rmw_smax_i64(i64, i64) {
block0(v0: i64, v1: i64):
@@ -351,14 +255,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsmaxal x1, x0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsmaxal x1, x4, [x0]
; ret
function %atomic_rmw_smax_i32(i64, i32) {
block0(v0: i64, v1: i32):
@@ -366,14 +265,9 @@ block0(v0: i64, v1: i32):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsmaxal w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsmaxal w1, w4, [x0]
; ret
function %atomic_rmw_smax_i16(i64, i16) {
block0(v0: i64, v1: i16):
@@ -381,14 +275,9 @@ block0(v0: i64, v1: i16):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsmaxalh w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsmaxalh w1, w4, [x0]
; ret
function %atomic_rmw_smax_i8(i64, i8) {
block0(v0: i64, v1: i8):
@@ -396,14 +285,9 @@ block0(v0: i64, v1: i8):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsmaxalb w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsmaxalb w1, w4, [x0]
; ret
function %atomic_rmw_umax_i64(i64, i64) {
block0(v0: i64, v1: i64):
@@ -411,14 +295,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldumaxal x1, x0, [x0]
; Inst 1: ret
; }}
; block0:
; ldumaxal x1, x4, [x0]
; ret
function %atomic_rmw_umax_i32(i64, i32) {
block0(v0: i64, v1: i32):
@@ -426,14 +305,9 @@ block0(v0: i64, v1: i32):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldumaxal w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldumaxal w1, w4, [x0]
; ret
function %atomic_rmw_umax_i16(i64, i16) {
block0(v0: i64, v1: i16):
@@ -441,14 +315,9 @@ block0(v0: i64, v1: i16):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldumaxalh w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldumaxalh w1, w4, [x0]
; ret
function %atomic_rmw_umax_i8(i64, i8) {
block0(v0: i64, v1: i8):
@@ -456,14 +325,9 @@ block0(v0: i64, v1: i8):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldumaxalb w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldumaxalb w1, w4, [x0]
; ret
function %atomic_rmw_smin_i64(i64, i64) {
block0(v0: i64, v1: i64):
@@ -471,14 +335,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsminal x1, x0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsminal x1, x4, [x0]
; ret
function %atomic_rmw_smin_i32(i64, i32) {
block0(v0: i64, v1: i32):
@@ -486,14 +345,9 @@ block0(v0: i64, v1: i32):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsminal w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsminal w1, w4, [x0]
; ret
function %atomic_rmw_smin_i16(i64, i16) {
block0(v0: i64, v1: i16):
@@ -501,14 +355,9 @@ block0(v0: i64, v1: i16):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsminalh w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsminalh w1, w4, [x0]
; ret
function %atomic_rmw_smin_i8(i64, i8) {
block0(v0: i64, v1: i8):
@@ -516,14 +365,9 @@ block0(v0: i64, v1: i8):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsminalb w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldsminalb w1, w4, [x0]
; ret
function %atomic_rmw_umin_i64(i64, i64) {
block0(v0: i64, v1: i64):
@@ -531,14 +375,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lduminal x1, x0, [x0]
; Inst 1: ret
; }}
; block0:
; lduminal x1, x4, [x0]
; ret
function %atomic_rmw_umin_i32(i64, i32) {
block0(v0: i64, v1: i32):
@@ -546,14 +385,9 @@ block0(v0: i64, v1: i32):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lduminal w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; lduminal w1, w4, [x0]
; ret
function %atomic_rmw_umin_i16(i64, i16) {
block0(v0: i64, v1: i16):
@@ -561,14 +395,9 @@ block0(v0: i64, v1: i16):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lduminalh w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; lduminalh w1, w4, [x0]
; ret
function %atomic_rmw_umin_i8(i64, i8) {
block0(v0: i64, v1: i8):
@@ -576,12 +405,7 @@ block0(v0: i64, v1: i8):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lduminalb w1, w0, [x0]
; Inst 1: ret
; }}
; block0:
; lduminalb w1, w4, [x0]
; ret

[file diff suppressed because it is too large]

[file diff: aarch64 atomic load tests]

@@ -7,14 +7,9 @@ block0(v0: i64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldar x0, [x0]
; Inst 1: ret
; }}
; block0:
; ldar x0, [x0]
; ret
function %atomic_load_i32(i64) -> i32 {
block0(v0: i64):
@@ -22,14 +17,9 @@ block0(v0: i64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldar w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldar w0, [x0]
; ret
function %atomic_load_i16(i64) -> i16 {
block0(v0: i64):
@@ -37,14 +27,9 @@ block0(v0: i64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarh w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldarh w0, [x0]
; ret
function %atomic_load_i8(i64) -> i8 {
block0(v0: i64):
@@ -52,14 +37,9 @@ block0(v0: i64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarb w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldarb w0, [x0]
; ret
function %atomic_load_i32_i64(i64) -> i64 {
block0(v0: i64):
@@ -68,14 +48,9 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldar w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldar w0, [x0]
; ret
function %atomic_load_i16_i64(i64) -> i64 {
block0(v0: i64):
@@ -84,14 +59,9 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarh w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldarh w0, [x0]
; ret
function %atomic_load_i8_i64(i64) -> i64 {
block0(v0: i64):
@@ -100,14 +70,9 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarb w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldarb w0, [x0]
; ret
function %atomic_load_i16_i32(i64) -> i32 {
block0(v0: i64):
@@ -116,14 +81,9 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarh w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldarh w0, [x0]
; ret
function %atomic_load_i8_i32(i64) -> i32 {
block0(v0: i64):
@@ -132,12 +92,7 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarb w0, [x0]
; Inst 1: ret
; }}
; block0:
; ldarb w0, [x0]
; ret

[file diff: aarch64 atomic store tests]

@@ -7,14 +7,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlr x0, [x1]
; Inst 1: ret
; }}
; block0:
; stlr x0, [x1]
; ret
function %atomic_store_i32(i32, i64) {
block0(v0: i32, v1: i64):
@@ -22,14 +17,9 @@ block0(v0: i32, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlr w0, [x1]
; Inst 1: ret
; }}
; block0:
; stlr w0, [x1]
; ret
function %atomic_store_i16(i16, i64) {
block0(v0: i16, v1: i64):
@@ -37,14 +27,9 @@ block0(v0: i16, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrh w0, [x1]
; Inst 1: ret
; }}
; block0:
; stlrh w0, [x1]
; ret
function %atomic_store_i8(i8, i64) {
block0(v0: i8, v1: i64):
@@ -52,14 +37,9 @@ block0(v0: i8, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrb w0, [x1]
; Inst 1: ret
; }}
; block0:
; stlrb w0, [x1]
; ret
function %atomic_store_i64_i32(i64, i64) {
block0(v0: i64, v1: i64):
@@ -68,14 +48,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlr w0, [x1]
; Inst 1: ret
; }}
; block0:
; stlr w0, [x1]
; ret
function %atomic_store_i64_i16(i64, i64) {
block0(v0: i64, v1: i64):
@@ -84,14 +59,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrh w0, [x1]
; Inst 1: ret
; }}
; block0:
; stlrh w0, [x1]
; ret
function %atomic_store_i64_i8(i64, i64) {
block0(v0: i64, v1: i64):
@@ -100,14 +70,9 @@ block0(v0: i64, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrb w0, [x1]
; Inst 1: ret
; }}
; block0:
; stlrb w0, [x1]
; ret
function %atomic_store_i32_i16(i32, i64) {
block0(v0: i32, v1: i64):
@@ -116,14 +81,9 @@ block0(v0: i32, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrh w0, [x1]
; Inst 1: ret
; }}
; block0:
; stlrh w0, [x1]
; ret
function %atomic_store_i32_i8(i32, i64) {
block0(v0: i32, v1: i64):
@@ -132,12 +92,7 @@ block0(v0: i32, v1: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrb w0, [x1]
; Inst 1: ret
; }}
; block0:
; stlrb w0, [x1]
; ret

[file diff: aarch64 basic integer add test]

@@ -8,12 +8,7 @@ block0(v0: i32, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1
; Inst 1: ret
; }}
; block0:
; add w0, w0, w1
; ret

[file diff suppressed because it is too large]

[file diff: aarch64 indirect call test]

@@ -9,15 +9,10 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: blr x1
; Inst 3: ldp fp, lr, [sp], #16
; Inst 4: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; blr x1
; ldp fp, lr, [sp], #16
; ret

[file diff: aarch64 call/ABI tests]

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
set enable_probestack=false
target aarch64
@@ -11,12 +11,13 @@ block0(v0: i64):
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr x1, 8 ; b 12 ; data
; nextln: blr x1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; ldr x5, 8 ; b 12 ; data TestCase { length: 1, ascii: [103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x5
; ldp fp, lr, [sp], #16
; ret
function %f2(i32) -> i64 {
fn0 = %g(i32 uext) -> i64 baldrdash_system_v
@@ -26,20 +27,40 @@ block0(v0: i32):
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; check: mov w0, w0
; nextln: ldr x1, 8 ; b 12 ; data
; nextln: blr x1
; check: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; stp x27, x28, [sp, #-16]!
; stp x25, x26, [sp, #-16]!
; stp x23, x24, [sp, #-16]!
; stp x21, x22, [sp, #-16]!
; stp x19, x20, [sp, #-16]!
; stp d14, d15, [sp, #-16]!
; stp d12, d13, [sp, #-16]!
; stp d10, d11, [sp, #-16]!
; stp d8, d9, [sp, #-16]!
; block0:
; mov w0, w0
; ldr x5, 8 ; b 12 ; data TestCase { length: 1, ascii: [103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x5
; ldp d8, d9, [sp], #16
; ldp d10, d11, [sp], #16
; ldp d12, d13, [sp], #16
; ldp d14, d15, [sp], #16
; ldp x19, x20, [sp], #16
; ldp x21, x22, [sp], #16
; ldp x23, x24, [sp], #16
; ldp x25, x26, [sp], #16
; ldp x27, x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
function %f3(i32) -> i32 uext baldrdash_system_v {
block0(v0: i32):
return v0
}
; check: mov w0, w0
; block0:
; mov w0, w0
function %f4(i32) -> i64 {
fn0 = %g(i32 sext) -> i64 baldrdash_system_v
@@ -49,20 +70,40 @@ block0(v0: i32):
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; check: sxtw x0, w0
; nextln: ldr x1, 8 ; b 12 ; data
; nextln: blr x1
; check: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; stp x27, x28, [sp, #-16]!
; stp x25, x26, [sp, #-16]!
; stp x23, x24, [sp, #-16]!
; stp x21, x22, [sp, #-16]!
; stp x19, x20, [sp, #-16]!
; stp d14, d15, [sp, #-16]!
; stp d12, d13, [sp, #-16]!
; stp d10, d11, [sp, #-16]!
; stp d8, d9, [sp, #-16]!
; block0:
; sxtw x0, w0
; ldr x5, 8 ; b 12 ; data TestCase { length: 1, ascii: [103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x5
; ldp d8, d9, [sp], #16
; ldp d10, d11, [sp], #16
; ldp d12, d13, [sp], #16
; ldp d14, d15, [sp], #16
; ldp x19, x20, [sp], #16
; ldp x21, x22, [sp], #16
; ldp x23, x24, [sp], #16
; ldp x25, x26, [sp], #16
; ldp x27, x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
function %f5(i32) -> i32 sext baldrdash_system_v {
block0(v0: i32):
return v0
}
; check: sxtw x0, w0
; block0:
; sxtw x0, w0
function %f6(i8) -> i64 {
fn0 = %g(i32, i32, i32, i32, i32, i32, i32, i32, i8 sext) -> i64
@@ -73,26 +114,27 @@ block0(v0: i8):
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov x8, x0
; nextln: sub sp, sp, #16
; nextln: virtual_sp_offset_adjust 16
; nextln: movz x0, #42
; nextln: movz x1, #42
; nextln: movz x2, #42
; nextln: movz x3, #42
; nextln: movz x4, #42
; nextln: movz x5, #42
; nextln: movz x6, #42
; nextln: movz x7, #42
; nextln: sturb w8, [sp]
; nextln: ldr x8, 8 ; b 12 ; data
; nextln: blr x8
; nextln: add sp, sp, #16
; nextln: virtual_sp_offset_adjust -16
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; mov x15, x0
; sub sp, sp, #16
; virtual_sp_offset_adjust 16
; movz x0, #42
; movz x1, #42
; movz x2, #42
; movz x3, #42
; movz x4, #42
; movz x5, #42
; movz x6, #42
; movz x7, #42
; strb w15, [sp]
; ldr x15, 8 ; b 12 ; data TestCase { length: 1, ascii: [103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x15
; add sp, sp, #16
; virtual_sp_offset_adjust -16
; ldp fp, lr, [sp], #16
; ret
function %f7(i8) -> i32, i32, i32, i32, i32, i32, i32, i32, i8 sext {
block0(v0: i8):
@@ -100,18 +142,19 @@ block0(v0: i8):
return v1, v1, v1, v1, v1, v1, v1, v1, v0
}
; check: mov x9, x0
; nextln: mov x8, x1
; nextln: movz x0, #42
; nextln: movz x1, #42
; nextln: movz x2, #42
; nextln: movz x3, #42
; nextln: movz x4, #42
; nextln: movz x5, #42
; nextln: movz x6, #42
; nextln: movz x7, #42
; nextln: sturb w9, [x8]
; nextln: ret
; block0:
; mov x14, x0
; mov x8, x1
; movz x0, #42
; movz x1, #42
; movz x2, #42
; movz x3, #42
; movz x4, #42
; movz x5, #42
; movz x6, #42
; movz x7, #42
; strb w14, [x8]
; ret
function %f8() {
fn0 = %g0() -> f32
@@ -131,32 +174,33 @@ block0:
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #48
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: str q0, [sp]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: str q0, [sp, #16]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: str q0, [sp, #32]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: ldr q0, [sp]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: ldr q0, [sp, #16]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: ldr q0, [sp, #32]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: add sp, sp, #48
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #48
; block0:
; ldr x9, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x9
; str q0, [sp]
; ldr x11, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x11
; str q0, [sp, #16]
; ldr x13, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x13
; str q0, [sp, #32]
; ldr x15, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x15
; ldr q0, [sp]
; ldr x1, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x1
; ldr q0, [sp, #16]
; ldr x3, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x3
; ldr q0, [sp, #32]
; ldr x5, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x5
; add sp, sp, #48
; ldp fp, lr, [sp], #16
; ret
function %f9() {
fn0 = %g0() -> i8x16
@@ -174,32 +218,33 @@ block0:
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #48
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: str q0, [sp]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: str q0, [sp, #16]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: str q0, [sp, #32]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: ldr q0, [sp]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: ldr q0, [sp, #16]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: ldr q0, [sp, #32]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: add sp, sp, #48
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #48
; block0:
; ldr x9, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x9
; str q0, [sp]
; ldr x11, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x11
; str q0, [sp, #16]
; ldr x13, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x13
; str q0, [sp, #32]
; ldr x15, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x15
; ldr q0, [sp]
; ldr x1, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x1
; ldr q0, [sp, #16]
; ldr x3, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x3
; ldr q0, [sp, #32]
; ldr x5, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x5
; add sp, sp, #48
; ldp fp, lr, [sp], #16
; ret
function %f10() {
fn0 = %g0() -> f32
@@ -221,44 +266,43 @@ block0:
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #48
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: str q0, [sp]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: str q0, [sp, #16]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: str q0, [sp, #32]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: ldr q0, [sp]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: ldr q0, [sp, #16]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: ldr q0, [sp, #32]
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: blr x0
; nextln: add sp, sp, #48
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #48
; block0:
; ldr x9, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x9
; str q0, [sp]
; ldr x11, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x11
; str q0, [sp, #16]
; ldr x13, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x13
; str q0, [sp, #32]
; ldr x15, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x15
; ldr q0, [sp]
; ldr x1, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x1
; ldr q0, [sp, #16]
; ldr x3, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x3
; ldr q0, [sp, #32]
; ldr x5, 8 ; b 12 ; data TestCase { length: 2, ascii: [103, 54, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x5
; add sp, sp, #48
; ldp fp, lr, [sp], #16
; ret
; i128 tests
function %f11(i128, i64) -> i64 {
block0(v0: i128, v1: i64):
v2, v3 = isplit v0
return v3
}
; check: mov x0, x1
; nextln: ret
; block0:
; mov x0, x1
; ret
function %f11_call(i64) -> i64 {
fn0 = %f11(i128, i64) -> i64
@@ -270,28 +314,27 @@ block0(v0: i64):
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov x1, x0
; nextln: movz x0, #42
; nextln: movz x2, #42
; nextln: ldr x3, 8 ; b 12 ; data
; nextln: blr x3
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; mov x9, x0
; movz x0, #42
; mov x1, x9
; movz x2, #42
; ldr x14, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 49, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x14
; ldp fp, lr, [sp], #16
; ret
; The AArch64 ABI requires that the i128 argument be aligned
; and passed in x2 and x3
function %f12(i64, i128) -> i64 {
block0(v0: i64, v1: i128):
v2, v3 = isplit v1
return v2
}
; check: mov x0, x2
; nextln: ret
; block0:
; mov x0, x2
; ret
function %f12_call(i64) -> i64 {
fn0 = %f12(i64, i128) -> i64
@@ -303,29 +346,26 @@ block0(v0: i64):
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x3, #42
; nextln: mov x2, x0
; nextln: movz x0, #42
; nextln: ldr x1, 8 ; b 12 ; data
; nextln: blr x1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; movz x3, #42
; mov x2, x0
; movz x0, #42
; ldr x14, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 49, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x14
; ldp fp, lr, [sp], #16
; ret
; The Apple AArch64 ABI allows the i128 argument to be unaligned
; and passed in x1 and x2
function %f13(i64, i128) -> i64 apple_aarch64 {
block0(v0: i64, v1: i128):
v2, v3 = isplit v1
return v2
}
; check: mov x0, x1
; nextln: ret
; block0:
; mov x0, x1
; ret
function %f13_call(i64) -> i64 apple_aarch64 {
fn0 = %f13(i64, i128) -> i64 apple_aarch64
@@ -337,31 +377,29 @@ block0(v0: i64):
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x2, #42
; nextln: mov x1, x0
; nextln: movz x0, #42
; nextln: ldr x3, 8 ; b 12 ; data
; nextln: blr x3
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; movz x2, #42
; mov x1, x0
; movz x0, #42
; ldr x14, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 49, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x14
; ldp fp, lr, [sp], #16
; ret
; We only have 8 registers (x0-x7) to pass arguments in;
; make sure we spill the last i128 argument to the stack even though one
; register slot (x7) is still available
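; A sketch of the expected assignment under AAPCS64 (illustrative only, not
; part of the checked output):
;   v0: i128 -> x0, x1
;   v1: i128 -> x2, x3
;   v2: i128 -> x4, x5
;   v3: i64  -> x6
;   v4: i128 -> caller's stack, read back below as [fp, #16] and [fp, #24]
;   (x7 alone cannot hold an i128, so it goes unused)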
function %f14(i128, i128, i128, i64, i128) -> i128 {
block0(v0: i128, v1: i128, v2: i128, v3: i64, v4: i128):
return v4
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x0, [fp, #16]
; nextln: ldur x1, [fp, #24]
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; ldr x0, [fp, #16]
; ldr x1, [fp, #24]
; ldp fp, lr, [sp], #16
; ret
function %f14_call(i128, i64) -> i128 {
fn0 = %f14(i128, i128, i128, i64, i128) -> i128
@@ -371,50 +409,40 @@ block0(v0: i128, v1: i64):
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; mov x14, x2
; sub sp, sp, #16
; virtual_sp_offset_adjust 16
; mov x13, x0
; mov x15, x1
; mov x2, x13
; mov x3, x15
; mov x4, x13
; mov x5, x15
; mov x6, x14
; str x13, [sp]
; str x15, [sp, #8]
; ldr x7, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 49, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x7
; add sp, sp, #16
; virtual_sp_offset_adjust -16
; ldp fp, lr, [sp], #16
; ret
; TODO: some codegen optimization is possible here: x0/x1 move to x7/x8 and
; then move back
; nextln: mov x7, x0
; nextln: mov x8, x1
; nextln: mov x6, x2
; nextln: sub sp, sp, #16
; nextln: virtual_sp_offset_adjust 16
; nextln: mov x0, x7
; nextln: mov x1, x8
; nextln: mov x2, x7
; nextln: mov x3, x8
; nextln: mov x4, x7
; nextln: mov x5, x8
; nextln: stur x7, [sp]
; nextln: stur x8, [sp, #8]
; nextln: ldr x7, 8 ; b 12 ; data
; nextln: blr x7
; nextln: add sp, sp, #16
; nextln: virtual_sp_offset_adjust -16
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; We have one register slot available (similar to %f14); however, the Apple
; ABI allows an i128 to start at an odd-numbered register (x7 in this case).
;
; It is unspecified whether we may split the i128 between x7 and the stack.
; In practice LLVM does not do this, so we follow suit.
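; A sketch of the expected assignment under the Apple ABI (illustrative only,
; not part of the checked output):
;   v0: i128 -> x0, x1
;   v1: i128 -> x2, x3
;   v2: i128 -> x4, x5
;   v3: i64  -> x6
;   v4: i128 -> entirely on the caller's stack, read back below as
;               [fp, #16] and [fp, #24] (not split across x7 + stack)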
function %f15(i128, i128, i128, i64, i128) -> i128 apple_aarch64 {
block0(v0: i128, v1: i128, v2: i128, v3: i64, v4: i128):
return v4
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x0, [fp, #16]
; nextln: ldur x1, [fp, #24]
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; ldr x0, [fp, #16]
; ldr x1, [fp, #24]
; ldp fp, lr, [sp], #16
; ret
function %f15_call(i128, i64) -> i128 apple_aarch64 {
fn0 = %f15(i128, i128, i128, i64, i128) -> i128 apple_aarch64
@@ -424,31 +452,27 @@ block0(v0: i128, v1: i64):
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov x7, x0
; nextln: mov x8, x1
; nextln: mov x6, x2
; nextln: sub sp, sp, #16
; nextln: virtual_sp_offset_adjust 16
; nextln: mov x0, x7
; nextln: mov x1, x8
; nextln: mov x2, x7
; nextln: mov x3, x8
; nextln: mov x4, x7
; nextln: mov x5, x8
; nextln: stur x7, [sp]
; nextln: stur x8, [sp, #8]
; nextln: ldr x7, 8 ; b 12 ; data
; nextln: blr x7
; nextln: add sp, sp, #16
; nextln: virtual_sp_offset_adjust -16
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; block0:
; mov x14, x2
; sub sp, sp, #16
; virtual_sp_offset_adjust 16
; mov x13, x0
; mov x15, x1
; mov x2, x13
; mov x3, x15
; mov x4, x13
; mov x5, x15
; mov x6, x14
; str x13, [sp]
; str x15, [sp, #8]
; ldr x7, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 49, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x7
; add sp, sp, #16
; virtual_sp_offset_adjust -16
; ldp fp, lr, [sp], #16
; ret
function %f16() -> i32, i32 wasmtime_system_v {
block0:
@@ -457,9 +481,10 @@ block0:
return v0, v1
}
; check: mov x1, x0
; nextln: movz x0, #0
; nextln: movz x2, #1
; nextln: stur w2, [x1]
; nextln: ret
; block0:
; mov x11, x0
; movz x0, #0
; movz x7, #1
; str w7, [x11]
; ret


@@ -10,14 +10,9 @@ block0(v0: i8x16):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: cmeq v0.16b, v0.16b, #0
; Inst 1: ret
; }}
; block0:
; cmeq v0.16b, v0.16b, #0
; ret
function %f1(i16x8) -> b16x8 {
block0(v0: i16x8):
@@ -27,14 +22,9 @@ block0(v0: i16x8):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: cmeq v0.8h, v0.8h, #0
; Inst 1: ret
; }}
; block0:
; cmeq v0.8h, v0.8h, #0
; ret
function %f2(i32x4) -> b32x4 {
block0(v0: i32x4):
@@ -44,15 +34,10 @@ block0(v0: i32x4):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: cmeq v0.4s, v0.4s, #0
; Inst 1: mvn v0.16b, v0.16b
; Inst 2: ret
; }}
; block0:
; cmeq v3.4s, v0.4s, #0
; mvn v0.16b, v3.16b
; ret
function %f3(i64x2) -> b64x2 {
block0(v0: i64x2):
@@ -62,15 +47,10 @@ block0(v0: i64x2):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: cmeq v0.2d, v0.2d, #0
; Inst 1: mvn v0.16b, v0.16b
; Inst 2: ret
; }}
; block0:
; cmeq v3.2d, v0.2d, #0
; mvn v0.16b, v3.16b
; ret
function %f4(i8x16) -> b8x16 {
block0(v0: i8x16):
@@ -80,14 +60,9 @@ block0(v0: i8x16):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: cmle v0.16b, v0.16b, #0
; Inst 1: ret
; }}
; block0:
; cmle v0.16b, v0.16b, #0
; ret
function %f5(i16x8) -> b16x8 {
block0(v0: i16x8):
@@ -97,14 +72,9 @@ block0(v0: i16x8):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: cmge v0.8h, v0.8h, #0
; Inst 1: ret
; }}
; block0:
; cmge v0.8h, v0.8h, #0
; ret
function %f6(i32x4) -> b32x4 {
block0(v0: i32x4):
@@ -114,14 +84,9 @@ block0(v0: i32x4):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: cmge v0.4s, v0.4s, #0
; Inst 1: ret
; }}
; block0:
; cmge v0.4s, v0.4s, #0
; ret
function %f7(i64x2) -> b64x2 {
block0(v0: i64x2):
@@ -131,14 +96,9 @@ block0(v0: i64x2):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: cmle v0.2d, v0.2d, #0
; Inst 1: ret
; }}
; block0:
; cmle v0.2d, v0.2d, #0
; ret
function %f8(i8x16) -> b8x16 {
block0(v0: i8x16):
@@ -148,14 +108,9 @@ block0(v0: i8x16):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: cmlt v0.16b, v0.16b, #0
; Inst 1: ret
; }}
; block0:
; cmlt v0.16b, v0.16b, #0
; ret
function %f9(i16x8) -> b16x8 {
block0(v0: i16x8):
@@ -165,14 +120,9 @@ block0(v0: i16x8):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: cmgt v0.8h, v0.8h, #0
; Inst 1: ret
; }}
; block0:
; cmgt v0.8h, v0.8h, #0
; ret
function %f10(i32x4) -> b32x4 {
block0(v0: i32x4):
@@ -182,14 +132,9 @@ block0(v0: i32x4):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: cmgt v0.4s, v0.4s, #0
; Inst 1: ret
; }}
; block0:
; cmgt v0.4s, v0.4s, #0
; ret
function %f11(i64x2) -> b64x2 {
block0(v0: i64x2):
@@ -199,14 +144,9 @@ block0(v0: i64x2):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: cmlt v0.2d, v0.2d, #0
; Inst 1: ret
; }}
; block0:
; cmlt v0.2d, v0.2d, #0
; ret
function %f12(f32x4) -> b32x4 {
block0(v0: f32x4):
@@ -216,14 +156,9 @@ block0(v0: f32x4):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fcmeq v0.4s, v0.4s, #0.0
; Inst 1: ret
; }}
; block0:
; fcmeq v0.4s, v0.4s, #0.0
; ret
function %f13(f64x2) -> b64x2 {
block0(v0: f64x2):
@@ -233,14 +168,9 @@ block0(v0: f64x2):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fcmeq v0.2d, v0.2d, #0.0
; Inst 1: ret
; }}
; block0:
; fcmeq v0.2d, v0.2d, #0.0
; ret
function %f14(f64x2) -> b64x2 {
block0(v0: f64x2):
@@ -250,15 +180,10 @@ block0(v0: f64x2):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: fcmeq v0.2d, v0.2d, #0.0
; Inst 1: mvn v0.16b, v0.16b
; Inst 2: ret
; }}
; block0:
; fcmeq v3.2d, v0.2d, #0.0
; mvn v0.16b, v3.16b
; ret
function %f15(f32x4) -> b32x4 {
block0(v0: f32x4):
@@ -268,15 +193,10 @@ block0(v0: f32x4):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: fcmeq v0.4s, v0.4s, #0.0
; Inst 1: mvn v0.16b, v0.16b
; Inst 2: ret
; }}
; block0:
; fcmeq v3.4s, v0.4s, #0.0
; mvn v0.16b, v3.16b
; ret
function %f16(f32x4) -> b32x4 {
block0(v0: f32x4):
@@ -286,14 +206,9 @@ block0(v0: f32x4):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fcmle v0.4s, v0.4s, #0.0
; Inst 1: ret
; }}
; block0:
; fcmle v0.4s, v0.4s, #0.0
; ret
function %f17(f64x2) -> b64x2 {
block0(v0: f64x2):
@@ -303,14 +218,9 @@ block0(v0: f64x2):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fcmge v0.2d, v0.2d, #0.0
; Inst 1: ret
; }}
; block0:
; fcmge v0.2d, v0.2d, #0.0
; ret
function %f18(f64x2) -> b64x2 {
block0(v0: f64x2):
@@ -320,14 +230,9 @@ block0(v0: f64x2):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fcmge v0.2d, v0.2d, #0.0
; Inst 1: ret
; }}
; block0:
; fcmge v0.2d, v0.2d, #0.0
; ret
function %f19(f32x4) -> b32x4 {
block0(v0: f32x4):
@@ -337,14 +242,9 @@ block0(v0: f32x4):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fcmle v0.4s, v0.4s, #0.0
; Inst 1: ret
; }}
; block0:
; fcmle v0.4s, v0.4s, #0.0
; ret
function %f20(f32x4) -> b32x4 {
block0(v0: f32x4):
@@ -354,14 +254,9 @@ block0(v0: f32x4):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fcmlt v0.4s, v0.4s, #0.0
; Inst 1: ret
; }}
; block0:
; fcmlt v0.4s, v0.4s, #0.0
; ret
function %f21(f64x2) -> b64x2 {
block0(v0: f64x2):
@@ -371,14 +266,9 @@ block0(v0: f64x2):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fcmgt v0.2d, v0.2d, #0.0
; Inst 1: ret
; }}
; block0:
; fcmgt v0.2d, v0.2d, #0.0
; ret
function %f22(f64x2) -> b64x2 {
block0(v0: f64x2):
@@ -388,14 +278,9 @@ block0(v0: f64x2):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fcmgt v0.2d, v0.2d, #0.0
; Inst 1: ret
; }}
; block0:
; fcmgt v0.2d, v0.2d, #0.0
; ret
function %f23(f32x4) -> b32x4 {
block0(v0: f32x4):
@@ -405,11 +290,7 @@ block0(v0: f32x4):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fcmlt v0.4s, v0.4s, #0.0
; Inst 1: ret
; }}
; block0:
; fcmlt v0.4s, v0.4s, #0.0
; ret


@@ -8,15 +8,10 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs xzr, x0, x1
; Inst 1: cset x0, eq
; Inst 2: ret
; }}
; block0:
; subs xzr, x0, x1
; cset x0, eq
; ret
function %icmp_eq_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -24,17 +19,12 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: eor x0, x0, x2
; Inst 1: eor x1, x1, x3
; Inst 2: adds xzr, x0, x1
; Inst 3: cset x0, eq
; Inst 4: ret
; }}
; block0:
; eor x10, x0, x2
; eor x12, x1, x3
; adds xzr, x10, x12
; cset x0, eq
; ret
function %icmp_ne_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -42,17 +32,12 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: eor x0, x0, x2
; Inst 1: eor x1, x1, x3
; Inst 2: adds xzr, x0, x1
; Inst 3: cset x0, ne
; Inst 4: ret
; }}
; block0:
; eor x10, x0, x2
; eor x12, x1, x3
; adds xzr, x10, x12
; cset x0, ne
; ret
function %icmp_slt_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -60,18 +45,13 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, lo
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, lt
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x11, lo
; subs xzr, x1, x3
; cset x14, lt
; csel x0, x11, x14, eq
; ret
function %icmp_ult_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -79,18 +59,13 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, lo
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, lo
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x11, lo
; subs xzr, x1, x3
; cset x14, lo
; csel x0, x11, x14, eq
; ret
function %icmp_sle_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -98,18 +73,13 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, ls
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, le
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x11, ls
; subs xzr, x1, x3
; cset x14, le
; csel x0, x11, x14, eq
; ret
function %icmp_ule_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -117,18 +87,13 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, ls
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, ls
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x11, ls
; subs xzr, x1, x3
; cset x14, ls
; csel x0, x11, x14, eq
; ret
function %icmp_sgt_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -136,18 +101,13 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hi
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, gt
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x11, hi
; subs xzr, x1, x3
; cset x14, gt
; csel x0, x11, x14, eq
; ret
function %icmp_ugt_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -155,18 +115,13 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hi
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, hi
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x11, hi
; subs xzr, x1, x3
; cset x14, hi
; csel x0, x11, x14, eq
; ret
function %icmp_sge_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -174,18 +129,13 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hs
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, ge
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x11, hs
; subs xzr, x1, x3
; cset x14, ge
; csel x0, x11, x14, eq
; ret
function %icmp_uge_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -193,18 +143,13 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hs
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, hs
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x11, hs
; subs xzr, x1, x3
; cset x14, hs
; csel x0, x11, x14, eq
; ret
function %icmp_of_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -212,16 +157,11 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: adds xzr, x0, x2
; Inst 1: adcs xzr, x1, x3
; Inst 2: cset x0, vs
; Inst 3: ret
; }}
; block0:
; adds xzr, x0, x2
; adcs xzr, x1, x3
; cset x0, vs
; ret
function %icmp_nof_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -229,16 +169,11 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: adds xzr, x0, x2
; Inst 1: adcs xzr, x1, x3
; Inst 2: cset x0, vc
; Inst 3: ret
; }}
; block0:
; adds xzr, x0, x2
; adcs xzr, x1, x3
; cset x0, vc
; ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -255,26 +190,15 @@ block2:
return v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 2)
; Inst 0: subs xzr, x0, x1
; Inst 1: b.eq label1 ; b label2
; Block 1:
; (original IR block: block1)
; (instruction range: 2 .. 4)
; Inst 2: movz x0, #1
; Inst 3: ret
; Block 2:
; (original IR block: block2)
; (instruction range: 4 .. 6)
; Inst 4: movz x0, #2
; Inst 5: ret
; }}
; block0:
; subs xzr, x0, x1
; b.eq label1 ; b label2
; block1:
; movz x0, #1
; ret
; block2:
; movz x0, #2
; ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -287,29 +211,16 @@ block1:
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 2)
; Inst 0: subs xzr, x0, x1
; Inst 1: b.eq label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 2 .. 3)
; Inst 2: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 3 .. 4)
; Inst 3: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 4 .. 6)
; Inst 4: movz x0, #1
; Inst 5: ret
; }}
; block0:
; subs xzr, x0, x1
; b.eq label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; movz x0, #1
; ret
function %i128_brz(i128){
block0(v0: i128):
@@ -321,28 +232,15 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 2)
; Inst 0: orr x0, x0, x1
; Inst 1: cbz x0, label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 2 .. 3)
; Inst 2: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 3 .. 4)
; Inst 3: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 4 .. 5)
; Inst 4: ret
; }}
; block0:
; orr x4, x0, x1
; cbz x4, label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_brnz(i128){
block0(v0: i128):
@@ -354,28 +252,15 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 2)
; Inst 0: orr x0, x0, x1
; Inst 1: cbnz x0, label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 2 .. 3)
; Inst 2: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 3 .. 4)
; Inst 3: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 4 .. 5)
; Inst 4: ret
; }}
; block0:
; orr x4, x0, x1
; cbnz x4, label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_eq(i128, i128) {
block0(v0: i128, v1: i128):
@@ -386,30 +271,17 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 4)
; Inst 0: eor x0, x0, x2
; Inst 1: eor x1, x1, x3
; Inst 2: adds xzr, x0, x1
; Inst 3: b.eq label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 4 .. 5)
; Inst 4: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 5 .. 6)
; Inst 5: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 6 .. 7)
; Inst 6: ret
; }}
; block0:
; eor x8, x0, x2
; eor x10, x1, x3
; adds xzr, x8, x10
; b.eq label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_ne(i128, i128) {
block0(v0: i128, v1: i128):
@@ -420,30 +292,17 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 4)
; Inst 0: eor x0, x0, x2
; Inst 1: eor x1, x1, x3
; Inst 2: adds xzr, x0, x1
; Inst 3: b.ne label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 4 .. 5)
; Inst 4: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 5 .. 6)
; Inst 5: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 6 .. 7)
; Inst 6: ret
; }}
; block0:
; eor x8, x0, x2
; eor x10, x1, x3
; adds xzr, x8, x10
; b.ne label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_slt(i128, i128) {
block0(v0: i128, v1: i128):
@@ -454,33 +313,20 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 7)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, lo
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, lt
; Inst 4: csel x0, x0, x1, eq
; Inst 5: subs xzr, xzr, x0
; Inst 6: b.lt label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 7 .. 8)
; Inst 7: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 9 .. 10)
; Inst 9: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x9, lo
; subs xzr, x1, x3
; cset x12, lt
; csel x9, x9, x12, eq
; subs xzr, xzr, x9
; b.lt label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_ult(i128, i128) {
block0(v0: i128, v1: i128):
@@ -491,33 +337,20 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 7)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, lo
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, lo
; Inst 4: csel x0, x0, x1, eq
; Inst 5: subs xzr, xzr, x0
; Inst 6: b.lo label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 7 .. 8)
; Inst 7: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 9 .. 10)
; Inst 9: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x9, lo
; subs xzr, x1, x3
; cset x12, lo
; csel x9, x9, x12, eq
; subs xzr, xzr, x9
; b.lo label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_sle(i128, i128) {
block0(v0: i128, v1: i128):
@@ -528,34 +361,21 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 8)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, ls
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, le
; Inst 4: csel x0, x0, x1, eq
; Inst 5: movz x1, #1
; Inst 6: subs xzr, x1, x0
; Inst 7: b.le label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 9 .. 10)
; Inst 9: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 10 .. 11)
; Inst 10: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x9, ls
; subs xzr, x1, x3
; cset x12, le
; csel x9, x9, x12, eq
; movz x12, #1
; subs xzr, x12, x9
; b.le label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_ule(i128, i128) {
block0(v0: i128, v1: i128):
@@ -566,34 +386,21 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 8)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, ls
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, ls
; Inst 4: csel x0, x0, x1, eq
; Inst 5: movz x1, #1
; Inst 6: subs xzr, x1, x0
; Inst 7: b.ls label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 9 .. 10)
; Inst 9: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 10 .. 11)
; Inst 10: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x9, ls
; subs xzr, x1, x3
; cset x12, ls
; csel x9, x9, x12, eq
; movz x12, #1
; subs xzr, x12, x9
; b.ls label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_sgt(i128, i128) {
block0(v0: i128, v1: i128):
@@ -604,33 +411,20 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 7)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hi
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, gt
; Inst 4: csel x0, x0, x1, eq
; Inst 5: subs xzr, x0, xzr
; Inst 6: b.gt label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 7 .. 8)
; Inst 7: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 9 .. 10)
; Inst 9: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x9, hi
; subs xzr, x1, x3
; cset x12, gt
; csel x9, x9, x12, eq
; subs xzr, x9, xzr
; b.gt label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_ugt(i128, i128) {
block0(v0: i128, v1: i128):
@@ -641,33 +435,20 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 7)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hi
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, hi
; Inst 4: csel x0, x0, x1, eq
; Inst 5: subs xzr, x0, xzr
; Inst 6: b.hi label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 7 .. 8)
; Inst 7: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 9 .. 10)
; Inst 9: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x9, hi
; subs xzr, x1, x3
; cset x12, hi
; csel x9, x9, x12, eq
; subs xzr, x9, xzr
; b.hi label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_sge(i128, i128) {
block0(v0: i128, v1: i128):
@@ -678,34 +459,21 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 8)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hs
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, ge
; Inst 4: csel x0, x0, x1, eq
; Inst 5: movz x1, #1
; Inst 6: subs xzr, x0, x1
; Inst 7: b.ge label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 9 .. 10)
; Inst 9: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 10 .. 11)
; Inst 10: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x9, hs
; subs xzr, x1, x3
; cset x12, ge
; csel x9, x9, x12, eq
; movz x12, #1
; subs xzr, x9, x12
; b.ge label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_uge(i128, i128) {
block0(v0: i128, v1: i128):
@@ -716,34 +484,21 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 8)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hs
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, hs
; Inst 4: csel x0, x0, x1, eq
; Inst 5: movz x1, #1
; Inst 6: subs xzr, x0, x1
; Inst 7: b.hs label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 9 .. 10)
; Inst 9: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 10 .. 11)
; Inst 10: ret
; }}
; block0:
; subs xzr, x0, x2
; cset x9, hs
; subs xzr, x1, x3
; cset x12, hs
; csel x9, x9, x12, eq
; movz x12, #1
; subs xzr, x9, x12
; b.hs label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_of(i128, i128) {
block0(v0: i128, v1: i128):
@@ -754,29 +509,16 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 3)
; Inst 0: adds xzr, x0, x2
; Inst 1: adcs xzr, x1, x3
; Inst 2: b.vs label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 3 .. 4)
; Inst 3: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 4 .. 5)
; Inst 4: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 5 .. 6)
; Inst 5: ret
; }}
; block0:
; adds xzr, x0, x2
; adcs xzr, x1, x3
; b.vs label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret
function %i128_bricmp_nof(i128, i128) {
block0(v0: i128, v1: i128):
@@ -787,27 +529,14 @@ block1:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 3)
; Inst 0: adds xzr, x0, x2
; Inst 1: adcs xzr, x1, x3
; Inst 2: b.vc label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 3 .. 4)
; Inst 3: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 4 .. 5)
; Inst 4: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 5 .. 6)
; Inst 5: ret
; }}
; block0:
; adds xzr, x0, x2
; adcs xzr, x1, x3
; b.vc label1 ; b label2
; block1:
; b label3
; block2:
; b label3
; block3:
; ret


@@ -10,16 +10,11 @@ block0(v0: i8, v1: i64, v2: i64):
return v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxtb w0, w0
; Inst 1: subs wzr, w0, #42
; Inst 2: csel x0, x1, x2, eq
; Inst 3: ret
; }}
; block0:
; uxtb w8, w0
; subs wzr, w8, #42
; csel x0, x1, x2, eq
; ret
function %g(i8) -> b1 {
block0(v0: i8):
@@ -29,16 +24,11 @@ block0(v0: i8):
return v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxtb w0, w0
; Inst 1: subs wzr, w0, #42
; Inst 2: cset x0, eq
; Inst 3: ret
; }}
; block0:
; uxtb w4, w0
; subs wzr, w4, #42
; cset x0, eq
; ret
function %h(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
@@ -46,16 +36,11 @@ block0(v0: i8, v1: i8, v2: i8):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: and x1, x1, x0
; Inst 1: bic x0, x2, x0
; Inst 2: orr x0, x0, x1
; Inst 3: ret
; }}
; block0:
; and x8, x1, x0
; bic x0, x2, x0
; orr x0, x0, x8
; ret
function %i(b1, i8, i8) -> i8 {
block0(v0: b1, v1: i8, v2: i8):
@@ -63,16 +48,11 @@ block0(v0: b1, v1: i8, v2: i8):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: and w0, w0, #1
; Inst 1: subs wzr, w0, wzr
; Inst 2: csel x0, x1, x2, ne
; Inst 3: ret
; }}
; block0:
; and w8, w0, #1
; subs wzr, w8, wzr
; csel x0, x1, x2, ne
; ret
function %i(i32, i8, i8) -> i8 {
block0(v0: i32, v1: i8, v2: i8):
@@ -82,15 +62,10 @@ block0(v0: i32, v1: i8, v2: i8):
return v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs wzr, w0, #42
; Inst 1: csel x0, x1, x2, eq
; Inst 2: ret
; }}
; block0:
; subs wzr, w0, #42
; csel x0, x1, x2, eq
; ret
function %i128_select(b1, i128, i128) -> i128 {
block0(v0: b1, v1: i128, v2: i128):
@@ -98,15 +73,10 @@ block0(v0: b1, v1: i128, v2: i128):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: and w0, w0, #1
; Inst 1: subs wzr, w0, wzr
; Inst 2: csel x0, x2, x4, ne
; Inst 3: csel x1, x3, x5, ne
; Inst 4: ret
; }}
; block0:
; and w14, w0, #1
; subs wzr, w14, wzr
; csel x0, x2, x4, ne
; csel x1, x3, x5, ne
; ret


@@ -8,14 +8,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #255
; Inst 1: ret
; }}
; block0:
; movz x0, #255
; ret
function %f() -> b16 {
block0:
@@ -23,14 +18,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #0
; Inst 1: ret
; }}
; block0:
; movz x0, #0
; ret
function %f() -> i64 {
block0:
@@ -38,14 +28,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #0
; Inst 1: ret
; }}
; block0:
; movz x0, #0
; ret
function %f() -> i64 {
block0:
@@ -53,14 +38,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #65535
; Inst 1: ret
; }}
; block0:
; movz x0, #65535
; ret
function %f() -> i64 {
block0:
@@ -68,14 +48,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #65535, LSL #16
; Inst 1: ret
; }}
; block0:
; movz x0, #65535, LSL #16
; ret
function %f() -> i64 {
block0:
@@ -83,14 +58,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #65535, LSL #32
; Inst 1: ret
; }}
; block0:
; movz x0, #65535, LSL #32
; ret
function %f() -> i64 {
block0:
@@ -98,14 +68,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #65535, LSL #48
; Inst 1: ret
; }}
; block0:
; movz x0, #65535, LSL #48
; ret
function %f() -> i64 {
block0:
@@ -113,14 +78,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #0
; Inst 1: ret
; }}
; block0:
; movn x0, #0
; ret
function %f() -> i64 {
block0:
@@ -128,14 +88,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #65535
; Inst 1: ret
; }}
; block0:
; movn x0, #65535
; ret
function %f() -> i64 {
block0:
@@ -143,14 +98,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #65535, LSL #16
; Inst 1: ret
; }}
; block0:
; movn x0, #65535, LSL #16
; ret
function %f() -> i64 {
block0:
@@ -158,14 +108,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #65535, LSL #32
; Inst 1: ret
; }}
; block0:
; movn x0, #65535, LSL #32
; ret
function %f() -> i64 {
block0:
@@ -173,14 +118,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #65535, LSL #48
; Inst 1: ret
; }}
; block0:
; movn x0, #65535, LSL #48
; ret
function %f() -> i64 {
block0:
@@ -188,17 +128,12 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: movz x0, #58
; Inst 1: movk x0, #4626, LSL #16
; Inst 2: movk x0, #61603, LSL #32
; Inst 3: movk x0, #62283, LSL #48
; Inst 4: ret
; }}
; block0:
; movz x0, #58
; movk x0, #4626, LSL #16
; movk x0, #61603, LSL #32
; movk x0, #62283, LSL #48
; ret
function %f() -> i64 {
block0:
@@ -206,15 +141,10 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #7924, LSL #16
; Inst 1: movk x0, #4841, LSL #48
; Inst 2: ret
; }}
; block0:
; movz x0, #7924, LSL #16
; movk x0, #4841, LSL #48
; ret
function %f() -> i64 {
block0:
@@ -222,15 +152,10 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movn x0, #57611, LSL #16
; Inst 1: movk x0, #4841, LSL #48
; Inst 2: ret
; }}
; block0:
; movn x0, #57611, LSL #16
; movk x0, #4841, LSL #48
; ret
function %f() -> i32 {
block0:
@@ -238,14 +163,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: orr x0, xzr, #4294967295
; Inst 1: ret
; }}
; block0:
; orr x0, xzr, #4294967295
; ret
function %f() -> i32 {
block0:
@@ -253,14 +173,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn w0, #8
; Inst 1: ret
; }}
; block0:
; movn w0, #8
; ret
function %f() -> i64 {
block0:
@@ -268,14 +183,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn w0, #8
; Inst 1: ret
; }}
; block0:
; movn w0, #8
; ret
function %f() -> i64 {
block0:
@@ -283,14 +193,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #8
; Inst 1: ret
; }}
; block0:
; movn x0, #8
; ret
function %f() -> f64 {
block0:
@@ -298,14 +203,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fmov d0, #1
; Inst 1: ret
; }}
; block0:
; fmov d0, #1
; ret
function %f() -> f32 {
block0:
@@ -313,14 +213,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fmov s0, #5
; Inst 1: ret
; }}
; block0:
; fmov s0, #5
; ret
function %f() -> f64 {
block0:
@@ -328,15 +223,10 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #16457, LSL #48
; Inst 1: fmov d0, x0
; Inst 2: ret
; }}
; block0:
; movz x2, #16457, LSL #48
; fmov d0, x2
; ret
function %f() -> f32 {
block0:
@@ -344,15 +234,10 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #16968, LSL #16
; Inst 1: fmov s0, w0
; Inst 2: ret
; }}
; block0:
; movz x2, #16968, LSL #16
; fmov s0, w2
; ret
function %f() -> f64 {
block0:
@@ -360,14 +245,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movi v0.2s, #0
; Inst 1: ret
; }}
; block0:
; movi v0.2s, #0
; ret
function %f() -> f32 {
block0:
@@ -375,14 +255,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movi v0.2s, #0
; Inst 1: ret
; }}
; block0:
; movi v0.2s, #0
; ret
function %f() -> f64 {
block0:
@@ -390,14 +265,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fmov d0, #-16
; Inst 1: ret
; }}
; block0:
; fmov d0, #-16
; ret
function %f() -> f32 {
block0:
@@ -405,11 +275,7 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fmov s0, #-16
; Inst 1: ret
; }}
; block0:
; fmov s0, #-16
; ret


@@ -10,15 +10,10 @@ block0(v0: i8):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxtb x0, w0
; Inst 1: add x0, x0, #42
; Inst 2: ret
; }}
; block0:
; sxtb x4, w0
; add x0, x4, #42
; ret
function %f2(i8, i64) -> i64 {
block0(v0: i8, v1: i64):
@@ -27,14 +22,9 @@ block0(v0: i8, v1: i64):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x1, x0, SXTB
; Inst 1: ret
; }}
; block0:
; add x0, x1, x0, SXTB
; ret
function %i128_uextend_i64(i64) -> i128 {
block0(v0: i64):
@@ -42,14 +32,9 @@ block0(v0: i64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x1, #0
; Inst 1: ret
; }}
; block0:
; movz x1, #0
; ret
function %i128_sextend_i64(i64) -> i128 {
block0(v0: i64):
@@ -57,14 +42,9 @@ block0(v0: i64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: asr x1, x0, #63
; Inst 1: ret
; }}
; block0:
; asr x1, x0, #63
; ret
function %i128_uextend_i32(i32) -> i128 {
block0(v0: i32):
@@ -72,15 +52,10 @@ block0(v0: i32):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: mov w0, w0
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
; block0:
; mov w0, w0
; movz x1, #0
; ret
function %i128_sextend_i32(i32) -> i128 {
block0(v0: i32):
@@ -88,15 +63,10 @@ block0(v0: i32):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxtw x0, w0
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
; block0:
; sxtw x0, w0
; asr x1, x0, #63
; ret
function %i128_uextend_i16(i16) -> i128 {
block0(v0: i16):
@@ -104,15 +74,10 @@ block0(v0: i16):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxth w0, w0
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
; block0:
; uxth w0, w0
; movz x1, #0
; ret
function %i128_sextend_i16(i16) -> i128 {
block0(v0: i16):
@@ -120,15 +85,10 @@ block0(v0: i16):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxth x0, w0
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
; block0:
; sxth x0, w0
; asr x1, x0, #63
; ret
function %i128_uextend_i8(i8) -> i128 {
block0(v0: i8):
@@ -136,15 +96,10 @@ block0(v0: i8):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxtb w0, w0
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
; block0:
; uxtb w0, w0
; movz x1, #0
; ret
function %i128_sextend_i8(i8) -> i128 {
block0(v0: i8):
@@ -152,15 +107,10 @@ block0(v0: i8):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxtb x0, w0
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
; block0:
; sxtb x0, w0
; asr x1, x0, #63
; ret
function %i8x16_uextend_i16(i8x16) -> i16 {
block0(v0: i8x16):
@@ -169,14 +119,9 @@ block0(v0: i8x16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umov w0, v0.b[1]
; Inst 1: ret
; }}
; block0:
; umov w0, v0.b[1]
; ret
function %i8x16_uextend_i32(i8x16) -> i32 {
block0(v0: i8x16):
@@ -185,14 +130,9 @@ block0(v0: i8x16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umov w0, v0.b[1]
; Inst 1: ret
; }}
; block0:
; umov w0, v0.b[1]
; ret
function %i8x16_uextend_i64(i8x16) -> i64 {
block0(v0: i8x16):
@@ -201,14 +141,9 @@ block0(v0: i8x16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umov w0, v0.b[1]
; Inst 1: ret
; }}
; block0:
; umov w0, v0.b[1]
; ret
function %i8x16_uextend_i128(i8x16) -> i128 {
block0(v0: i8x16):
@@ -217,15 +152,10 @@ block0(v0: i8x16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: umov w0, v0.b[1]
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
; block0:
; umov w0, v0.b[1]
; movz x1, #0
; ret
function %i8x16_sextend_i16(i8x16) -> i16 {
block0(v0: i8x16):
@@ -234,14 +164,9 @@ block0(v0: i8x16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov w0, v0.b[1]
; Inst 1: ret
; }}
; block0:
; smov w0, v0.b[1]
; ret
function %i8x16_sextend_i32(i8x16) -> i32 {
block0(v0: i8x16):
@@ -250,14 +175,9 @@ block0(v0: i8x16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov w0, v0.b[1]
; Inst 1: ret
; }}
; block0:
; smov w0, v0.b[1]
; ret
function %i8x16_sextend_i64(i8x16) -> i64 {
block0(v0: i8x16):
@@ -266,14 +186,9 @@ block0(v0: i8x16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov x0, v0.b[1]
; Inst 1: ret
; }}
; block0:
; smov x0, v0.b[1]
; ret
function %i8x16_sextend_i128(i8x16) -> i128 {
block0(v0: i8x16):
@@ -282,15 +197,10 @@ block0(v0: i8x16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: smov x0, v0.b[1]
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
; block0:
; smov x0, v0.b[1]
; asr x1, x0, #63
; ret
function %i16x8_uextend_i32(i16x8) -> i32 {
block0(v0: i16x8):
@@ -299,14 +209,9 @@ block0(v0: i16x8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umov w0, v0.h[1]
; Inst 1: ret
; }}
; block0:
; umov w0, v0.h[1]
; ret
function %i16x8_uextend_i64(i16x8) -> i64 {
block0(v0: i16x8):
@@ -315,14 +220,9 @@ block0(v0: i16x8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umov w0, v0.h[1]
; Inst 1: ret
; }}
; block0:
; umov w0, v0.h[1]
; ret
function %i16x8_uextend_i128(i16x8) -> i128 {
block0(v0: i16x8):
@@ -331,15 +231,10 @@ block0(v0: i16x8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: umov w0, v0.h[1]
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
; block0:
; umov w0, v0.h[1]
; movz x1, #0
; ret
function %i16x8_sextend_i32(i16x8) -> i32 {
block0(v0: i16x8):
@@ -348,14 +243,9 @@ block0(v0: i16x8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov w0, v0.h[1]
; Inst 1: ret
; }}
; block0:
; smov w0, v0.h[1]
; ret
function %i16x8_sextend_i64(i16x8) -> i64 {
block0(v0: i16x8):
@@ -364,14 +254,9 @@ block0(v0: i16x8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov x0, v0.h[1]
; Inst 1: ret
; }}
; block0:
; smov x0, v0.h[1]
; ret
function %i16x8_sextend_i128(i16x8) -> i128 {
block0(v0: i16x8):
@@ -380,15 +265,10 @@ block0(v0: i16x8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: smov x0, v0.h[1]
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
; block0:
; smov x0, v0.h[1]
; asr x1, x0, #63
; ret
function %i32x4_uextend_i64(i32x4) -> i64 {
block0(v0: i32x4):
@@ -397,14 +277,9 @@ block0(v0: i32x4):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: mov w0, v0.s[1]
; Inst 1: ret
; }}
; block0:
; mov w0, v0.s[1]
; ret
function %i32x4_uextend_i128(i32x4) -> i128 {
block0(v0: i32x4):
@@ -413,15 +288,10 @@ block0(v0: i32x4):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: mov w0, v0.s[1]
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
; block0:
; mov w0, v0.s[1]
; movz x1, #0
; ret
function %i32x4_sextend_i64(i32x4) -> i64 {
block0(v0: i32x4):
@@ -430,14 +300,9 @@ block0(v0: i32x4):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov x0, v0.s[1]
; Inst 1: ret
; }}
; block0:
; smov x0, v0.s[1]
; ret
function %i32x4_sextend_i128(i32x4) -> i128 {
block0(v0: i32x4):
@@ -446,15 +311,10 @@ block0(v0: i32x4):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: smov x0, v0.s[1]
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
; block0:
; smov x0, v0.s[1]
; asr x1, x0, #63
; ret
function %i64x2_uextend_i128(i64x2) -> i128 {
block0(v0: i64x2):
@@ -463,15 +323,10 @@ block0(v0: i64x2):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: mov x0, v0.d[1]
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
; block0:
; mov x0, v0.d[1]
; movz x1, #0
; ret
function %i64x2_sextend_i128(i64x2) -> i128 {
block0(v0: i64x2):
@@ -480,13 +335,8 @@ block0(v0: i64x2):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: mov x0, v0.d[1]
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
; block0:
; mov x0, v0.d[1]
; asr x1, x0, #63
; ret


@@ -8,15 +8,10 @@ block0(v0: i8):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxtb w0, w0
; Inst 1: ucvtf s0, w0
; Inst 2: ret
; }}
; block0:
; uxtb w4, w0
; ucvtf s0, w4
; ret
function u0:0(i8) -> f64 {
block0(v0: i8):
@@ -24,15 +19,10 @@ block0(v0: i8):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxtb w0, w0
; Inst 1: ucvtf d0, w0
; Inst 2: ret
; }}
; block0:
; uxtb w4, w0
; ucvtf d0, w4
; ret
function u0:0(i16) -> f32 {
block0(v0: i16):
@@ -40,15 +30,10 @@ block0(v0: i16):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxth w0, w0
; Inst 1: ucvtf s0, w0
; Inst 2: ret
; }}
; block0:
; uxth w4, w0
; ucvtf s0, w4
; ret
function u0:0(i16) -> f64 {
block0(v0: i16):
@@ -56,15 +41,10 @@ block0(v0: i16):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxth w0, w0
; Inst 1: ucvtf d0, w0
; Inst 2: ret
; }}
; block0:
; uxth w4, w0
; ucvtf d0, w4
; ret
function u0:0(f32) -> i8 {
block0(v0: f32):
@@ -72,23 +52,18 @@ block0(v0: f32):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: fcmp s0, s0
; Inst 1: b.vc 8 ; udf
; Inst 2: fmov s1, #-1
; Inst 3: fcmp s0, s1
; Inst 4: b.gt 8 ; udf
; Inst 5: movz x0, #17280, LSL #16
; Inst 6: fmov s1, w0
; Inst 7: fcmp s0, s1
; Inst 8: b.mi 8 ; udf
; Inst 9: fcvtzu w0, s0
; Inst 10: ret
; }}
; block0:
; fcmp s0, s0
; b.vc 8 ; udf
; fmov s6, #-1
; fcmp s0, s6
; b.gt 8 ; udf
; movz x10, #17280, LSL #16
; fmov s6, w10
; fcmp s0, s6
; b.mi 8 ; udf
; fcvtzu w0, s0
; ret
function u0:0(f64) -> i8 {
block0(v0: f64):
@@ -96,23 +71,18 @@ block0(v0: f64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: fcmp d0, d0
; Inst 1: b.vc 8 ; udf
; Inst 2: fmov d1, #-1
; Inst 3: fcmp d0, d1
; Inst 4: b.gt 8 ; udf
; Inst 5: movz x0, #16496, LSL #48
; Inst 6: fmov d1, x0
; Inst 7: fcmp d0, d1
; Inst 8: b.mi 8 ; udf
; Inst 9: fcvtzu w0, d0
; Inst 10: ret
; }}
; block0:
; fcmp d0, d0
; b.vc 8 ; udf
; fmov d6, #-1
; fcmp d0, d6
; b.gt 8 ; udf
; movz x10, #16496, LSL #48
; fmov d6, x10
; fcmp d0, d6
; b.mi 8 ; udf
; fcvtzu w0, d0
; ret
function u0:0(f32) -> i16 {
block0(v0: f32):
@@ -120,23 +90,18 @@ block0(v0: f32):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: fcmp s0, s0
; Inst 1: b.vc 8 ; udf
; Inst 2: fmov s1, #-1
; Inst 3: fcmp s0, s1
; Inst 4: b.gt 8 ; udf
; Inst 5: movz x0, #18304, LSL #16
; Inst 6: fmov s1, w0
; Inst 7: fcmp s0, s1
; Inst 8: b.mi 8 ; udf
; Inst 9: fcvtzu w0, s0
; Inst 10: ret
; }}
; block0:
; fcmp s0, s0
; b.vc 8 ; udf
; fmov s6, #-1
; fcmp s0, s6
; b.gt 8 ; udf
; movz x10, #18304, LSL #16
; fmov s6, w10
; fcmp s0, s6
; b.mi 8 ; udf
; fcvtzu w0, s0
; ret
function u0:0(f64) -> i16 {
block0(v0: f64):
@@ -144,21 +109,16 @@ block0(v0: f64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: fcmp d0, d0
; Inst 1: b.vc 8 ; udf
; Inst 2: fmov d1, #-1
; Inst 3: fcmp d0, d1
; Inst 4: b.gt 8 ; udf
; Inst 5: movz x0, #16624, LSL #48
; Inst 6: fmov d1, x0
; Inst 7: fcmp d0, d1
; Inst 8: b.mi 8 ; udf
; Inst 9: fcvtzu w0, d0
; Inst 10: ret
; }}
; block0:
; fcmp d0, d0
; b.vc 8 ; udf
; fmov d6, #-1
; fcmp d0, d6
; b.gt 8 ; udf
; movz x10, #16624, LSL #48
; fmov d6, x10
; fcmp d0, d6
; b.mi 8 ; udf
; fcvtzu w0, d0
; ret

File diff suppressed because it is too large.


@@ -13,31 +13,20 @@ block0(v0: i64, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 5)
; Inst 0: mov w2, w1
; Inst 1: ldr x3, [x0]
; Inst 2: mov x3, x3
; Inst 3: subs xzr, x2, x3
; Inst 4: b.ls label1 ; b label2
; Block 1:
; (original IR block: block2)
; (instruction range: 5 .. 10)
; Inst 5: add x0, x0, x1, UXTW
; Inst 6: subs xzr, x2, x3
; Inst 7: movz x1, #0
; Inst 8: csel x0, x1, x0, hi
; Inst 9: ret
; Block 2:
; (original IR block: block1)
; (instruction range: 10 .. 11)
; Inst 10: udf
; }}
; block0:
; mov w10, w1
; ldr x5, [x0]
; mov x11, x5
; subs xzr, x10, x11
; b.ls label1 ; b label2
; block1:
; add x13, x0, x1, UXTW
; subs xzr, x10, x11
; movz x14, #0
; csel x0, x14, x13, hi
; ret
; block2:
; udf
function %static_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
@@ -48,27 +37,16 @@ block0(v0: i64, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 3)
; Inst 0: mov w2, w1
; Inst 1: subs xzr, x2, #65536
; Inst 2: b.ls label1 ; b label2
; Block 1:
; (original IR block: block2)
; (instruction range: 3 .. 8)
; Inst 3: add x0, x0, x1, UXTW
; Inst 4: subs xzr, x2, #65536
; Inst 5: movz x1, #0
; Inst 6: csel x0, x1, x0, hi
; Inst 7: ret
; Block 2:
; (original IR block: block1)
; (instruction range: 8 .. 9)
; Inst 8: udf
; }}
; block0:
; mov w8, w1
; subs xzr, x8, #65536
; b.ls label1 ; b label2
; block1:
; add x10, x0, x1, UXTW
; subs xzr, x8, #65536
; movz x11, #0
; csel x0, x11, x10, hi
; ret
; block2:
; udf


@@ -14,17 +14,12 @@ block0:
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: movz x0, #56780
; Inst 1: uxth w0, w0
; Inst 2: movz x1, #56780
; Inst 3: subs wzr, w0, w1, UXTH
; Inst 4: cset x0, ne
; Inst 5: and w0, w0, #1
; Inst 6: ret
; }}
; block0:
; movz x3, #56780
; uxth w5, w3
; movz x7, #56780
; subs wzr, w5, w7, UXTH
; cset x4, ne
; and w0, w4, #1
; ret


@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -29,16 +29,31 @@ block5(v5: i64):
return v6
}
; check: subs wzr, w0, #3
; nextln: b.hs label1 ; adr x1, pc+16 ; ldrsw x2, [x1, x0, LSL 2] ; add x1, x1, x2 ; br x1 ; jt_entries
; check: movz x1, #1
; nextln: b
; check: movz x1, #2
; nextln: b
; check: movz x1, #3
; check: add x0, x0, x1
; block0:
; emit_island 36
; subs wzr, w0, #3
; b.hs label1 ; adr x15, pc+16 ; ldrsw x1, [x15, x0, LSL 2] ; add x15, x15, x1 ; br x15 ; jt_entries [Label(MachLabel(3)), Label(MachLabel(5)), Label(MachLabel(7))]
; block1:
; movz x5, #4
; b label2
; block2:
; b label9
; block3:
; movz x5, #1
; b label4
; block4:
; b label9
; block5:
; movz x5, #2
; b label6
; block6:
; b label9
; block7:
; movz x5, #3
; b label8
; block8:
; b label9
; block9:
; add x0, x0, x5
; ret
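
For reference, this is the shape of a converted test, as a minimal sketch (the function and the exact registers in the expected output are illustrative, not taken from the diff above): the `precise-output` directive replaces the hand-maintained `check:`/`nextln:` patterns, and the full expected VCode is recorded as `;`-prefixed lines after each function.

```
test compile precise-output
set unwind_info=false
target aarch64

function %iadd64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    v2 = iadd v0, v1
    return v2
}

; block0:
; add x0, x0, x1
; ret
```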


@@ -10,13 +10,8 @@ block1:
return v0, v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block1)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #1
; Inst 1: movz x1, #2
; Inst 2: ret
; }}
; block0:
; movz x0, #1
; movz x1, #2
; ret


@@ -8,14 +8,9 @@ block0(v0: i8, v1: i8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1
; Inst 1: ret
; }}
; block0:
; add w0, w0, w1
; ret
function %add16(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -23,14 +18,9 @@ block0(v0: i16, v1: i16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1
; Inst 1: ret
; }}
; block0:
; add w0, w0, w1
; ret
function %add32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -38,14 +28,9 @@ block0(v0: i32, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1
; Inst 1: ret
; }}
; block0:
; add w0, w0, w1
; ret
function %add32_8(i32, i8) -> i32 {
block0(v0: i32, v1: i8):
@@ -54,14 +39,9 @@ block0(v0: i32, v1: i8):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1, SXTB
; Inst 1: ret
; }}
; block0:
; add w0, w0, w1, SXTB
; ret
function %add64_32(i64, i32) -> i64 {
block0(v0: i64, v1: i32):
@@ -70,12 +50,7 @@ block0(v0: i64, v1: i32):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x0, x1, SXTW
; Inst 1: ret
; }}
; block0:
; add x0, x0, x1, SXTW
; ret


@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -75,19 +75,85 @@ block0(v0: f64):
return v62
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: stp d14, d15, [sp, #-16]!
; nextln: stp d12, d13, [sp, #-16]!
; nextln: stp d10, d11, [sp, #-16]!
; nextln: stp d8, d9, [sp, #-16]!
; check: ldp d8, d9, [sp], #16
; nextln: ldp d10, d11, [sp], #16
; nextln: ldp d12, d13, [sp], #16
; nextln: ldp d14, d15, [sp], #16
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; stp d14, d15, [sp, #-16]!
; stp d12, d13, [sp, #-16]!
; stp d10, d11, [sp, #-16]!
; stp d8, d9, [sp, #-16]!
; sub sp, sp, #16
; block0:
; fadd d4, d0, d0
; fadd d6, d0, d0
; str q6, [sp]
; fadd d6, d0, d0
; fadd d8, d0, d0
; fadd d10, d0, d0
; fadd d12, d0, d0
; fadd d14, d0, d0
; fadd d1, d0, d0
; fadd d3, d0, d0
; fadd d5, d0, d0
; fadd d7, d0, d0
; fadd d9, d0, d0
; fadd d11, d0, d0
; fadd d13, d0, d0
; fadd d16, d0, d0
; fadd d15, d0, d0
; fadd d20, d0, d0
; fadd d22, d0, d0
; fadd d24, d0, d0
; fadd d26, d0, d0
; fadd d28, d0, d0
; fadd d30, d0, d0
; fadd d17, d0, d0
; fadd d19, d0, d0
; fadd d21, d0, d0
; fadd d23, d0, d0
; fadd d25, d0, d0
; fadd d27, d0, d0
; fadd d29, d0, d0
; fadd d18, d0, d0
; fadd d2, d0, d0
; fadd d0, d0, d4
; ldr q4, [sp]
; fadd d6, d4, d6
; fadd d4, d8, d10
; fadd d10, d12, d14
; fadd d8, d1, d3
; fadd d14, d5, d7
; fadd d12, d9, d11
; fadd d3, d13, d16
; fadd d1, d15, d20
; fadd d7, d22, d24
; fadd d5, d26, d28
; fadd d11, d30, d17
; fadd d9, d19, d21
; fadd d15, d23, d25
; fadd d13, d27, d29
; fadd d2, d18, d2
; fadd d0, d0, d6
; fadd d6, d4, d10
; fadd d4, d8, d14
; fadd d10, d12, d3
; fadd d8, d1, d7
; fadd d11, d5, d11
; fadd d12, d9, d15
; fadd d14, d13, d2
; fadd d0, d0, d6
; fadd d2, d4, d10
; fadd d4, d8, d11
; fadd d6, d12, d14
; fadd d8, d0, d2
; fadd d10, d4, d6
; fadd d0, d8, d10
; add sp, sp, #16
; ldp d8, d9, [sp], #16
; ldp d10, d11, [sp], #16
; ldp d12, d13, [sp], #16
; ldp d14, d15, [sp], #16
; ldp fp, lr, [sp], #16
; ret
function %f2(i64) -> i64 {
block0(v0: i64):
@@ -135,14 +201,49 @@ block0(v0: i64):
return v36
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: str x22, [sp, #-16]!
; nextln: stp x19, x20, [sp, #-16]!
; nextln: add x1, x0, x0
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x28, [sp, #-16]!
; stp x19, x21, [sp, #-16]!
; block0:
; add x6, x0, x0
; add x7, x0, x6
; add x8, x0, x7
; add x9, x0, x8
; add x10, x0, x9
; add x11, x0, x10
; add x12, x0, x11
; add x13, x0, x12
; add x14, x0, x13
; add x15, x0, x14
; add x1, x0, x15
; add x2, x0, x1
; add x3, x0, x2
; add x4, x0, x3
; add x5, x0, x4
; add x28, x0, x5
; add x21, x0, x28
; add x19, x0, x21
; add x6, x0, x6
; add x7, x7, x8
; add x8, x9, x10
; add x9, x11, x12
; add x10, x13, x14
; add x11, x15, x1
; add x12, x2, x3
; add x13, x4, x5
; add x14, x28, x21
; add x6, x19, x6
; add x7, x7, x8
; add x8, x9, x10
; add x9, x11, x12
; add x10, x13, x14
; add x6, x6, x7
; add x7, x8, x9
; add x6, x10, x6
; add x0, x7, x6
; ldp x19, x21, [sp], #16
; ldr x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
; check: add x0, x1, x0
; nextln: ldp x19, x20, [sp], #16
; nextln: ldr x22, [sp], #16
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -8,13 +8,8 @@ block0(v0: i128):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
; block0:
; ret
function %ireduce_128_32(i128) -> i32 {
block0(v0: i128):
@@ -22,13 +17,8 @@ block0(v0: i128):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
; block0:
; ret
function %ireduce_128_16(i128) -> i16 {
block0(v0: i128):
@@ -36,13 +26,8 @@ block0(v0: i128):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
; block0:
; ret
function %ireduce_128_8(i128) -> i8 {
block0(v0: i128):
@@ -50,11 +35,6 @@ block0(v0: i128):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
; block0:
; ret


@@ -7,13 +7,8 @@ block0(v0: r64):
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
; block0:
; ret
function %f1(r64) -> b1 {
block0(v0: r64):
@@ -21,15 +16,10 @@ block0(v0: r64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs xzr, x0, #0
; Inst 1: cset x0, eq
; Inst 2: ret
; }}
; block0:
; subs xzr, x0, #0
; cset x0, eq
; ret
function %f2(r64) -> b1 {
block0(v0: r64):
@@ -37,15 +27,10 @@ block0(v0: r64):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: adds xzr, x0, #1
; Inst 1: cset x0, eq
; Inst 2: ret
; }}
; block0:
; adds xzr, x0, #1
; cset x0, eq
; ret
function %f3() -> r64 {
block0:
@@ -53,14 +38,9 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #0
; Inst 1: ret
; }}
; block0:
; movz x0, #0
; ret
function %f4(r64, r64) -> r64, r64, r64 {
fn0 = %f(r64) -> b1
@@ -83,63 +63,38 @@ block3(v7: r64, v8: r64):
return v7, v8, v9
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 3)
; (instruction range: 0 .. 18)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: stp x19, x20, [sp, #-16]!
; Inst 3: sub sp, sp, #32
; Inst 4: mov x19, x0
; Inst 5: mov x20, x1
; Inst 6: mov x0, x19
; Inst 7: ldr x1, 8 ; b 12 ; data TestCase { length: 1, ascii: [102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; Inst 8: stur x0, [sp, #8]
; Inst 9: stur x19, [sp, #16]
; Inst 10: stur x20, [sp, #24]
; (safepoint: slots [S0, S1, S2] with EmitState EmitState { virtual_sp_offset: 0, nominal_sp_to_fp: 0, stack_map: None, cur_srcloc: SourceLoc(4294967295) })
; Inst 11: blr x1
; Inst 12: ldur x19, [sp, #16]
; Inst 13: ldur x20, [sp, #24]
; Inst 14: mov x1, sp
; Inst 15: str x19, [x1]
; Inst 16: and w0, w0, #1
; Inst 17: cbz x0, label1 ; b label3
; Block 1:
; (original IR block: block1)
; (successor: Block 2)
; (instruction range: 18 .. 19)
; Inst 18: b label2
; Block 2:
; (successor: Block 5)
; (instruction range: 19 .. 21)
; Inst 19: mov x0, x20
; Inst 20: b label5
; Block 3:
; (original IR block: block2)
; (successor: Block 4)
; (instruction range: 21 .. 22)
; Inst 21: b label4
; Block 4:
; (successor: Block 5)
; (instruction range: 22 .. 25)
; Inst 22: mov x0, x19
; Inst 23: mov x19, x20
; Inst 24: b label5
; Block 5:
; (original IR block: block3)
; (instruction range: 25 .. 33)
; Inst 25: mov x1, sp
; Inst 26: ldr x1, [x1]
; Inst 27: mov x2, x1
; Inst 28: mov x1, x19
; Inst 29: add sp, sp, #32
; Inst 30: ldp x19, x20, [sp], #16
; Inst 31: ldp fp, lr, [sp], #16
; Inst 32: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #32
; block0:
; mov x4, x1
; mov x2, x0
; ldr x3, 8 ; b 12 ; data TestCase { length: 1, ascii: [102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; str x2, [sp, #8]
; str x4, [sp, #16]
; blr x3
; ldr x2, [sp, #8]
; mov x9, sp
; mov x12, x2
; str x12, [x9]
; and w7, w0, #1
; cbz x7, label1 ; b label3
; block1:
; b label2
; block2:
; mov x1, x12
; ldr x0, [sp, #16]
; b label5
; block3:
; b label4
; block4:
; mov x0, x12
; ldr x1, [sp, #16]
; b label5
; block5:
; mov x4, sp
; ldr x2, [x4]
; add sp, sp, #32
; ldp fp, lr, [sp], #16
; ret


@@ -10,14 +10,9 @@ block0(v0: i64):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x0, x0, LSL 3
; Inst 1: ret
; }}
; block0:
; add x0, x0, x0, LSL 3
; ret
function %f(i32) -> i32 {
block0(v0: i32):
@@ -26,12 +21,7 @@ block0(v0: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsl w0, w0, #21
; Inst 1: ret
; }}
; block0:
; lsl w0, w0, #21
; ret


@@ -12,37 +12,30 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 25)
; Inst 0: mov x4, x1
; Inst 1: orr x1, xzr, #128
; Inst 2: sub x1, x1, x2
; Inst 3: lsr x5, x0, x2
; Inst 4: lsr x3, x4, x2
; Inst 5: orn w6, wzr, w2
; Inst 6: lsl x7, x4, #1
; Inst 7: lsl x6, x7, x6
; Inst 8: orr x5, x5, x6
; Inst 9: ands xzr, x2, #64
; Inst 10: csel x2, x3, x5, ne
; Inst 11: csel x3, xzr, x3, ne
; Inst 12: lsl x5, x0, x1
; Inst 13: lsl x4, x4, x1
; Inst 14: orn w6, wzr, w1
; Inst 15: lsr x0, x0, #1
; Inst 16: lsr x0, x0, x6
; Inst 17: orr x0, x4, x0
; Inst 18: ands xzr, x1, #64
; Inst 19: csel x1, xzr, x5, ne
; Inst 20: csel x0, x5, x0, ne
; Inst 21: orr x3, x3, x0
; Inst 22: orr x0, x2, x1
; Inst 23: mov x1, x3
; Inst 24: ret
; }}
; block0:
; orr x10, xzr, #128
; sub x12, x10, x2
; lsr x14, x0, x2
; lsr x3, x1, x2
; orn w4, wzr, w2
; lsl x5, x1, #1
; lsl x6, x5, x4
; orr x8, x14, x6
; ands xzr, x2, #64
; csel x11, x3, x8, ne
; csel x13, xzr, x3, ne
; lsl x15, x0, x12
; lsl x1, x1, x12
; orn w3, wzr, w12
; lsr x5, x0, #1
; lsr x7, x5, x3
; orr x9, x1, x7
; ands xzr, x12, #64
; csel x12, xzr, x15, ne
; csel x14, x15, x9, ne
; orr x1, x13, x14
; orr x0, x11, x12
; ret
function %f0(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -50,14 +43,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ror x0, x0, x1
; Inst 1: ret
; }}
; block0:
; ror x0, x0, x1
; ret
function %f1(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -65,14 +53,9 @@ block0(v0: i32, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ror w0, w0, w1
; Inst 1: ret
; }}
; block0:
; ror w0, w0, w1
; ret
function %f2(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -80,20 +63,15 @@ block0(v0: i16, v1: i16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: uxth w0, w0
; Inst 1: and w1, w1, #15
; Inst 2: sub w2, w1, #16
; Inst 3: sub w2, wzr, w2
; Inst 4: lsr w1, w0, w1
; Inst 5: lsl w0, w0, w2
; Inst 6: orr w0, w0, w1
; Inst 7: ret
; }}
; block0:
; uxth w5, w0
; and w7, w1, #15
; sub w9, w7, #16
; sub w11, wzr, w9
; lsr w13, w5, w7
; lsl w15, w5, w11
; orr w0, w15, w13
; ret
function %f3(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
@@ -101,20 +79,15 @@ block0(v0: i8, v1: i8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: uxtb w0, w0
; Inst 1: and w1, w1, #7
; Inst 2: sub w2, w1, #8
; Inst 3: sub w2, wzr, w2
; Inst 4: lsr w1, w0, w1
; Inst 5: lsl w0, w0, w2
; Inst 6: orr w0, w0, w1
; Inst 7: ret
; }}
; block0:
; uxtb w5, w0
; and w7, w1, #7
; sub w9, w7, #8
; sub w11, wzr, w9
; lsr w13, w5, w7
; lsl w15, w5, w11
; orr w0, w15, w13
; ret
function %i128_rotl(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
@@ -122,36 +95,30 @@ block0(v0: i128, v1: i128):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 24)
; Inst 0: mov x4, x0
; Inst 1: orr x0, xzr, #128
; Inst 2: sub x0, x0, x2
; Inst 3: lsl x3, x4, x2
; Inst 4: lsl x5, x1, x2
; Inst 5: orn w6, wzr, w2
; Inst 6: lsr x7, x4, #1
; Inst 7: lsr x6, x7, x6
; Inst 8: orr x5, x5, x6
; Inst 9: ands xzr, x2, #64
; Inst 10: csel x2, xzr, x3, ne
; Inst 11: csel x3, x3, x5, ne
; Inst 12: lsr x5, x4, x0
; Inst 13: lsr x4, x1, x0
; Inst 14: orn w6, wzr, w0
; Inst 15: lsl x1, x1, #1
; Inst 16: lsl x1, x1, x6
; Inst 17: orr x1, x5, x1
; Inst 18: ands xzr, x0, #64
; Inst 19: csel x0, x4, x1, ne
; Inst 20: csel x1, xzr, x4, ne
; Inst 21: orr x0, x2, x0
; Inst 22: orr x1, x3, x1
; Inst 23: ret
; }}
; block0:
; orr x10, xzr, #128
; sub x12, x10, x2
; lsl x14, x0, x2
; lsl x3, x1, x2
; orn w4, wzr, w2
; lsr x5, x0, #1
; lsr x6, x5, x4
; orr x8, x3, x6
; ands xzr, x2, #64
; csel x11, xzr, x14, ne
; csel x13, x14, x8, ne
; lsr x15, x0, x12
; lsr x2, x1, x12
; orn w3, wzr, w12
; lsl x5, x1, #1
; lsl x7, x5, x3
; orr x9, x15, x7
; ands xzr, x12, #64
; csel x12, x2, x9, ne
; csel x14, xzr, x2, ne
; orr x0, x11, x12
; orr x1, x13, x14
; ret
function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -159,15 +126,10 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sub x1, xzr, x1
; Inst 1: ror x0, x0, x1
; Inst 2: ret
; }}
; block0:
; sub x5, xzr, x1
; ror x0, x0, x5
; ret
function %f5(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -175,15 +137,10 @@ block0(v0: i32, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sub w1, wzr, w1
; Inst 1: ror w0, w0, w1
; Inst 2: ret
; }}
; block0:
; sub w5, wzr, w1
; ror w0, w0, w5
; ret
function %f6(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -191,21 +148,16 @@ block0(v0: i16, v1: i16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 9)
; Inst 0: sub w1, wzr, w1
; Inst 1: uxth w0, w0
; Inst 2: and w1, w1, #15
; Inst 3: sub w2, w1, #16
; Inst 4: sub w2, wzr, w2
; Inst 5: lsr w1, w0, w1
; Inst 6: lsl w0, w0, w2
; Inst 7: orr w0, w0, w1
; Inst 8: ret
; }}
; block0:
; sub w5, wzr, w1
; uxth w7, w0
; and w9, w5, #15
; sub w11, w9, #16
; sub w13, wzr, w11
; lsr w15, w7, w9
; lsl w1, w7, w13
; orr w0, w1, w15
; ret
function %f7(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
@@ -213,21 +165,16 @@ block0(v0: i8, v1: i8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 9)
; Inst 0: sub w1, wzr, w1
; Inst 1: uxtb w0, w0
; Inst 2: and w1, w1, #7
; Inst 3: sub w2, w1, #8
; Inst 4: sub w2, wzr, w2
; Inst 5: lsr w1, w0, w1
; Inst 6: lsl w0, w0, w2
; Inst 7: orr w0, w0, w1
; Inst 8: ret
; }}
; block0:
; sub w5, wzr, w1
; uxtb w7, w0
; and w9, w5, #7
; sub w11, w9, #8
; sub w13, wzr, w11
; lsr w15, w7, w9
; lsl w1, w7, w13
; orr w0, w1, w15
; ret
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -235,14 +182,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsr x0, x0, x1
; Inst 1: ret
; }}
; block0:
; lsr x0, x0, x1
; ret
function %f9(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -250,14 +192,9 @@ block0(v0: i32, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsr w0, w0, w1
; Inst 1: ret
; }}
; block0:
; lsr w0, w0, w1
; ret
function %f10(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -265,16 +202,11 @@ block0(v0: i16, v1: i16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxth w0, w0
; Inst 1: and w1, w1, #15
; Inst 2: lsr w0, w0, w1
; Inst 3: ret
; }}
; block0:
; uxth w5, w0
; and w7, w1, #15
; lsr w0, w5, w7
; ret
function %f11(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
@@ -282,16 +214,11 @@ block0(v0: i8, v1: i8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxtb w0, w0
; Inst 1: and w1, w1, #7
; Inst 2: lsr w0, w0, w1
; Inst 3: ret
; }}
; block0:
; uxtb w5, w0
; and w7, w1, #7
; lsr w0, w5, w7
; ret
function %f12(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -299,14 +226,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsl x0, x0, x1
; Inst 1: ret
; }}
; block0:
; lsl x0, x0, x1
; ret
function %f13(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -314,14 +236,9 @@ block0(v0: i32, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsl w0, w0, w1
; Inst 1: ret
; }}
; block0:
; lsl w0, w0, w1
; ret
function %f14(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -329,15 +246,10 @@ block0(v0: i16, v1: i16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: and w1, w1, #15
; Inst 1: lsl w0, w0, w1
; Inst 2: ret
; }}
; block0:
; and w5, w1, #15
; lsl w0, w0, w5
; ret
function %f15(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
@@ -345,15 +257,10 @@ block0(v0: i8, v1: i8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: and w1, w1, #7
; Inst 1: lsl w0, w0, w1
; Inst 2: ret
; }}
; block0:
; and w5, w1, #7
; lsl w0, w0, w5
; ret
function %f16(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -361,14 +268,9 @@ block0(v0: i64, v1: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: asr x0, x0, x1
; Inst 1: ret
; }}
; block0:
; asr x0, x0, x1
; ret
function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -376,14 +278,9 @@ block0(v0: i32, v1: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: asr w0, w0, w1
; Inst 1: ret
; }}
; block0:
; asr w0, w0, w1
; ret
function %f18(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -391,16 +288,11 @@ block0(v0: i16, v1: i16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: sxth w0, w0
; Inst 1: and w1, w1, #15
; Inst 2: asr w0, w0, w1
; Inst 3: ret
; }}
; block0:
; sxth w5, w0
; and w7, w1, #15
; asr w0, w5, w7
; ret
function %f19(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
@@ -408,16 +300,11 @@ block0(v0: i8, v1: i8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: sxtb w0, w0
; Inst 1: and w1, w1, #7
; Inst 2: asr w0, w0, w1
; Inst 3: ret
; }}
; block0:
; sxtb w5, w0
; and w7, w1, #7
; asr w0, w5, w7
; ret
function %f20(i64) -> i64 {
block0(v0: i64):
@@ -426,14 +313,9 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ror x0, x0, #17
; Inst 1: ret
; }}
; block0:
; ror x0, x0, #17
; ret
function %f21(i64) -> i64 {
block0(v0: i64):
@@ -442,14 +324,9 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ror x0, x0, #47
; Inst 1: ret
; }}
; block0:
; ror x0, x0, #47
; ret
function %f22(i32) -> i32 {
block0(v0: i32):
@@ -458,14 +335,9 @@ block0(v0: i32):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ror w0, w0, #15
; Inst 1: ret
; }}
; block0:
; ror w0, w0, #15
; ret
function %f23(i16) -> i16 {
block0(v0: i16):
@@ -474,17 +346,12 @@ block0(v0: i16):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: uxth w0, w0
; Inst 1: lsr w1, w0, #6
; Inst 2: lsl w0, w0, #10
; Inst 3: orr w0, w0, w1
; Inst 4: ret
; }}
; block0:
; uxth w3, w0
; lsr w5, w3, #6
; lsl w7, w3, #10
; orr w0, w7, w5
; ret
function %f24(i8) -> i8 {
block0(v0: i8):
@@ -493,17 +360,12 @@ block0(v0: i8):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: uxtb w0, w0
; Inst 1: lsr w1, w0, #5
; Inst 2: lsl w0, w0, #3
; Inst 3: orr w0, w0, w1
; Inst 4: ret
; }}
; block0:
; uxtb w3, w0
; lsr w5, w3, #5
; lsl w7, w3, #3
; orr w0, w7, w5
; ret
function %f25(i64) -> i64 {
block0(v0: i64):
@@ -512,14 +374,9 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsr x0, x0, #17
; Inst 1: ret
; }}
; block0:
; lsr x0, x0, #17
; ret
function %f26(i64) -> i64 {
block0(v0: i64):
@@ -528,14 +385,9 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: asr x0, x0, #17
; Inst 1: ret
; }}
; block0:
; asr x0, x0, #17
; ret
function %f27(i64) -> i64 {
block0(v0: i64):
@@ -544,12 +396,7 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsl x0, x0, #17
; Inst 1: ret
; }}
; block0:
; lsl x0, x0, #17
; ret


@@ -10,14 +10,9 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull v0.8h, v0.8b, v1.8b
; Inst 1: ret
; }}
; block0:
; smull v0.8h, v0.8b, v1.8b
; ret
function %fn2(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
@@ -27,14 +22,9 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull2 v0.8h, v0.16b, v1.16b
; Inst 1: ret
; }}
; block0:
; smull2 v0.8h, v0.16b, v1.16b
; ret
function %fn3(i16x8, i16x8) -> i32x4 {
block0(v0: i16x8, v1: i16x8):
@@ -44,14 +34,9 @@ block0(v0: i16x8, v1: i16x8):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull v0.4s, v0.4h, v1.4h
; Inst 1: ret
; }}
; block0:
; smull v0.4s, v0.4h, v1.4h
; ret
function %fn4(i16x8, i16x8) -> i32x4 {
block0(v0: i16x8, v1: i16x8):
@@ -61,14 +46,9 @@ block0(v0: i16x8, v1: i16x8):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull2 v0.4s, v0.8h, v1.8h
; Inst 1: ret
; }}
; block0:
; smull2 v0.4s, v0.8h, v1.8h
; ret
function %fn5(i32x4, i32x4) -> i64x2 {
block0(v0: i32x4, v1: i32x4):
@@ -78,14 +58,9 @@ block0(v0: i32x4, v1: i32x4):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull v0.2d, v0.2s, v1.2s
; Inst 1: ret
; }}
; block0:
; smull v0.2d, v0.2s, v1.2s
; ret
function %fn6(i32x4, i32x4) -> i64x2 {
block0(v0: i32x4, v1: i32x4):
@@ -95,14 +70,9 @@ block0(v0: i32x4, v1: i32x4):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull2 v0.2d, v0.4s, v1.4s
; Inst 1: ret
; }}
; block0:
; smull2 v0.2d, v0.4s, v1.4s
; ret
function %fn7(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
@@ -112,14 +82,9 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull v0.8h, v0.8b, v1.8b
; Inst 1: ret
; }}
; block0:
; umull v0.8h, v0.8b, v1.8b
; ret
function %fn8(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
@@ -129,14 +94,9 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull2 v0.8h, v0.16b, v1.16b
; Inst 1: ret
; }}
; block0:
; umull2 v0.8h, v0.16b, v1.16b
; ret
function %fn9(i16x8, i16x8) -> i32x4 {
block0(v0: i16x8, v1: i16x8):
@@ -146,14 +106,9 @@ block0(v0: i16x8, v1: i16x8):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull v0.4s, v0.4h, v1.4h
; Inst 1: ret
; }}
; block0:
; umull v0.4s, v0.4h, v1.4h
; ret
function %fn10(i16x8, i16x8) -> i32x4 {
block0(v0: i16x8, v1: i16x8):
@@ -163,14 +118,9 @@ block0(v0: i16x8, v1: i16x8):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull2 v0.4s, v0.8h, v1.8h
; Inst 1: ret
; }}
; block0:
; umull2 v0.4s, v0.8h, v1.8h
; ret
function %fn11(i32x4, i32x4) -> i64x2 {
block0(v0: i32x4, v1: i32x4):
@@ -180,14 +130,9 @@ block0(v0: i32x4, v1: i32x4):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull v0.2d, v0.2s, v1.2s
; Inst 1: ret
; }}
; block0:
; umull v0.2d, v0.2s, v1.2s
; ret
function %fn12(i32x4, i32x4) -> i64x2 {
block0(v0: i32x4, v1: i32x4):
@@ -197,12 +142,7 @@ block0(v0: i32x4, v1: i32x4):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull2 v0.2d, v0.4s, v1.4s
; Inst 1: ret
; }}
; block0:
; umull2 v0.2d, v0.4s, v1.4s
; ret


@@ -11,14 +11,9 @@ block0(v0: i8x16):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: saddlp v0.8h, v0.16b
; Inst 1: ret
; }}
; block0:
; saddlp v0.8h, v0.16b
; ret
function %fn2(i8x16) -> i16x8 {
block0(v0: i8x16):
@@ -28,14 +23,9 @@ block0(v0: i8x16):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uaddlp v0.8h, v0.16b
; Inst 1: ret
; }}
; block0:
; uaddlp v0.8h, v0.16b
; ret
function %fn3(i16x8) -> i32x4 {
block0(v0: i16x8):
@@ -45,14 +35,9 @@ block0(v0: i16x8):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: saddlp v0.4s, v0.8h
; Inst 1: ret
; }}
; block0:
; saddlp v0.4s, v0.8h
; ret
function %fn4(i16x8) -> i32x4 {
block0(v0: i16x8):
@@ -62,14 +47,9 @@ block0(v0: i16x8):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uaddlp v0.4s, v0.8h
; Inst 1: ret
; }}
; block0:
; uaddlp v0.4s, v0.8h
; ret
function %fn5(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
@@ -79,16 +59,11 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: sxtl v0.8h, v0.8b
; Inst 1: sxtl2 v1.8h, v1.16b
; Inst 2: addp v0.8h, v0.8h, v1.8h
; Inst 3: ret
; }}
; block0:
; sxtl v4.8h, v0.8b
; sxtl2 v6.8h, v1.16b
; addp v0.8h, v4.8h, v6.8h
; ret
function %fn6(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
@@ -98,16 +73,11 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxtl v0.8h, v0.8b
; Inst 1: uxtl2 v1.8h, v1.16b
; Inst 2: addp v0.8h, v0.8h, v1.8h
; Inst 3: ret
; }}
; block0:
; uxtl v4.8h, v0.8b
; uxtl2 v6.8h, v1.16b
; addp v0.8h, v4.8h, v6.8h
; ret
function %fn7(i8x16) -> i16x8 {
block0(v0: i8x16):
@@ -117,16 +87,11 @@ block0(v0: i8x16):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxtl v1.8h, v0.8b
; Inst 1: sxtl2 v0.8h, v0.16b
; Inst 2: addp v0.8h, v1.8h, v0.8h
; Inst 3: ret
; }}
; block0:
; uxtl v2.8h, v0.8b
; sxtl2 v4.8h, v0.16b
; addp v0.8h, v2.8h, v4.8h
; ret
function %fn8(i8x16) -> i16x8 {
block0(v0: i8x16):
@@ -136,14 +101,9 @@ block0(v0: i8x16):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: sxtl v1.8h, v0.8b
; Inst 1: uxtl2 v0.8h, v0.16b
; Inst 2: addp v0.8h, v1.8h, v0.8h
; Inst 3: ret
; }}
; block0:
; sxtl v2.8h, v0.8b
; uxtl2 v4.8h, v0.16b
; addp v0.8h, v2.8h, v4.8h
; ret


@@ -9,16 +9,11 @@ block0:
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: movz x0, #1
; Inst 1: movk x0, #1, LSL #48
; Inst 2: dup v0.2d, x0
; Inst 3: ret
; }}
; block0:
; movz x2, #1
; movk x2, #1, LSL #48
; dup v0.2d, x2
; ret
function %f2() -> i16x8 {
block0:
@@ -28,15 +23,10 @@ block0:
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #42679
; Inst 1: dup v0.8h, w0
; Inst 2: ret
; }}
; block0:
; movz x2, #42679
; dup v0.8h, w2
; ret
function %f3() -> b8x16 {
block0:
@@ -46,14 +36,9 @@ block0:
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movi v0.16b, #255
; Inst 1: ret
; }}
; block0:
; movi v0.16b, #255
; ret
function %f4(i32, i8x16, i8x16) -> i8x16 {
block0(v0: i32, v1: i8x16, v2: i8x16):
@@ -61,15 +46,10 @@ block0(v0: i32, v1: i8x16, v2: i8x16):
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs wzr, w0, wzr
; Inst 1: vcsel v0.16b, v0.16b, v1.16b, ne (if-then-else diamond)
; Inst 2: ret
; }}
; block0:
; subs wzr, w0, wzr
; vcsel v0.16b, v0.16b, v1.16b, ne (if-then-else diamond)
; ret
function %f5(i64) -> i8x16 {
block0(v0: i64):
@@ -78,14 +58,9 @@ block0(v0: i64):
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ld1r { v0.16b }, [x0]
; Inst 1: ret
; }}
; block0:
; ld1r { v0.16b }, [x0]
; ret
function %f6(i64, i64) -> i8x16, i8x16 {
block0(v0: i64, v1: i64):
@@ -96,15 +71,10 @@ block0(v0: i64, v1: i64):
return v4, v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: ld1r { v0.16b }, [x0]
; Inst 1: ld1r { v1.16b }, [x1]
; Inst 2: ret
; }}
; block0:
; ld1r { v0.16b }, [x0]
; ld1r { v1.16b }, [x1]
; ret
function %f7(i64, i64) -> i8x16, i8x16 {
block0(v0: i64, v1: i64):
@@ -115,16 +85,11 @@ block0(v0: i64, v1: i64):
return v4, v5
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: ldrb w0, [x0]
; Inst 1: ld1r { v0.16b }, [x1]
; Inst 2: dup v1.16b, w0
; Inst 3: ret
; }}
; block0:
; ldrb w4, [x0]
; ld1r { v0.16b }, [x1]
; dup v1.16b, w4
; ret
function %f8(i64, i64) -> i8x16, i8x16 {
block0(v0: i64, v1: i64):
@@ -134,16 +99,11 @@ block0(v0: i64, v1: i64):
return v3, v4
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: ldrb w0, [x0]
; Inst 1: dup v0.16b, w0
; Inst 2: dup v1.16b, w0
; Inst 3: ret
; }}
; block0:
; ldrb w4, [x0]
; dup v0.16b, w4
; dup v1.16b, w4
; ret
function %f9() -> i32x2 {
block0:
@@ -152,15 +112,10 @@ block0:
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movi v0.2d, #18374687579166474495
; Inst 1: fmov d0, d0
; Inst 2: ret
; }}
; block0:
; movi v0.2d, #18374687579166474495
; fmov d0, d0
; ret
function %f10() -> i32x4 {
block0:
@@ -169,14 +124,9 @@ block0:
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: mvni v0.4s, #15, MSL #16
; Inst 1: ret
; }}
; block0:
; mvni v0.4s, #15, MSL #16
; ret
function %f11() -> f32x4 {
block0:
@@ -185,12 +135,7 @@ block0:
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: fmov v0.4s, #1.3125
; Inst 1: ret
; }}
; block0:
; fmov v0.4s, #1.3125
; ret


@@ -9,16 +9,11 @@ block0:
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: movz x0, #1
; Inst 1: movk x0, #1, LSL #48
; Inst 2: fmov d0, x0
; Inst 3: ret
; }}
; block0:
; movz x2, #1
; movk x2, #1, LSL #48
; fmov d0, x2
; ret
function %f2() -> i32x4 {
block0:
@@ -27,13 +22,8 @@ block0:
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #42679
; Inst 1: fmov s0, w0
; Inst 2: ret
; }}
; block0:
; movz x2, #42679
; fmov s0, w2
; ret


@@ -7,26 +7,16 @@ block0:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
; block0:
; ret
function %stack_limit_leaf_zero(i64 stack_limit) {
block0(v0: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
; block0:
; ret
function %stack_limit_gv_leaf_zero(i64 vmctx) {
gv0 = vmctx
@@ -37,13 +27,8 @@ block0(v0: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
; block0:
; ret
function %stack_limit_call_zero(i64 stack_limit) {
fn0 = %foo()
@@ -52,20 +37,15 @@ block0(v0: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: subs xzr, sp, x0, UXTX
; Inst 3: b.hs 8 ; udf
; Inst 4: ldr x0, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 111, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; Inst 5: blr x0
; Inst 6: ldp fp, lr, [sp], #16
; Inst 7: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; subs xzr, sp, x0, UXTX
; b.hs 8 ; udf
; block0:
; ldr x2, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 111, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x2
; ldp fp, lr, [sp], #16
; ret
function %stack_limit_gv_call_zero(i64 vmctx) {
gv0 = vmctx
@@ -78,22 +58,17 @@ block0(v0: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 10)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: ldur x16, [x0]
; Inst 3: ldur x16, [x16, #4]
; Inst 4: subs xzr, sp, x16, UXTX
; Inst 5: b.hs 8 ; udf
; Inst 6: ldr x0, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 111, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; Inst 7: blr x0
; Inst 8: ldp fp, lr, [sp], #16
; Inst 9: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; ldr x16, [x0]
; ldr x16, [x16, #4]
; subs xzr, sp, x16, UXTX
; b.hs 8 ; udf
; block0:
; ldr x2, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 111, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; blr x2
; ldp fp, lr, [sp], #16
; ret
function %stack_limit(i64 stack_limit) {
ss0 = explicit_slot 168
@@ -101,21 +76,16 @@ block0(v0: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 9)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: add x16, x0, #176
; Inst 3: subs xzr, sp, x16, UXTX
; Inst 4: b.hs 8 ; udf
; Inst 5: sub sp, sp, #176
; Inst 6: add sp, sp, #176
; Inst 7: ldp fp, lr, [sp], #16
; Inst 8: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; add x16, x0, #176
; subs xzr, sp, x16, UXTX
; b.hs 8 ; udf
; sub sp, sp, #176
; block0:
; add sp, sp, #176
; ldp fp, lr, [sp], #16
; ret
function %huge_stack_limit(i64 stack_limit) {
ss0 = explicit_slot 400000
@@ -123,29 +93,24 @@ block0(v0: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 17)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: subs xzr, sp, x0, UXTX
; Inst 3: b.hs 8 ; udf
; Inst 4: movz w17, #6784
; Inst 5: movk w17, #6, LSL #16
; Inst 6: add x16, x0, x17, UXTX
; Inst 7: subs xzr, sp, x16, UXTX
; Inst 8: b.hs 8 ; udf
; Inst 9: movz w16, #6784
; Inst 10: movk w16, #6, LSL #16
; Inst 11: sub sp, sp, x16, UXTX
; Inst 12: movz w16, #6784
; Inst 13: movk w16, #6, LSL #16
; Inst 14: add sp, sp, x16, UXTX
; Inst 15: ldp fp, lr, [sp], #16
; Inst 16: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; subs xzr, sp, x0, UXTX
; b.hs 8 ; udf
; movz w17, #6784
; movk w17, #6, LSL #16
; add x16, x0, x17, UXTX
; subs xzr, sp, x16, UXTX
; b.hs 8 ; udf
; movz w16, #6784
; movk w16, #6, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; movz w16, #6784
; movk w16, #6, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret
function %limit_preamble(i64 vmctx) {
gv0 = vmctx
@@ -157,23 +122,18 @@ block0(v0: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: ldur x16, [x0]
; Inst 3: ldur x16, [x16, #4]
; Inst 4: add x16, x16, #32
; Inst 5: subs xzr, sp, x16, UXTX
; Inst 6: b.hs 8 ; udf
; Inst 7: sub sp, sp, #32
; Inst 8: add sp, sp, #32
; Inst 9: ldp fp, lr, [sp], #16
; Inst 10: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; ldr x16, [x0]
; ldr x16, [x16, #4]
; add x16, x16, #32
; subs xzr, sp, x16, UXTX
; b.hs 8 ; udf
; sub sp, sp, #32
; block0:
; add sp, sp, #32
; ldp fp, lr, [sp], #16
; ret
function %limit_preamble_huge(i64 vmctx) {
gv0 = vmctx
@@ -185,31 +145,26 @@ block0(v0: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 19)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: ldur x16, [x0]
; Inst 3: ldur x16, [x16, #4]
; Inst 4: subs xzr, sp, x16, UXTX
; Inst 5: b.hs 8 ; udf
; Inst 6: movz w17, #6784
; Inst 7: movk w17, #6, LSL #16
; Inst 8: add x16, x16, x17, UXTX
; Inst 9: subs xzr, sp, x16, UXTX
; Inst 10: b.hs 8 ; udf
; Inst 11: movz w16, #6784
; Inst 12: movk w16, #6, LSL #16
; Inst 13: sub sp, sp, x16, UXTX
; Inst 14: movz w16, #6784
; Inst 15: movk w16, #6, LSL #16
; Inst 16: add sp, sp, x16, UXTX
; Inst 17: ldp fp, lr, [sp], #16
; Inst 18: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; ldr x16, [x0]
; ldr x16, [x16, #4]
; subs xzr, sp, x16, UXTX
; b.hs 8 ; udf
; movz w17, #6784
; movk w17, #6, LSL #16
; add x16, x16, x17, UXTX
; subs xzr, sp, x16, UXTX
; b.hs 8 ; udf
; movz w16, #6784
; movk w16, #6, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; movz w16, #6784
; movk w16, #6, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret
function %limit_preamble_huge_offset(i64 vmctx) {
gv0 = vmctx
@@ -220,20 +175,15 @@ block0(v0: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 10)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: movz w16, #6784 ; movk w16, #6, LSL #16 ; add x16, x0, x16, UXTX ; ldr x16, [x16]
; Inst 3: add x16, x16, #32
; Inst 4: subs xzr, sp, x16, UXTX
; Inst 5: b.hs 8 ; udf
; Inst 6: sub sp, sp, #32
; Inst 7: add sp, sp, #32
; Inst 8: ldp fp, lr, [sp], #16
; Inst 9: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz w16, #6784 ; movk w16, #6, LSL #16 ; add x16, x0, x16, UXTX ; ldr x16, [x16]
; add x16, x16, #32
; subs xzr, sp, x16, UXTX
; b.hs 8 ; udf
; sub sp, sp, #32
; block0:
; add sp, sp, #32
; ldp fp, lr, [sp], #16
; ret


@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -10,14 +10,14 @@ block0:
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #16
; nextln: mov x0, sp
; nextln: add sp, sp, #16
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x0, sp
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
function %stack_addr_big() -> i64 {
ss0 = explicit_slot 100000
@@ -28,20 +28,18 @@ block0:
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov x0, sp
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: add sp, sp, x16, UXTX
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; FIXME: don't use stack_addr legalization for stack_load and stack_store
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz w16, #34480
; movk w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x0, sp
; movz w16, #34480
; movk w16, #1, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret
function %stack_load_small() -> i64 {
ss0 = explicit_slot 8
@@ -51,15 +49,15 @@ block0:
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #16
; nextln: mov x0, sp
; nextln: ldr x0, [x0]
; nextln: add sp, sp, #16
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x0, sp
; ldr x0, [x0]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
function %stack_load_big() -> i64 {
ss0 = explicit_slot 100000
@@ -70,19 +68,19 @@ block0:
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov x0, sp
; nextln: ldr x0, [x0]
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: add sp, sp, x16, UXTX
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz w16, #34480
; movk w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x0, sp
; ldr x0, [x0]
; movz w16, #34480
; movk w16, #1, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret
function %stack_store_small(i64) {
ss0 = explicit_slot 8
@@ -92,15 +90,15 @@ block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #16
; nextln: mov x1, sp
; nextln: str x0, [x1]
; nextln: add sp, sp, #16
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x2, sp
; str x0, [x2]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
function %stack_store_big(i64) {
ss0 = explicit_slot 100000
@@ -111,21 +109,20 @@ block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov x1, sp
; nextln: str x0, [x1]
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: add sp, sp, x16, UXTX
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz w16, #34480
; movk w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x2, sp
; str x0, [x2]
; movz w16, #34480
; movk w16, #1, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret
; Force a b1 to be spilled into a slot at an SP offset between 0x100 and
; 0x1fff, to exercise the scaled addressing mode.
function %b1_spill_slot(b1) -> b1, i64 {
ss0 = explicit_slot 1000
@@ -277,7 +274,161 @@ block0(v0: b1):
return v0, v137
}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; stp x27, x28, [sp, #-16]!
; stp x25, x26, [sp, #-16]!
; stp x23, x24, [sp, #-16]!
; stp x21, x22, [sp, #-16]!
; stp x19, x20, [sp, #-16]!
; sub sp, sp, #1152
; block0:
; str x0, [sp, #1000]
; movz x8, #2
; add x11, x8, #1
; str x11, [sp, #1136]
; movz x8, #4
; add x12, x8, #3
; str x12, [sp, #1128]
; movz x8, #6
; add x13, x8, #5
; str x13, [sp, #1120]
; movz x8, #8
; add x14, x8, #7
; str x14, [sp, #1112]
; movz x8, #10
; add x15, x8, #9
; str x15, [sp, #1104]
; movz x8, #12
; add x1, x8, #11
; str x1, [sp, #1096]
; movz x8, #14
; add x2, x8, #13
; str x2, [sp, #1088]
; movz x8, #16
; add x3, x8, #15
; str x3, [sp, #1080]
; movz x8, #18
; add x4, x8, #17
; str x4, [sp, #1072]
; movz x8, #20
; add x5, x8, #19
; str x5, [sp, #1064]
; movz x8, #22
; add x6, x8, #21
; str x6, [sp, #1056]
; movz x8, #24
; add x7, x8, #23
; str x7, [sp, #1048]
; movz x8, #26
; add x8, x8, #25
; str x8, [sp, #1040]
; movz x8, #28
; add x9, x8, #27
; str x9, [sp, #1032]
; movz x8, #30
; add x26, x8, #29
; str x26, [sp, #1024]
; movz x8, #32
; add x27, x8, #31
; str x27, [sp, #1016]
; movz x8, #34
; add x28, x8, #33
; movz x8, #36
; add x21, x8, #35
; str x21, [sp, #1008]
; movz x8, #38
; add x21, x8, #37
; movz x8, #30
; add x19, x8, #39
; movz x8, #32
; add x20, x8, #31
; movz x8, #34
; add x22, x8, #33
; movz x8, #36
; add x23, x8, #35
; movz x8, #38
; add x24, x8, #37
; movz x8, #30
; add x25, x8, #39
; movz x8, #32
; add x0, x8, #31
; movz x8, #34
; add x10, x8, #33
; movz x8, #36
; add x11, x8, #35
; movz x8, #38
; add x12, x8, #37
; movz x8, #30
; add x13, x8, #39
; movz x8, #32
; add x14, x8, #31
; movz x8, #34
; add x15, x8, #33
; movz x8, #36
; add x1, x8, #35
; movz x8, #38
; add x2, x8, #37
; ldr x3, [sp, #1136]
; add x3, x3, #39
; ldr x5, [sp, #1120]
; ldr x4, [sp, #1128]
; add x4, x4, x5
; ldr x5, [sp, #1104]
; ldr x8, [sp, #1112]
; add x5, x8, x5
; ldr x6, [sp, #1088]
; ldr x7, [sp, #1096]
; add x6, x7, x6
; ldr x7, [sp, #1072]
; ldr x8, [sp, #1080]
; add x7, x8, x7
; ldr x9, [sp, #1056]
; ldr x8, [sp, #1064]
; add x8, x8, x9
; ldr x9, [sp, #1040]
; ldr x26, [sp, #1048]
; add x9, x26, x9
; ldr x26, [sp, #1024]
; ldr x27, [sp, #1032]
; add x26, x27, x26
; ldr x27, [sp, #1016]
; add x27, x27, x28
; ldr x28, [sp, #1008]
; add x28, x28, x21
; add x21, x19, x20
; add x19, x22, x23
; add x25, x24, x25
; add x10, x0, x10
; add x11, x11, x12
; add x12, x13, x14
; add x13, x15, x1
; add x14, x2, x3
; add x15, x4, x5
; add x0, x6, x7
; add x9, x8, x9
; add x1, x26, x27
; add x2, x28, x21
; add x3, x19, x25
; add x10, x10, x11
; add x11, x12, x13
; add x12, x14, x15
; add x9, x0, x9
; add x13, x1, x2
; add x10, x3, x10
; add x11, x11, x12
; add x9, x9, x13
; add x10, x10, x11
; add x1, x9, x10
; ldr x0, [sp, #1000]
; add sp, sp, #1152
; ldp x19, x20, [sp], #16
; ldp x21, x22, [sp], #16
; ldp x23, x24, [sp], #16
; ldp x25, x26, [sp], #16
; ldp x27, x28, [sp], #16
; ldp fp, lr, [sp], #16
; ret
function %i128_stack_store(i128) {
ss0 = explicit_slot 16
@@ -286,15 +437,16 @@ block0(v0: i128):
stack_store.i128 v0, ss0
return
}
; TODO: Codegen improvement opportunities: This should be just a stp x0, x1, [sp, #-16]
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #16
; nextln: mov x2, sp
; nextln: stp x0, x1, [x2]
; nextln: add sp, sp, #16
; nextln: ldp fp, lr, [sp], #16
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x4, sp
; stp x0, x1, [x4]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
function %i128_stack_store_inst_offset(i128) {
ss0 = explicit_slot 16
@@ -304,15 +456,16 @@ block0(v0: i128):
stack_store.i128 v0, ss1+16
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #32
; nextln: add x2, sp, #32
; nextln: stp x0, x1, [x2]
; nextln: add sp, sp, #32
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #32
; block0:
; add x4, sp, #32
; stp x0, x1, [x4]
; add sp, sp, #32
; ldp fp, lr, [sp], #16
; ret
function %i128_stack_store_big(i128) {
ss0 = explicit_slot 100000
@@ -322,20 +475,20 @@ block0(v0: i128):
stack_store.i128 v0, ss0
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov x2, sp
; nextln: stp x0, x1, [x2]
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: add sp, sp, x16, UXTX
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz w16, #34480
; movk w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x4, sp
; stp x0, x1, [x4]
; movz w16, #34480
; movk w16, #1, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret
function %i128_stack_load() -> i128 {
ss0 = explicit_slot 16
@@ -344,20 +497,16 @@ block0:
v0 = stack_load.i128 ss0
return v0
}
; TODO: Codegen improvement opportunities: This should be just a ldp x0, x1, [sp, #-16]
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #16
; nextln: mov x0, sp
; nextln: ldp x1, x0, [x0]
; nextln: mov x2, x0
; nextln: mov x0, x1
; nextln: mov x1, x2
; nextln: add sp, sp, #16
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #16
; block0:
; mov x0, sp
; ldp x0, x1, [x0]
; add sp, sp, #16
; ldp fp, lr, [sp], #16
; ret
function %i128_stack_load_inst_offset() -> i128 {
ss0 = explicit_slot 16
@@ -367,19 +516,16 @@ block0:
v0 = stack_load.i128 ss1+16
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #32
; nextln: add x0, sp, #32
; nextln: ldp x1, x0, [x0]
; nextln: mov x2, x0
; nextln: mov x0, x1
; nextln: mov x1, x2
; nextln: add sp, sp, #32
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; sub sp, sp, #32
; block0:
; add x0, sp, #32
; ldp x0, x1, [x0]
; add sp, sp, #32
; ldp fp, lr, [sp], #16
; ret
function %i128_stack_load_big() -> i128 {
ss0 = explicit_slot 100000
@@ -389,18 +535,18 @@ block0:
v0 = stack_load.i128 ss0
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov x0, sp
; nextln: ldp x1, x0, [x0]
; nextln: mov x2, x0
; nextln: mov x0, x1
; nextln: mov x1, x2
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: add sp, sp, x16, UXTX
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; movz w16, #34480
; movk w16, #1, LSL #16
; sub sp, sp, x16, UXTX
; block0:
; mov x0, sp
; ldp x0, x1, [x0]
; movz w16, #34480
; movk w16, #1, LSL #16
; add sp, sp, x16, UXTX
; ldp fp, lr, [sp], #16
; ret


@@ -10,12 +10,7 @@ block0:
return v0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldr x0, 8 ; b 12 ; data TestCase { length: 9, ascii: [109, 121, 95, 103, 108, 111, 98, 97, 108, 0, 0, 0, 0, 0, 0, 0] } + 0
; Inst 1: ret
; }}
; block0:
; ldr x0, 8 ; b 12 ; data TestCase { length: 9, ascii: [109, 121, 95, 103, 108, 111, 98, 97, 108, 0, 0, 0, 0, 0, 0, 0] } + 0
; ret


@@ -10,28 +10,23 @@ block0(v0: i32):
return v0, v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 18)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: str x19, [sp, #-16]!
; Inst 3: stp d14, d15, [sp, #-16]!
; Inst 4: stp d12, d13, [sp, #-16]!
; Inst 5: stp d10, d11, [sp, #-16]!
; Inst 6: stp d8, d9, [sp, #-16]!
; Inst 7: mov x19, x0
; Inst 8: elf_tls_get_addr u1:0
; Inst 9: mov x1, x0
; Inst 10: mov x0, x19
; Inst 11: ldp d8, d9, [sp], #16
; Inst 12: ldp d10, d11, [sp], #16
; Inst 13: ldp d12, d13, [sp], #16
; Inst 14: ldp d14, d15, [sp], #16
; Inst 15: ldr x19, [sp], #16
; Inst 16: ldp fp, lr, [sp], #16
; Inst 17: ret
; }}
; stp fp, lr, [sp, #-16]!
; mov fp, sp
; str x25, [sp, #-16]!
; stp d14, d15, [sp, #-16]!
; stp d12, d13, [sp, #-16]!
; stp d10, d11, [sp, #-16]!
; stp d8, d9, [sp, #-16]!
; block0:
; mov x25, x0
; elf_tls_get_addr u1:0
; mov x1, x0
; mov x0, x25
; ldp d8, d9, [sp], #16
; ldp d10, d11, [sp], #16
; ldp d12, d13, [sp], #16
; ldp d14, d15, [sp], #16
; ldr x25, [sp], #16
; ldp fp, lr, [sp], #16
; ret


@@ -7,13 +7,8 @@ block0:
trap user0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: udf
; }}
; block0:
; udf
function %g(i64) {
block0(v0: i64):
@@ -23,15 +18,10 @@ block0(v0: i64):
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs xzr, x0, #42
; Inst 1: b.ne 8 ; udf
; Inst 2: ret
; }}
; block0:
; subs xzr, x0, #42
; b.ne 8 ; udf
; ret
function %h() {
block0:
@@ -39,12 +29,7 @@ block0:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: brk #0
; Inst 1: ret
; }}
; block0:
; brk #0
; ret


@@ -8,14 +8,9 @@ block0(v0: i8):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxtb w0, w0
; Inst 1: ret
; }}
; block0:
; uxtb w0, w0
; ret
function %f_u_8_32(i8) -> i32 {
block0(v0: i8):
@@ -23,14 +18,9 @@ block0(v0: i8):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxtb w0, w0
; Inst 1: ret
; }}
; block0:
; uxtb w0, w0
; ret
function %f_u_8_16(i8) -> i16 {
block0(v0: i8):
@@ -38,14 +28,9 @@ block0(v0: i8):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxtb w0, w0
; Inst 1: ret
; }}
; block0:
; uxtb w0, w0
; ret
function %f_s_8_64(i8) -> i64 {
block0(v0: i8):
@@ -53,14 +38,9 @@ block0(v0: i8):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtb x0, w0
; Inst 1: ret
; }}
; block0:
; sxtb x0, w0
; ret
function %f_s_8_32(i8) -> i32 {
block0(v0: i8):
@@ -68,14 +48,9 @@ block0(v0: i8):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtb w0, w0
; Inst 1: ret
; }}
; block0:
; sxtb w0, w0
; ret
function %f_s_8_16(i8) -> i16 {
block0(v0: i8):
@@ -83,14 +58,9 @@ block0(v0: i8):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtb w0, w0
; Inst 1: ret
; }}
; block0:
; sxtb w0, w0
; ret
function %f_u_16_64(i16) -> i64 {
block0(v0: i16):
@@ -98,14 +68,9 @@ block0(v0: i16):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxth w0, w0
; Inst 1: ret
; }}
; block0:
; uxth w0, w0
; ret
function %f_u_16_32(i16) -> i32 {
block0(v0: i16):
@@ -113,14 +78,9 @@ block0(v0: i16):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxth w0, w0
; Inst 1: ret
; }}
; block0:
; uxth w0, w0
; ret
function %f_s_16_64(i16) -> i64 {
block0(v0: i16):
@@ -128,14 +88,9 @@ block0(v0: i16):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxth x0, w0
; Inst 1: ret
; }}
; block0:
; sxth x0, w0
; ret
function %f_s_16_32(i16) -> i32 {
block0(v0: i16):
@@ -143,14 +98,9 @@ block0(v0: i16):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxth w0, w0
; Inst 1: ret
; }}
; block0:
; sxth w0, w0
; ret
function %f_u_32_64(i32) -> i64 {
block0(v0: i32):
@@ -158,14 +108,9 @@ block0(v0: i32):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: mov w0, w0
; Inst 1: ret
; }}
; block0:
; mov w0, w0
; ret
function %f_s_32_64(i32) -> i64 {
block0(v0: i32):
@@ -173,12 +118,7 @@ block0(v0: i32):
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtw x0, w0
; Inst 1: ret
; }}
; block0:
; sxtw x0, w0
; ret