Update lots of isa/*/*.clif tests to precise-output (#3677)
* Update lots of `isa/*/*.clif` tests to `precise-output`

  This commit goes through the `aarch64` and `x64` subdirectories and subjectively changes tests from `test compile` to add `precise-output`. This then auto-updates all the test expectations so they can be updated automatically instead of manually in the future. Not all tests were migrated, largely subject to my own whims, mainly looking at whether a test was checking for specific instructions or just the whole assembly output.

* Filter out `;;` comments from test expectations

  It looks like the Cranelift parser picks up all comments, not just those trailing the function, so use a convention where `;;` is used for human-readable comments in test cases and `;`-prefixed comments are the test expectation.
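For illustration, a migrated test ends up looking roughly like this (a minimal sketch assembled from the `iadd` tests in the diff below; the function name and the `;;` note are invented for the example, and the `;`-prefixed block is the kind of expectation the runner now regenerates automatically):

test compile precise-output
set unwind_info=false
target aarch64

function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
    ;; human-readable note for readers of the test; not treated as an expectation
    v2 = iadd v0, v1
    return v2
}

; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x0, x1
; Inst 1: ret
; }}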
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -9,8 +9,14 @@ block0(v0: i64, v1: i32):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: ldr w0, [x0, w1, UXTW]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldr w0, [x0, w1, UXTW]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f2(i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i32):
|
||||
@@ -19,8 +25,14 @@ block0(v0: i64, v1: i32):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: ldr w0, [x0, w1, UXTW]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldr w0, [x0, w1, UXTW]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f3(i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i32):
|
||||
@@ -29,8 +41,14 @@ block0(v0: i64, v1: i32):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: ldr w0, [x0, w1, SXTW]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldr w0, [x0, w1, SXTW]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f4(i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i32):
|
||||
@@ -39,8 +57,14 @@ block0(v0: i64, v1: i32):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: ldr w0, [x0, w1, SXTW]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldr w0, [x0, w1, SXTW]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f5(i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i32):
|
||||
@@ -50,8 +74,14 @@ block0(v0: i64, v1: i32):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: ldr w0, [x0, w1, SXTW]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldr w0, [x0, w1, SXTW]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f6(i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i32):
|
||||
@@ -61,8 +91,14 @@ block0(v0: i64, v1: i32):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: ldr w0, [x0, w1, SXTW]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldr w0, [x0, w1, SXTW]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f7(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -73,9 +109,15 @@ block0(v0: i32, v1: i32):
|
||||
return v5
|
||||
}
|
||||
|
||||
; check: mov w0, w0
|
||||
; nextln: ldr w0, [x0, w1, UXTW]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: mov w0, w0
|
||||
; Inst 1: ldr w0, [x0, w1, UXTW]
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f8(i64, i32) -> i32 {
|
||||
block0(v0: i64, v1: i32):
|
||||
@@ -88,14 +130,17 @@ block0(v0: i64, v1: i32):
|
||||
return v7
|
||||
}
|
||||
|
||||
; v6+4 = 2*v5 = 2*v4 + 2*v0 + 4 = 2*v2 + 2*v3 + 2*v0 + 4
|
||||
; = 2*sextend($x1) + 2*$x0 + 68
|
||||
|
||||
; check: add x2, x0, #68
|
||||
; nextln: add x0, x2, x0
|
||||
; nextln: add x0, x0, x1, SXTW
|
||||
; nextln: ldr w0, [x0, w1, SXTW]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: add x2, x0, #68
|
||||
; Inst 1: add x0, x2, x0
|
||||
; Inst 2: add x0, x0, x1, SXTW
|
||||
; Inst 3: ldr w0, [x0, w1, SXTW]
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %f9(i64, i64, i64) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
@@ -107,12 +152,16 @@ block0(v0: i64, v1: i64, v2: i64):
|
||||
return v7
|
||||
}
|
||||
|
||||
; v6 = $x0 + $x1 + $x2 + 48
|
||||
|
||||
; check: add x0, x0, x2
|
||||
; nextln: add x0, x0, x1
|
||||
; nextln: ldur w0, [x0, #48]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: add x0, x0, x2
|
||||
; Inst 1: add x0, x0, x1
|
||||
; Inst 2: ldur w0, [x0, #48]
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f10(i64, i64, i64) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
@@ -124,13 +173,17 @@ block0(v0: i64, v1: i64, v2: i64):
|
||||
return v7
|
||||
}
|
||||
|
||||
; v6 = $x0 + $x1 + $x2 + 4100
|
||||
|
||||
; check: movz x3, #4100
|
||||
; nextln: add x1, x3, x1
|
||||
; nextln: add x1, x1, x2
|
||||
; nextln: ldr w0, [x1, x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: movz x3, #4100
|
||||
; Inst 1: add x1, x3, x1
|
||||
; Inst 2: add x1, x1, x2
|
||||
; Inst 3: ldr w0, [x1, x0]
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %f10() -> i32 {
|
||||
block0:
|
||||
@@ -139,23 +192,33 @@ block0:
|
||||
return v2
|
||||
}
|
||||
|
||||
; v6 = $x0 + $x1 + $x2 + 48
|
||||
|
||||
; check: movz x0, #1234
|
||||
; nextln: ldr w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: movz x0, #1234
|
||||
; Inst 1: ldr w0, [x0]
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f11(i64) -> i32 {
|
||||
block0(v0: i64):
|
||||
v1 = iconst.i64 8388608 ; Imm12: 0x800 << 12
|
||||
v1 = iconst.i64 8388608 ;; Imm12: 0x800 << 12
|
||||
v2 = iadd.i64 v0, v1
|
||||
v3 = load.i32 v2
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: add x0, x0, #8388608
|
||||
; nextln: ldr w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: add x0, x0, #8388608
|
||||
; Inst 1: ldr w0, [x0]
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f12(i64) -> i32 {
|
||||
block0(v0: i64):
|
||||
@@ -165,9 +228,15 @@ block0(v0: i64):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sub x0, x0, #4
|
||||
; nextln: ldr w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: sub x0, x0, #4
|
||||
; Inst 1: ldr w0, [x0]
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f13(i64) -> i32 {
|
||||
block0(v0: i64):
|
||||
@@ -177,11 +246,17 @@ block0(v0: i64):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: movz w1, #51712
|
||||
; nextln: movk w1, #15258, LSL #16
|
||||
; nextln: add x0, x1, x0
|
||||
; nextln: ldr w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: movz w1, #51712
|
||||
; Inst 1: movk w1, #15258, LSL #16
|
||||
; Inst 2: add x0, x1, x0
|
||||
; Inst 3: ldr w0, [x0]
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %f14(i32) -> i32 {
|
||||
block0(v0: i32):
|
||||
@@ -190,9 +265,15 @@ block0(v0: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: sxtw x0, w0
|
||||
; nextln: ldr w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: sxtw x0, w0
|
||||
; Inst 1: ldr w0, [x0]
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f15(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -203,9 +284,15 @@ block0(v0: i32, v1: i32):
|
||||
return v5
|
||||
}
|
||||
|
||||
; check: sxtw x0, w0
|
||||
; nextln: ldr w0, [x0, w1, SXTW]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: sxtw x0, w0
|
||||
; Inst 1: ldr w0, [x0, w1, SXTW]
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f16(i64) -> i32 {
|
||||
block0(v0: i64):
|
||||
@@ -215,8 +302,14 @@ block0(v0: i64):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: ldr w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldr w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f17(i64) -> i32 {
|
||||
block0(v0: i64):
|
||||
@@ -226,8 +319,14 @@ block0(v0: i64):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: ldur w0, [x0, #4]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldur w0, [x0, #4]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f18(i64, i32) -> i16x8 {
|
||||
block0(v0: i64, v1: i32):
|
||||
@@ -236,9 +335,15 @@ block0(v0: i64, v1: i32):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: ldr d0, [x0, w1, UXTW]
|
||||
; nextln: sxtl v0.8h, v0.8b
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: ldr d0, [x0, w1, UXTW]
|
||||
; Inst 1: sxtl v0.8h, v0.8b
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f19(i64, i64) -> i32x4 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -246,10 +351,16 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: add x0, x0, x1
|
||||
; nextln: ldr d0, [x0, #8]
|
||||
; nextln: uxtl v0.4s, v0.4h
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: add x0, x0, x1
|
||||
; Inst 1: ldr d0, [x0, #8]
|
||||
; Inst 2: uxtl v0.4s, v0.4h
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f20(i64, i32) -> i64x2 {
|
||||
block0(v0: i64, v1: i32):
|
||||
@@ -258,9 +369,15 @@ block0(v0: i64, v1: i32):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: ldr d0, [x0, w1, SXTW]
|
||||
; nextln: uxtl v0.2d, v0.2s
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: ldr d0, [x0, w1, SXTW]
|
||||
; Inst 1: uxtl v0.2d, v0.2s
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f18(i64, i64, i64) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
@@ -270,9 +387,15 @@ block0(v0: i64, v1: i64, v2: i64):
|
||||
return v5
|
||||
}
|
||||
|
||||
; check: movn w0, #4097
|
||||
; nextln: ldrsh x0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: movn w0, #4097
|
||||
; Inst 1: ldrsh x0, [x0]
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f19(i64, i64, i64) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
@@ -282,9 +405,15 @@ block0(v0: i64, v1: i64, v2: i64):
|
||||
return v5
|
||||
}
|
||||
|
||||
; check: movz x0, #4098
|
||||
; nextln: ldrsh x0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: movz x0, #4098
|
||||
; Inst 1: ldrsh x0, [x0]
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f20(i64, i64, i64) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
@@ -294,10 +423,16 @@ block0(v0: i64, v1: i64, v2: i64):
|
||||
return v5
|
||||
}
|
||||
|
||||
; check: movn w0, #4097
|
||||
; nextln: sxtw x0, w0
|
||||
; nextln: ldrsh x0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: movn w0, #4097
|
||||
; Inst 1: sxtw x0, w0
|
||||
; Inst 2: ldrsh x0, [x0]
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f21(i64, i64, i64) -> i32 {
|
||||
block0(v0: i64, v1: i64, v2: i64):
|
||||
@@ -307,11 +442,16 @@ block0(v0: i64, v1: i64, v2: i64):
|
||||
return v5
|
||||
}
|
||||
|
||||
; check: movz x0, #4098
|
||||
; nextln: sxtw x0, w0
|
||||
; nextln: ldrsh x0, [x0]
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: movz x0, #4098
|
||||
; Inst 1: sxtw x0, w0
|
||||
; Inst 2: ldrsh x0, [x0]
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %i128(i64) -> i128 {
|
||||
block0(v0: i64):
|
||||
@@ -320,12 +460,17 @@ block0(v0: i64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: mov x1, x0
|
||||
; nextln: ldp x2, x1, [x1]
|
||||
; nextln: stp x2, x1, [x0]
|
||||
; nextln: mov x0, x2
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: mov x1, x0
|
||||
; Inst 1: ldp x2, x1, [x1]
|
||||
; Inst 2: stp x2, x1, [x0]
|
||||
; Inst 3: mov x0, x2
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %i128_imm_offset(i64) -> i128 {
|
||||
block0(v0: i64):
|
||||
@@ -334,11 +479,17 @@ block0(v0: i64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: mov x1, x0
|
||||
; nextln: ldp x2, x1, [x1, #16]
|
||||
; nextln: stp x2, x1, [x0, #16]
|
||||
; nextln: mov x0, x2
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: mov x1, x0
|
||||
; Inst 1: ldp x2, x1, [x1, #16]
|
||||
; Inst 2: stp x2, x1, [x0, #16]
|
||||
; Inst 3: mov x0, x2
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %i128_imm_offset_large(i64) -> i128 {
|
||||
block0(v0: i64):
|
||||
@@ -347,11 +498,17 @@ block0(v0: i64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: mov x1, x0
|
||||
; nextln: ldp x2, x1, [x1, #504]
|
||||
; nextln: stp x2, x1, [x0, #504]
|
||||
; nextln: mov x0, x2
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: mov x1, x0
|
||||
; Inst 1: ldp x2, x1, [x1, #504]
|
||||
; Inst 2: stp x2, x1, [x0, #504]
|
||||
; Inst 3: mov x0, x2
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %i128_imm_offset_negative_large(i64) -> i128 {
|
||||
block0(v0: i64):
|
||||
@@ -360,12 +517,17 @@ block0(v0: i64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: mov x1, x0
|
||||
; nextln: ldp x2, x1, [x1, #-512]
|
||||
; nextln: stp x2, x1, [x0, #-512]
|
||||
; nextln: mov x0, x2
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: mov x1, x0
|
||||
; Inst 1: ldp x2, x1, [x1, #-512]
|
||||
; Inst 2: stp x2, x1, [x0, #-512]
|
||||
; Inst 3: mov x0, x2
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %i128_add_offset(i64) -> i128 {
|
||||
block0(v0: i64):
|
||||
@@ -375,12 +537,17 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: mov x1, x0
|
||||
; nextln: ldp x2, x1, [x1, #32]
|
||||
; nextln: stp x2, x1, [x0, #32]
|
||||
; nextln: mov x0, x2
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: mov x1, x0
|
||||
; Inst 1: ldp x2, x1, [x1, #32]
|
||||
; Inst 2: stp x2, x1, [x0, #32]
|
||||
; Inst 3: mov x0, x2
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %i128_32bit_sextend_simple(i32) -> i128 {
|
||||
block0(v0: i32):
|
||||
@@ -390,14 +557,18 @@ block0(v0: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; TODO: We should be able to deduplicate the sxtw instruction
|
||||
; check: sxtw x1, w0
|
||||
; nextln: ldp x2, x1, [x1]
|
||||
; nextln: sxtw x0, w0
|
||||
; nextln: stp x2, x1, [x0]
|
||||
; nextln: mov x0, x2
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: sxtw x1, w0
|
||||
; Inst 1: ldp x2, x1, [x1]
|
||||
; Inst 2: sxtw x0, w0
|
||||
; Inst 3: stp x2, x1, [x0]
|
||||
; Inst 4: mov x0, x2
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %i128_32bit_sextend(i64, i32) -> i128 {
|
||||
block0(v0: i64, v1: i32):
|
||||
@@ -409,11 +580,18 @@ block0(v0: i64, v1: i32):
|
||||
return v5
|
||||
}
|
||||
|
||||
; check: mov x2, x0
|
||||
; nextln: add x2, x2, x1, SXTW
|
||||
; nextln: ldp x3, x2, [x2, #24]
|
||||
; nextln: add x0, x0, x1, SXTW
|
||||
; nextln: stp x3, x2, [x0, #24]
|
||||
; nextln: mov x0, x3
|
||||
; nextln: mov x1, x2
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 8)
|
||||
; Inst 0: mov x2, x0
|
||||
; Inst 1: add x2, x2, x1, SXTW
|
||||
; Inst 2: ldp x3, x2, [x2, #24]
|
||||
; Inst 3: add x0, x0, x1, SXTW
|
||||
; Inst 4: stp x3, x2, [x0, #24]
|
||||
; Inst 5: mov x0, x3
|
||||
; Inst 6: mov x1, x2
|
||||
; Inst 7: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -8,9 +8,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: add x0, x0, x1
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: add x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f2(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -18,8 +23,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: sub x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: sub x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f3(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -27,8 +38,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: madd x0, x0, x1, xzr
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: madd x0, x0, x1, xzr
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f4(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -36,8 +53,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: umulh x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umulh x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f5(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -45,8 +68,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: smulh x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smulh x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f6(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -54,12 +83,18 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: cbnz x1, 8 ; udf
|
||||
; nextln: adds xzr, x1, #1
|
||||
; nextln: ccmp x0, #1, #nzcv, eq
|
||||
; nextln: b.vc 8 ; udf
|
||||
; nextln: sdiv x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: cbnz x1, 8 ; udf
|
||||
; Inst 1: adds xzr, x1, #1
|
||||
; Inst 2: ccmp x0, #1, #nzcv, eq
|
||||
; Inst 3: b.vc 8 ; udf
|
||||
; Inst 4: sdiv x0, x0, x1
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %f7(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -68,9 +103,15 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: orr x1, xzr, #2
|
||||
; nextln: sdiv x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: orr x1, xzr, #2
|
||||
; Inst 1: sdiv x0, x0, x1
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f8(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -78,9 +119,15 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: cbnz x1, 8 ; udf
|
||||
; nextln: udiv x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: cbnz x1, 8 ; udf
|
||||
; Inst 1: udiv x0, x0, x1
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f9(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -89,9 +136,15 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: orr x1, xzr, #2
|
||||
; nextln: udiv x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: orr x1, xzr, #2
|
||||
; Inst 1: udiv x0, x0, x1
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f10(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -99,10 +152,16 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: cbnz x1, 8 ; udf
|
||||
; nextln: sdiv x2, x0, x1
|
||||
; nextln: msub x0, x2, x1, x0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: cbnz x1, 8 ; udf
|
||||
; Inst 1: sdiv x2, x0, x1
|
||||
; Inst 2: msub x0, x2, x1, x0
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f11(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -110,11 +169,16 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: cbnz x1, 8 ; udf
|
||||
; nextln: udiv x2, x0, x1
|
||||
; nextln: msub x0, x2, x1, x0
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: cbnz x1, 8 ; udf
|
||||
; Inst 1: udiv x2, x0, x1
|
||||
; Inst 2: msub x0, x2, x1, x0
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f12(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -122,14 +186,20 @@ block0(v0: i32, v1: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: sxtw x0, w0
|
||||
; nextln: sxtw x1, w1
|
||||
; nextln: cbnz x1, 8 ; udf
|
||||
; nextln: adds wzr, w1, #1
|
||||
; nextln: ccmp w0, #1, #nzcv, eq
|
||||
; nextln: b.vc 8 ; udf
|
||||
; nextln: sdiv x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 8)
|
||||
; Inst 0: sxtw x0, w0
|
||||
; Inst 1: sxtw x1, w1
|
||||
; Inst 2: cbnz x1, 8 ; udf
|
||||
; Inst 3: adds wzr, w1, #1
|
||||
; Inst 4: ccmp w0, #1, #nzcv, eq
|
||||
; Inst 5: b.vc 8 ; udf
|
||||
; Inst 6: sdiv x0, x0, x1
|
||||
; Inst 7: ret
|
||||
; }}
|
||||
|
||||
function %f13(i32) -> i32 {
|
||||
block0(v0: i32):
|
||||
@@ -138,10 +208,16 @@ block0(v0: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: sxtw x0, w0
|
||||
; nextln: orr x1, xzr, #2
|
||||
; nextln: sdiv x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: sxtw x0, w0
|
||||
; Inst 1: orr x1, xzr, #2
|
||||
; Inst 2: sdiv x0, x0, x1
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f14(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -149,12 +225,17 @@ block0(v0: i32, v1: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: mov w0, w0
|
||||
; nextln: mov w1, w1
|
||||
; nextln: cbnz x1, 8 ; udf
|
||||
; nextln: udiv x0, x0, x1
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: mov w0, w0
|
||||
; Inst 1: mov w1, w1
|
||||
; Inst 2: cbnz x1, 8 ; udf
|
||||
; Inst 3: udiv x0, x0, x1
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %f15(i32) -> i32 {
|
||||
block0(v0: i32):
|
||||
@@ -163,10 +244,16 @@ block0(v0: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: mov w0, w0
|
||||
; nextln: orr x1, xzr, #2
|
||||
; nextln: udiv x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: mov w0, w0
|
||||
; Inst 1: orr x1, xzr, #2
|
||||
; Inst 2: udiv x0, x0, x1
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f16(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -174,12 +261,18 @@ block0(v0: i32, v1: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: sxtw x0, w0
|
||||
; nextln: sxtw x1, w1
|
||||
; nextln: cbnz x1, 8 ; udf
|
||||
; nextln: sdiv x2, x0, x1
|
||||
; nextln: msub x0, x2, x1, x0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: sxtw x0, w0
|
||||
; Inst 1: sxtw x1, w1
|
||||
; Inst 2: cbnz x1, 8 ; udf
|
||||
; Inst 3: sdiv x2, x0, x1
|
||||
; Inst 4: msub x0, x2, x1, x0
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %f17(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -187,12 +280,18 @@ block0(v0: i32, v1: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: mov w0, w0
|
||||
; nextln: mov w1, w1
|
||||
; nextln: cbnz x1, 8 ; udf
|
||||
; nextln: udiv x2, x0, x1
|
||||
; nextln: msub x0, x2, x1, x0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: mov w0, w0
|
||||
; Inst 1: mov w1, w1
|
||||
; Inst 2: cbnz x1, 8 ; udf
|
||||
; Inst 3: udiv x2, x0, x1
|
||||
; Inst 4: msub x0, x2, x1, x0
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %f18(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -200,8 +299,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: and x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: and x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f19(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -209,8 +314,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: orr x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: orr x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f20(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -218,8 +329,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: eor x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: eor x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f21(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -227,8 +344,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: bic x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: bic x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f22(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -236,8 +359,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: orn x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: orn x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f23(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -245,8 +374,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: eon x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: eon x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f24(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -254,8 +389,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: orn x0, xzr, x0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: orn x0, xzr, x0
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f25(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -265,8 +406,14 @@ block0(v0: i32, v1: i32):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: sub w0, w1, w0, LSL 21
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: sub w0, w1, w0, LSL 21
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f26(i32) -> i32 {
|
||||
block0(v0: i32):
|
||||
@@ -275,8 +422,14 @@ block0(v0: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: sub w0, w0, #1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: sub w0, w0, #1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f27(i32) -> i32 {
|
||||
block0(v0: i32):
|
||||
@@ -285,8 +438,14 @@ block0(v0: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: add w0, w0, #1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: add w0, w0, #1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f28(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -295,8 +454,14 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: add x0, x0, #1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: add x0, x0, #1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f29(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -305,9 +470,15 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: movz x0, #1
|
||||
; nextln: sub x0, xzr, x0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: movz x0, #1
|
||||
; Inst 1: sub x0, xzr, x0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f30(i8x16) -> i8x16 {
|
||||
block0(v0: i8x16):
|
||||
@@ -316,12 +487,17 @@ block0(v0: i8x16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: movz x0, #1
|
||||
; nextln: sub w0, wzr, w0
|
||||
; nextln: dup v1.16b, w0
|
||||
; nextln: ushl v0.16b, v0.16b, v1.16b
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: movz x0, #1
|
||||
; Inst 1: sub w0, wzr, w0
|
||||
; Inst 2: dup v1.16b, w0
|
||||
; Inst 3: ushl v0.16b, v0.16b, v1.16b
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %add_i128(i128, i128) -> i128 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -329,9 +505,15 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: adds x0, x0, x2
|
||||
; nextln: adc x1, x1, x3
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: adds x0, x0, x2
|
||||
; Inst 1: adc x1, x1, x3
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %sub_i128(i128, i128) -> i128 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -339,9 +521,15 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: subs x0, x0, x2
|
||||
; nextln: sbc x1, x1, x3
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: subs x0, x0, x2
|
||||
; Inst 1: sbc x1, x1, x3
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %mul_i128(i128, i128) -> i128 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -349,11 +537,17 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: umulh x4, x0, x2
|
||||
; nextln: madd x3, x0, x3, x4
|
||||
; nextln: madd x1, x1, x2, x3
|
||||
; nextln: madd x0, x0, x2, xzr
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: umulh x4, x0, x2
|
||||
; Inst 1: madd x3, x0, x3, x4
|
||||
; Inst 2: madd x1, x1, x2, x3
|
||||
; Inst 3: madd x0, x0, x2, xzr
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %add_mul_1(i32, i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32, v2: i32):
|
||||
@@ -362,8 +556,14 @@ block0(v0: i32, v1: i32, v2: i32):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: madd w0, w1, w2, w0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: madd w0, w1, w2, w0
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %add_mul_2(i32, i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32, v2: i32):
|
||||
@@ -372,8 +572,14 @@ block0(v0: i32, v1: i32, v2: i32):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: madd w0, w1, w2, w0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: madd w0, w1, w2, w0
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %srem_const (i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -382,10 +588,16 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: orr x1, xzr, #2
|
||||
; nextln: sdiv x2, x0, x1
|
||||
; nextln: msub x0, x2, x1, x0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: orr x1, xzr, #2
|
||||
; Inst 1: sdiv x2, x0, x1
|
||||
; Inst 2: msub x0, x2, x1, x0
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %urem_const (i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -394,10 +606,16 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: orr x1, xzr, #2
|
||||
; nextln: udiv x2, x0, x1
|
||||
; nextln: msub x0, x2, x1, x0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: orr x1, xzr, #2
|
||||
; Inst 1: udiv x2, x0, x1
|
||||
; Inst 2: msub x0, x2, x1, x0
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %sdiv_minus_one(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -406,9 +624,16 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: movn x1, #0
|
||||
; nextln: adds xzr, x1, #1
|
||||
; nextln: ccmp x0, #1, #nzcv, eq
|
||||
; nextln: b.vc 8 ; udf
|
||||
; nextln: sdiv x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: movn x1, #0
|
||||
; Inst 1: adds xzr, x1, #1
|
||||
; Inst 2: ccmp x0, #1, #nzcv, eq
|
||||
; Inst 3: b.vc 8 ; udf
|
||||
; Inst 4: sdiv x0, x0, x1
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
target aarch64 has_lse
|
||||
|
||||
function %atomic_rmw_add_i64(i64, i64) {
|
||||
@@ -6,109 +6,238 @@ block0(v0: i64, v1: i64):
|
||||
v2 = atomic_rmw.i64 add v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldaddal x1, x0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldaddal x1, x0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_add_i32(i32, i32) {
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = atomic_rmw.i32 add v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldaddal w1, w0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldaddal w1, w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_and_i64(i64, i64) {
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = atomic_rmw.i64 and v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldclral x1, x0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldclral x1, x0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_and_i32(i32, i32) {
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = atomic_rmw.i32 and v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldclral w1, w0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldclral w1, w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_or_i64(i64, i64) {
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = atomic_rmw.i64 or v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldsetal x1, x0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldsetal x1, x0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_or_i32(i32, i32) {
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = atomic_rmw.i32 or v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldsetal w1, w0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldsetal w1, w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_xor_i64(i64, i64) {
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = atomic_rmw.i64 xor v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldeoral x1, x0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldeoral x1, x0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_xor_i32(i32, i32) {
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = atomic_rmw.i32 xor v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldeoral w1, w0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldeoral w1, w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_smax_i64(i64, i64) {
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = atomic_rmw.i64 smax v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldsmaxal x1, x0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldsmaxal x1, x0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_smax_i32(i32, i32) {
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = atomic_rmw.i32 smax v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldsmaxal w1, w0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldsmaxal w1, w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_umax_i64(i64, i64) {
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = atomic_rmw.i64 umax v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldumaxal x1, x0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldumaxal x1, x0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_umax_i32(i32, i32) {
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = atomic_rmw.i32 umax v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldumaxal w1, w0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldumaxal w1, w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_smin_i64(i64, i64) {
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = atomic_rmw.i64 smin v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldsminal x1, x0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldsminal x1, x0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_smin_i32(i32, i32) {
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = atomic_rmw.i32 smin v0, v1
|
||||
return
|
||||
}
|
||||
; check: ldsminal w1, w0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldsminal w1, w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_umin_i64(i64, i64) {
|
||||
block0(v0: i64, v1: i64):
|
||||
v2 = atomic_rmw.i64 umin v0, v1
|
||||
return
|
||||
}
|
||||
; check: lduminal x1, x0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: lduminal x1, x0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_rmw_umin_i32(i32, i32) {
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = atomic_rmw.i32 umin v0, v1
|
||||
return
|
||||
}
|
||||
; check: lduminal w1, w0, [x0]
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: lduminal w1, w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
target aarch64
|
||||
|
||||
function %atomic_load_i64(i64) -> i64 {
|
||||
@@ -7,8 +7,14 @@ block0(v0: i64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: ldar x0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldar x0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_load_i32(i64) -> i32 {
|
||||
block0(v0: i64):
|
||||
@@ -16,8 +22,14 @@ block0(v0: i64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: ldar w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldar w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_load_i16(i64) -> i16 {
|
||||
block0(v0: i64):
|
||||
@@ -25,8 +37,14 @@ block0(v0: i64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: ldarh w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldarh w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_load_i8(i64) -> i8 {
|
||||
block0(v0: i64):
|
||||
@@ -34,8 +52,14 @@ block0(v0: i64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: ldarb w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldarb w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_load_i32_i64(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -44,8 +68,14 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: ldar w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldar w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_load_i16_i64(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -54,8 +84,14 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: ldarh w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldarh w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_load_i8_i64(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -64,8 +100,14 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: ldarb w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldarb w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_load_i16_i32(i64) -> i32 {
|
||||
block0(v0: i64):
|
||||
@@ -74,8 +116,14 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: ldarh w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldarh w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_load_i8_i32(i64) -> i32 {
|
||||
block0(v0: i64):
|
||||
@@ -84,5 +132,12 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: ldarb w0, [x0]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldarb w0, [x0]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
target aarch64
|
||||
|
||||
function %atomic_store_i64(i64, i64) {
|
||||
@@ -7,8 +7,14 @@ block0(v0: i64, v1: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: stlr x0, [x1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: stlr x0, [x1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_store_i32(i32, i64) {
|
||||
block0(v0: i32, v1: i64):
|
||||
@@ -16,8 +22,14 @@ block0(v0: i32, v1: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: stlr w0, [x1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: stlr w0, [x1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_store_i16(i16, i64) {
|
||||
block0(v0: i16, v1: i64):
|
||||
@@ -25,8 +37,14 @@ block0(v0: i16, v1: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: stlrh w0, [x1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: stlrh w0, [x1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_store_i8(i8, i64) {
|
||||
block0(v0: i8, v1: i64):
|
||||
@@ -34,8 +52,14 @@ block0(v0: i8, v1: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: stlrb w0, [x1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: stlrb w0, [x1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_store_i64_i32(i64, i64) {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -44,9 +68,14 @@ block0(v0: i64, v1: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check-not: uxt
|
||||
; check: stlr w0, [x1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: stlr w0, [x1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_store_i64_i16(i64, i64) {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -55,9 +84,14 @@ block0(v0: i64, v1: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check-not: uxt
|
||||
; check: stlrh w0, [x1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: stlrh w0, [x1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_store_i64_i8(i64, i64) {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -66,9 +100,14 @@ block0(v0: i64, v1: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check-not: uxt
|
||||
; check: stlrb w0, [x1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: stlrb w0, [x1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_store_i32_i16(i32, i64) {
|
||||
block0(v0: i32, v1: i64):
|
||||
@@ -77,9 +116,14 @@ block0(v0: i32, v1: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check-not: uxt
|
||||
; check: stlrh w0, [x1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: stlrh w0, [x1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %atomic_store_i32_i8(i32, i64) {
|
||||
block0(v0: i32, v1: i64):
|
||||
@@ -88,6 +132,12 @@ block0(v0: i32, v1: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check-not: uxt
|
||||
; check: stlrb w0, [x1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: stlrb w0, [x1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,11 +1,19 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
function %f(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
v2 = iadd v0, v1
|
||||
; check: add w0, w0, w1
|
||||
return v2
|
||||
; check: ret
|
||||
}
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: add w0, w0, w1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -9,8 +9,15 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: stp fp, lr, [sp, #-16]!
|
||||
; nextln: mov fp, sp
|
||||
; nextln: blr x1
|
||||
; nextln: ldp fp, lr, [sp], #16
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: stp fp, lr, [sp, #-16]!
|
||||
; Inst 1: mov fp, sp
|
||||
; Inst 2: blr x1
|
||||
; Inst 3: ldp fp, lr, [sp], #16
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -8,9 +8,15 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x1
|
||||
; nextln: cset x0, eq
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: subs xzr, x0, x1
|
||||
; Inst 1: cset x0, eq
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %icmp_eq_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -18,12 +24,17 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: eor x0, x0, x2
|
||||
; nextln: eor x1, x1, x3
|
||||
; nextln: adds xzr, x0, x1
|
||||
; nextln: cset x0, eq
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: eor x0, x0, x2
|
||||
; Inst 1: eor x1, x1, x3
|
||||
; Inst 2: adds xzr, x0, x1
|
||||
; Inst 3: cset x0, eq
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %icmp_ne_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -31,12 +42,17 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: eor x0, x0, x2
|
||||
; nextln: eor x1, x1, x3
|
||||
; nextln: adds xzr, x0, x1
|
||||
; nextln: cset x0, ne
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: eor x0, x0, x2
|
||||
; Inst 1: eor x1, x1, x3
|
||||
; Inst 2: adds xzr, x0, x1
|
||||
; Inst 3: cset x0, ne
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %icmp_slt_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -44,13 +60,18 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, lo
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, lt
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, lo
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, lt
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %icmp_ult_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -58,12 +79,18 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, lo
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, lo
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, lo
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, lo
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %icmp_sle_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -71,12 +98,18 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, ls
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, le
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, ls
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, le
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %icmp_ule_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -84,12 +117,18 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, ls
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, ls
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, ls
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, ls
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %icmp_sgt_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -97,12 +136,18 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, hi
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, gt
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, hi
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, gt
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %icmp_ugt_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -110,13 +155,18 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, hi
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, hi
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, hi
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, hi
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %icmp_sge_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -124,12 +174,18 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, hs
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, ge
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, hs
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, ge
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %icmp_uge_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -137,12 +193,18 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, hs
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, hs
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 6)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, hs
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, hs
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %icmp_of_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -150,10 +212,16 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: adds xzr, x0, x2
|
||||
; nextln: adcs xzr, x1, x3
|
||||
; nextln: cset x0, vs
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: adds xzr, x0, x2
|
||||
; Inst 1: adcs xzr, x1, x3
|
||||
; Inst 2: cset x0, vs
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %icmp_nof_i128(i128, i128) -> b1 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -161,11 +229,16 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: adds xzr, x0, x2
|
||||
; nextln: adcs xzr, x1, x3
|
||||
; nextln: cset x0, vc
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: adds xzr, x0, x2
|
||||
; Inst 1: adcs xzr, x1, x3
|
||||
; Inst 2: cset x0, vc
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -182,15 +255,26 @@ block2:
|
||||
return v5
|
||||
}
|
||||
|
||||
; check: Block 0:
|
||||
; check: subs xzr, x0, x1
|
||||
; nextln: b.eq label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: movz x0, #1
|
||||
; nextln: ret
|
||||
; check: Block 2:
|
||||
; check: movz x0, #2
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: subs xzr, x0, x1
|
||||
; Inst 1: b.eq label1 ; b label2
|
||||
; Block 1:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 2 .. 4)
|
||||
; Inst 2: movz x0, #1
|
||||
; Inst 3: ret
|
||||
; Block 2:
|
||||
; (original IR block: block2)
|
||||
; (instruction range: 4 .. 6)
|
||||
; Inst 4: movz x0, #2
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %f(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -203,11 +287,29 @@ block1:
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x1
|
||||
; check: Block 1:
|
||||
; check: movz x0, #1
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: subs xzr, x0, x1
|
||||
; Inst 1: b.eq label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 2 .. 3)
|
||||
; Inst 2: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 3 .. 4)
|
||||
; Inst 3: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 4 .. 6)
|
||||
; Inst 4: movz x0, #1
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %i128_brz(i128){
|
||||
block0(v0: i128):
|
||||
@@ -219,15 +321,28 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: orr x0, x0, x1
|
||||
; nextln: cbz x0, label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: orr x0, x0, x1
|
||||
; Inst 1: cbz x0, label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 2 .. 3)
|
||||
; Inst 2: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 3 .. 4)
|
||||
; Inst 3: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 4 .. 5)
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %i128_brnz(i128){
|
||||
block0(v0: i128):
|
||||
@@ -239,16 +354,28 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: orr x0, x0, x1
|
||||
; nextln: cbnz x0, label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: orr x0, x0, x1
|
||||
; Inst 1: cbnz x0, label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 2 .. 3)
|
||||
; Inst 2: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 3 .. 4)
|
||||
; Inst 3: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 4 .. 5)
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_eq(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -259,17 +386,30 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: eor x0, x0, x2
|
||||
; nextln: eor x1, x1, x3
|
||||
; nextln: adds xzr, x0, x1
|
||||
; nextln: b.eq label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: eor x0, x0, x2
|
||||
; Inst 1: eor x1, x1, x3
|
||||
; Inst 2: adds xzr, x0, x1
|
||||
; Inst 3: b.eq label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 4 .. 5)
|
||||
; Inst 4: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 5 .. 6)
|
||||
; Inst 5: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 6 .. 7)
|
||||
; Inst 6: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_ne(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -280,17 +420,30 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: eor x0, x0, x2
|
||||
; nextln: eor x1, x1, x3
|
||||
; nextln: adds xzr, x0, x1
|
||||
; nextln: b.ne label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: eor x0, x0, x2
|
||||
; Inst 1: eor x1, x1, x3
|
||||
; Inst 2: adds xzr, x0, x1
|
||||
; Inst 3: b.ne label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 4 .. 5)
|
||||
; Inst 4: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 5 .. 6)
|
||||
; Inst 5: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 6 .. 7)
|
||||
; Inst 6: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_slt(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -301,20 +454,33 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, lo
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, lt
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: subs xzr, xzr, x0
|
||||
; nextln: b.lt label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 7)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, lo
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, lt
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: subs xzr, xzr, x0
|
||||
; Inst 6: b.lt label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 7 .. 8)
|
||||
; Inst 7: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 8 .. 9)
|
||||
; Inst 8: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 9 .. 10)
|
||||
; Inst 9: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_ult(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -325,19 +491,33 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, lo
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, lo
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: subs xzr, xzr, x0
|
||||
; nextln: b.lo label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 7)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, lo
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, lo
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: subs xzr, xzr, x0
|
||||
; Inst 6: b.lo label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 7 .. 8)
|
||||
; Inst 7: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 8 .. 9)
|
||||
; Inst 8: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 9 .. 10)
|
||||
; Inst 9: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_sle(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -348,20 +528,34 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, ls
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, le
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: movz x1, #1
|
||||
; nextln: subs xzr, x1, x0
|
||||
; nextln: b.le label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 8)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, ls
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, le
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: movz x1, #1
|
||||
; Inst 6: subs xzr, x1, x0
|
||||
; Inst 7: b.le label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 8 .. 9)
|
||||
; Inst 8: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 9 .. 10)
|
||||
; Inst 9: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 10 .. 11)
|
||||
; Inst 10: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_ule(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -372,20 +566,34 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, ls
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, ls
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: movz x1, #1
|
||||
; nextln: subs xzr, x1, x0
|
||||
; nextln: b.ls label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 8)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, ls
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, ls
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: movz x1, #1
|
||||
; Inst 6: subs xzr, x1, x0
|
||||
; Inst 7: b.ls label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 8 .. 9)
|
||||
; Inst 8: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 9 .. 10)
|
||||
; Inst 9: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 10 .. 11)
|
||||
; Inst 10: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_sgt(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -396,19 +604,33 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, hi
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, gt
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: subs xzr, x0, xzr
|
||||
; nextln: b.gt label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 7)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, hi
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, gt
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: subs xzr, x0, xzr
|
||||
; Inst 6: b.gt label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 7 .. 8)
|
||||
; Inst 7: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 8 .. 9)
|
||||
; Inst 8: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 9 .. 10)
|
||||
; Inst 9: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_ugt(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -419,20 +641,33 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, hi
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, hi
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: subs xzr, x0, xzr
|
||||
; nextln: b.hi label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 7)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, hi
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, hi
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: subs xzr, x0, xzr
|
||||
; Inst 6: b.hi label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 7 .. 8)
|
||||
; Inst 7: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 8 .. 9)
|
||||
; Inst 8: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 9 .. 10)
|
||||
; Inst 9: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_sge(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -443,20 +678,34 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, hs
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, ge
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: movz x1, #1
|
||||
; nextln: subs xzr, x0, x1
|
||||
; nextln: b.ge label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 8)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, hs
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, ge
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: movz x1, #1
|
||||
; Inst 6: subs xzr, x0, x1
|
||||
; Inst 7: b.ge label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 8 .. 9)
|
||||
; Inst 8: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 9 .. 10)
|
||||
; Inst 9: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 10 .. 11)
|
||||
; Inst 10: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_uge(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -467,20 +716,34 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, x2
|
||||
; nextln: cset x0, hs
|
||||
; nextln: subs xzr, x1, x3
|
||||
; nextln: cset x1, hs
|
||||
; nextln: csel x0, x0, x1, eq
|
||||
; nextln: movz x1, #1
|
||||
; nextln: subs xzr, x0, x1
|
||||
; nextln: b.hs label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 8)
|
||||
; Inst 0: subs xzr, x0, x2
|
||||
; Inst 1: cset x0, hs
|
||||
; Inst 2: subs xzr, x1, x3
|
||||
; Inst 3: cset x1, hs
|
||||
; Inst 4: csel x0, x0, x1, eq
|
||||
; Inst 5: movz x1, #1
|
||||
; Inst 6: subs xzr, x0, x1
|
||||
; Inst 7: b.hs label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 8 .. 9)
|
||||
; Inst 8: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 9 .. 10)
|
||||
; Inst 9: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 10 .. 11)
|
||||
; Inst 10: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_of(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -491,15 +754,29 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: adds xzr, x0, x2
|
||||
; nextln: adcs xzr, x1, x3
|
||||
; nextln: b.vs label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: adds xzr, x0, x2
|
||||
; Inst 1: adcs xzr, x1, x3
|
||||
; Inst 2: b.vs label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 3 .. 4)
|
||||
; Inst 3: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 4 .. 5)
|
||||
; Inst 4: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 5 .. 6)
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
function %i128_bricmp_nof(i128, i128) {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -510,12 +787,27 @@ block1:
|
||||
return
|
||||
}
|
||||
|
||||
; check: adds xzr, x0, x2
|
||||
; nextln: adcs xzr, x1, x3
|
||||
; nextln: b.vc label1 ; b label2
|
||||
; check: Block 1:
|
||||
; check: b label3
|
||||
; check: Block 2:
|
||||
; check: b label3
|
||||
; check: Block 3:
|
||||
; check: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: adds xzr, x0, x2
|
||||
; Inst 1: adcs xzr, x1, x3
|
||||
; Inst 2: b.vc label1 ; b label2
|
||||
; Block 1:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 3 .. 4)
|
||||
; Inst 3: b label3
|
||||
; Block 2:
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 4 .. 5)
|
||||
; Inst 4: b label3
|
||||
; Block 3:
|
||||
; (original IR block: block1)
|
||||
; (instruction range: 5 .. 6)
|
||||
; Inst 5: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -10,8 +10,16 @@ block0(v0: i8, v1: i64, v2: i64):
|
||||
return v5
|
||||
}
|
||||
|
||||
; check: subs wzr
|
||||
; check: csel x0, $(=x[0-9]+, x[0-9]+), eq
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: uxtb w0, w0
|
||||
; Inst 1: subs wzr, w0, #42
|
||||
; Inst 2: csel x0, x1, x2, eq
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %g(i8) -> b1 {
|
||||
block0(v0: i8):
|
||||
@@ -21,8 +29,16 @@ block0(v0: i8):
|
||||
return v5
|
||||
}
|
||||
|
||||
; check: subs wzr
|
||||
; check: cset x0, eq
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: uxtb w0, w0
|
||||
; Inst 1: subs wzr, w0, #42
|
||||
; Inst 2: cset x0, eq
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %h(i8, i8, i8) -> i8 {
|
||||
block0(v0: i8, v1: i8, v2: i8):
|
||||
@@ -30,9 +46,16 @@ block0(v0: i8, v1: i8, v2: i8):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: and
|
||||
; nextln: bic
|
||||
; nextln: orr
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: and x1, x1, x0
|
||||
; Inst 1: bic x0, x2, x0
|
||||
; Inst 2: orr x0, x0, x1
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %i(b1, i8, i8) -> i8 {
|
||||
block0(v0: b1, v1: i8, v2: i8):
|
||||
@@ -40,8 +63,16 @@ block0(v0: b1, v1: i8, v2: i8):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: subs wzr
|
||||
; nextln: csel
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: and w0, w0, #1
|
||||
; Inst 1: subs wzr, w0, wzr
|
||||
; Inst 2: csel x0, x1, x2, ne
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %i(i32, i8, i8) -> i8 {
|
||||
block0(v0: i32, v1: i8, v2: i8):
|
||||
@@ -51,9 +82,15 @@ block0(v0: i32, v1: i8, v2: i8):
|
||||
return v5
|
||||
}
|
||||
|
||||
; check: subs wzr, w0, #42
|
||||
; nextln: csel x0, x1, x2, eq
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: subs wzr, w0, #42
|
||||
; Inst 1: csel x0, x1, x2, eq
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i128_select(b1, i128, i128) -> i128 {
|
||||
block0(v0: b1, v1: i128, v2: i128):
|
||||
@@ -61,6 +98,15 @@ block0(v0: b1, v1: i128, v2: i128):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: subs wzr, w0, wzr
|
||||
; nextln: csel x0, x2, x4, ne
|
||||
; nextln: csel x1, x3, x5, ne
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: and w0, w0, #1
|
||||
; Inst 1: subs wzr, w0, wzr
|
||||
; Inst 2: csel x0, x2, x4, ne
|
||||
; Inst 3: csel x1, x3, x5, ne
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -8,8 +8,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movz x0, #255
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movz x0, #255
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> b16 {
|
||||
block0:
|
||||
@@ -17,8 +23,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movz x0, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movz x0, #0
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -26,8 +38,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movz x0, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movz x0, #0
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -35,8 +53,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movz x0, #65535
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movz x0, #65535
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -44,8 +68,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movz x0, #65535, LSL #16
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movz x0, #65535, LSL #16
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -53,8 +83,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movz x0, #65535, LSL #32
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movz x0, #65535, LSL #32
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -62,8 +98,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movz x0, #65535, LSL #48
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movz x0, #65535, LSL #48
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -71,8 +113,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movn x0, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movn x0, #0
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -80,8 +128,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movn x0, #65535
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movn x0, #65535
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -89,8 +143,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movn x0, #65535, LSL #16
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movn x0, #65535, LSL #16
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -98,8 +158,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movn x0, #65535, LSL #32
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movn x0, #65535, LSL #32
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -107,40 +173,64 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movn x0, #65535, LSL #48
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movn x0, #65535, LSL #48
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
block0:
v0 = iconst.i64 0xf34bf0a31212003a ; random digits
v0 = iconst.i64 0xf34bf0a31212003a ;; random digits
return v0
}

; check: movz x0, #58
; nextln: movk x0, #4626, LSL #16
; nextln: movk x0, #61603, LSL #32
; nextln: movk x0, #62283, LSL #48
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: movz x0, #58
; Inst 1: movk x0, #4626, LSL #16
; Inst 2: movk x0, #61603, LSL #32
; Inst 3: movk x0, #62283, LSL #48
; Inst 4: ret
; }}

function %f() -> i64 {
block0:
v0 = iconst.i64 0x12e900001ef40000 ; random digits with 2 clear half words
v0 = iconst.i64 0x12e900001ef40000 ;; random digits with 2 clear half words
return v0
}

; check: movz x0, #7924, LSL #16
; nextln: movk x0, #4841, LSL #48
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #7924, LSL #16
; Inst 1: movk x0, #4841, LSL #48
; Inst 2: ret
; }}

function %f() -> i64 {
block0:
v0 = iconst.i64 0x12e9ffff1ef4ffff ; random digits with 2 full half words
v0 = iconst.i64 0x12e9ffff1ef4ffff ;; random digits with 2 full half words
return v0
}

; check: movn x0, #57611, LSL #16
; nextln: movk x0, #4841, LSL #48
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movn x0, #57611, LSL #16
; Inst 1: movk x0, #4841, LSL #48
; Inst 2: ret
; }}

function %f() -> i32 {
|
||||
block0:
|
||||
@@ -148,8 +238,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: orr x0, xzr, #4294967295
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: orr x0, xzr, #4294967295
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i32 {
|
||||
block0:
|
||||
@@ -157,8 +253,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movn w0, #8
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movn w0, #8
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -166,8 +268,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movn w0, #8
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movn w0, #8
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f() -> i64 {
|
||||
block0:
|
||||
@@ -175,5 +283,12 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movn x0, #8
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movn x0, #8
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -10,10 +10,15 @@ block0(v0: i8):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sxtb x0, w0
|
||||
; nextln: add x0, x0, #42
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: sxtb x0, w0
|
||||
; Inst 1: add x0, x0, #42
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f2(i8, i64) -> i64 {
|
||||
block0(v0: i8, v1: i64):
|
||||
@@ -22,9 +27,14 @@ block0(v0: i8, v1: i64):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: add x0, x1, x0, SXTB
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: add x0, x1, x0, SXTB
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i128_uextend_i64(i64) -> i128 {
|
||||
block0(v0: i64):
|
||||
@@ -32,8 +42,14 @@ block0(v0: i64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: movz x1, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movz x1, #0
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i128_sextend_i64(i64) -> i128 {
|
||||
block0(v0: i64):
|
||||
@@ -41,9 +57,14 @@ block0(v0: i64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: asr x1, x0, #63
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: asr x1, x0, #63
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i128_uextend_i32(i32) -> i128 {
|
||||
block0(v0: i32):
|
||||
@@ -51,9 +72,15 @@ block0(v0: i32):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: mov w0, w0
|
||||
; nextln: movz x1, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: mov w0, w0
|
||||
; Inst 1: movz x1, #0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i128_sextend_i32(i32) -> i128 {
|
||||
block0(v0: i32):
|
||||
@@ -61,10 +88,15 @@ block0(v0: i32):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: sxtw x0, w0
|
||||
; nextln: asr x1, x0, #63
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: sxtw x0, w0
|
||||
; Inst 1: asr x1, x0, #63
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i128_uextend_i16(i16) -> i128 {
|
||||
block0(v0: i16):
|
||||
@@ -72,9 +104,15 @@ block0(v0: i16):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: uxth w0, w0
|
||||
; nextln: movz x1, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: uxth w0, w0
|
||||
; Inst 1: movz x1, #0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i128_sextend_i16(i16) -> i128 {
|
||||
block0(v0: i16):
|
||||
@@ -82,10 +120,15 @@ block0(v0: i16):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: sxth x0, w0
|
||||
; nextln: asr x1, x0, #63
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: sxth x0, w0
|
||||
; Inst 1: asr x1, x0, #63
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i128_uextend_i8(i8) -> i128 {
|
||||
block0(v0: i8):
|
||||
@@ -93,9 +136,15 @@ block0(v0: i8):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: uxtb w0, w0
|
||||
; nextln: movz x1, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: uxtb w0, w0
|
||||
; Inst 1: movz x1, #0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i128_sextend_i8(i8) -> i128 {
|
||||
block0(v0: i8):
|
||||
@@ -103,9 +152,15 @@ block0(v0: i8):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: sxtb x0, w0
|
||||
; nextln: asr x1, x0, #63
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: sxtb x0, w0
|
||||
; Inst 1: asr x1, x0, #63
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i8x16_uextend_i16(i8x16) -> i16 {
|
||||
block0(v0: i8x16):
|
||||
@@ -114,8 +169,14 @@ block0(v0: i8x16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: umov w0, v0.b[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umov w0, v0.b[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i8x16_uextend_i32(i8x16) -> i32 {
|
||||
block0(v0: i8x16):
|
||||
@@ -124,8 +185,14 @@ block0(v0: i8x16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: umov w0, v0.b[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umov w0, v0.b[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i8x16_uextend_i64(i8x16) -> i64 {
|
||||
block0(v0: i8x16):
|
||||
@@ -134,8 +201,14 @@ block0(v0: i8x16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: umov w0, v0.b[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umov w0, v0.b[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i8x16_uextend_i128(i8x16) -> i128 {
|
||||
block0(v0: i8x16):
|
||||
@@ -144,9 +217,15 @@ block0(v0: i8x16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: umov w0, v0.b[1]
|
||||
; nextln: movz x1, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: umov w0, v0.b[1]
|
||||
; Inst 1: movz x1, #0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i8x16_sextend_i16(i8x16) -> i16 {
|
||||
block0(v0: i8x16):
|
||||
@@ -155,8 +234,14 @@ block0(v0: i8x16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: smov w0, v0.b[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smov w0, v0.b[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i8x16_sextend_i32(i8x16) -> i32 {
|
||||
block0(v0: i8x16):
|
||||
@@ -165,8 +250,14 @@ block0(v0: i8x16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: smov w0, v0.b[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smov w0, v0.b[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i8x16_sextend_i64(i8x16) -> i64 {
|
||||
block0(v0: i8x16):
|
||||
@@ -175,8 +266,14 @@ block0(v0: i8x16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: smov x0, v0.b[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smov x0, v0.b[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i8x16_sextend_i128(i8x16) -> i128 {
|
||||
block0(v0: i8x16):
|
||||
@@ -185,9 +282,15 @@ block0(v0: i8x16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: smov x0, v0.b[1]
|
||||
; nextln: asr x1, x0, #63
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: smov x0, v0.b[1]
|
||||
; Inst 1: asr x1, x0, #63
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i16x8_uextend_i32(i16x8) -> i32 {
|
||||
block0(v0: i16x8):
|
||||
@@ -196,8 +299,14 @@ block0(v0: i16x8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: umov w0, v0.h[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umov w0, v0.h[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i16x8_uextend_i64(i16x8) -> i64 {
|
||||
block0(v0: i16x8):
|
||||
@@ -206,8 +315,14 @@ block0(v0: i16x8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: umov w0, v0.h[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umov w0, v0.h[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i16x8_uextend_i128(i16x8) -> i128 {
|
||||
block0(v0: i16x8):
|
||||
@@ -216,9 +331,15 @@ block0(v0: i16x8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: umov w0, v0.h[1]
|
||||
; nextln: movz x1, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: umov w0, v0.h[1]
|
||||
; Inst 1: movz x1, #0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i16x8_sextend_i32(i16x8) -> i32 {
|
||||
block0(v0: i16x8):
|
||||
@@ -227,8 +348,14 @@ block0(v0: i16x8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: smov w0, v0.h[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smov w0, v0.h[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i16x8_sextend_i64(i16x8) -> i64 {
|
||||
block0(v0: i16x8):
|
||||
@@ -237,8 +364,14 @@ block0(v0: i16x8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: smov x0, v0.h[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smov x0, v0.h[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i16x8_sextend_i128(i16x8) -> i128 {
|
||||
block0(v0: i16x8):
|
||||
@@ -247,9 +380,15 @@ block0(v0: i16x8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: smov x0, v0.h[1]
|
||||
; nextln: asr x1, x0, #63
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: smov x0, v0.h[1]
|
||||
; Inst 1: asr x1, x0, #63
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i32x4_uextend_i64(i32x4) -> i64 {
|
||||
block0(v0: i32x4):
|
||||
@@ -258,8 +397,14 @@ block0(v0: i32x4):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: mov w0, v0.s[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: mov w0, v0.s[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i32x4_uextend_i128(i32x4) -> i128 {
|
||||
block0(v0: i32x4):
|
||||
@@ -268,9 +413,15 @@ block0(v0: i32x4):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: mov w0, v0.s[1]
|
||||
; nextln: movz x1, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: mov w0, v0.s[1]
|
||||
; Inst 1: movz x1, #0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i32x4_sextend_i64(i32x4) -> i64 {
|
||||
block0(v0: i32x4):
|
||||
@@ -279,8 +430,14 @@ block0(v0: i32x4):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: smov x0, v0.s[1]
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smov x0, v0.s[1]
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %i32x4_sextend_i128(i32x4) -> i128 {
|
||||
block0(v0: i32x4):
|
||||
@@ -289,9 +446,15 @@ block0(v0: i32x4):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: smov x0, v0.s[1]
|
||||
; nextln: asr x1, x0, #63
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: smov x0, v0.s[1]
|
||||
; Inst 1: asr x1, x0, #63
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i64x2_uextend_i128(i64x2) -> i128 {
|
||||
block0(v0: i64x2):
|
||||
@@ -300,9 +463,15 @@ block0(v0: i64x2):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: mov x0, v0.d[1]
|
||||
; nextln: movz x1, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: mov x0, v0.d[1]
|
||||
; Inst 1: movz x1, #0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %i64x2_sextend_i128(i64x2) -> i128 {
|
||||
block0(v0: i64x2):
|
||||
@@ -311,6 +480,13 @@ block0(v0: i64x2):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: mov x0, v0.d[1]
|
||||
; nextln: asr x1, x0, #63
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: mov x0, v0.d[1]
|
||||
; Inst 1: asr x1, x0, #63
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,111 +1,168 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
function u0:0(i8) -> f32 {
|
||||
block0(v0: i8):
|
||||
v1 = fcvt_from_uint.f32 v0
|
||||
; check: uxtb w0, w0
|
||||
; check: ucvtf s0, w0
|
||||
return v1
|
||||
; check: ret
|
||||
}
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: uxtb w0, w0
|
||||
; Inst 1: ucvtf s0, w0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function u0:0(i8) -> f64 {
|
||||
block0(v0: i8):
|
||||
v1 = fcvt_from_uint.f64 v0
|
||||
; check: uxtb w0, w0
|
||||
; check: ucvtf d0, w0
|
||||
return v1
|
||||
; check: ret
|
||||
}
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: uxtb w0, w0
|
||||
; Inst 1: ucvtf d0, w0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function u0:0(i16) -> f32 {
|
||||
block0(v0: i16):
|
||||
v1 = fcvt_from_uint.f32 v0
|
||||
; check: uxth w0, w0
|
||||
; check: ucvtf s0, w0
|
||||
return v1
|
||||
; check: ret
|
||||
}
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: uxth w0, w0
|
||||
; Inst 1: ucvtf s0, w0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function u0:0(i16) -> f64 {
|
||||
block0(v0: i16):
|
||||
v1 = fcvt_from_uint.f64 v0
|
||||
; check: uxth w0, w0
|
||||
; check: ucvtf d0, w0
|
||||
return v1
|
||||
; check: ret
|
||||
}
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: uxth w0, w0
|
||||
; Inst 1: ucvtf d0, w0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function u0:0(f32) -> i8 {
|
||||
block0(v0: f32):
|
||||
v1 = fcvt_to_uint.i8 v0
|
||||
; check: fcmp s0, s0
|
||||
; check: b.vc 8 ; udf
|
||||
; check: movz x0, #49024, LSL #16
|
||||
; check: fmov d1, x0
|
||||
; check: fcmp s0, s1
|
||||
; check: b.gt 8 ; udf
|
||||
; check: movz x0, #17280, LSL #16
|
||||
; check: fmov d1, x0
|
||||
; check: fcmp s0, s1
|
||||
; check: b.mi 8 ; udf
|
||||
; check: fcvtzu w0, s0
|
||||
return v1
|
||||
; check: ret
|
||||
}
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 12)
|
||||
; Inst 0: fcmp s0, s0
|
||||
; Inst 1: b.vc 8 ; udf
|
||||
; Inst 2: movz x0, #49024, LSL #16
|
||||
; Inst 3: fmov d1, x0
|
||||
; Inst 4: fcmp s0, s1
|
||||
; Inst 5: b.gt 8 ; udf
|
||||
; Inst 6: movz x0, #17280, LSL #16
|
||||
; Inst 7: fmov d1, x0
|
||||
; Inst 8: fcmp s0, s1
|
||||
; Inst 9: b.mi 8 ; udf
|
||||
; Inst 10: fcvtzu w0, s0
|
||||
; Inst 11: ret
|
||||
; }}
|
||||
|
||||
function u0:0(f64) -> i8 {
|
||||
block0(v0: f64):
|
||||
v1 = fcvt_to_uint.i8 v0
|
||||
; check: fcmp d0, d0
|
||||
; check: b.vc 8 ; udf
|
||||
; check: movz x0, #49136, LSL #48
|
||||
; check: fmov d1, x0
|
||||
; check: fcmp d0, d1
|
||||
; check: b.gt 8 ; udf
|
||||
; check: movz x0, #16496, LSL #48
|
||||
; check: fmov d1, x0
|
||||
; check: fcmp d0, d1
|
||||
; check: b.mi 8 ; udf
|
||||
; check: fcvtzu w0, d0
|
||||
return v1
|
||||
; check: ret
|
||||
}
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 12)
|
||||
; Inst 0: fcmp d0, d0
|
||||
; Inst 1: b.vc 8 ; udf
|
||||
; Inst 2: movz x0, #49136, LSL #48
|
||||
; Inst 3: fmov d1, x0
|
||||
; Inst 4: fcmp d0, d1
|
||||
; Inst 5: b.gt 8 ; udf
|
||||
; Inst 6: movz x0, #16496, LSL #48
|
||||
; Inst 7: fmov d1, x0
|
||||
; Inst 8: fcmp d0, d1
|
||||
; Inst 9: b.mi 8 ; udf
|
||||
; Inst 10: fcvtzu w0, d0
|
||||
; Inst 11: ret
|
||||
; }}
|
||||
|
||||
function u0:0(f32) -> i16 {
|
||||
block0(v0: f32):
|
||||
v1 = fcvt_to_uint.i16 v0
|
||||
; check: fcmp s0, s0
|
||||
; check: b.vc 8 ; udf
|
||||
; check: movz x0, #49024, LSL #16
|
||||
; check: fmov d1, x0
|
||||
; check: fcmp s0, s1
|
||||
; check: b.gt 8 ; udf
|
||||
; check: movz x0, #18304, LSL #16
|
||||
; check: fmov d1, x0
|
||||
; check: fcmp s0, s1
|
||||
; check: b.mi 8 ; udf
|
||||
; check: fcvtzu w0, s0
|
||||
return v1
|
||||
; check: ret
|
||||
}
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 12)
|
||||
; Inst 0: fcmp s0, s0
|
||||
; Inst 1: b.vc 8 ; udf
|
||||
; Inst 2: movz x0, #49024, LSL #16
|
||||
; Inst 3: fmov d1, x0
|
||||
; Inst 4: fcmp s0, s1
|
||||
; Inst 5: b.gt 8 ; udf
|
||||
; Inst 6: movz x0, #18304, LSL #16
|
||||
; Inst 7: fmov d1, x0
|
||||
; Inst 8: fcmp s0, s1
|
||||
; Inst 9: b.mi 8 ; udf
|
||||
; Inst 10: fcvtzu w0, s0
|
||||
; Inst 11: ret
|
||||
; }}
|
||||
|
||||
function u0:0(f64) -> i16 {
|
||||
block0(v0: f64):
|
||||
v1 = fcvt_to_uint.i16 v0
|
||||
; check: fcmp d0, d0
|
||||
; check: b.vc 8 ; udf
|
||||
; check: movz x0, #49136, LSL #48
|
||||
; check: fmov d1, x0
|
||||
; check: fcmp d0, d1
|
||||
; check: b.gt 8 ; udf
|
||||
; check: movz x0, #16624, LSL #48
|
||||
; check: fmov d1, x0
|
||||
; check: fcmp d0, d1
|
||||
; check: b.mi 8 ; udf
|
||||
; check: fcvtzu w0, d0
|
||||
return v1
|
||||
; check: ret
|
||||
}
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 12)
|
||||
; Inst 0: fcmp d0, d0
|
||||
; Inst 1: b.vc 8 ; udf
|
||||
; Inst 2: movz x0, #49136, LSL #48
|
||||
; Inst 3: fmov d1, x0
|
||||
; Inst 4: fcmp d0, d1
|
||||
; Inst 5: b.gt 8 ; udf
|
||||
; Inst 6: movz x0, #16624, LSL #48
|
||||
; Inst 7: fmov d1, x0
|
||||
; Inst 8: fcmp d0, d1
|
||||
; Inst 9: b.mi 8 ; udf
|
||||
; Inst 10: fcvtzu w0, d0
|
||||
; Inst 11: ret
|
||||
; }}
|
||||
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
set enable_heap_access_spectre_mitigation=true
target aarch64
@@ -13,20 +13,31 @@ block0(v0: i64, v1: i32):
return v2
}

; check: Block 0:
; check: mov w2, w1
; nextln: ldr x3, [x0]
; nextln: mov x3, x3
; nextln: subs xzr, x2, x3
; nextln: b.ls label1 ; b label2
; check: Block 1:
; check: add x0, x0, x1, UXTW
; nextln: subs xzr, x2, x3
; nextln: movz x1, #0
; nextln: csel x0, x1, x0, hi
; nextln: ret
; check: Block 2:
; check: udf
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 5)
; Inst 0: mov w2, w1
; Inst 1: ldr x3, [x0]
; Inst 2: mov x3, x3
; Inst 3: subs xzr, x2, x3
; Inst 4: b.ls label1 ; b label2
; Block 1:
; (original IR block: block2)
; (instruction range: 5 .. 10)
; Inst 5: add x0, x0, x1, UXTW
; Inst 6: subs xzr, x2, x3
; Inst 7: movz x1, #0
; Inst 8: csel x0, x1, x0, hi
; Inst 9: ret
; Block 2:
; (original IR block: block1)
; (instruction range: 10 .. 11)
; Inst 10: udf
; }}

function %static_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
@@ -37,15 +48,27 @@ block0(v0: i64, v1: i32):
return v2
}

; check: Block 0:
; check: mov w2, w1
; nextln: subs xzr, x2, #65536
; nextln: b.ls label1 ; b label2
; check: Block 1:
; check: add x0, x0, x1, UXTW
; nextln: subs xzr, x2, #65536
; nextln: movz x1, #0
; nextln: csel x0, x1, x0, hi
; nextln: ret
; check: Block 2:
; check: udf
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 3)
; Inst 0: mov w2, w1
; Inst 1: subs xzr, x2, #65536
; Inst 2: b.ls label1 ; b label2
; Block 1:
; (original IR block: block2)
; (instruction range: 3 .. 8)
; Inst 3: add x0, x0, x1, UXTW
; Inst 4: subs xzr, x2, #65536
; Inst 5: movz x1, #0
; Inst 6: csel x0, x1, x0, hi
; Inst 7: ret
; Block 2:
; (original IR block: block1)
; (instruction range: 8 .. 9)
; Inst 8: udf
; }}


@@ -1,7 +1,7 @@
; Test that `put_input_in_rse` doesn't try to put the input of the `iconst` into a register, which
; would result in an out-of-bounds panic. (#2147)

test compile
test compile precise-output
set unwind_info=false
target aarch64

@@ -14,16 +14,17 @@ block0:
return v2
}

; check: VCode_ShowWithRRU {{
; nextln: Entry block: 0
; nextln: Block 0:
; nextln: (original IR block: block0)
; nextln: (instruction range: 0 .. 7)
; nextln: Inst 0: movz x0, #56780
; nextln: Inst 1: uxth w0, w0
; nextln: Inst 2: movz x1, #56780
; nextln: Inst 3: subs wzr, w0, w1, UXTH
; nextln: Inst 4: cset x0, ne
; nextln: Inst 5: and w0, w0, #1
; nextln: Inst 6: ret
; nextln: }}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: movz x0, #56780
; Inst 1: uxth w0, w0
; Inst 2: movz x1, #56780
; Inst 3: subs wzr, w0, w1, UXTH
; Inst 4: cset x0, ne
; Inst 5: and w0, w0, #1
; Inst 6: ret
; }}

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64

@@ -10,6 +10,13 @@ block1:
return v0, v1
}

; check: movz x0, #1
; nextln: movz x1, #2
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block1)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #1
; Inst 1: movz x1, #2
; Inst 2: ret
; }}

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64

@@ -8,8 +8,14 @@ block0(v0: i8, v1: i8):
return v2
}

; check: add w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1
; Inst 1: ret
; }}

function %add16(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -17,8 +23,14 @@ block0(v0: i16, v1: i16):
return v2
}

; check: add w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1
; Inst 1: ret
; }}

function %add32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -26,8 +38,14 @@ block0(v0: i32, v1: i32):
return v2
}

; check: add w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1
; Inst 1: ret
; }}

function %add32_8(i32, i8) -> i32 {
block0(v0: i32, v1: i8):
@@ -36,8 +54,14 @@ block0(v0: i32, v1: i8):
return v3
}

; check: add w0, w0, w1, SXTB
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1, SXTB
; Inst 1: ret
; }}

function %add64_32(i64, i32) -> i64 {
block0(v0: i64, v1: i32):
@@ -46,5 +70,12 @@ block0(v0: i64, v1: i32):
return v3
}

; check: add x0, x0, x1, SXTW
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x0, x1, SXTW
; Inst 1: ret
; }}

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64

@@ -7,25 +7,54 @@ block0(v0: i128):
v1 = ireduce.i64 v0
return v1
}
; check: ret

; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}

function %ireduce_128_32(i128) -> i32 {
block0(v0: i128):
v1 = ireduce.i32 v0
return v1
}
; check: ret

; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}

function %ireduce_128_16(i128) -> i16 {
block0(v0: i128):
v1 = ireduce.i16 v0
return v1
}
; check: ret

; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}

function %ireduce_128_8(i128) -> i8 {
block0(v0: i128):
v1 = ireduce.i8 v0
return v1
}
; check: ret

; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}

@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -7,7 +7,13 @@ block0(v0: r64):
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 1)
|
||||
; Inst 0: ret
|
||||
; }}
|
||||
|
||||
function %f1(r64) -> b1 {
|
||||
block0(v0: r64):
|
||||
@@ -15,9 +21,15 @@ block0(v0: r64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: subs xzr, x0, #0
|
||||
; nextln: cset x0, eq
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: subs xzr, x0, #0
|
||||
; Inst 1: cset x0, eq
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f2(r64) -> b1 {
|
||||
block0(v0: r64):
|
||||
@@ -25,9 +37,15 @@ block0(v0: r64):
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: adds xzr, x0, #1
|
||||
; nextln: cset x0, eq
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: adds xzr, x0, #1
|
||||
; Inst 1: cset x0, eq
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f3() -> r64 {
|
||||
block0:
|
||||
@@ -35,8 +53,14 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: movz x0, #0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: movz x0, #0
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f4(r64, r64) -> r64, r64, r64 {
|
||||
fn0 = %f(r64) -> b1
|
||||
@@ -59,43 +83,63 @@ block3(v7: r64, v8: r64):
|
||||
return v7, v8, v9
|
||||
}
|
||||
|
||||
; check: Block 0:
|
||||
; check: stp fp, lr, [sp, #-16]!
|
||||
; nextln: mov fp, sp
|
||||
; nextln: stp x19, x20, [sp, #-16]!
|
||||
; nextln: sub sp, sp, #32
|
||||
; nextln: mov x19, x0
|
||||
; nextln: mov x20, x1
|
||||
; nextln: mov x0, x19
|
||||
; nextln: ldr x1, 8 ; b 12 ; data
|
||||
; nextln: stur x0, [sp, #8]
|
||||
; nextln: stur x19, [sp, #16]
|
||||
; nextln: stur x20, [sp, #24]
|
||||
; nextln: (safepoint: slots [S0, S1, S2]
|
||||
; nextln: blr x1
|
||||
; nextln: ldur x19, [sp, #16]
|
||||
; nextln: ldur x20, [sp, #24]
|
||||
; nextln: mov x1, sp
|
||||
; nextln: str x19, [x1]
|
||||
; nextln: and w0, w0, #1
|
||||
; nextln: cbz x0, label1 ; b label3
|
||||
; check: Block 1:
|
||||
; check: b label2
|
||||
; check: Block 2:
|
||||
; check: mov x0, x20
|
||||
; nextln: b label5
|
||||
; check: Block 3:
|
||||
; check: b label4
|
||||
; check: Block 4:
|
||||
; check: mov x0, x19
|
||||
; nextln: mov x19, x20
|
||||
; nextln: b label5
|
||||
; check: Block 5:
|
||||
; check: mov x1, sp
|
||||
; nextln: ldr x1, [x1]
|
||||
; nextln: mov x2, x1
|
||||
; nextln: mov x1, x19
|
||||
; nextln: add sp, sp, #32
|
||||
; nextln: ldp x19, x20, [sp], #16
|
||||
; nextln: ldp fp, lr, [sp], #16
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (successor: Block 1)
|
||||
; (successor: Block 3)
|
||||
; (instruction range: 0 .. 18)
|
||||
; Inst 0: stp fp, lr, [sp, #-16]!
|
||||
; Inst 1: mov fp, sp
|
||||
; Inst 2: stp x19, x20, [sp, #-16]!
|
||||
; Inst 3: sub sp, sp, #32
|
||||
; Inst 4: mov x19, x0
|
||||
; Inst 5: mov x20, x1
|
||||
; Inst 6: mov x0, x19
|
||||
; Inst 7: ldr x1, 8 ; b 12 ; data TestCase { length: 1, ascii: [102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
|
||||
; Inst 8: stur x0, [sp, #8]
|
||||
; Inst 9: stur x19, [sp, #16]
|
||||
; Inst 10: stur x20, [sp, #24]
|
||||
; (safepoint: slots [S0, S1, S2] with EmitState EmitState { virtual_sp_offset: 0, nominal_sp_to_fp: 0, stack_map: None, cur_srcloc: SourceLoc(4294967295) })
|
||||
; Inst 11: blr x1
|
||||
; Inst 12: ldur x19, [sp, #16]
|
||||
; Inst 13: ldur x20, [sp, #24]
|
||||
; Inst 14: mov x1, sp
|
||||
; Inst 15: str x19, [x1]
|
||||
; Inst 16: and w0, w0, #1
|
||||
; Inst 17: cbz x0, label1 ; b label3
|
||||
; Block 1:
|
||||
; (original IR block: block1)
|
||||
; (successor: Block 2)
|
||||
; (instruction range: 18 .. 19)
|
||||
; Inst 18: b label2
|
||||
; Block 2:
|
||||
; (successor: Block 5)
|
||||
; (instruction range: 19 .. 21)
|
||||
; Inst 19: mov x0, x20
|
||||
; Inst 20: b label5
|
||||
; Block 3:
|
||||
; (original IR block: block2)
|
||||
; (successor: Block 4)
|
||||
; (instruction range: 21 .. 22)
|
||||
; Inst 21: b label4
|
||||
; Block 4:
|
||||
; (successor: Block 5)
|
||||
; (instruction range: 22 .. 25)
|
||||
; Inst 22: mov x0, x19
|
||||
; Inst 23: mov x19, x20
|
||||
; Inst 24: b label5
|
||||
; Block 5:
|
||||
; (original IR block: block3)
|
||||
; (instruction range: 25 .. 33)
|
||||
; Inst 25: mov x1, sp
|
||||
; Inst 26: ldr x1, [x1]
|
||||
; Inst 27: mov x2, x1
|
||||
; Inst 28: mov x1, x19
|
||||
; Inst 29: add sp, sp, #32
|
||||
; Inst 30: ldp x19, x20, [sp], #16
|
||||
; Inst 31: ldp fp, lr, [sp], #16
|
||||
; Inst 32: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64

@@ -10,8 +10,14 @@ block0(v0: i64):
return v3
}

; check: add x0, x0, x0, LSL 3
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x0, x0, LSL 3
; Inst 1: ret
; }}

function %f(i32) -> i32 {
block0(v0: i32):
@@ -20,5 +26,12 @@ block0(v0: i32):
return v2
}

; check: lsl w0, w0, #21
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsl w0, w0, #21
; Inst 1: ret
; }}

@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -12,31 +12,36 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: mov x4, x1
|
||||
; nextln: orr x1, xzr, #128
|
||||
; nextln: sub x1, x1, x2
|
||||
; nextln: lsr x3, x0, x2
|
||||
; nextln: lsr x5, x4, x2
|
||||
; nextln: orn w6, wzr, w2
|
||||
; nextln: lsl x7, x4, #1
|
||||
; nextln: lsl x6, x7, x6
|
||||
; nextln: orr x6, x3, x6
|
||||
; nextln: ands xzr, x2, #64
|
||||
; nextln: csel x3, xzr, x5, ne
|
||||
; nextln: csel x2, x5, x6, ne
|
||||
; nextln: lsl x5, x0, x1
|
||||
; nextln: lsl x4, x4, x1
|
||||
; nextln: orn w6, wzr, w1
|
||||
; nextln: lsr x0, x0, #1
|
||||
; nextln: lsr x0, x0, x6
|
||||
; nextln: orr x0, x4, x0
|
||||
; nextln: ands xzr, x1, #64
|
||||
; nextln: csel x1, x5, x0, ne
|
||||
; nextln: csel x0, xzr, x5, ne
|
||||
; nextln: orr x1, x3, x1
|
||||
; nextln: orr x0, x2, x0
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 24)
|
||||
; Inst 0: mov x4, x1
|
||||
; Inst 1: orr x1, xzr, #128
|
||||
; Inst 2: sub x1, x1, x2
|
||||
; Inst 3: lsr x3, x0, x2
|
||||
; Inst 4: lsr x5, x4, x2
|
||||
; Inst 5: orn w6, wzr, w2
|
||||
; Inst 6: lsl x7, x4, #1
|
||||
; Inst 7: lsl x6, x7, x6
|
||||
; Inst 8: orr x6, x3, x6
|
||||
; Inst 9: ands xzr, x2, #64
|
||||
; Inst 10: csel x3, xzr, x5, ne
|
||||
; Inst 11: csel x2, x5, x6, ne
|
||||
; Inst 12: lsl x5, x0, x1
|
||||
; Inst 13: lsl x4, x4, x1
|
||||
; Inst 14: orn w6, wzr, w1
|
||||
; Inst 15: lsr x0, x0, #1
|
||||
; Inst 16: lsr x0, x0, x6
|
||||
; Inst 17: orr x0, x4, x0
|
||||
; Inst 18: ands xzr, x1, #64
|
||||
; Inst 19: csel x1, x5, x0, ne
|
||||
; Inst 20: csel x0, xzr, x5, ne
|
||||
; Inst 21: orr x1, x3, x1
|
||||
; Inst 22: orr x0, x2, x0
|
||||
; Inst 23: ret
|
||||
; }}
|
||||
|
||||
function %f0(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -44,8 +49,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: ror x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ror x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f1(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -53,8 +64,14 @@ block0(v0: i32, v1: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: ror w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ror w0, w0, w1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f2(i16, i16) -> i16 {
|
||||
block0(v0: i16, v1: i16):
|
||||
@@ -62,14 +79,20 @@ block0(v0: i16, v1: i16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: uxth w0, w0
|
||||
; nextln: and w1, w1, #15
|
||||
; nextln: sub w2, w1, #16
|
||||
; nextln: sub w2, wzr, w2
|
||||
; nextln: lsr w1, w0, w1
|
||||
; nextln: lsl w0, w0, w2
|
||||
; nextln: orr w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 8)
|
||||
; Inst 0: uxth w0, w0
|
||||
; Inst 1: and w1, w1, #15
|
||||
; Inst 2: sub w2, w1, #16
|
||||
; Inst 3: sub w2, wzr, w2
|
||||
; Inst 4: lsr w1, w0, w1
|
||||
; Inst 5: lsl w0, w0, w2
|
||||
; Inst 6: orr w0, w0, w1
|
||||
; Inst 7: ret
|
||||
; }}
|
||||
|
||||
function %f3(i8, i8) -> i8 {
|
||||
block0(v0: i8, v1: i8):
|
||||
@@ -77,18 +100,20 @@ block0(v0: i8, v1: i8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: uxtb w0, w0
|
||||
; nextln: and w1, w1, #7
|
||||
; nextln: sub w2, w1, #8
|
||||
; nextln: sub w2, wzr, w2
|
||||
; nextln: lsr w1, w0, w1
|
||||
; nextln: lsl w0, w0, w2
|
||||
; nextln: orr w0, w0, w1
|
||||
; nextln: ret
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ROL, variable
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 8)
|
||||
; Inst 0: uxtb w0, w0
|
||||
; Inst 1: and w1, w1, #7
|
||||
; Inst 2: sub w2, w1, #8
|
||||
; Inst 3: sub w2, wzr, w2
|
||||
; Inst 4: lsr w1, w0, w1
|
||||
; Inst 5: lsl w0, w0, w2
|
||||
; Inst 6: orr w0, w0, w1
|
||||
; Inst 7: ret
|
||||
; }}
|
||||
|
||||
function %i128_rotl(i128, i128) -> i128 {
|
||||
block0(v0: i128, v1: i128):
|
||||
@@ -96,33 +121,39 @@ block0(v0: i128, v1: i128):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: mov x4, x0
|
||||
; nextln: orr x0, xzr, #128
|
||||
; nextln: sub x0, x0, x2
|
||||
; nextln: lsl x3, x4, x2
|
||||
; nextln: lsl x5, x1, x2
|
||||
; nextln: orn w6, wzr, w2
|
||||
; nextln: lsr x7, x4, #1
|
||||
; nextln: lsr x6, x7, x6
|
||||
; nextln: orr x5, x5, x6
|
||||
; nextln: ands xzr, x2, #64
|
||||
; nextln: csel x2, x3, x5, ne
|
||||
; nextln: csel x3, xzr, x3, ne
|
||||
; nextln: lsr x5, x4, x0
|
||||
; nextln: lsr x4, x1, x0
|
||||
; nextln: orn w6, wzr, w0
|
||||
; nextln: lsl x1, x1, #1
|
||||
; nextln: lsl x1, x1, x6
|
||||
; nextln: orr x1, x5, x1
|
||||
; nextln: ands xzr, x0, #64
|
||||
; nextln: csel x0, xzr, x4, ne
|
||||
; nextln: csel x1, x4, x1, ne
|
||||
; nextln: orr x1, x3, x1
|
||||
; nextln: orr x0, x2, x0
|
||||
; nextln: mov x2, x0
|
||||
; nextln: mov x0, x1
|
||||
; nextln: mov x1, x2
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 27)
|
||||
; Inst 0: mov x4, x0
|
||||
; Inst 1: orr x0, xzr, #128
|
||||
; Inst 2: sub x0, x0, x2
|
||||
; Inst 3: lsl x3, x4, x2
|
||||
; Inst 4: lsl x5, x1, x2
|
||||
; Inst 5: orn w6, wzr, w2
|
||||
; Inst 6: lsr x7, x4, #1
|
||||
; Inst 7: lsr x6, x7, x6
|
||||
; Inst 8: orr x5, x5, x6
|
||||
; Inst 9: ands xzr, x2, #64
|
||||
; Inst 10: csel x2, x3, x5, ne
|
||||
; Inst 11: csel x3, xzr, x3, ne
|
||||
; Inst 12: lsr x5, x4, x0
|
||||
; Inst 13: lsr x4, x1, x0
|
||||
; Inst 14: orn w6, wzr, w0
|
||||
; Inst 15: lsl x1, x1, #1
|
||||
; Inst 16: lsl x1, x1, x6
|
||||
; Inst 17: orr x1, x5, x1
|
||||
; Inst 18: ands xzr, x0, #64
|
||||
; Inst 19: csel x0, xzr, x4, ne
|
||||
; Inst 20: csel x1, x4, x1, ne
|
||||
; Inst 21: orr x1, x3, x1
|
||||
; Inst 22: orr x0, x2, x0
|
||||
; Inst 23: mov x2, x0
|
||||
; Inst 24: mov x0, x1
|
||||
; Inst 25: mov x1, x2
|
||||
; Inst 26: ret
|
||||
; }}
|
||||
|
||||
function %f4(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -130,9 +161,15 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: sub x1, xzr, x1
|
||||
; nextln: ror x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: sub x1, xzr, x1
|
||||
; Inst 1: ror x0, x0, x1
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f5(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -140,9 +177,15 @@ block0(v0: i32, v1: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: sub w1, wzr, w1
|
||||
; nextln: ror w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: sub w1, wzr, w1
|
||||
; Inst 1: ror w0, w0, w1
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f6(i16, i16) -> i16 {
|
||||
block0(v0: i16, v1: i16):
|
||||
@@ -150,15 +193,21 @@ block0(v0: i16, v1: i16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: sub w1, wzr, w1
|
||||
; nextln: uxth w0, w0
|
||||
; nextln: and w1, w1, #15
|
||||
; nextln: sub w2, w1, #16
|
||||
; nextln: sub w2, wzr, w2
|
||||
; nextln: lsr w1, w0, w1
|
||||
; nextln: lsl w0, w0, w2
|
||||
; nextln: orr w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 9)
|
||||
; Inst 0: sub w1, wzr, w1
|
||||
; Inst 1: uxth w0, w0
|
||||
; Inst 2: and w1, w1, #15
|
||||
; Inst 3: sub w2, w1, #16
|
||||
; Inst 4: sub w2, wzr, w2
|
||||
; Inst 5: lsr w1, w0, w1
|
||||
; Inst 6: lsl w0, w0, w2
|
||||
; Inst 7: orr w0, w0, w1
|
||||
; Inst 8: ret
|
||||
; }}
|
||||
|
||||
function %f7(i8, i8) -> i8 {
|
||||
block0(v0: i8, v1: i8):
|
||||
@@ -166,19 +215,21 @@ block0(v0: i8, v1: i8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: sub w1, wzr, w1
|
||||
; nextln: uxtb w0, w0
|
||||
; nextln: and w1, w1, #7
|
||||
; nextln: sub w2, w1, #8
|
||||
; nextln: sub w2, wzr, w2
|
||||
; nextln: lsr w1, w0, w1
|
||||
; nextln: lsl w0, w0, w2
|
||||
; nextln: orr w0, w0, w1
|
||||
; nextln: ret
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; LSR, variable
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 9)
|
||||
; Inst 0: sub w1, wzr, w1
|
||||
; Inst 1: uxtb w0, w0
|
||||
; Inst 2: and w1, w1, #7
|
||||
; Inst 3: sub w2, w1, #8
|
||||
; Inst 4: sub w2, wzr, w2
|
||||
; Inst 5: lsr w1, w0, w1
|
||||
; Inst 6: lsl w0, w0, w2
|
||||
; Inst 7: orr w0, w0, w1
|
||||
; Inst 8: ret
|
||||
; }}
|
||||
|
||||
function %f8(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -186,8 +237,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: lsr x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: lsr x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f9(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -195,8 +252,14 @@ block0(v0: i32, v1: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: lsr w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: lsr w0, w0, w1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f10(i16, i16) -> i16 {
|
||||
block0(v0: i16, v1: i16):
|
||||
@@ -204,10 +267,16 @@ block0(v0: i16, v1: i16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: uxth w0, w0
|
||||
; nextln: and w1, w1, #15
|
||||
; nextln: lsr w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: uxth w0, w0
|
||||
; Inst 1: and w1, w1, #15
|
||||
; Inst 2: lsr w0, w0, w1
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f11(i8, i8) -> i8 {
|
||||
block0(v0: i8, v1: i8):
|
||||
@@ -215,14 +284,16 @@ block0(v0: i8, v1: i8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: uxtb w0, w0
|
||||
; nextln: and w1, w1, #7
|
||||
; nextln: lsr w0, w0, w1
|
||||
; nextln: ret
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; LSL, variable
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: uxtb w0, w0
|
||||
; Inst 1: and w1, w1, #7
|
||||
; Inst 2: lsr w0, w0, w1
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f12(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -230,8 +301,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: lsl x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: lsl x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f13(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -239,8 +316,14 @@ block0(v0: i32, v1: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: lsl w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: lsl w0, w0, w1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f14(i16, i16) -> i16 {
|
||||
block0(v0: i16, v1: i16):
|
||||
@@ -248,9 +331,15 @@ block0(v0: i16, v1: i16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: and w1, w1, #15
|
||||
; nextln: lsl w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: and w1, w1, #15
|
||||
; Inst 1: lsl w0, w0, w1
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f15(i8, i8) -> i8 {
|
||||
block0(v0: i8, v1: i8):
|
||||
@@ -258,13 +347,15 @@ block0(v0: i8, v1: i8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: and w1, w1, #7
|
||||
; nextln: lsl w0, w0, w1
|
||||
; nextln: ret
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; ASR, variable
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: and w1, w1, #7
|
||||
; Inst 1: lsl w0, w0, w1
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
function %f16(i64, i64) -> i64 {
|
||||
block0(v0: i64, v1: i64):
|
||||
@@ -272,8 +363,14 @@ block0(v0: i64, v1: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: asr x0, x0, x1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: asr x0, x0, x1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f17(i32, i32) -> i32 {
|
||||
block0(v0: i32, v1: i32):
|
||||
@@ -281,8 +378,14 @@ block0(v0: i32, v1: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: asr w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: asr w0, w0, w1
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f18(i16, i16) -> i16 {
|
||||
block0(v0: i16, v1: i16):
|
||||
@@ -290,9 +393,16 @@ block0(v0: i16, v1: i16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: and w1, w1, #15
|
||||
; nextln: asr w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: sxth w0, w0
|
||||
; Inst 1: and w1, w1, #15
|
||||
; Inst 2: asr w0, w0, w1
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f19(i8, i8) -> i8 {
|
||||
block0(v0: i8, v1: i8):
|
||||
@@ -300,13 +410,16 @@ block0(v0: i8, v1: i8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: and w1, w1, #7
|
||||
; nextln: asr w0, w0, w1
|
||||
; nextln: ret
|
||||
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
;; immediate forms
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: sxtb w0, w0
|
||||
; Inst 1: and w1, w1, #7
|
||||
; Inst 2: asr w0, w0, w1
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f20(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -315,8 +428,14 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: ror x0, x0, #17
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ror x0, x0, #17
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f21(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -325,8 +444,14 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: ror x0, x0, #47
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ror x0, x0, #47
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f22(i32) -> i32 {
|
||||
block0(v0: i32):
|
||||
@@ -335,8 +460,14 @@ block0(v0: i32):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: ror w0, w0, #15
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ror w0, w0, #15
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f23(i16) -> i16 {
|
||||
block0(v0: i16):
|
||||
@@ -345,11 +476,17 @@ block0(v0: i16):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: uxth w0, w0
|
||||
; nextln: lsr w1, w0, #6
|
||||
; nextln: lsl w0, w0, #10
|
||||
; nextln: orr w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: uxth w0, w0
|
||||
; Inst 1: lsr w1, w0, #6
|
||||
; Inst 2: lsl w0, w0, #10
|
||||
; Inst 3: orr w0, w0, w1
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %f24(i8) -> i8 {
|
||||
block0(v0: i8):
|
||||
@@ -358,11 +495,17 @@ block0(v0: i8):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: uxtb w0, w0
|
||||
; nextln: lsr w1, w0, #5
|
||||
; nextln: lsl w0, w0, #3
|
||||
; nextln: orr w0, w0, w1
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 5)
|
||||
; Inst 0: uxtb w0, w0
|
||||
; Inst 1: lsr w1, w0, #5
|
||||
; Inst 2: lsl w0, w0, #3
|
||||
; Inst 3: orr w0, w0, w1
|
||||
; Inst 4: ret
|
||||
; }}
|
||||
|
||||
function %f25(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -371,8 +514,14 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: lsr x0, x0, #17
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: lsr x0, x0, #17
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f26(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -381,8 +530,14 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: asr x0, x0, #17
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: asr x0, x0, #17
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %f27(i64) -> i64 {
|
||||
block0(v0: i64):
|
||||
@@ -391,5 +546,12 @@ block0(v0: i64):
|
||||
return v2
|
||||
}
|
||||
|
||||
; check: lsl x0, x0, #17
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: lsl x0, x0, #17
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -10,9 +10,14 @@ block0(v0: i8x16, v1: i8x16):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: sxtl
|
||||
; check: smull v0.8h, v0.8b, v1.8b
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smull v0.8h, v0.8b, v1.8b
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn2(i8x16, i8x16) -> i16x8 {
|
||||
block0(v0: i8x16, v1: i8x16):
|
||||
@@ -22,9 +27,14 @@ block0(v0: i8x16, v1: i8x16):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: sxtl
|
||||
; check: smull2 v0.8h, v0.16b, v1.16b
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smull2 v0.8h, v0.16b, v1.16b
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn3(i16x8, i16x8) -> i32x4 {
|
||||
block0(v0: i16x8, v1: i16x8):
|
||||
@@ -34,9 +44,14 @@ block0(v0: i16x8, v1: i16x8):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: sxtl
|
||||
; check: smull v0.4s, v0.4h, v1.4h
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smull v0.4s, v0.4h, v1.4h
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn4(i16x8, i16x8) -> i32x4 {
|
||||
block0(v0: i16x8, v1: i16x8):
|
||||
@@ -46,9 +61,14 @@ block0(v0: i16x8, v1: i16x8):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: sxtl
|
||||
; check: smull2 v0.4s, v0.8h, v1.8h
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smull2 v0.4s, v0.8h, v1.8h
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn5(i32x4, i32x4) -> i64x2 {
|
||||
block0(v0: i32x4, v1: i32x4):
|
||||
@@ -58,9 +78,14 @@ block0(v0: i32x4, v1: i32x4):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: sxtl
|
||||
; check: smull v0.2d, v0.2s, v1.2s
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smull v0.2d, v0.2s, v1.2s
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn6(i32x4, i32x4) -> i64x2 {
|
||||
block0(v0: i32x4, v1: i32x4):
|
||||
@@ -70,9 +95,14 @@ block0(v0: i32x4, v1: i32x4):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: sxtl
|
||||
; check: smull2 v0.2d, v0.4s, v1.4s
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: smull2 v0.2d, v0.4s, v1.4s
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn7(i8x16, i8x16) -> i16x8 {
|
||||
block0(v0: i8x16, v1: i8x16):
|
||||
@@ -82,9 +112,14 @@ block0(v0: i8x16, v1: i8x16):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: uxtl
|
||||
; check: umull v0.8h, v0.8b, v1.8b
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umull v0.8h, v0.8b, v1.8b
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn8(i8x16, i8x16) -> i16x8 {
|
||||
block0(v0: i8x16, v1: i8x16):
|
||||
@@ -94,9 +129,14 @@ block0(v0: i8x16, v1: i8x16):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: uxtl
|
||||
; check: umull2 v0.8h, v0.16b, v1.16b
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umull2 v0.8h, v0.16b, v1.16b
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn9(i16x8, i16x8) -> i32x4 {
|
||||
block0(v0: i16x8, v1: i16x8):
|
||||
@@ -106,9 +146,14 @@ block0(v0: i16x8, v1: i16x8):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: uxtl
|
||||
; check: umull v0.4s, v0.4h, v1.4h
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umull v0.4s, v0.4h, v1.4h
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn10(i16x8, i16x8) -> i32x4 {
|
||||
block0(v0: i16x8, v1: i16x8):
|
||||
@@ -118,9 +163,14 @@ block0(v0: i16x8, v1: i16x8):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: uxtl
|
||||
; check: umull2 v0.4s, v0.8h, v1.8h
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umull2 v0.4s, v0.8h, v1.8h
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn11(i32x4, i32x4) -> i64x2 {
|
||||
block0(v0: i32x4, v1: i32x4):
|
||||
@@ -130,9 +180,14 @@ block0(v0: i32x4, v1: i32x4):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: uxtl
|
||||
; check: umull v0.2d, v0.2s, v1.2s
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umull v0.2d, v0.2s, v1.2s
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn12(i32x4, i32x4) -> i64x2 {
|
||||
block0(v0: i32x4, v1: i32x4):
|
||||
@@ -142,6 +197,12 @@ block0(v0: i32x4, v1: i32x4):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check-not: uxtl2
|
||||
; check: umull2 v0.2d, v0.4s, v1.4s
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: umull2 v0.2d, v0.4s, v1.4s
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -11,8 +11,14 @@ block0(v0: i8x16):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: saddlp v0.8h, v0.16b
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: saddlp v0.8h, v0.16b
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn2(i8x16) -> i16x8 {
|
||||
block0(v0: i8x16):
|
||||
@@ -22,8 +28,14 @@ block0(v0: i8x16):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: uaddlp v0.8h, v0.16b
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: uaddlp v0.8h, v0.16b
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn3(i16x8) -> i32x4 {
|
||||
block0(v0: i16x8):
|
||||
@@ -33,8 +45,14 @@ block0(v0: i16x8):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: saddlp v0.4s, v0.8h
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: saddlp v0.4s, v0.8h
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn4(i16x8) -> i32x4 {
|
||||
block0(v0: i16x8):
|
||||
@@ -44,8 +62,14 @@ block0(v0: i16x8):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: uaddlp v0.4s, v0.8h
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: uaddlp v0.4s, v0.8h
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
function %fn5(i8x16, i8x16) -> i16x8 {
|
||||
block0(v0: i8x16, v1: i8x16):
|
||||
@@ -55,10 +79,16 @@ block0(v0: i8x16, v1: i8x16):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: sxtl v0.8h, v0.8b
|
||||
; nextln: sxtl2 v1.8h, v1.16b
|
||||
; nextln: addp v0.8h, v0.8h, v1.8h
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: sxtl v0.8h, v0.8b
|
||||
; Inst 1: sxtl2 v1.8h, v1.16b
|
||||
; Inst 2: addp v0.8h, v0.8h, v1.8h
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %fn6(i8x16, i8x16) -> i16x8 {
|
||||
block0(v0: i8x16, v1: i8x16):
|
||||
@@ -68,10 +98,16 @@ block0(v0: i8x16, v1: i8x16):
|
||||
return v4
|
||||
}
|
||||
|
||||
; check: uxtl v0.8h, v0.8b
|
||||
; nextln: uxtl2 v1.8h, v1.16b
|
||||
; nextln: addp v0.8h, v0.8h, v1.8h
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: uxtl v0.8h, v0.8b
|
||||
; Inst 1: uxtl2 v1.8h, v1.16b
|
||||
; Inst 2: addp v0.8h, v0.8h, v1.8h
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %fn7(i8x16) -> i16x8 {
|
||||
block0(v0: i8x16):
|
||||
@@ -81,10 +117,16 @@ block0(v0: i8x16):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: uxtl v1.8h, v0.8b
|
||||
; nextln: sxtl2 v0.8h, v0.16b
|
||||
; nextln: addp v0.8h, v1.8h, v0.8h
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: uxtl v1.8h, v0.8b
|
||||
; Inst 1: sxtl2 v0.8h, v0.16b
|
||||
; Inst 2: addp v0.8h, v1.8h, v0.8h
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %fn8(i8x16) -> i16x8 {
|
||||
block0(v0: i8x16):
|
||||
@@ -94,7 +136,14 @@ block0(v0: i8x16):
|
||||
return v3
|
||||
}
|
||||
|
||||
; check: sxtl v1.8h, v0.8b
|
||||
; nextln: uxtl2 v0.8h, v0.16b
|
||||
; nextln: addp v0.8h, v1.8h, v0.8h
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: sxtl v1.8h, v0.8b
|
||||
; Inst 1: uxtl2 v0.8h, v0.16b
|
||||
; Inst 2: addp v0.8h, v1.8h, v0.8h
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -9,10 +9,16 @@ block0:
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: movz x0, #1
|
||||
; nextln: movk x0, #1, LSL #48
|
||||
; nextln: fmov d0, x0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 4)
|
||||
; Inst 0: movz x0, #1
|
||||
; Inst 1: movk x0, #1, LSL #48
|
||||
; Inst 2: fmov d0, x0
|
||||
; Inst 3: ret
|
||||
; }}
|
||||
|
||||
function %f2() -> i32x4 {
|
||||
block0:
|
||||
@@ -21,6 +27,13 @@ block0:
|
||||
return v1
|
||||
}
|
||||
|
||||
; check: movz x0, #42679
|
||||
; nextln: fmov s0, w0
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 3)
|
||||
; Inst 0: movz x0, #42679
|
||||
; Inst 1: fmov s0, w0
|
||||
; Inst 2: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -7,12 +7,26 @@ block0:
|
||||
return
|
||||
}
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 1)
|
||||
; Inst 0: ret
|
||||
; }}
|
||||
|
||||
function %stack_limit_leaf_zero(i64 stack_limit) {
|
||||
block0(v0: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 1)
|
||||
; Inst 0: ret
|
||||
; }}
|
||||
|
||||
function %stack_limit_gv_leaf_zero(i64 vmctx) {
|
||||
gv0 = vmctx
|
||||
@@ -23,8 +37,13 @@ block0(v0: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 1)
|
||||
; Inst 0: ret
|
||||
; }}
|
||||
|
||||
function %stack_limit_call_zero(i64 stack_limit) {
|
||||
fn0 = %foo()
|
||||
@@ -33,14 +52,20 @@ block0(v0: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: stp fp, lr, [sp, #-16]!
|
||||
; nextln: mov fp, sp
|
||||
; nextln: subs xzr, sp, x0
|
||||
; nextln: b.hs 8 ; udf
|
||||
; nextln: ldr x0
|
||||
; nextln: blr x0
|
||||
; nextln: ldp fp, lr, [sp], #16
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 8)
|
||||
; Inst 0: stp fp, lr, [sp, #-16]!
|
||||
; Inst 1: mov fp, sp
|
||||
; Inst 2: subs xzr, sp, x0, UXTX
|
||||
; Inst 3: b.hs 8 ; udf
|
||||
; Inst 4: ldr x0, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 111, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
|
||||
; Inst 5: blr x0
|
||||
; Inst 6: ldp fp, lr, [sp], #16
|
||||
; Inst 7: ret
|
||||
; }}
|
||||
|
||||
function %stack_limit_gv_call_zero(i64 vmctx) {
|
||||
gv0 = vmctx
|
||||
@@ -53,17 +78,22 @@ block0(v0: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: stp fp, lr, [sp, #-16]!
|
||||
; nextln: mov fp, sp
|
||||
; nextln: ldur x16, [x0]
|
||||
; nextln: ldur x16, [x16, #4]
|
||||
; nextln: subs xzr, sp, x16
|
||||
; nextln: b.hs 8 ; udf
|
||||
; nextln: ldr x0
|
||||
; nextln: blr x0
|
||||
; nextln: ldp fp, lr, [sp], #16
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 10)
|
||||
; Inst 0: stp fp, lr, [sp, #-16]!
|
||||
; Inst 1: mov fp, sp
|
||||
; Inst 2: ldur x16, [x0]
|
||||
; Inst 3: ldur x16, [x16, #4]
|
||||
; Inst 4: subs xzr, sp, x16, UXTX
|
||||
; Inst 5: b.hs 8 ; udf
|
||||
; Inst 6: ldr x0, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 111, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
|
||||
; Inst 7: blr x0
|
||||
; Inst 8: ldp fp, lr, [sp], #16
|
||||
; Inst 9: ret
|
||||
; }}
|
||||
|
||||
function %stack_limit(i64 stack_limit) {
|
||||
ss0 = explicit_slot 168
|
||||
@@ -71,15 +101,21 @@ block0(v0: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: stp fp, lr, [sp, #-16]!
|
||||
; nextln: mov fp, sp
|
||||
; nextln: add x16, x0, #176
|
||||
; nextln: subs xzr, sp, x16
|
||||
; nextln: b.hs 8 ; udf
|
||||
; nextln: sub sp, sp, #176
|
||||
; nextln: add sp, sp, #176
|
||||
; nextln: ldp fp, lr, [sp], #16
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 9)
|
||||
; Inst 0: stp fp, lr, [sp, #-16]!
|
||||
; Inst 1: mov fp, sp
|
||||
; Inst 2: add x16, x0, #176
|
||||
; Inst 3: subs xzr, sp, x16, UXTX
|
||||
; Inst 4: b.hs 8 ; udf
|
||||
; Inst 5: sub sp, sp, #176
|
||||
; Inst 6: add sp, sp, #176
|
||||
; Inst 7: ldp fp, lr, [sp], #16
|
||||
; Inst 8: ret
|
||||
; }}
|
||||
|
||||
function %huge_stack_limit(i64 stack_limit) {
|
||||
ss0 = explicit_slot 400000
|
||||
@@ -87,23 +123,29 @@ block0(v0: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: stp fp, lr, [sp, #-16]!
|
||||
; nextln: mov fp, sp
|
||||
; nextln: subs xzr, sp, x0
|
||||
; nextln: b.hs 8 ; udf
|
||||
; nextln: movz w17, #6784
|
||||
; nextln: movk w17, #6, LSL #16
|
||||
; nextln: add x16, x0, x17, UXTX
|
||||
; nextln: subs xzr, sp, x16
|
||||
; nextln: b.hs 8 ; udf
|
||||
; nextln: movz w16, #6784
|
||||
; nextln: movk w16, #6, LSL #16
|
||||
; nextln: sub sp, sp, x16, UXTX
|
||||
; nextln: movz w16, #6784
|
||||
; nextln: movk w16, #6, LSL #16
|
||||
; nextln: add sp, sp, x16, UXTX
|
||||
; nextln: ldp fp, lr, [sp], #16
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 17)
|
||||
; Inst 0: stp fp, lr, [sp, #-16]!
|
||||
; Inst 1: mov fp, sp
|
||||
; Inst 2: subs xzr, sp, x0, UXTX
|
||||
; Inst 3: b.hs 8 ; udf
|
||||
; Inst 4: movz w17, #6784
|
||||
; Inst 5: movk w17, #6, LSL #16
|
||||
; Inst 6: add x16, x0, x17, UXTX
|
||||
; Inst 7: subs xzr, sp, x16, UXTX
|
||||
; Inst 8: b.hs 8 ; udf
|
||||
; Inst 9: movz w16, #6784
|
||||
; Inst 10: movk w16, #6, LSL #16
|
||||
; Inst 11: sub sp, sp, x16, UXTX
|
||||
; Inst 12: movz w16, #6784
|
||||
; Inst 13: movk w16, #6, LSL #16
|
||||
; Inst 14: add sp, sp, x16, UXTX
|
||||
; Inst 15: ldp fp, lr, [sp], #16
|
||||
; Inst 16: ret
|
||||
; }}
|
||||
|
||||
function %limit_preamble(i64 vmctx) {
|
||||
gv0 = vmctx
|
||||
@@ -115,17 +157,23 @@ block0(v0: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: stp fp, lr, [sp, #-16]!
|
||||
; nextln: mov fp, sp
|
||||
; nextln: ldur x16, [x0]
|
||||
; nextln: ldur x16, [x16, #4]
|
||||
; nextln: add x16, x16, #32
|
||||
; nextln: subs xzr, sp, x16
|
||||
; nextln: b.hs 8 ; udf
|
||||
; nextln: sub sp, sp, #32
|
||||
; nextln: add sp, sp, #32
|
||||
; nextln: ldp fp, lr, [sp], #16
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 11)
|
||||
; Inst 0: stp fp, lr, [sp, #-16]!
|
||||
; Inst 1: mov fp, sp
|
||||
; Inst 2: ldur x16, [x0]
|
||||
; Inst 3: ldur x16, [x16, #4]
|
||||
; Inst 4: add x16, x16, #32
|
||||
; Inst 5: subs xzr, sp, x16, UXTX
|
||||
; Inst 6: b.hs 8 ; udf
|
||||
; Inst 7: sub sp, sp, #32
|
||||
; Inst 8: add sp, sp, #32
|
||||
; Inst 9: ldp fp, lr, [sp], #16
|
||||
; Inst 10: ret
|
||||
; }}
|
||||
|
||||
function %limit_preamble_huge(i64 vmctx) {
|
||||
gv0 = vmctx
|
||||
@@ -137,25 +185,31 @@ block0(v0: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: stp fp, lr, [sp, #-16]!
|
||||
; nextln: mov fp, sp
|
||||
; nextln: ldur x16, [x0]
|
||||
; nextln: ldur x16, [x16, #4]
|
||||
; nextln: subs xzr, sp, x16
|
||||
; nextln: b.hs 8 ; udf
|
||||
; nextln: movz w17, #6784
|
||||
; nextln: movk w17, #6, LSL #16
|
||||
; nextln: add x16, x16, x17, UXTX
|
||||
; nextln: subs xzr, sp, x16
|
||||
; nextln: b.hs 8 ; udf
|
||||
; nextln: movz w16, #6784
|
||||
; nextln: movk w16, #6, LSL #16
|
||||
; nextln: sub sp, sp, x16, UXTX
|
||||
; nextln: movz w16, #6784
|
||||
; nextln: movk w16, #6, LSL #16
|
||||
; nextln: add sp, sp, x16, UXTX
|
||||
; nextln: ldp fp, lr, [sp], #16
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 19)
|
||||
; Inst 0: stp fp, lr, [sp, #-16]!
|
||||
; Inst 1: mov fp, sp
|
||||
; Inst 2: ldur x16, [x0]
|
||||
; Inst 3: ldur x16, [x16, #4]
|
||||
; Inst 4: subs xzr, sp, x16, UXTX
|
||||
; Inst 5: b.hs 8 ; udf
|
||||
; Inst 6: movz w17, #6784
|
||||
; Inst 7: movk w17, #6, LSL #16
|
||||
; Inst 8: add x16, x16, x17, UXTX
|
||||
; Inst 9: subs xzr, sp, x16, UXTX
|
||||
; Inst 10: b.hs 8 ; udf
|
||||
; Inst 11: movz w16, #6784
|
||||
; Inst 12: movk w16, #6, LSL #16
|
||||
; Inst 13: sub sp, sp, x16, UXTX
|
||||
; Inst 14: movz w16, #6784
|
||||
; Inst 15: movk w16, #6, LSL #16
|
||||
; Inst 16: add sp, sp, x16, UXTX
|
||||
; Inst 17: ldp fp, lr, [sp], #16
|
||||
; Inst 18: ret
|
||||
; }}
|
||||
|
||||
function %limit_preamble_huge_offset(i64 vmctx) {
|
||||
gv0 = vmctx
|
||||
@@ -166,13 +220,20 @@ block0(v0: i64):
|
||||
return
|
||||
}
|
||||
|
||||
; check: stp fp, lr, [sp, #-16]!
|
||||
; nextln: mov fp, sp
|
||||
; nextln: movz w16, #6784 ; movk w16, #6, LSL #16 ; add x16, x0, x16, UXTX ; ldr x16, [x16]
|
||||
; nextln: add x16, x16, #32
|
||||
; nextln: subs xzr, sp, x16
|
||||
; nextln: b.hs 8 ; udf
|
||||
; nextln: sub sp, sp, #32
|
||||
; nextln: add sp, sp, #32
|
||||
; nextln: ldp fp, lr, [sp], #16
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 10)
|
||||
; Inst 0: stp fp, lr, [sp, #-16]!
|
||||
; Inst 1: mov fp, sp
|
||||
; Inst 2: movz w16, #6784 ; movk w16, #6, LSL #16 ; add x16, x0, x16, UXTX ; ldr x16, [x16]
|
||||
; Inst 3: add x16, x16, #32
|
||||
; Inst 4: subs xzr, sp, x16, UXTX
|
||||
; Inst 5: b.hs 8 ; udf
|
||||
; Inst 6: sub sp, sp, #32
|
||||
; Inst 7: add sp, sp, #32
|
||||
; Inst 8: ldp fp, lr, [sp], #16
|
||||
; Inst 9: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set unwind_info=false
|
||||
target aarch64
|
||||
|
||||
@@ -10,5 +10,12 @@ block0:
|
||||
return v0
|
||||
}
|
||||
|
||||
; check: ldr x0, 8 ; b 12 ; data
|
||||
; nextln: ret
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 2)
|
||||
; Inst 0: ldr x0, 8 ; b 12 ; data TestCase { length: 9, ascii: [109, 121, 95, 103, 108, 111, 98, 97, 108, 0, 0, 0, 0, 0, 0, 0] } + 0
|
||||
; Inst 1: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
test compile
|
||||
test compile precise-output
|
||||
set tls_model=elf_gd
|
||||
target aarch64
|
||||
|
||||
@@ -9,21 +9,29 @@ block0(v0: i32):
|
||||
v1 = global_value.i64 gv0
|
||||
return v0, v1
|
||||
}
|
||||
; check: stp fp, lr, [sp, #-16]!
|
||||
; nextln: mov fp, sp
|
||||
; nextln: str x19, [sp, #-16]!
|
||||
; nextln: stp d14, d15, [sp, #-16]!
|
||||
; nextln: stp d12, d13, [sp, #-16]!
|
||||
; nextln: stp d10, d11, [sp, #-16]!
|
||||
; nextln: stp d8, d9, [sp, #-16]!
|
||||
; nextln: mov x19, x0
|
||||
; nextln: elf_tls_get_addr u1:0
|
||||
; nextln: mov x1, x0
|
||||
; nextln: mov x0, x19
|
||||
; nextln: ldp d8, d9, [sp], #16
|
||||
; nextln: ldp d10, d11, [sp], #16
|
||||
; nextln: ldp d12, d13, [sp], #16
|
||||
; nextln: ldp d14, d15, [sp], #16
|
||||
; nextln: ldr x19, [sp], #16
|
||||
; nextln: ldp fp, lr, [sp], #16
|
||||
; nextln: ret
|
||||
|
||||
; VCode_ShowWithRRU {{
|
||||
; Entry block: 0
|
||||
; Block 0:
|
||||
; (original IR block: block0)
|
||||
; (instruction range: 0 .. 18)
|
||||
; Inst 0: stp fp, lr, [sp, #-16]!
|
||||
; Inst 1: mov fp, sp
|
||||
; Inst 2: str x19, [sp, #-16]!
|
||||
; Inst 3: stp d14, d15, [sp, #-16]!
|
||||
; Inst 4: stp d12, d13, [sp, #-16]!
|
||||
; Inst 5: stp d10, d11, [sp, #-16]!
|
||||
; Inst 6: stp d8, d9, [sp, #-16]!
|
||||
; Inst 7: mov x19, x0
|
||||
; Inst 8: elf_tls_get_addr u1:0
|
||||
; Inst 9: mov x1, x0
|
||||
; Inst 10: mov x0, x19
|
||||
; Inst 11: ldp d8, d9, [sp], #16
|
||||
; Inst 12: ldp d10, d11, [sp], #16
|
||||
; Inst 13: ldp d12, d13, [sp], #16
|
||||
; Inst 14: ldp d14, d15, [sp], #16
|
||||
; Inst 15: ldr x19, [sp], #16
|
||||
; Inst 16: ldp fp, lr, [sp], #16
|
||||
; Inst 17: ret
|
||||
; }}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64

@@ -7,7 +7,13 @@ block0:
trap user0
}

; check: udf
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: udf
; }}

function %g(i64) {
block0(v0: i64):
@@ -17,8 +23,15 @@ block0(v0: i64):
return
}

; check: subs xzr, x0, #42
; nextln: b.ne 8 ; udf
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs xzr, x0, #42
; Inst 1: b.ne 8 ; udf
; Inst 2: ret
; }}

function %h() {
block0:
@@ -26,4 +39,12 @@ block0:
return
}

; check: brk #0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: brk #0
; Inst 1: ret
; }}

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64

@@ -8,8 +8,14 @@ block0(v0: i8):
return v1
}

; check: uxtb w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxtb w0, w0
; Inst 1: ret
; }}

function %f_u_8_32(i8) -> i32 {
block0(v0: i8):
@@ -17,8 +23,14 @@ block0(v0: i8):
return v1
}

; check: uxtb w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxtb w0, w0
; Inst 1: ret
; }}

function %f_u_8_16(i8) -> i16 {
block0(v0: i8):
@@ -26,8 +38,14 @@ block0(v0: i8):
return v1
}

; check: uxtb w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxtb w0, w0
; Inst 1: ret
; }}

function %f_s_8_64(i8) -> i64 {
block0(v0: i8):
@@ -35,8 +53,14 @@ block0(v0: i8):
return v1
}

; check: sxtb x0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtb x0, w0
; Inst 1: ret
; }}

function %f_s_8_32(i8) -> i32 {
block0(v0: i8):
@@ -44,8 +68,14 @@ block0(v0: i8):
return v1
}

; check: sxtb w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtb w0, w0
; Inst 1: ret
; }}

function %f_s_8_16(i8) -> i16 {
block0(v0: i8):
@@ -53,8 +83,14 @@ block0(v0: i8):
return v1
}

; check: sxtb w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtb w0, w0
; Inst 1: ret
; }}

function %f_u_16_64(i16) -> i64 {
block0(v0: i16):
@@ -62,8 +98,14 @@ block0(v0: i16):
return v1
}

; check: uxth w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxth w0, w0
; Inst 1: ret
; }}

function %f_u_16_32(i16) -> i32 {
block0(v0: i16):
@@ -71,8 +113,14 @@ block0(v0: i16):
return v1
}

; check: uxth w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxth w0, w0
; Inst 1: ret
; }}

function %f_s_16_64(i16) -> i64 {
block0(v0: i16):
@@ -80,8 +128,14 @@ block0(v0: i16):
return v1
}

; check: sxth x0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxth x0, w0
; Inst 1: ret
; }}

function %f_s_16_32(i16) -> i32 {
block0(v0: i16):
@@ -89,8 +143,14 @@ block0(v0: i16):
return v1
}

; check: sxth w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxth w0, w0
; Inst 1: ret
; }}

function %f_u_32_64(i32) -> i64 {
block0(v0: i32):
@@ -98,8 +158,14 @@ block0(v0: i32):
return v1
}

; check: mov w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: mov w0, w0
; Inst 1: ret
; }}

function %f_s_32_64(i32) -> i64 {
block0(v0: i32):
@@ -107,5 +173,12 @@ block0(v0: i32):
return v1
}

; check: sxtw x0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtw x0, w0
; Inst 1: ret
; }}