Update lots of isa/*/*.clif tests to precise-output (#3677)

* Update lots of `isa/*/*.clif` tests to `precise-output`

This commit goes through the `aarch64` and `x64` subdirectories and
subjectively changes tests from `test compile` to add `precise-output`.
This then auto-updates all the test expectations so they can be
updated automatically instead of manually in the future. Not all tests
were migrated, largely subject to the whims of myself, mainly looking to
see if the test was looking for specific instructions or just checking
the whole assembly output.

* Filter out `;;` comments from test expectations

Looks like the Cranelift parser picks up all comments, not just those
trailing the function, so use a convention where `;;` is used for
human-readable comments in test cases and `;`-prefixed comments are the
test expectation.
This commit is contained in:
Alex Crichton
2022-01-10 13:38:23 -06:00
committed by GitHub
parent a8ea0ec097
commit 1ef0abb12c
58 changed files with 6883 additions and 3386 deletions

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -9,8 +9,14 @@ block0(v0: i64, v1: i32):
return v3
}
; check: ldr w0, [x0, w1, UXTW]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldr w0, [x0, w1, UXTW]
; Inst 1: ret
; }}
function %f2(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
@@ -19,8 +25,14 @@ block0(v0: i64, v1: i32):
return v3
}
; check: ldr w0, [x0, w1, UXTW]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldr w0, [x0, w1, UXTW]
; Inst 1: ret
; }}
function %f3(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
@@ -29,8 +41,14 @@ block0(v0: i64, v1: i32):
return v3
}
; check: ldr w0, [x0, w1, SXTW]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldr w0, [x0, w1, SXTW]
; Inst 1: ret
; }}
function %f4(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
@@ -39,8 +57,14 @@ block0(v0: i64, v1: i32):
return v3
}
; check: ldr w0, [x0, w1, SXTW]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldr w0, [x0, w1, SXTW]
; Inst 1: ret
; }}
function %f5(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
@@ -50,8 +74,14 @@ block0(v0: i64, v1: i32):
return v4
}
; check: ldr w0, [x0, w1, SXTW]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldr w0, [x0, w1, SXTW]
; Inst 1: ret
; }}
function %f6(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
@@ -61,8 +91,14 @@ block0(v0: i64, v1: i32):
return v4
}
; check: ldr w0, [x0, w1, SXTW]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldr w0, [x0, w1, SXTW]
; Inst 1: ret
; }}
function %f7(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -73,9 +109,15 @@ block0(v0: i32, v1: i32):
return v5
}
; check: mov w0, w0
; nextln: ldr w0, [x0, w1, UXTW]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: mov w0, w0
; Inst 1: ldr w0, [x0, w1, UXTW]
; Inst 2: ret
; }}
function %f8(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
@@ -88,14 +130,17 @@ block0(v0: i64, v1: i32):
return v7
}
; v6+4 = 2*v5 = 2*v4 + 2*v0 + 4 = 2*v2 + 2*v3 + 2*v0 + 4
; = 2*sextend($x1) + 2*$x0 + 68
; check: add x2, x0, #68
; nextln: add x0, x2, x0
; nextln: add x0, x0, x1, SXTW
; nextln: ldr w0, [x0, w1, SXTW]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: add x2, x0, #68
; Inst 1: add x0, x2, x0
; Inst 2: add x0, x0, x1, SXTW
; Inst 3: ldr w0, [x0, w1, SXTW]
; Inst 4: ret
; }}
function %f9(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -107,12 +152,16 @@ block0(v0: i64, v1: i64, v2: i64):
return v7
}
; v6 = $x0 + $x1 + $x2 + 48
; check: add x0, x0, x2
; nextln: add x0, x0, x1
; nextln: ldur w0, [x0, #48]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: add x0, x0, x2
; Inst 1: add x0, x0, x1
; Inst 2: ldur w0, [x0, #48]
; Inst 3: ret
; }}
function %f10(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -124,13 +173,17 @@ block0(v0: i64, v1: i64, v2: i64):
return v7
}
; v6 = $x0 + $x1 + $x2 + 4100
; check: movz x3, #4100
; nextln: add x1, x3, x1
; nextln: add x1, x1, x2
; nextln: ldr w0, [x1, x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: movz x3, #4100
; Inst 1: add x1, x3, x1
; Inst 2: add x1, x1, x2
; Inst 3: ldr w0, [x1, x0]
; Inst 4: ret
; }}
function %f10() -> i32 {
block0:
@@ -139,23 +192,33 @@ block0:
return v2
}
; v6 = $x0 + $x1 + $x2 + 48
; check: movz x0, #1234
; nextln: ldr w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #1234
; Inst 1: ldr w0, [x0]
; Inst 2: ret
; }}
function %f11(i64) -> i32 {
block0(v0: i64):
v1 = iconst.i64 8388608 ; Imm12: 0x800 << 12
v1 = iconst.i64 8388608 ;; Imm12: 0x800 << 12
v2 = iadd.i64 v0, v1
v3 = load.i32 v2
return v3
}
; check: add x0, x0, #8388608
; nextln: ldr w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: add x0, x0, #8388608
; Inst 1: ldr w0, [x0]
; Inst 2: ret
; }}
function %f12(i64) -> i32 {
block0(v0: i64):
@@ -165,9 +228,15 @@ block0(v0: i64):
return v3
}
; check: sub x0, x0, #4
; nextln: ldr w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sub x0, x0, #4
; Inst 1: ldr w0, [x0]
; Inst 2: ret
; }}
function %f13(i64) -> i32 {
block0(v0: i64):
@@ -177,11 +246,17 @@ block0(v0: i64):
return v3
}
; check: movz w1, #51712
; nextln: movk w1, #15258, LSL #16
; nextln: add x0, x1, x0
; nextln: ldr w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: movz w1, #51712
; Inst 1: movk w1, #15258, LSL #16
; Inst 2: add x0, x1, x0
; Inst 3: ldr w0, [x0]
; Inst 4: ret
; }}
function %f14(i32) -> i32 {
block0(v0: i32):
@@ -190,9 +265,15 @@ block0(v0: i32):
return v2
}
; check: sxtw x0, w0
; nextln: ldr w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxtw x0, w0
; Inst 1: ldr w0, [x0]
; Inst 2: ret
; }}
function %f15(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -203,9 +284,15 @@ block0(v0: i32, v1: i32):
return v5
}
; check: sxtw x0, w0
; nextln: ldr w0, [x0, w1, SXTW]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxtw x0, w0
; Inst 1: ldr w0, [x0, w1, SXTW]
; Inst 2: ret
; }}
function %f16(i64) -> i32 {
block0(v0: i64):
@@ -215,8 +302,14 @@ block0(v0: i64):
return v3
}
; check: ldr w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldr w0, [x0]
; Inst 1: ret
; }}
function %f17(i64) -> i32 {
block0(v0: i64):
@@ -226,8 +319,14 @@ block0(v0: i64):
return v3
}
; check: ldur w0, [x0, #4]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldur w0, [x0, #4]
; Inst 1: ret
; }}
function %f18(i64, i32) -> i16x8 {
block0(v0: i64, v1: i32):
@@ -236,9 +335,15 @@ block0(v0: i64, v1: i32):
return v3
}
; check: ldr d0, [x0, w1, UXTW]
; nextln: sxtl v0.8h, v0.8b
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: ldr d0, [x0, w1, UXTW]
; Inst 1: sxtl v0.8h, v0.8b
; Inst 2: ret
; }}
function %f19(i64, i64) -> i32x4 {
block0(v0: i64, v1: i64):
@@ -246,10 +351,16 @@ block0(v0: i64, v1: i64):
return v2
}
; check: add x0, x0, x1
; nextln: ldr d0, [x0, #8]
; nextln: uxtl v0.4s, v0.4h
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: add x0, x0, x1
; Inst 1: ldr d0, [x0, #8]
; Inst 2: uxtl v0.4s, v0.4h
; Inst 3: ret
; }}
function %f20(i64, i32) -> i64x2 {
block0(v0: i64, v1: i32):
@@ -258,9 +369,15 @@ block0(v0: i64, v1: i32):
return v3
}
; check: ldr d0, [x0, w1, SXTW]
; nextln: uxtl v0.2d, v0.2s
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: ldr d0, [x0, w1, SXTW]
; Inst 1: uxtl v0.2d, v0.2s
; Inst 2: ret
; }}
function %f18(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -270,9 +387,15 @@ block0(v0: i64, v1: i64, v2: i64):
return v5
}
; check: movn w0, #4097
; nextln: ldrsh x0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movn w0, #4097
; Inst 1: ldrsh x0, [x0]
; Inst 2: ret
; }}
function %f19(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -282,9 +405,15 @@ block0(v0: i64, v1: i64, v2: i64):
return v5
}
; check: movz x0, #4098
; nextln: ldrsh x0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #4098
; Inst 1: ldrsh x0, [x0]
; Inst 2: ret
; }}
function %f20(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -294,10 +423,16 @@ block0(v0: i64, v1: i64, v2: i64):
return v5
}
; check: movn w0, #4097
; nextln: sxtw x0, w0
; nextln: ldrsh x0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: movn w0, #4097
; Inst 1: sxtw x0, w0
; Inst 2: ldrsh x0, [x0]
; Inst 3: ret
; }}
function %f21(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
@@ -307,11 +442,16 @@ block0(v0: i64, v1: i64, v2: i64):
return v5
}
; check: movz x0, #4098
; nextln: sxtw x0, w0
; nextln: ldrsh x0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: movz x0, #4098
; Inst 1: sxtw x0, w0
; Inst 2: ldrsh x0, [x0]
; Inst 3: ret
; }}
function %i128(i64) -> i128 {
block0(v0: i64):
@@ -320,12 +460,17 @@ block0(v0: i64):
return v1
}
; check: mov x1, x0
; nextln: ldp x2, x1, [x1]
; nextln: stp x2, x1, [x0]
; nextln: mov x0, x2
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov x1, x0
; Inst 1: ldp x2, x1, [x1]
; Inst 2: stp x2, x1, [x0]
; Inst 3: mov x0, x2
; Inst 4: ret
; }}
function %i128_imm_offset(i64) -> i128 {
block0(v0: i64):
@@ -334,11 +479,17 @@ block0(v0: i64):
return v1
}
; check: mov x1, x0
; nextln: ldp x2, x1, [x1, #16]
; nextln: stp x2, x1, [x0, #16]
; nextln: mov x0, x2
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov x1, x0
; Inst 1: ldp x2, x1, [x1, #16]
; Inst 2: stp x2, x1, [x0, #16]
; Inst 3: mov x0, x2
; Inst 4: ret
; }}
function %i128_imm_offset_large(i64) -> i128 {
block0(v0: i64):
@@ -347,11 +498,17 @@ block0(v0: i64):
return v1
}
; check: mov x1, x0
; nextln: ldp x2, x1, [x1, #504]
; nextln: stp x2, x1, [x0, #504]
; nextln: mov x0, x2
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov x1, x0
; Inst 1: ldp x2, x1, [x1, #504]
; Inst 2: stp x2, x1, [x0, #504]
; Inst 3: mov x0, x2
; Inst 4: ret
; }}
function %i128_imm_offset_negative_large(i64) -> i128 {
block0(v0: i64):
@@ -360,12 +517,17 @@ block0(v0: i64):
return v1
}
; check: mov x1, x0
; nextln: ldp x2, x1, [x1, #-512]
; nextln: stp x2, x1, [x0, #-512]
; nextln: mov x0, x2
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov x1, x0
; Inst 1: ldp x2, x1, [x1, #-512]
; Inst 2: stp x2, x1, [x0, #-512]
; Inst 3: mov x0, x2
; Inst 4: ret
; }}
function %i128_add_offset(i64) -> i128 {
block0(v0: i64):
@@ -375,12 +537,17 @@ block0(v0: i64):
return v2
}
; check: mov x1, x0
; nextln: ldp x2, x1, [x1, #32]
; nextln: stp x2, x1, [x0, #32]
; nextln: mov x0, x2
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov x1, x0
; Inst 1: ldp x2, x1, [x1, #32]
; Inst 2: stp x2, x1, [x0, #32]
; Inst 3: mov x0, x2
; Inst 4: ret
; }}
function %i128_32bit_sextend_simple(i32) -> i128 {
block0(v0: i32):
@@ -390,14 +557,18 @@ block0(v0: i32):
return v2
}
; TODO: We should be able to deduplicate the sxtw instruction
; check: sxtw x1, w0
; nextln: ldp x2, x1, [x1]
; nextln: sxtw x0, w0
; nextln: stp x2, x1, [x0]
; nextln: mov x0, x2
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: sxtw x1, w0
; Inst 1: ldp x2, x1, [x1]
; Inst 2: sxtw x0, w0
; Inst 3: stp x2, x1, [x0]
; Inst 4: mov x0, x2
; Inst 5: ret
; }}
function %i128_32bit_sextend(i64, i32) -> i128 {
block0(v0: i64, v1: i32):
@@ -409,11 +580,18 @@ block0(v0: i64, v1: i32):
return v5
}
; check: mov x2, x0
; nextln: add x2, x2, x1, SXTW
; nextln: ldp x3, x2, [x2, #24]
; nextln: add x0, x0, x1, SXTW
; nextln: stp x3, x2, [x0, #24]
; nextln: mov x0, x3
; nextln: mov x1, x2
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: mov x2, x0
; Inst 1: add x2, x2, x1, SXTW
; Inst 2: ldp x3, x2, [x2, #24]
; Inst 3: add x0, x0, x1, SXTW
; Inst 4: stp x3, x2, [x0, #24]
; Inst 5: mov x0, x3
; Inst 6: mov x1, x2
; Inst 7: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -8,9 +8,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: add x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x0, x1
; Inst 1: ret
; }}
function %f2(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -18,8 +23,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: sub x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sub x0, x0, x1
; Inst 1: ret
; }}
function %f3(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -27,8 +38,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: madd x0, x0, x1, xzr
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: madd x0, x0, x1, xzr
; Inst 1: ret
; }}
function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -36,8 +53,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: umulh x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umulh x0, x0, x1
; Inst 1: ret
; }}
function %f5(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -45,8 +68,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: smulh x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smulh x0, x0, x1
; Inst 1: ret
; }}
function %f6(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -54,12 +83,18 @@ block0(v0: i64, v1: i64):
return v2
}
; check: cbnz x1, 8 ; udf
; nextln: adds xzr, x1, #1
; nextln: ccmp x0, #1, #nzcv, eq
; nextln: b.vc 8 ; udf
; nextln: sdiv x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: cbnz x1, 8 ; udf
; Inst 1: adds xzr, x1, #1
; Inst 2: ccmp x0, #1, #nzcv, eq
; Inst 3: b.vc 8 ; udf
; Inst 4: sdiv x0, x0, x1
; Inst 5: ret
; }}
function %f7(i64) -> i64 {
block0(v0: i64):
@@ -68,9 +103,15 @@ block0(v0: i64):
return v2
}
; check: orr x1, xzr, #2
; nextln: sdiv x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: orr x1, xzr, #2
; Inst 1: sdiv x0, x0, x1
; Inst 2: ret
; }}
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -78,9 +119,15 @@ block0(v0: i64, v1: i64):
return v2
}
; check: cbnz x1, 8 ; udf
; nextln: udiv x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: cbnz x1, 8 ; udf
; Inst 1: udiv x0, x0, x1
; Inst 2: ret
; }}
function %f9(i64) -> i64 {
block0(v0: i64):
@@ -89,9 +136,15 @@ block0(v0: i64):
return v2
}
; check: orr x1, xzr, #2
; nextln: udiv x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: orr x1, xzr, #2
; Inst 1: udiv x0, x0, x1
; Inst 2: ret
; }}
function %f10(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -99,10 +152,16 @@ block0(v0: i64, v1: i64):
return v2
}
; check: cbnz x1, 8 ; udf
; nextln: sdiv x2, x0, x1
; nextln: msub x0, x2, x1, x0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: cbnz x1, 8 ; udf
; Inst 1: sdiv x2, x0, x1
; Inst 2: msub x0, x2, x1, x0
; Inst 3: ret
; }}
function %f11(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -110,11 +169,16 @@ block0(v0: i64, v1: i64):
return v2
}
; check: cbnz x1, 8 ; udf
; nextln: udiv x2, x0, x1
; nextln: msub x0, x2, x1, x0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: cbnz x1, 8 ; udf
; Inst 1: udiv x2, x0, x1
; Inst 2: msub x0, x2, x1, x0
; Inst 3: ret
; }}
function %f12(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -122,14 +186,20 @@ block0(v0: i32, v1: i32):
return v2
}
; check: sxtw x0, w0
; nextln: sxtw x1, w1
; nextln: cbnz x1, 8 ; udf
; nextln: adds wzr, w1, #1
; nextln: ccmp w0, #1, #nzcv, eq
; nextln: b.vc 8 ; udf
; nextln: sdiv x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: sxtw x0, w0
; Inst 1: sxtw x1, w1
; Inst 2: cbnz x1, 8 ; udf
; Inst 3: adds wzr, w1, #1
; Inst 4: ccmp w0, #1, #nzcv, eq
; Inst 5: b.vc 8 ; udf
; Inst 6: sdiv x0, x0, x1
; Inst 7: ret
; }}
function %f13(i32) -> i32 {
block0(v0: i32):
@@ -138,10 +208,16 @@ block0(v0: i32):
return v2
}
; check: sxtw x0, w0
; nextln: orr x1, xzr, #2
; nextln: sdiv x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: sxtw x0, w0
; Inst 1: orr x1, xzr, #2
; Inst 2: sdiv x0, x0, x1
; Inst 3: ret
; }}
function %f14(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -149,12 +225,17 @@ block0(v0: i32, v1: i32):
return v2
}
; check: mov w0, w0
; nextln: mov w1, w1
; nextln: cbnz x1, 8 ; udf
; nextln: udiv x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: mov w0, w0
; Inst 1: mov w1, w1
; Inst 2: cbnz x1, 8 ; udf
; Inst 3: udiv x0, x0, x1
; Inst 4: ret
; }}
function %f15(i32) -> i32 {
block0(v0: i32):
@@ -163,10 +244,16 @@ block0(v0: i32):
return v2
}
; check: mov w0, w0
; nextln: orr x1, xzr, #2
; nextln: udiv x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: mov w0, w0
; Inst 1: orr x1, xzr, #2
; Inst 2: udiv x0, x0, x1
; Inst 3: ret
; }}
function %f16(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -174,12 +261,18 @@ block0(v0: i32, v1: i32):
return v2
}
; check: sxtw x0, w0
; nextln: sxtw x1, w1
; nextln: cbnz x1, 8 ; udf
; nextln: sdiv x2, x0, x1
; nextln: msub x0, x2, x1, x0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: sxtw x0, w0
; Inst 1: sxtw x1, w1
; Inst 2: cbnz x1, 8 ; udf
; Inst 3: sdiv x2, x0, x1
; Inst 4: msub x0, x2, x1, x0
; Inst 5: ret
; }}
function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -187,12 +280,18 @@ block0(v0: i32, v1: i32):
return v2
}
; check: mov w0, w0
; nextln: mov w1, w1
; nextln: cbnz x1, 8 ; udf
; nextln: udiv x2, x0, x1
; nextln: msub x0, x2, x1, x0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: mov w0, w0
; Inst 1: mov w1, w1
; Inst 2: cbnz x1, 8 ; udf
; Inst 3: udiv x2, x0, x1
; Inst 4: msub x0, x2, x1, x0
; Inst 5: ret
; }}
function %f18(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -200,8 +299,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: and x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: and x0, x0, x1
; Inst 1: ret
; }}
function %f19(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -209,8 +314,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: orr x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: orr x0, x0, x1
; Inst 1: ret
; }}
function %f20(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -218,8 +329,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: eor x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: eor x0, x0, x1
; Inst 1: ret
; }}
function %f21(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -227,8 +344,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: bic x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: bic x0, x0, x1
; Inst 1: ret
; }}
function %f22(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -236,8 +359,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: orn x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: orn x0, x0, x1
; Inst 1: ret
; }}
function %f23(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -245,8 +374,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: eon x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: eon x0, x0, x1
; Inst 1: ret
; }}
function %f24(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -254,8 +389,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: orn x0, xzr, x0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: orn x0, xzr, x0
; Inst 1: ret
; }}
function %f25(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -265,8 +406,14 @@ block0(v0: i32, v1: i32):
return v4
}
; check: sub w0, w1, w0, LSL 21
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sub w0, w1, w0, LSL 21
; Inst 1: ret
; }}
function %f26(i32) -> i32 {
block0(v0: i32):
@@ -275,8 +422,14 @@ block0(v0: i32):
return v2
}
; check: sub w0, w0, #1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sub w0, w0, #1
; Inst 1: ret
; }}
function %f27(i32) -> i32 {
block0(v0: i32):
@@ -285,8 +438,14 @@ block0(v0: i32):
return v2
}
; check: add w0, w0, #1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, #1
; Inst 1: ret
; }}
function %f28(i64) -> i64 {
block0(v0: i64):
@@ -295,8 +454,14 @@ block0(v0: i64):
return v2
}
; check: add x0, x0, #1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x0, #1
; Inst 1: ret
; }}
function %f29(i64) -> i64 {
block0(v0: i64):
@@ -305,9 +470,15 @@ block0(v0: i64):
return v2
}
; check: movz x0, #1
; nextln: sub x0, xzr, x0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #1
; Inst 1: sub x0, xzr, x0
; Inst 2: ret
; }}
function %f30(i8x16) -> i8x16 {
block0(v0: i8x16):
@@ -316,12 +487,17 @@ block0(v0: i8x16):
return v2
}
; check: movz x0, #1
; nextln: sub w0, wzr, w0
; nextln: dup v1.16b, w0
; nextln: ushl v0.16b, v0.16b, v1.16b
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: movz x0, #1
; Inst 1: sub w0, wzr, w0
; Inst 2: dup v1.16b, w0
; Inst 3: ushl v0.16b, v0.16b, v1.16b
; Inst 4: ret
; }}
function %add_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
@@ -329,9 +505,15 @@ block0(v0: i128, v1: i128):
return v2
}
; check: adds x0, x0, x2
; nextln: adc x1, x1, x3
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: adds x0, x0, x2
; Inst 1: adc x1, x1, x3
; Inst 2: ret
; }}
function %sub_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
@@ -339,9 +521,15 @@ block0(v0: i128, v1: i128):
return v2
}
; check: subs x0, x0, x2
; nextln: sbc x1, x1, x3
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs x0, x0, x2
; Inst 1: sbc x1, x1, x3
; Inst 2: ret
; }}
function %mul_i128(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
@@ -349,11 +537,17 @@ block0(v0: i128, v1: i128):
return v2
}
; check: umulh x4, x0, x2
; nextln: madd x3, x0, x3, x4
; nextln: madd x1, x1, x2, x3
; nextln: madd x0, x0, x2, xzr
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: umulh x4, x0, x2
; Inst 1: madd x3, x0, x3, x4
; Inst 2: madd x1, x1, x2, x3
; Inst 3: madd x0, x0, x2, xzr
; Inst 4: ret
; }}
function %add_mul_1(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
@@ -362,8 +556,14 @@ block0(v0: i32, v1: i32, v2: i32):
return v4
}
; check: madd w0, w1, w2, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: madd w0, w1, w2, w0
; Inst 1: ret
; }}
function %add_mul_2(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
@@ -372,8 +572,14 @@ block0(v0: i32, v1: i32, v2: i32):
return v4
}
; check: madd w0, w1, w2, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: madd w0, w1, w2, w0
; Inst 1: ret
; }}
function %srem_const (i64) -> i64 {
block0(v0: i64):
@@ -382,10 +588,16 @@ block0(v0: i64):
return v2
}
; check: orr x1, xzr, #2
; nextln: sdiv x2, x0, x1
; nextln: msub x0, x2, x1, x0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: orr x1, xzr, #2
; Inst 1: sdiv x2, x0, x1
; Inst 2: msub x0, x2, x1, x0
; Inst 3: ret
; }}
function %urem_const (i64) -> i64 {
block0(v0: i64):
@@ -394,10 +606,16 @@ block0(v0: i64):
return v2
}
; check: orr x1, xzr, #2
; nextln: udiv x2, x0, x1
; nextln: msub x0, x2, x1, x0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: orr x1, xzr, #2
; Inst 1: udiv x2, x0, x1
; Inst 2: msub x0, x2, x1, x0
; Inst 3: ret
; }}
function %sdiv_minus_one(i64) -> i64 {
block0(v0: i64):
@@ -406,9 +624,16 @@ block0(v0: i64):
return v2
}
; check: movn x1, #0
; nextln: adds xzr, x1, #1
; nextln: ccmp x0, #1, #nzcv, eq
; nextln: b.vc 8 ; udf
; nextln: sdiv x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: movn x1, #0
; Inst 1: adds xzr, x1, #1
; Inst 2: ccmp x0, #1, #nzcv, eq
; Inst 3: b.vc 8 ; udf
; Inst 4: sdiv x0, x0, x1
; Inst 5: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target aarch64 has_lse
function %atomic_rmw_add_i64(i64, i64) {
@@ -6,109 +6,238 @@ block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 add v0, v1
return
}
; check: ldaddal x1, x0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldaddal x1, x0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_add_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 add v0, v1
return
}
; check: ldaddal w1, w0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldaddal w1, w0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_and_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 and v0, v1
return
}
; check: ldclral x1, x0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldclral x1, x0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_and_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 and v0, v1
return
}
; check: ldclral w1, w0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldclral w1, w0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_or_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 or v0, v1
return
}
; check: ldsetal x1, x0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsetal x1, x0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_or_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 or v0, v1
return
}
; check: ldsetal w1, w0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsetal w1, w0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_xor_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 xor v0, v1
return
}
; check: ldeoral x1, x0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldeoral x1, x0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_xor_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 xor v0, v1
return
}
; check: ldeoral w1, w0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldeoral w1, w0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_smax_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 smax v0, v1
return
}
; check: ldsmaxal x1, x0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsmaxal x1, x0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_smax_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 smax v0, v1
return
}
; check: ldsmaxal w1, w0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsmaxal w1, w0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_umax_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 umax v0, v1
return
}
; check: ldumaxal x1, x0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldumaxal x1, x0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_umax_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 umax v0, v1
return
}
; check: ldumaxal w1, w0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldumaxal w1, w0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_smin_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 smin v0, v1
return
}
; check: ldsminal x1, x0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsminal x1, x0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_smin_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 smin v0, v1
return
}
; check: ldsminal w1, w0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldsminal w1, w0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_umin_i64(i64, i64) {
block0(v0: i64, v1: i64):
v2 = atomic_rmw.i64 umin v0, v1
return
}
; check: lduminal x1, x0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lduminal x1, x0, [x0]
; Inst 1: ret
; }}
function %atomic_rmw_umin_i32(i32, i32) {
block0(v0: i32, v1: i32):
v2 = atomic_rmw.i32 umin v0, v1
return
}
; check: lduminal w1, w0, [x0]
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lduminal w1, w0, [x0]
; Inst 1: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target aarch64
function %atomic_load_i64(i64) -> i64 {
@@ -7,8 +7,14 @@ block0(v0: i64):
return v1
}
; check: ldar x0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldar x0, [x0]
; Inst 1: ret
; }}
function %atomic_load_i32(i64) -> i32 {
block0(v0: i64):
@@ -16,8 +22,14 @@ block0(v0: i64):
return v1
}
; check: ldar w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldar w0, [x0]
; Inst 1: ret
; }}
function %atomic_load_i16(i64) -> i16 {
block0(v0: i64):
@@ -25,8 +37,14 @@ block0(v0: i64):
return v1
}
; check: ldarh w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarh w0, [x0]
; Inst 1: ret
; }}
function %atomic_load_i8(i64) -> i8 {
block0(v0: i64):
@@ -34,8 +52,14 @@ block0(v0: i64):
return v1
}
; check: ldarb w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarb w0, [x0]
; Inst 1: ret
; }}
function %atomic_load_i32_i64(i64) -> i64 {
block0(v0: i64):
@@ -44,8 +68,14 @@ block0(v0: i64):
return v2
}
; check: ldar w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldar w0, [x0]
; Inst 1: ret
; }}
function %atomic_load_i16_i64(i64) -> i64 {
block0(v0: i64):
@@ -54,8 +84,14 @@ block0(v0: i64):
return v2
}
; check: ldarh w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarh w0, [x0]
; Inst 1: ret
; }}
function %atomic_load_i8_i64(i64) -> i64 {
block0(v0: i64):
@@ -64,8 +100,14 @@ block0(v0: i64):
return v2
}
; check: ldarb w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarb w0, [x0]
; Inst 1: ret
; }}
function %atomic_load_i16_i32(i64) -> i32 {
block0(v0: i64):
@@ -74,8 +116,14 @@ block0(v0: i64):
return v2
}
; check: ldarh w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarh w0, [x0]
; Inst 1: ret
; }}
function %atomic_load_i8_i32(i64) -> i32 {
block0(v0: i64):
@@ -84,5 +132,12 @@ block0(v0: i64):
return v2
}
; check: ldarb w0, [x0]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldarb w0, [x0]
; Inst 1: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target aarch64
function %atomic_store_i64(i64, i64) {
@@ -7,8 +7,14 @@ block0(v0: i64, v1: i64):
return
}
; check: stlr x0, [x1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlr x0, [x1]
; Inst 1: ret
; }}
function %atomic_store_i32(i32, i64) {
block0(v0: i32, v1: i64):
@@ -16,8 +22,14 @@ block0(v0: i32, v1: i64):
return
}
; check: stlr w0, [x1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlr w0, [x1]
; Inst 1: ret
; }}
function %atomic_store_i16(i16, i64) {
block0(v0: i16, v1: i64):
@@ -25,8 +37,14 @@ block0(v0: i16, v1: i64):
return
}
; check: stlrh w0, [x1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrh w0, [x1]
; Inst 1: ret
; }}
function %atomic_store_i8(i8, i64) {
block0(v0: i8, v1: i64):
@@ -34,8 +52,14 @@ block0(v0: i8, v1: i64):
return
}
; check: stlrb w0, [x1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrb w0, [x1]
; Inst 1: ret
; }}
function %atomic_store_i64_i32(i64, i64) {
block0(v0: i64, v1: i64):
@@ -44,9 +68,14 @@ block0(v0: i64, v1: i64):
return
}
; check-not: uxt
; check: stlr w0, [x1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlr w0, [x1]
; Inst 1: ret
; }}
function %atomic_store_i64_i16(i64, i64) {
block0(v0: i64, v1: i64):
@@ -55,9 +84,14 @@ block0(v0: i64, v1: i64):
return
}
; check-not: uxt
; check: stlrh w0, [x1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrh w0, [x1]
; Inst 1: ret
; }}
function %atomic_store_i64_i8(i64, i64) {
block0(v0: i64, v1: i64):
@@ -66,9 +100,14 @@ block0(v0: i64, v1: i64):
return
}
; check-not: uxt
; check: stlrb w0, [x1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrb w0, [x1]
; Inst 1: ret
; }}
function %atomic_store_i32_i16(i32, i64) {
block0(v0: i32, v1: i64):
@@ -77,9 +116,14 @@ block0(v0: i32, v1: i64):
return
}
; check-not: uxt
; check: stlrh w0, [x1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrh w0, [x1]
; Inst 1: ret
; }}
function %atomic_store_i32_i8(i32, i64) {
block0(v0: i32, v1: i64):
@@ -88,6 +132,12 @@ block0(v0: i32, v1: i64):
return
}
; check-not: uxt
; check: stlrb w0, [x1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: stlrb w0, [x1]
; Inst 1: ret
; }}

View File

@@ -1,11 +1,19 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
function %f(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = iadd v0, v1
; check: add w0, w0, w1
return v2
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1
; Inst 1: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -9,8 +9,15 @@ block0(v0: i64, v1: i64):
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: blr x1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: blr x1
; Inst 3: ldp fp, lr, [sp], #16
; Inst 4: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -8,9 +8,15 @@ block0(v0: i64, v1: i64):
return v2
}
; check: subs xzr, x0, x1
; nextln: cset x0, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs xzr, x0, x1
; Inst 1: cset x0, eq
; Inst 2: ret
; }}
function %icmp_eq_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -18,12 +24,17 @@ block0(v0: i128, v1: i128):
return v2
}
; check: eor x0, x0, x2
; nextln: eor x1, x1, x3
; nextln: adds xzr, x0, x1
; nextln: cset x0, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: eor x0, x0, x2
; Inst 1: eor x1, x1, x3
; Inst 2: adds xzr, x0, x1
; Inst 3: cset x0, eq
; Inst 4: ret
; }}
function %icmp_ne_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -31,12 +42,17 @@ block0(v0: i128, v1: i128):
return v2
}
; check: eor x0, x0, x2
; nextln: eor x1, x1, x3
; nextln: adds xzr, x0, x1
; nextln: cset x0, ne
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: eor x0, x0, x2
; Inst 1: eor x1, x1, x3
; Inst 2: adds xzr, x0, x1
; Inst 3: cset x0, ne
; Inst 4: ret
; }}
function %icmp_slt_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -44,13 +60,18 @@ block0(v0: i128, v1: i128):
return v2
}
; check: subs xzr, x0, x2
; nextln: cset x0, lo
; nextln: subs xzr, x1, x3
; nextln: cset x1, lt
; nextln: csel x0, x0, x1, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, lo
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, lt
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
function %icmp_ult_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -58,12 +79,18 @@ block0(v0: i128, v1: i128):
return v2
}
; check: subs xzr, x0, x2
; nextln: cset x0, lo
; nextln: subs xzr, x1, x3
; nextln: cset x1, lo
; nextln: csel x0, x0, x1, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, lo
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, lo
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
function %icmp_sle_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -71,12 +98,18 @@ block0(v0: i128, v1: i128):
return v2
}
; check: subs xzr, x0, x2
; nextln: cset x0, ls
; nextln: subs xzr, x1, x3
; nextln: cset x1, le
; nextln: csel x0, x0, x1, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, ls
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, le
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
function %icmp_ule_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -84,12 +117,18 @@ block0(v0: i128, v1: i128):
return v2
}
; check: subs xzr, x0, x2
; nextln: cset x0, ls
; nextln: subs xzr, x1, x3
; nextln: cset x1, ls
; nextln: csel x0, x0, x1, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, ls
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, ls
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
function %icmp_sgt_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -97,12 +136,18 @@ block0(v0: i128, v1: i128):
return v2
}
; check: subs xzr, x0, x2
; nextln: cset x0, hi
; nextln: subs xzr, x1, x3
; nextln: cset x1, gt
; nextln: csel x0, x0, x1, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hi
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, gt
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
function %icmp_ugt_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -110,13 +155,18 @@ block0(v0: i128, v1: i128):
return v2
}
; check: subs xzr, x0, x2
; nextln: cset x0, hi
; nextln: subs xzr, x1, x3
; nextln: cset x1, hi
; nextln: csel x0, x0, x1, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hi
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, hi
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
function %icmp_sge_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -124,12 +174,18 @@ block0(v0: i128, v1: i128):
return v2
}
; check: subs xzr, x0, x2
; nextln: cset x0, hs
; nextln: subs xzr, x1, x3
; nextln: cset x1, ge
; nextln: csel x0, x0, x1, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hs
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, ge
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
function %icmp_uge_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -137,12 +193,18 @@ block0(v0: i128, v1: i128):
return v2
}
; check: subs xzr, x0, x2
; nextln: cset x0, hs
; nextln: subs xzr, x1, x3
; nextln: cset x1, hs
; nextln: csel x0, x0, x1, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hs
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, hs
; Inst 4: csel x0, x0, x1, eq
; Inst 5: ret
; }}
function %icmp_of_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -150,10 +212,16 @@ block0(v0: i128, v1: i128):
return v2
}
; check: adds xzr, x0, x2
; nextln: adcs xzr, x1, x3
; nextln: cset x0, vs
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: adds xzr, x0, x2
; Inst 1: adcs xzr, x1, x3
; Inst 2: cset x0, vs
; Inst 3: ret
; }}
function %icmp_nof_i128(i128, i128) -> b1 {
block0(v0: i128, v1: i128):
@@ -161,11 +229,16 @@ block0(v0: i128, v1: i128):
return v2
}
; check: adds xzr, x0, x2
; nextln: adcs xzr, x1, x3
; nextln: cset x0, vc
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: adds xzr, x0, x2
; Inst 1: adcs xzr, x1, x3
; Inst 2: cset x0, vc
; Inst 3: ret
; }}
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -182,15 +255,26 @@ block2:
return v5
}
; check: Block 0:
; check: subs xzr, x0, x1
; nextln: b.eq label1 ; b label2
; check: Block 1:
; check: movz x0, #1
; nextln: ret
; check: Block 2:
; check: movz x0, #2
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 2)
; Inst 0: subs xzr, x0, x1
; Inst 1: b.eq label1 ; b label2
; Block 1:
; (original IR block: block1)
; (instruction range: 2 .. 4)
; Inst 2: movz x0, #1
; Inst 3: ret
; Block 2:
; (original IR block: block2)
; (instruction range: 4 .. 6)
; Inst 4: movz x0, #2
; Inst 5: ret
; }}
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -203,11 +287,29 @@ block1:
return v4
}
; check: subs xzr, x0, x1
; check: Block 1:
; check: movz x0, #1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 2)
; Inst 0: subs xzr, x0, x1
; Inst 1: b.eq label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 2 .. 3)
; Inst 2: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 3 .. 4)
; Inst 3: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 4 .. 6)
; Inst 4: movz x0, #1
; Inst 5: ret
; }}
function %i128_brz(i128){
block0(v0: i128):
@@ -219,15 +321,28 @@ block1:
return
}
; check: orr x0, x0, x1
; nextln: cbz x0, label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 2)
; Inst 0: orr x0, x0, x1
; Inst 1: cbz x0, label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 2 .. 3)
; Inst 2: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 3 .. 4)
; Inst 3: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 4 .. 5)
; Inst 4: ret
; }}
function %i128_brnz(i128){
block0(v0: i128):
@@ -239,16 +354,28 @@ block1:
return
}
; check: orr x0, x0, x1
; nextln: cbnz x0, label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 2)
; Inst 0: orr x0, x0, x1
; Inst 1: cbnz x0, label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 2 .. 3)
; Inst 2: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 3 .. 4)
; Inst 3: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 4 .. 5)
; Inst 4: ret
; }}
function %i128_bricmp_eq(i128, i128) {
block0(v0: i128, v1: i128):
@@ -259,17 +386,30 @@ block1:
return
}
; check: eor x0, x0, x2
; nextln: eor x1, x1, x3
; nextln: adds xzr, x0, x1
; nextln: b.eq label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 4)
; Inst 0: eor x0, x0, x2
; Inst 1: eor x1, x1, x3
; Inst 2: adds xzr, x0, x1
; Inst 3: b.eq label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 4 .. 5)
; Inst 4: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 5 .. 6)
; Inst 5: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 6 .. 7)
; Inst 6: ret
; }}
function %i128_bricmp_ne(i128, i128) {
block0(v0: i128, v1: i128):
@@ -280,17 +420,30 @@ block1:
return
}
; check: eor x0, x0, x2
; nextln: eor x1, x1, x3
; nextln: adds xzr, x0, x1
; nextln: b.ne label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 4)
; Inst 0: eor x0, x0, x2
; Inst 1: eor x1, x1, x3
; Inst 2: adds xzr, x0, x1
; Inst 3: b.ne label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 4 .. 5)
; Inst 4: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 5 .. 6)
; Inst 5: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 6 .. 7)
; Inst 6: ret
; }}
function %i128_bricmp_slt(i128, i128) {
block0(v0: i128, v1: i128):
@@ -301,20 +454,33 @@ block1:
return
}
; check: subs xzr, x0, x2
; nextln: cset x0, lo
; nextln: subs xzr, x1, x3
; nextln: cset x1, lt
; nextln: csel x0, x0, x1, eq
; nextln: subs xzr, xzr, x0
; nextln: b.lt label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 7)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, lo
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, lt
; Inst 4: csel x0, x0, x1, eq
; Inst 5: subs xzr, xzr, x0
; Inst 6: b.lt label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 7 .. 8)
; Inst 7: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 9 .. 10)
; Inst 9: ret
; }}
function %i128_bricmp_ult(i128, i128) {
block0(v0: i128, v1: i128):
@@ -325,19 +491,33 @@ block1:
return
}
; check: subs xzr, x0, x2
; nextln: cset x0, lo
; nextln: subs xzr, x1, x3
; nextln: cset x1, lo
; nextln: csel x0, x0, x1, eq
; nextln: subs xzr, xzr, x0
; nextln: b.lo label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 7)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, lo
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, lo
; Inst 4: csel x0, x0, x1, eq
; Inst 5: subs xzr, xzr, x0
; Inst 6: b.lo label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 7 .. 8)
; Inst 7: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 9 .. 10)
; Inst 9: ret
; }}
function %i128_bricmp_sle(i128, i128) {
block0(v0: i128, v1: i128):
@@ -348,20 +528,34 @@ block1:
return
}
; check: subs xzr, x0, x2
; nextln: cset x0, ls
; nextln: subs xzr, x1, x3
; nextln: cset x1, le
; nextln: csel x0, x0, x1, eq
; nextln: movz x1, #1
; nextln: subs xzr, x1, x0
; nextln: b.le label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 8)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, ls
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, le
; Inst 4: csel x0, x0, x1, eq
; Inst 5: movz x1, #1
; Inst 6: subs xzr, x1, x0
; Inst 7: b.le label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 9 .. 10)
; Inst 9: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 10 .. 11)
; Inst 10: ret
; }}
function %i128_bricmp_ule(i128, i128) {
block0(v0: i128, v1: i128):
@@ -372,20 +566,34 @@ block1:
return
}
; check: subs xzr, x0, x2
; nextln: cset x0, ls
; nextln: subs xzr, x1, x3
; nextln: cset x1, ls
; nextln: csel x0, x0, x1, eq
; nextln: movz x1, #1
; nextln: subs xzr, x1, x0
; nextln: b.ls label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 8)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, ls
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, ls
; Inst 4: csel x0, x0, x1, eq
; Inst 5: movz x1, #1
; Inst 6: subs xzr, x1, x0
; Inst 7: b.ls label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 9 .. 10)
; Inst 9: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 10 .. 11)
; Inst 10: ret
; }}
function %i128_bricmp_sgt(i128, i128) {
block0(v0: i128, v1: i128):
@@ -396,19 +604,33 @@ block1:
return
}
; check: subs xzr, x0, x2
; nextln: cset x0, hi
; nextln: subs xzr, x1, x3
; nextln: cset x1, gt
; nextln: csel x0, x0, x1, eq
; nextln: subs xzr, x0, xzr
; nextln: b.gt label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 7)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hi
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, gt
; Inst 4: csel x0, x0, x1, eq
; Inst 5: subs xzr, x0, xzr
; Inst 6: b.gt label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 7 .. 8)
; Inst 7: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 9 .. 10)
; Inst 9: ret
; }}
function %i128_bricmp_ugt(i128, i128) {
block0(v0: i128, v1: i128):
@@ -419,20 +641,33 @@ block1:
return
}
; check: subs xzr, x0, x2
; nextln: cset x0, hi
; nextln: subs xzr, x1, x3
; nextln: cset x1, hi
; nextln: csel x0, x0, x1, eq
; nextln: subs xzr, x0, xzr
; nextln: b.hi label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 7)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hi
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, hi
; Inst 4: csel x0, x0, x1, eq
; Inst 5: subs xzr, x0, xzr
; Inst 6: b.hi label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 7 .. 8)
; Inst 7: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 9 .. 10)
; Inst 9: ret
; }}
function %i128_bricmp_sge(i128, i128) {
block0(v0: i128, v1: i128):
@@ -443,20 +678,34 @@ block1:
return
}
; check: subs xzr, x0, x2
; nextln: cset x0, hs
; nextln: subs xzr, x1, x3
; nextln: cset x1, ge
; nextln: csel x0, x0, x1, eq
; nextln: movz x1, #1
; nextln: subs xzr, x0, x1
; nextln: b.ge label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 8)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hs
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, ge
; Inst 4: csel x0, x0, x1, eq
; Inst 5: movz x1, #1
; Inst 6: subs xzr, x0, x1
; Inst 7: b.ge label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 9 .. 10)
; Inst 9: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 10 .. 11)
; Inst 10: ret
; }}
function %i128_bricmp_uge(i128, i128) {
block0(v0: i128, v1: i128):
@@ -467,20 +716,34 @@ block1:
return
}
; check: subs xzr, x0, x2
; nextln: cset x0, hs
; nextln: subs xzr, x1, x3
; nextln: cset x1, hs
; nextln: csel x0, x0, x1, eq
; nextln: movz x1, #1
; nextln: subs xzr, x0, x1
; nextln: b.hs label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 8)
; Inst 0: subs xzr, x0, x2
; Inst 1: cset x0, hs
; Inst 2: subs xzr, x1, x3
; Inst 3: cset x1, hs
; Inst 4: csel x0, x0, x1, eq
; Inst 5: movz x1, #1
; Inst 6: subs xzr, x0, x1
; Inst 7: b.hs label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 8 .. 9)
; Inst 8: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 9 .. 10)
; Inst 9: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 10 .. 11)
; Inst 10: ret
; }}
function %i128_bricmp_of(i128, i128) {
block0(v0: i128, v1: i128):
@@ -491,15 +754,29 @@ block1:
return
}
; check: adds xzr, x0, x2
; nextln: adcs xzr, x1, x3
; nextln: b.vs label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 3)
; Inst 0: adds xzr, x0, x2
; Inst 1: adcs xzr, x1, x3
; Inst 2: b.vs label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 3 .. 4)
; Inst 3: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 4 .. 5)
; Inst 4: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 5 .. 6)
; Inst 5: ret
; }}
function %i128_bricmp_nof(i128, i128) {
block0(v0: i128, v1: i128):
@@ -510,12 +787,27 @@ block1:
return
}
; check: adds xzr, x0, x2
; nextln: adcs xzr, x1, x3
; nextln: b.vc label1 ; b label2
; check: Block 1:
; check: b label3
; check: Block 2:
; check: b label3
; check: Block 3:
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 3)
; Inst 0: adds xzr, x0, x2
; Inst 1: adcs xzr, x1, x3
; Inst 2: b.vc label1 ; b label2
; Block 1:
; (successor: Block 3)
; (instruction range: 3 .. 4)
; Inst 3: b label3
; Block 2:
; (successor: Block 3)
; (instruction range: 4 .. 5)
; Inst 4: b label3
; Block 3:
; (original IR block: block1)
; (instruction range: 5 .. 6)
; Inst 5: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -10,8 +10,16 @@ block0(v0: i8, v1: i64, v2: i64):
return v5
}
; check: subs wzr
; check: csel x0, $(=x[0-9]+, x[0-9]+), eq
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxtb w0, w0
; Inst 1: subs wzr, w0, #42
; Inst 2: csel x0, x1, x2, eq
; Inst 3: ret
; }}
function %g(i8) -> b1 {
block0(v0: i8):
@@ -21,8 +29,16 @@ block0(v0: i8):
return v5
}
; check: subs wzr
; check: cset x0, eq
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxtb w0, w0
; Inst 1: subs wzr, w0, #42
; Inst 2: cset x0, eq
; Inst 3: ret
; }}
function %h(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
@@ -30,9 +46,16 @@ block0(v0: i8, v1: i8, v2: i8):
return v3
}
; check: and
; nextln: bic
; nextln: orr
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: and x1, x1, x0
; Inst 1: bic x0, x2, x0
; Inst 2: orr x0, x0, x1
; Inst 3: ret
; }}
function %i(b1, i8, i8) -> i8 {
block0(v0: b1, v1: i8, v2: i8):
@@ -40,8 +63,16 @@ block0(v0: b1, v1: i8, v2: i8):
return v3
}
; check: subs wzr
; nextln: csel
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: and w0, w0, #1
; Inst 1: subs wzr, w0, wzr
; Inst 2: csel x0, x1, x2, ne
; Inst 3: ret
; }}
function %i(i32, i8, i8) -> i8 {
block0(v0: i32, v1: i8, v2: i8):
@@ -51,9 +82,15 @@ block0(v0: i32, v1: i8, v2: i8):
return v5
}
; check: subs wzr, w0, #42
; nextln: csel x0, x1, x2, eq
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs wzr, w0, #42
; Inst 1: csel x0, x1, x2, eq
; Inst 2: ret
; }}
function %i128_select(b1, i128, i128) -> i128 {
block0(v0: b1, v1: i128, v2: i128):
@@ -61,6 +98,15 @@ block0(v0: b1, v1: i128, v2: i128):
return v3
}
; check: subs wzr, w0, wzr
; nextln: csel x0, x2, x4, ne
; nextln: csel x1, x3, x5, ne
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: and w0, w0, #1
; Inst 1: subs wzr, w0, wzr
; Inst 2: csel x0, x2, x4, ne
; Inst 3: csel x1, x3, x5, ne
; Inst 4: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -8,8 +8,14 @@ block0:
return v0
}
; check: movz x0, #255
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #255
; Inst 1: ret
; }}
function %f() -> b16 {
block0:
@@ -17,8 +23,14 @@ block0:
return v0
}
; check: movz x0, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #0
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -26,8 +38,14 @@ block0:
return v0
}
; check: movz x0, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #0
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -35,8 +53,14 @@ block0:
return v0
}
; check: movz x0, #65535
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #65535
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -44,8 +68,14 @@ block0:
return v0
}
; check: movz x0, #65535, LSL #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #65535, LSL #16
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -53,8 +83,14 @@ block0:
return v0
}
; check: movz x0, #65535, LSL #32
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #65535, LSL #32
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -62,8 +98,14 @@ block0:
return v0
}
; check: movz x0, #65535, LSL #48
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #65535, LSL #48
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -71,8 +113,14 @@ block0:
return v0
}
; check: movn x0, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #0
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -80,8 +128,14 @@ block0:
return v0
}
; check: movn x0, #65535
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #65535
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -89,8 +143,14 @@ block0:
return v0
}
; check: movn x0, #65535, LSL #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #65535, LSL #16
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -98,8 +158,14 @@ block0:
return v0
}
; check: movn x0, #65535, LSL #32
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #65535, LSL #32
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -107,40 +173,64 @@ block0:
return v0
}
; check: movn x0, #65535, LSL #48
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #65535, LSL #48
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
v0 = iconst.i64 0xf34bf0a31212003a ; random digits
v0 = iconst.i64 0xf34bf0a31212003a ;; random digits
return v0
}
; check: movz x0, #58
; nextln: movk x0, #4626, LSL #16
; nextln: movk x0, #61603, LSL #32
; nextln: movk x0, #62283, LSL #48
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: movz x0, #58
; Inst 1: movk x0, #4626, LSL #16
; Inst 2: movk x0, #61603, LSL #32
; Inst 3: movk x0, #62283, LSL #48
; Inst 4: ret
; }}
function %f() -> i64 {
block0:
v0 = iconst.i64 0x12e900001ef40000 ; random digits with 2 clear half words
v0 = iconst.i64 0x12e900001ef40000 ;; random digits with 2 clear half words
return v0
}
; check: movz x0, #7924, LSL #16
; nextln: movk x0, #4841, LSL #48
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #7924, LSL #16
; Inst 1: movk x0, #4841, LSL #48
; Inst 2: ret
; }}
function %f() -> i64 {
block0:
v0 = iconst.i64 0x12e9ffff1ef4ffff ; random digits with 2 full half words
v0 = iconst.i64 0x12e9ffff1ef4ffff ;; random digits with 2 full half words
return v0
}
; check: movn x0, #57611, LSL #16
; nextln: movk x0, #4841, LSL #48
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movn x0, #57611, LSL #16
; Inst 1: movk x0, #4841, LSL #48
; Inst 2: ret
; }}
function %f() -> i32 {
block0:
@@ -148,8 +238,14 @@ block0:
return v0
}
; check: orr x0, xzr, #4294967295
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: orr x0, xzr, #4294967295
; Inst 1: ret
; }}
function %f() -> i32 {
block0:
@@ -157,8 +253,14 @@ block0:
return v0
}
; check: movn w0, #8
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn w0, #8
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -166,8 +268,14 @@ block0:
return v0
}
; check: movn w0, #8
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn w0, #8
; Inst 1: ret
; }}
function %f() -> i64 {
block0:
@@ -175,5 +283,12 @@ block0:
return v0
}
; check: movn x0, #8
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movn x0, #8
; Inst 1: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -10,10 +10,15 @@ block0(v0: i8):
return v3
}
; check: sxtb x0, w0
; nextln: add x0, x0, #42
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxtb x0, w0
; Inst 1: add x0, x0, #42
; Inst 2: ret
; }}
function %f2(i8, i64) -> i64 {
block0(v0: i8, v1: i64):
@@ -22,9 +27,14 @@ block0(v0: i8, v1: i64):
return v3
}
; check: add x0, x1, x0, SXTB
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x1, x0, SXTB
; Inst 1: ret
; }}
function %i128_uextend_i64(i64) -> i128 {
block0(v0: i64):
@@ -32,8 +42,14 @@ block0(v0: i64):
return v1
}
; check: movz x1, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x1, #0
; Inst 1: ret
; }}
function %i128_sextend_i64(i64) -> i128 {
block0(v0: i64):
@@ -41,9 +57,14 @@ block0(v0: i64):
return v1
}
; check: asr x1, x0, #63
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: asr x1, x0, #63
; Inst 1: ret
; }}
function %i128_uextend_i32(i32) -> i128 {
block0(v0: i32):
@@ -51,9 +72,15 @@ block0(v0: i32):
return v1
}
; check: mov w0, w0
; nextln: movz x1, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: mov w0, w0
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
function %i128_sextend_i32(i32) -> i128 {
block0(v0: i32):
@@ -61,10 +88,15 @@ block0(v0: i32):
return v1
}
; check: sxtw x0, w0
; nextln: asr x1, x0, #63
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxtw x0, w0
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
function %i128_uextend_i16(i16) -> i128 {
block0(v0: i16):
@@ -72,9 +104,15 @@ block0(v0: i16):
return v1
}
; check: uxth w0, w0
; nextln: movz x1, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxth w0, w0
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
function %i128_sextend_i16(i16) -> i128 {
block0(v0: i16):
@@ -82,10 +120,15 @@ block0(v0: i16):
return v1
}
; check: sxth x0, w0
; nextln: asr x1, x0, #63
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxth x0, w0
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
function %i128_uextend_i8(i8) -> i128 {
block0(v0: i8):
@@ -93,9 +136,15 @@ block0(v0: i8):
return v1
}
; check: uxtb w0, w0
; nextln: movz x1, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxtb w0, w0
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
function %i128_sextend_i8(i8) -> i128 {
block0(v0: i8):
@@ -103,9 +152,15 @@ block0(v0: i8):
return v1
}
; check: sxtb x0, w0
; nextln: asr x1, x0, #63
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sxtb x0, w0
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
function %i8x16_uextend_i16(i8x16) -> i16 {
block0(v0: i8x16):
@@ -114,8 +169,14 @@ block0(v0: i8x16):
return v2
}
; check: umov w0, v0.b[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umov w0, v0.b[1]
; Inst 1: ret
; }}
function %i8x16_uextend_i32(i8x16) -> i32 {
block0(v0: i8x16):
@@ -124,8 +185,14 @@ block0(v0: i8x16):
return v2
}
; check: umov w0, v0.b[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umov w0, v0.b[1]
; Inst 1: ret
; }}
function %i8x16_uextend_i64(i8x16) -> i64 {
block0(v0: i8x16):
@@ -134,8 +201,14 @@ block0(v0: i8x16):
return v2
}
; check: umov w0, v0.b[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umov w0, v0.b[1]
; Inst 1: ret
; }}
function %i8x16_uextend_i128(i8x16) -> i128 {
block0(v0: i8x16):
@@ -144,9 +217,15 @@ block0(v0: i8x16):
return v2
}
; check: umov w0, v0.b[1]
; nextln: movz x1, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: umov w0, v0.b[1]
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
function %i8x16_sextend_i16(i8x16) -> i16 {
block0(v0: i8x16):
@@ -155,8 +234,14 @@ block0(v0: i8x16):
return v2
}
; check: smov w0, v0.b[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov w0, v0.b[1]
; Inst 1: ret
; }}
function %i8x16_sextend_i32(i8x16) -> i32 {
block0(v0: i8x16):
@@ -165,8 +250,14 @@ block0(v0: i8x16):
return v2
}
; check: smov w0, v0.b[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov w0, v0.b[1]
; Inst 1: ret
; }}
function %i8x16_sextend_i64(i8x16) -> i64 {
block0(v0: i8x16):
@@ -175,8 +266,14 @@ block0(v0: i8x16):
return v2
}
; check: smov x0, v0.b[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov x0, v0.b[1]
; Inst 1: ret
; }}
function %i8x16_sextend_i128(i8x16) -> i128 {
block0(v0: i8x16):
@@ -185,9 +282,15 @@ block0(v0: i8x16):
return v2
}
; check: smov x0, v0.b[1]
; nextln: asr x1, x0, #63
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: smov x0, v0.b[1]
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
function %i16x8_uextend_i32(i16x8) -> i32 {
block0(v0: i16x8):
@@ -196,8 +299,14 @@ block0(v0: i16x8):
return v2
}
; check: umov w0, v0.h[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umov w0, v0.h[1]
; Inst 1: ret
; }}
function %i16x8_uextend_i64(i16x8) -> i64 {
block0(v0: i16x8):
@@ -206,8 +315,14 @@ block0(v0: i16x8):
return v2
}
; check: umov w0, v0.h[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umov w0, v0.h[1]
; Inst 1: ret
; }}
function %i16x8_uextend_i128(i16x8) -> i128 {
block0(v0: i16x8):
@@ -216,9 +331,15 @@ block0(v0: i16x8):
return v2
}
; check: umov w0, v0.h[1]
; nextln: movz x1, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: umov w0, v0.h[1]
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
function %i16x8_sextend_i32(i16x8) -> i32 {
block0(v0: i16x8):
@@ -227,8 +348,14 @@ block0(v0: i16x8):
return v2
}
; check: smov w0, v0.h[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov w0, v0.h[1]
; Inst 1: ret
; }}
function %i16x8_sextend_i64(i16x8) -> i64 {
block0(v0: i16x8):
@@ -237,8 +364,14 @@ block0(v0: i16x8):
return v2
}
; check: smov x0, v0.h[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov x0, v0.h[1]
; Inst 1: ret
; }}
function %i16x8_sextend_i128(i16x8) -> i128 {
block0(v0: i16x8):
@@ -247,9 +380,15 @@ block0(v0: i16x8):
return v2
}
; check: smov x0, v0.h[1]
; nextln: asr x1, x0, #63
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: smov x0, v0.h[1]
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
function %i32x4_uextend_i64(i32x4) -> i64 {
block0(v0: i32x4):
@@ -258,8 +397,14 @@ block0(v0: i32x4):
return v2
}
; check: mov w0, v0.s[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: mov w0, v0.s[1]
; Inst 1: ret
; }}
function %i32x4_uextend_i128(i32x4) -> i128 {
block0(v0: i32x4):
@@ -268,9 +413,15 @@ block0(v0: i32x4):
return v2
}
; check: mov w0, v0.s[1]
; nextln: movz x1, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: mov w0, v0.s[1]
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
function %i32x4_sextend_i64(i32x4) -> i64 {
block0(v0: i32x4):
@@ -279,8 +430,14 @@ block0(v0: i32x4):
return v2
}
; check: smov x0, v0.s[1]
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smov x0, v0.s[1]
; Inst 1: ret
; }}
function %i32x4_sextend_i128(i32x4) -> i128 {
block0(v0: i32x4):
@@ -289,9 +446,15 @@ block0(v0: i32x4):
return v2
}
; check: smov x0, v0.s[1]
; nextln: asr x1, x0, #63
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: smov x0, v0.s[1]
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}
function %i64x2_uextend_i128(i64x2) -> i128 {
block0(v0: i64x2):
@@ -300,9 +463,15 @@ block0(v0: i64x2):
return v2
}
; check: mov x0, v0.d[1]
; nextln: movz x1, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: mov x0, v0.d[1]
; Inst 1: movz x1, #0
; Inst 2: ret
; }}
function %i64x2_sextend_i128(i64x2) -> i128 {
block0(v0: i64x2):
@@ -311,6 +480,13 @@ block0(v0: i64x2):
return v2
}
; check: mov x0, v0.d[1]
; nextln: asr x1, x0, #63
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: mov x0, v0.d[1]
; Inst 1: asr x1, x0, #63
; Inst 2: ret
; }}

View File

@@ -1,111 +1,168 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
function u0:0(i8) -> f32 {
block0(v0: i8):
v1 = fcvt_from_uint.f32 v0
; check: uxtb w0, w0
; check: ucvtf s0, w0
return v1
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxtb w0, w0
; Inst 1: ucvtf s0, w0
; Inst 2: ret
; }}
function u0:0(i8) -> f64 {
block0(v0: i8):
v1 = fcvt_from_uint.f64 v0
; check: uxtb w0, w0
; check: ucvtf d0, w0
return v1
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxtb w0, w0
; Inst 1: ucvtf d0, w0
; Inst 2: ret
; }}
function u0:0(i16) -> f32 {
block0(v0: i16):
v1 = fcvt_from_uint.f32 v0
; check: uxth w0, w0
; check: ucvtf s0, w0
return v1
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxth w0, w0
; Inst 1: ucvtf s0, w0
; Inst 2: ret
; }}
function u0:0(i16) -> f64 {
block0(v0: i16):
v1 = fcvt_from_uint.f64 v0
; check: uxth w0, w0
; check: ucvtf d0, w0
return v1
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: uxth w0, w0
; Inst 1: ucvtf d0, w0
; Inst 2: ret
; }}
function u0:0(f32) -> i8 {
block0(v0: f32):
v1 = fcvt_to_uint.i8 v0
; check: fcmp s0, s0
; check: b.vc 8 ; udf
; check: movz x0, #49024, LSL #16
; check: fmov d1, x0
; check: fcmp s0, s1
; check: b.gt 8 ; udf
; check: movz x0, #17280, LSL #16
; check: fmov d1, x0
; check: fcmp s0, s1
; check: b.mi 8 ; udf
; check: fcvtzu w0, s0
return v1
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 12)
; Inst 0: fcmp s0, s0
; Inst 1: b.vc 8 ; udf
; Inst 2: movz x0, #49024, LSL #16
; Inst 3: fmov d1, x0
; Inst 4: fcmp s0, s1
; Inst 5: b.gt 8 ; udf
; Inst 6: movz x0, #17280, LSL #16
; Inst 7: fmov d1, x0
; Inst 8: fcmp s0, s1
; Inst 9: b.mi 8 ; udf
; Inst 10: fcvtzu w0, s0
; Inst 11: ret
; }}
function u0:0(f64) -> i8 {
block0(v0: f64):
v1 = fcvt_to_uint.i8 v0
; check: fcmp d0, d0
; check: b.vc 8 ; udf
; check: movz x0, #49136, LSL #48
; check: fmov d1, x0
; check: fcmp d0, d1
; check: b.gt 8 ; udf
; check: movz x0, #16496, LSL #48
; check: fmov d1, x0
; check: fcmp d0, d1
; check: b.mi 8 ; udf
; check: fcvtzu w0, d0
return v1
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 12)
; Inst 0: fcmp d0, d0
; Inst 1: b.vc 8 ; udf
; Inst 2: movz x0, #49136, LSL #48
; Inst 3: fmov d1, x0
; Inst 4: fcmp d0, d1
; Inst 5: b.gt 8 ; udf
; Inst 6: movz x0, #16496, LSL #48
; Inst 7: fmov d1, x0
; Inst 8: fcmp d0, d1
; Inst 9: b.mi 8 ; udf
; Inst 10: fcvtzu w0, d0
; Inst 11: ret
; }}
function u0:0(f32) -> i16 {
block0(v0: f32):
v1 = fcvt_to_uint.i16 v0
; check: fcmp s0, s0
; check: b.vc 8 ; udf
; check: movz x0, #49024, LSL #16
; check: fmov d1, x0
; check: fcmp s0, s1
; check: b.gt 8 ; udf
; check: movz x0, #18304, LSL #16
; check: fmov d1, x0
; check: fcmp s0, s1
; check: b.mi 8 ; udf
; check: fcvtzu w0, s0
return v1
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 12)
; Inst 0: fcmp s0, s0
; Inst 1: b.vc 8 ; udf
; Inst 2: movz x0, #49024, LSL #16
; Inst 3: fmov d1, x0
; Inst 4: fcmp s0, s1
; Inst 5: b.gt 8 ; udf
; Inst 6: movz x0, #18304, LSL #16
; Inst 7: fmov d1, x0
; Inst 8: fcmp s0, s1
; Inst 9: b.mi 8 ; udf
; Inst 10: fcvtzu w0, s0
; Inst 11: ret
; }}
function u0:0(f64) -> i16 {
block0(v0: f64):
v1 = fcvt_to_uint.i16 v0
; check: fcmp d0, d0
; check: b.vc 8 ; udf
; check: movz x0, #49136, LSL #48
; check: fmov d1, x0
; check: fcmp d0, d1
; check: b.gt 8 ; udf
; check: movz x0, #16624, LSL #48
; check: fmov d1, x0
; check: fcmp d0, d1
; check: b.mi 8 ; udf
; check: fcvtzu w0, d0
return v1
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 12)
; Inst 0: fcmp d0, d0
; Inst 1: b.vc 8 ; udf
; Inst 2: movz x0, #49136, LSL #48
; Inst 3: fmov d1, x0
; Inst 4: fcmp d0, d1
; Inst 5: b.gt 8 ; udf
; Inst 6: movz x0, #16624, LSL #48
; Inst 7: fmov d1, x0
; Inst 8: fcmp d0, d1
; Inst 9: b.mi 8 ; udf
; Inst 10: fcvtzu w0, d0
; Inst 11: ret
; }}

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
set enable_heap_access_spectre_mitigation=true
target aarch64
@@ -13,20 +13,31 @@ block0(v0: i64, v1: i32):
return v2
}
; check: Block 0:
; check: mov w2, w1
; nextln: ldr x3, [x0]
; nextln: mov x3, x3
; nextln: subs xzr, x2, x3
; nextln: b.ls label1 ; b label2
; check: Block 1:
; check: add x0, x0, x1, UXTW
; nextln: subs xzr, x2, x3
; nextln: movz x1, #0
; nextln: csel x0, x1, x0, hi
; nextln: ret
; check: Block 2:
; check: udf
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 5)
; Inst 0: mov w2, w1
; Inst 1: ldr x3, [x0]
; Inst 2: mov x3, x3
; Inst 3: subs xzr, x2, x3
; Inst 4: b.ls label1 ; b label2
; Block 1:
; (original IR block: block2)
; (instruction range: 5 .. 10)
; Inst 5: add x0, x0, x1, UXTW
; Inst 6: subs xzr, x2, x3
; Inst 7: movz x1, #0
; Inst 8: csel x0, x1, x0, hi
; Inst 9: ret
; Block 2:
; (original IR block: block1)
; (instruction range: 10 .. 11)
; Inst 10: udf
; }}
function %static_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
@@ -37,15 +48,27 @@ block0(v0: i64, v1: i32):
return v2
}
; check: Block 0:
; check: mov w2, w1
; nextln: subs xzr, x2, #65536
; nextln: b.ls label1 ; b label2
; check: Block 1:
; check: add x0, x0, x1, UXTW
; nextln: subs xzr, x2, #65536
; nextln: movz x1, #0
; nextln: csel x0, x1, x0, hi
; nextln: ret
; check: Block 2:
; check: udf
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 3)
; Inst 0: mov w2, w1
; Inst 1: subs xzr, x2, #65536
; Inst 2: b.ls label1 ; b label2
; Block 1:
; (original IR block: block2)
; (instruction range: 3 .. 8)
; Inst 3: add x0, x0, x1, UXTW
; Inst 4: subs xzr, x2, #65536
; Inst 5: movz x1, #0
; Inst 6: csel x0, x1, x0, hi
; Inst 7: ret
; Block 2:
; (original IR block: block1)
; (instruction range: 8 .. 9)
; Inst 8: udf
; }}

View File

@@ -1,7 +1,7 @@
; Test that `put_input_in_rse` doesn't try to put the input of the `iconst` into a register, which
; would result in an out-of-bounds panic. (#2147)
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -14,16 +14,17 @@ block0:
return v2
}
; check: VCode_ShowWithRRU {{
; nextln: Entry block: 0
; nextln: Block 0:
; nextln: (original IR block: block0)
; nextln: (instruction range: 0 .. 7)
; nextln: Inst 0: movz x0, #56780
; nextln: Inst 1: uxth w0, w0
; nextln: Inst 2: movz x1, #56780
; nextln: Inst 3: subs wzr, w0, w1, UXTH
; nextln: Inst 4: cset x0, ne
; nextln: Inst 5: and w0, w0, #1
; nextln: Inst 6: ret
; nextln: }}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: movz x0, #56780
; Inst 1: uxth w0, w0
; Inst 2: movz x1, #56780
; Inst 3: subs wzr, w0, w1, UXTH
; Inst 4: cset x0, ne
; Inst 5: and w0, w0, #1
; Inst 6: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -10,6 +10,13 @@ block1:
return v0, v1
}
; check: movz x0, #1
; nextln: movz x1, #2
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block1)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #1
; Inst 1: movz x1, #2
; Inst 2: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -8,8 +8,14 @@ block0(v0: i8, v1: i8):
return v2
}
; check: add w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1
; Inst 1: ret
; }}
function %add16(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -17,8 +23,14 @@ block0(v0: i16, v1: i16):
return v2
}
; check: add w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1
; Inst 1: ret
; }}
function %add32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -26,8 +38,14 @@ block0(v0: i32, v1: i32):
return v2
}
; check: add w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1
; Inst 1: ret
; }}
function %add32_8(i32, i8) -> i32 {
block0(v0: i32, v1: i8):
@@ -36,8 +54,14 @@ block0(v0: i32, v1: i8):
return v3
}
; check: add w0, w0, w1, SXTB
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add w0, w0, w1, SXTB
; Inst 1: ret
; }}
function %add64_32(i64, i32) -> i64 {
block0(v0: i64, v1: i32):
@@ -46,5 +70,12 @@ block0(v0: i64, v1: i32):
return v3
}
; check: add x0, x0, x1, SXTW
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x0, x1, SXTW
; Inst 1: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -7,25 +7,54 @@ block0(v0: i128):
v1 = ireduce.i64 v0
return v1
}
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
function %ireduce_128_32(i128) -> i32 {
block0(v0: i128):
v1 = ireduce.i32 v0
return v1
}
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
function %ireduce_128_16(i128) -> i16 {
block0(v0: i128):
v1 = ireduce.i16 v0
return v1
}
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
function %ireduce_128_8(i128) -> i8 {
block0(v0: i128):
v1 = ireduce.i8 v0
return v1
}
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -7,7 +7,13 @@ block0(v0: r64):
return v0
}
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
function %f1(r64) -> b1 {
block0(v0: r64):
@@ -15,9 +21,15 @@ block0(v0: r64):
return v1
}
; check: subs xzr, x0, #0
; nextln: cset x0, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs xzr, x0, #0
; Inst 1: cset x0, eq
; Inst 2: ret
; }}
function %f2(r64) -> b1 {
block0(v0: r64):
@@ -25,9 +37,15 @@ block0(v0: r64):
return v1
}
; check: adds xzr, x0, #1
; nextln: cset x0, eq
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: adds xzr, x0, #1
; Inst 1: cset x0, eq
; Inst 2: ret
; }}
function %f3() -> r64 {
block0:
@@ -35,8 +53,14 @@ block0:
return v0
}
; check: movz x0, #0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: movz x0, #0
; Inst 1: ret
; }}
function %f4(r64, r64) -> r64, r64, r64 {
fn0 = %f(r64) -> b1
@@ -59,43 +83,63 @@ block3(v7: r64, v8: r64):
return v7, v8, v9
}
; check: Block 0:
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: stp x19, x20, [sp, #-16]!
; nextln: sub sp, sp, #32
; nextln: mov x19, x0
; nextln: mov x20, x1
; nextln: mov x0, x19
; nextln: ldr x1, 8 ; b 12 ; data
; nextln: stur x0, [sp, #8]
; nextln: stur x19, [sp, #16]
; nextln: stur x20, [sp, #24]
; nextln: (safepoint: slots [S0, S1, S2]
; nextln: blr x1
; nextln: ldur x19, [sp, #16]
; nextln: ldur x20, [sp, #24]
; nextln: mov x1, sp
; nextln: str x19, [x1]
; nextln: and w0, w0, #1
; nextln: cbz x0, label1 ; b label3
; check: Block 1:
; check: b label2
; check: Block 2:
; check: mov x0, x20
; nextln: b label5
; check: Block 3:
; check: b label4
; check: Block 4:
; check: mov x0, x19
; nextln: mov x19, x20
; nextln: b label5
; check: Block 5:
; check: mov x1, sp
; nextln: ldr x1, [x1]
; nextln: mov x2, x1
; nextln: mov x1, x19
; nextln: add sp, sp, #32
; nextln: ldp x19, x20, [sp], #16
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 3)
; (instruction range: 0 .. 18)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: stp x19, x20, [sp, #-16]!
; Inst 3: sub sp, sp, #32
; Inst 4: mov x19, x0
; Inst 5: mov x20, x1
; Inst 6: mov x0, x19
; Inst 7: ldr x1, 8 ; b 12 ; data TestCase { length: 1, ascii: [102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; Inst 8: stur x0, [sp, #8]
; Inst 9: stur x19, [sp, #16]
; Inst 10: stur x20, [sp, #24]
; (safepoint: slots [S0, S1, S2] with EmitState EmitState { virtual_sp_offset: 0, nominal_sp_to_fp: 0, stack_map: None, cur_srcloc: SourceLoc(4294967295) })
; Inst 11: blr x1
; Inst 12: ldur x19, [sp, #16]
; Inst 13: ldur x20, [sp, #24]
; Inst 14: mov x1, sp
; Inst 15: str x19, [x1]
; Inst 16: and w0, w0, #1
; Inst 17: cbz x0, label1 ; b label3
; Block 1:
; (original IR block: block1)
; (successor: Block 2)
; (instruction range: 18 .. 19)
; Inst 18: b label2
; Block 2:
; (successor: Block 5)
; (instruction range: 19 .. 21)
; Inst 19: mov x0, x20
; Inst 20: b label5
; Block 3:
; (original IR block: block2)
; (successor: Block 4)
; (instruction range: 21 .. 22)
; Inst 21: b label4
; Block 4:
; (successor: Block 5)
; (instruction range: 22 .. 25)
; Inst 22: mov x0, x19
; Inst 23: mov x19, x20
; Inst 24: b label5
; Block 5:
; (original IR block: block3)
; (instruction range: 25 .. 33)
; Inst 25: mov x1, sp
; Inst 26: ldr x1, [x1]
; Inst 27: mov x2, x1
; Inst 28: mov x1, x19
; Inst 29: add sp, sp, #32
; Inst 30: ldp x19, x20, [sp], #16
; Inst 31: ldp fp, lr, [sp], #16
; Inst 32: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -10,8 +10,14 @@ block0(v0: i64):
return v3
}
; check: add x0, x0, x0, LSL 3
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: add x0, x0, x0, LSL 3
; Inst 1: ret
; }}
function %f(i32) -> i32 {
block0(v0: i32):
@@ -20,5 +26,12 @@ block0(v0: i32):
return v2
}
; check: lsl w0, w0, #21
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsl w0, w0, #21
; Inst 1: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -12,31 +12,36 @@ block0(v0: i128, v1: i128):
return v2
}
; check: mov x4, x1
; nextln: orr x1, xzr, #128
; nextln: sub x1, x1, x2
; nextln: lsr x3, x0, x2
; nextln: lsr x5, x4, x2
; nextln: orn w6, wzr, w2
; nextln: lsl x7, x4, #1
; nextln: lsl x6, x7, x6
; nextln: orr x6, x3, x6
; nextln: ands xzr, x2, #64
; nextln: csel x3, xzr, x5, ne
; nextln: csel x2, x5, x6, ne
; nextln: lsl x5, x0, x1
; nextln: lsl x4, x4, x1
; nextln: orn w6, wzr, w1
; nextln: lsr x0, x0, #1
; nextln: lsr x0, x0, x6
; nextln: orr x0, x4, x0
; nextln: ands xzr, x1, #64
; nextln: csel x1, x5, x0, ne
; nextln: csel x0, xzr, x5, ne
; nextln: orr x1, x3, x1
; nextln: orr x0, x2, x0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 24)
; Inst 0: mov x4, x1
; Inst 1: orr x1, xzr, #128
; Inst 2: sub x1, x1, x2
; Inst 3: lsr x3, x0, x2
; Inst 4: lsr x5, x4, x2
; Inst 5: orn w6, wzr, w2
; Inst 6: lsl x7, x4, #1
; Inst 7: lsl x6, x7, x6
; Inst 8: orr x6, x3, x6
; Inst 9: ands xzr, x2, #64
; Inst 10: csel x3, xzr, x5, ne
; Inst 11: csel x2, x5, x6, ne
; Inst 12: lsl x5, x0, x1
; Inst 13: lsl x4, x4, x1
; Inst 14: orn w6, wzr, w1
; Inst 15: lsr x0, x0, #1
; Inst 16: lsr x0, x0, x6
; Inst 17: orr x0, x4, x0
; Inst 18: ands xzr, x1, #64
; Inst 19: csel x1, x5, x0, ne
; Inst 20: csel x0, xzr, x5, ne
; Inst 21: orr x1, x3, x1
; Inst 22: orr x0, x2, x0
; Inst 23: ret
; }}
function %f0(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -44,8 +49,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: ror x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ror x0, x0, x1
; Inst 1: ret
; }}
function %f1(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -53,8 +64,14 @@ block0(v0: i32, v1: i32):
return v2
}
; check: ror w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ror w0, w0, w1
; Inst 1: ret
; }}
function %f2(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -62,14 +79,20 @@ block0(v0: i16, v1: i16):
return v2
}
; check: uxth w0, w0
; nextln: and w1, w1, #15
; nextln: sub w2, w1, #16
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: uxth w0, w0
; Inst 1: and w1, w1, #15
; Inst 2: sub w2, w1, #16
; Inst 3: sub w2, wzr, w2
; Inst 4: lsr w1, w0, w1
; Inst 5: lsl w0, w0, w2
; Inst 6: orr w0, w0, w1
; Inst 7: ret
; }}
function %f3(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
@@ -77,18 +100,20 @@ block0(v0: i8, v1: i8):
return v2
}
; check: uxtb w0, w0
; nextln: and w1, w1, #7
; nextln: sub w2, w1, #8
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ROL, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: uxtb w0, w0
; Inst 1: and w1, w1, #7
; Inst 2: sub w2, w1, #8
; Inst 3: sub w2, wzr, w2
; Inst 4: lsr w1, w0, w1
; Inst 5: lsl w0, w0, w2
; Inst 6: orr w0, w0, w1
; Inst 7: ret
; }}
function %i128_rotl(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
@@ -96,33 +121,39 @@ block0(v0: i128, v1: i128):
return v2
}
; check: mov x4, x0
; nextln: orr x0, xzr, #128
; nextln: sub x0, x0, x2
; nextln: lsl x3, x4, x2
; nextln: lsl x5, x1, x2
; nextln: orn w6, wzr, w2
; nextln: lsr x7, x4, #1
; nextln: lsr x6, x7, x6
; nextln: orr x5, x5, x6
; nextln: ands xzr, x2, #64
; nextln: csel x2, x3, x5, ne
; nextln: csel x3, xzr, x3, ne
; nextln: lsr x5, x4, x0
; nextln: lsr x4, x1, x0
; nextln: orn w6, wzr, w0
; nextln: lsl x1, x1, #1
; nextln: lsl x1, x1, x6
; nextln: orr x1, x5, x1
; nextln: ands xzr, x0, #64
; nextln: csel x0, xzr, x4, ne
; nextln: csel x1, x4, x1, ne
; nextln: orr x1, x3, x1
; nextln: orr x0, x2, x0
; nextln: mov x2, x0
; nextln: mov x0, x1
; nextln: mov x1, x2
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 27)
; Inst 0: mov x4, x0
; Inst 1: orr x0, xzr, #128
; Inst 2: sub x0, x0, x2
; Inst 3: lsl x3, x4, x2
; Inst 4: lsl x5, x1, x2
; Inst 5: orn w6, wzr, w2
; Inst 6: lsr x7, x4, #1
; Inst 7: lsr x6, x7, x6
; Inst 8: orr x5, x5, x6
; Inst 9: ands xzr, x2, #64
; Inst 10: csel x2, x3, x5, ne
; Inst 11: csel x3, xzr, x3, ne
; Inst 12: lsr x5, x4, x0
; Inst 13: lsr x4, x1, x0
; Inst 14: orn w6, wzr, w0
; Inst 15: lsl x1, x1, #1
; Inst 16: lsl x1, x1, x6
; Inst 17: orr x1, x5, x1
; Inst 18: ands xzr, x0, #64
; Inst 19: csel x0, xzr, x4, ne
; Inst 20: csel x1, x4, x1, ne
; Inst 21: orr x1, x3, x1
; Inst 22: orr x0, x2, x0
; Inst 23: mov x2, x0
; Inst 24: mov x0, x1
; Inst 25: mov x1, x2
; Inst 26: ret
; }}
function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -130,9 +161,15 @@ block0(v0: i64, v1: i64):
return v2
}
; check: sub x1, xzr, x1
; nextln: ror x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sub x1, xzr, x1
; Inst 1: ror x0, x0, x1
; Inst 2: ret
; }}
function %f5(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -140,9 +177,15 @@ block0(v0: i32, v1: i32):
return v2
}
; check: sub w1, wzr, w1
; nextln: ror w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: sub w1, wzr, w1
; Inst 1: ror w0, w0, w1
; Inst 2: ret
; }}
function %f6(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -150,15 +193,21 @@ block0(v0: i16, v1: i16):
return v2
}
; check: sub w1, wzr, w1
; nextln: uxth w0, w0
; nextln: and w1, w1, #15
; nextln: sub w2, w1, #16
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 9)
; Inst 0: sub w1, wzr, w1
; Inst 1: uxth w0, w0
; Inst 2: and w1, w1, #15
; Inst 3: sub w2, w1, #16
; Inst 4: sub w2, wzr, w2
; Inst 5: lsr w1, w0, w1
; Inst 6: lsl w0, w0, w2
; Inst 7: orr w0, w0, w1
; Inst 8: ret
; }}
function %f7(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
@@ -166,19 +215,21 @@ block0(v0: i8, v1: i8):
return v2
}
; check: sub w1, wzr, w1
; nextln: uxtb w0, w0
; nextln: and w1, w1, #7
; nextln: sub w2, w1, #8
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; LSR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 9)
; Inst 0: sub w1, wzr, w1
; Inst 1: uxtb w0, w0
; Inst 2: and w1, w1, #7
; Inst 3: sub w2, w1, #8
; Inst 4: sub w2, wzr, w2
; Inst 5: lsr w1, w0, w1
; Inst 6: lsl w0, w0, w2
; Inst 7: orr w0, w0, w1
; Inst 8: ret
; }}
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -186,8 +237,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: lsr x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsr x0, x0, x1
; Inst 1: ret
; }}
function %f9(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -195,8 +252,14 @@ block0(v0: i32, v1: i32):
return v2
}
; check: lsr w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsr w0, w0, w1
; Inst 1: ret
; }}
function %f10(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -204,10 +267,16 @@ block0(v0: i16, v1: i16):
return v2
}
; check: uxth w0, w0
; nextln: and w1, w1, #15
; nextln: lsr w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxth w0, w0
; Inst 1: and w1, w1, #15
; Inst 2: lsr w0, w0, w1
; Inst 3: ret
; }}
function %f11(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
@@ -215,14 +284,16 @@ block0(v0: i8, v1: i8):
return v2
}
; check: uxtb w0, w0
; nextln: and w1, w1, #7
; nextln: lsr w0, w0, w1
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; LSL, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxtb w0, w0
; Inst 1: and w1, w1, #7
; Inst 2: lsr w0, w0, w1
; Inst 3: ret
; }}
function %f12(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -230,8 +301,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: lsl x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsl x0, x0, x1
; Inst 1: ret
; }}
function %f13(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -239,8 +316,14 @@ block0(v0: i32, v1: i32):
return v2
}
; check: lsl w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsl w0, w0, w1
; Inst 1: ret
; }}
function %f14(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -248,9 +331,15 @@ block0(v0: i16, v1: i16):
return v2
}
; check: and w1, w1, #15
; nextln: lsl w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: and w1, w1, #15
; Inst 1: lsl w0, w0, w1
; Inst 2: ret
; }}
function %f15(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
@@ -258,13 +347,15 @@ block0(v0: i8, v1: i8):
return v2
}
; check: and w1, w1, #7
; nextln: lsl w0, w0, w1
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ASR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: and w1, w1, #7
; Inst 1: lsl w0, w0, w1
; Inst 2: ret
; }}
function %f16(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
@@ -272,8 +363,14 @@ block0(v0: i64, v1: i64):
return v2
}
; check: asr x0, x0, x1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: asr x0, x0, x1
; Inst 1: ret
; }}
function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
@@ -281,8 +378,14 @@ block0(v0: i32, v1: i32):
return v2
}
; check: asr w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: asr w0, w0, w1
; Inst 1: ret
; }}
function %f18(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
@@ -290,9 +393,16 @@ block0(v0: i16, v1: i16):
return v2
}
; check: and w1, w1, #15
; nextln: asr w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: sxth w0, w0
; Inst 1: and w1, w1, #15
; Inst 2: asr w0, w0, w1
; Inst 3: ret
; }}
function %f19(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
@@ -300,13 +410,16 @@ block0(v0: i8, v1: i8):
return v2
}
; check: and w1, w1, #7
; nextln: asr w0, w0, w1
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; immediate forms
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: sxtb w0, w0
; Inst 1: and w1, w1, #7
; Inst 2: asr w0, w0, w1
; Inst 3: ret
; }}
function %f20(i64) -> i64 {
block0(v0: i64):
@@ -315,8 +428,14 @@ block0(v0: i64):
return v2
}
; check: ror x0, x0, #17
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ror x0, x0, #17
; Inst 1: ret
; }}
function %f21(i64) -> i64 {
block0(v0: i64):
@@ -325,8 +444,14 @@ block0(v0: i64):
return v2
}
; check: ror x0, x0, #47
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ror x0, x0, #47
; Inst 1: ret
; }}
function %f22(i32) -> i32 {
block0(v0: i32):
@@ -335,8 +460,14 @@ block0(v0: i32):
return v2
}
; check: ror w0, w0, #15
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ror w0, w0, #15
; Inst 1: ret
; }}
function %f23(i16) -> i16 {
block0(v0: i16):
@@ -345,11 +476,17 @@ block0(v0: i16):
return v2
}
; check: uxth w0, w0
; nextln: lsr w1, w0, #6
; nextln: lsl w0, w0, #10
; nextln: orr w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: uxth w0, w0
; Inst 1: lsr w1, w0, #6
; Inst 2: lsl w0, w0, #10
; Inst 3: orr w0, w0, w1
; Inst 4: ret
; }}
function %f24(i8) -> i8 {
block0(v0: i8):
@@ -358,11 +495,17 @@ block0(v0: i8):
return v2
}
; check: uxtb w0, w0
; nextln: lsr w1, w0, #5
; nextln: lsl w0, w0, #3
; nextln: orr w0, w0, w1
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: uxtb w0, w0
; Inst 1: lsr w1, w0, #5
; Inst 2: lsl w0, w0, #3
; Inst 3: orr w0, w0, w1
; Inst 4: ret
; }}
function %f25(i64) -> i64 {
block0(v0: i64):
@@ -371,8 +514,14 @@ block0(v0: i64):
return v2
}
; check: lsr x0, x0, #17
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsr x0, x0, #17
; Inst 1: ret
; }}
function %f26(i64) -> i64 {
block0(v0: i64):
@@ -381,8 +530,14 @@ block0(v0: i64):
return v2
}
; check: asr x0, x0, #17
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: asr x0, x0, #17
; Inst 1: ret
; }}
function %f27(i64) -> i64 {
block0(v0: i64):
@@ -391,5 +546,12 @@ block0(v0: i64):
return v2
}
; check: lsl x0, x0, #17
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: lsl x0, x0, #17
; Inst 1: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -10,9 +10,14 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; check-not: sxtl
; check: smull v0.8h, v0.8b, v1.8b
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull v0.8h, v0.8b, v1.8b
; Inst 1: ret
; }}
function %fn2(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
@@ -22,9 +27,14 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; check-not: sxtl
; check: smull2 v0.8h, v0.16b, v1.16b
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull2 v0.8h, v0.16b, v1.16b
; Inst 1: ret
; }}
function %fn3(i16x8, i16x8) -> i32x4 {
block0(v0: i16x8, v1: i16x8):
@@ -34,9 +44,14 @@ block0(v0: i16x8, v1: i16x8):
return v4
}
; check-not: sxtl
; check: smull v0.4s, v0.4h, v1.4h
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull v0.4s, v0.4h, v1.4h
; Inst 1: ret
; }}
function %fn4(i16x8, i16x8) -> i32x4 {
block0(v0: i16x8, v1: i16x8):
@@ -46,9 +61,14 @@ block0(v0: i16x8, v1: i16x8):
return v4
}
; check-not: sxtl
; check: smull2 v0.4s, v0.8h, v1.8h
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull2 v0.4s, v0.8h, v1.8h
; Inst 1: ret
; }}
function %fn5(i32x4, i32x4) -> i64x2 {
block0(v0: i32x4, v1: i32x4):
@@ -58,9 +78,14 @@ block0(v0: i32x4, v1: i32x4):
return v4
}
; check-not: sxtl
; check: smull v0.2d, v0.2s, v1.2s
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull v0.2d, v0.2s, v1.2s
; Inst 1: ret
; }}
function %fn6(i32x4, i32x4) -> i64x2 {
block0(v0: i32x4, v1: i32x4):
@@ -70,9 +95,14 @@ block0(v0: i32x4, v1: i32x4):
return v4
}
; check-not: sxtl
; check: smull2 v0.2d, v0.4s, v1.4s
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: smull2 v0.2d, v0.4s, v1.4s
; Inst 1: ret
; }}
function %fn7(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
@@ -82,9 +112,14 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; check-not: uxtl
; check: umull v0.8h, v0.8b, v1.8b
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull v0.8h, v0.8b, v1.8b
; Inst 1: ret
; }}
function %fn8(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
@@ -94,9 +129,14 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; check-not: uxtl
; check: umull2 v0.8h, v0.16b, v1.16b
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull2 v0.8h, v0.16b, v1.16b
; Inst 1: ret
; }}
function %fn9(i16x8, i16x8) -> i32x4 {
block0(v0: i16x8, v1: i16x8):
@@ -106,9 +146,14 @@ block0(v0: i16x8, v1: i16x8):
return v4
}
; check-not: uxtl
; check: umull v0.4s, v0.4h, v1.4h
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull v0.4s, v0.4h, v1.4h
; Inst 1: ret
; }}
function %fn10(i16x8, i16x8) -> i32x4 {
block0(v0: i16x8, v1: i16x8):
@@ -118,9 +163,14 @@ block0(v0: i16x8, v1: i16x8):
return v4
}
; check-not: uxtl
; check: umull2 v0.4s, v0.8h, v1.8h
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull2 v0.4s, v0.8h, v1.8h
; Inst 1: ret
; }}
function %fn11(i32x4, i32x4) -> i64x2 {
block0(v0: i32x4, v1: i32x4):
@@ -130,9 +180,14 @@ block0(v0: i32x4, v1: i32x4):
return v4
}
; check-not: uxtl
; check: umull v0.2d, v0.2s, v1.2s
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull v0.2d, v0.2s, v1.2s
; Inst 1: ret
; }}
function %fn12(i32x4, i32x4) -> i64x2 {
block0(v0: i32x4, v1: i32x4):
@@ -142,6 +197,12 @@ block0(v0: i32x4, v1: i32x4):
return v4
}
; check-not: uxtl2
; check: umull2 v0.2d, v0.4s, v1.4s
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: umull2 v0.2d, v0.4s, v1.4s
; Inst 1: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -11,8 +11,14 @@ block0(v0: i8x16):
return v3
}
; check: saddlp v0.8h, v0.16b
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: saddlp v0.8h, v0.16b
; Inst 1: ret
; }}
function %fn2(i8x16) -> i16x8 {
block0(v0: i8x16):
@@ -22,8 +28,14 @@ block0(v0: i8x16):
return v3
}
; check: uaddlp v0.8h, v0.16b
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uaddlp v0.8h, v0.16b
; Inst 1: ret
; }}
function %fn3(i16x8) -> i32x4 {
block0(v0: i16x8):
@@ -33,8 +45,14 @@ block0(v0: i16x8):
return v3
}
; check: saddlp v0.4s, v0.8h
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: saddlp v0.4s, v0.8h
; Inst 1: ret
; }}
function %fn4(i16x8) -> i32x4 {
block0(v0: i16x8):
@@ -44,8 +62,14 @@ block0(v0: i16x8):
return v3
}
; check: uaddlp v0.4s, v0.8h
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uaddlp v0.4s, v0.8h
; Inst 1: ret
; }}
function %fn5(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
@@ -55,10 +79,16 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; check: sxtl v0.8h, v0.8b
; nextln: sxtl2 v1.8h, v1.16b
; nextln: addp v0.8h, v0.8h, v1.8h
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: sxtl v0.8h, v0.8b
; Inst 1: sxtl2 v1.8h, v1.16b
; Inst 2: addp v0.8h, v0.8h, v1.8h
; Inst 3: ret
; }}
function %fn6(i8x16, i8x16) -> i16x8 {
block0(v0: i8x16, v1: i8x16):
@@ -68,10 +98,16 @@ block0(v0: i8x16, v1: i8x16):
return v4
}
; check: uxtl v0.8h, v0.8b
; nextln: uxtl2 v1.8h, v1.16b
; nextln: addp v0.8h, v0.8h, v1.8h
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxtl v0.8h, v0.8b
; Inst 1: uxtl2 v1.8h, v1.16b
; Inst 2: addp v0.8h, v0.8h, v1.8h
; Inst 3: ret
; }}
function %fn7(i8x16) -> i16x8 {
block0(v0: i8x16):
@@ -81,10 +117,16 @@ block0(v0: i8x16):
return v3
}
; check: uxtl v1.8h, v0.8b
; nextln: sxtl2 v0.8h, v0.16b
; nextln: addp v0.8h, v1.8h, v0.8h
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: uxtl v1.8h, v0.8b
; Inst 1: sxtl2 v0.8h, v0.16b
; Inst 2: addp v0.8h, v1.8h, v0.8h
; Inst 3: ret
; }}
function %fn8(i8x16) -> i16x8 {
block0(v0: i8x16):
@@ -94,7 +136,14 @@ block0(v0: i8x16):
return v3
}
; check: sxtl v1.8h, v0.8b
; nextln: uxtl2 v0.8h, v0.16b
; nextln: addp v0.8h, v1.8h, v0.8h
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: sxtl v1.8h, v0.8b
; Inst 1: uxtl2 v0.8h, v0.16b
; Inst 2: addp v0.8h, v1.8h, v0.8h
; Inst 3: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -9,10 +9,16 @@ block0:
return v1
}
; check: movz x0, #1
; nextln: movk x0, #1, LSL #48
; nextln: fmov d0, x0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 4)
; Inst 0: movz x0, #1
; Inst 1: movk x0, #1, LSL #48
; Inst 2: fmov d0, x0
; Inst 3: ret
; }}
function %f2() -> i32x4 {
block0:
@@ -21,6 +27,13 @@ block0:
return v1
}
; check: movz x0, #42679
; nextln: fmov s0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: movz x0, #42679
; Inst 1: fmov s0, w0
; Inst 2: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -7,12 +7,26 @@ block0:
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
function %stack_limit_leaf_zero(i64 stack_limit) {
block0(v0: i64):
return
}
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
function %stack_limit_gv_leaf_zero(i64 vmctx) {
gv0 = vmctx
@@ -23,8 +37,13 @@ block0(v0: i64):
return
}
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: ret
; }}
function %stack_limit_call_zero(i64 stack_limit) {
fn0 = %foo()
@@ -33,14 +52,20 @@ block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, sp, x0
; nextln: b.hs 8 ; udf
; nextln: ldr x0
; nextln: blr x0
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: subs xzr, sp, x0, UXTX
; Inst 3: b.hs 8 ; udf
; Inst 4: ldr x0, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 111, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; Inst 5: blr x0
; Inst 6: ldp fp, lr, [sp], #16
; Inst 7: ret
; }}
function %stack_limit_gv_call_zero(i64 vmctx) {
gv0 = vmctx
@@ -53,17 +78,22 @@ block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: ldr x0
; nextln: blr x0
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 10)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: ldur x16, [x0]
; Inst 3: ldur x16, [x16, #4]
; Inst 4: subs xzr, sp, x16, UXTX
; Inst 5: b.hs 8 ; udf
; Inst 6: ldr x0, 8 ; b 12 ; data TestCase { length: 3, ascii: [102, 111, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } + 0
; Inst 7: blr x0
; Inst 8: ldp fp, lr, [sp], #16
; Inst 9: ret
; }}
function %stack_limit(i64 stack_limit) {
ss0 = explicit_slot 168
@@ -71,15 +101,21 @@ block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x16, x0, #176
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #176
; nextln: add sp, sp, #176
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 9)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: add x16, x0, #176
; Inst 3: subs xzr, sp, x16, UXTX
; Inst 4: b.hs 8 ; udf
; Inst 5: sub sp, sp, #176
; Inst 6: add sp, sp, #176
; Inst 7: ldp fp, lr, [sp], #16
; Inst 8: ret
; }}
function %huge_stack_limit(i64 stack_limit) {
ss0 = explicit_slot 400000
@@ -87,23 +123,29 @@ block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, sp, x0
; nextln: b.hs 8 ; udf
; nextln: movz w17, #6784
; nextln: movk w17, #6, LSL #16
; nextln: add x16, x0, x17, UXTX
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: add sp, sp, x16, UXTX
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 17)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: subs xzr, sp, x0, UXTX
; Inst 3: b.hs 8 ; udf
; Inst 4: movz w17, #6784
; Inst 5: movk w17, #6, LSL #16
; Inst 6: add x16, x0, x17, UXTX
; Inst 7: subs xzr, sp, x16, UXTX
; Inst 8: b.hs 8 ; udf
; Inst 9: movz w16, #6784
; Inst 10: movk w16, #6, LSL #16
; Inst 11: sub sp, sp, x16, UXTX
; Inst 12: movz w16, #6784
; Inst 13: movk w16, #6, LSL #16
; Inst 14: add sp, sp, x16, UXTX
; Inst 15: ldp fp, lr, [sp], #16
; Inst 16: ret
; }}
function %limit_preamble(i64 vmctx) {
gv0 = vmctx
@@ -115,17 +157,23 @@ block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: add x16, x16, #32
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #32
; nextln: add sp, sp, #32
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: ldur x16, [x0]
; Inst 3: ldur x16, [x16, #4]
; Inst 4: add x16, x16, #32
; Inst 5: subs xzr, sp, x16, UXTX
; Inst 6: b.hs 8 ; udf
; Inst 7: sub sp, sp, #32
; Inst 8: add sp, sp, #32
; Inst 9: ldp fp, lr, [sp], #16
; Inst 10: ret
; }}
function %limit_preamble_huge(i64 vmctx) {
gv0 = vmctx
@@ -137,25 +185,31 @@ block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w17, #6784
; nextln: movk w17, #6, LSL #16
; nextln: add x16, x16, x17, UXTX
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: add sp, sp, x16, UXTX
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 19)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: ldur x16, [x0]
; Inst 3: ldur x16, [x16, #4]
; Inst 4: subs xzr, sp, x16, UXTX
; Inst 5: b.hs 8 ; udf
; Inst 6: movz w17, #6784
; Inst 7: movk w17, #6, LSL #16
; Inst 8: add x16, x16, x17, UXTX
; Inst 9: subs xzr, sp, x16, UXTX
; Inst 10: b.hs 8 ; udf
; Inst 11: movz w16, #6784
; Inst 12: movk w16, #6, LSL #16
; Inst 13: sub sp, sp, x16, UXTX
; Inst 14: movz w16, #6784
; Inst 15: movk w16, #6, LSL #16
; Inst 16: add sp, sp, x16, UXTX
; Inst 17: ldp fp, lr, [sp], #16
; Inst 18: ret
; }}
function %limit_preamble_huge_offset(i64 vmctx) {
gv0 = vmctx
@@ -166,13 +220,20 @@ block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #6784 ; movk w16, #6, LSL #16 ; add x16, x0, x16, UXTX ; ldr x16, [x16]
; nextln: add x16, x16, #32
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #32
; nextln: add sp, sp, #32
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 10)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: movz w16, #6784 ; movk w16, #6, LSL #16 ; add x16, x0, x16, UXTX ; ldr x16, [x16]
; Inst 3: add x16, x16, #32
; Inst 4: subs xzr, sp, x16, UXTX
; Inst 5: b.hs 8 ; udf
; Inst 6: sub sp, sp, #32
; Inst 7: add sp, sp, #32
; Inst 8: ldp fp, lr, [sp], #16
; Inst 9: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -10,5 +10,12 @@ block0:
return v0
}
; check: ldr x0, 8 ; b 12 ; data
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: ldr x0, 8 ; b 12 ; data TestCase { length: 9, ascii: [109, 121, 95, 103, 108, 111, 98, 97, 108, 0, 0, 0, 0, 0, 0, 0] } + 0
; Inst 1: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set tls_model=elf_gd
target aarch64
@@ -9,21 +9,29 @@ block0(v0: i32):
v1 = global_value.i64 gv0
return v0, v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: str x19, [sp, #-16]!
; nextln: stp d14, d15, [sp, #-16]!
; nextln: stp d12, d13, [sp, #-16]!
; nextln: stp d10, d11, [sp, #-16]!
; nextln: stp d8, d9, [sp, #-16]!
; nextln: mov x19, x0
; nextln: elf_tls_get_addr u1:0
; nextln: mov x1, x0
; nextln: mov x0, x19
; nextln: ldp d8, d9, [sp], #16
; nextln: ldp d10, d11, [sp], #16
; nextln: ldp d12, d13, [sp], #16
; nextln: ldp d14, d15, [sp], #16
; nextln: ldr x19, [sp], #16
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 18)
; Inst 0: stp fp, lr, [sp, #-16]!
; Inst 1: mov fp, sp
; Inst 2: str x19, [sp, #-16]!
; Inst 3: stp d14, d15, [sp, #-16]!
; Inst 4: stp d12, d13, [sp, #-16]!
; Inst 5: stp d10, d11, [sp, #-16]!
; Inst 6: stp d8, d9, [sp, #-16]!
; Inst 7: mov x19, x0
; Inst 8: elf_tls_get_addr u1:0
; Inst 9: mov x1, x0
; Inst 10: mov x0, x19
; Inst 11: ldp d8, d9, [sp], #16
; Inst 12: ldp d10, d11, [sp], #16
; Inst 13: ldp d12, d13, [sp], #16
; Inst 14: ldp d14, d15, [sp], #16
; Inst 15: ldr x19, [sp], #16
; Inst 16: ldp fp, lr, [sp], #16
; Inst 17: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -7,7 +7,13 @@ block0:
trap user0
}
; check: udf
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 1)
; Inst 0: udf
; }}
function %g(i64) {
block0(v0: i64):
@@ -17,8 +23,15 @@ block0(v0: i64):
return
}
; check: subs xzr, x0, #42
; nextln: b.ne 8 ; udf
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 3)
; Inst 0: subs xzr, x0, #42
; Inst 1: b.ne 8 ; udf
; Inst 2: ret
; }}
function %h() {
block0:
@@ -26,4 +39,12 @@ block0:
return
}
; check: brk #0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: brk #0
; Inst 1: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set unwind_info=false
target aarch64
@@ -8,8 +8,14 @@ block0(v0: i8):
return v1
}
; check: uxtb w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxtb w0, w0
; Inst 1: ret
; }}
function %f_u_8_32(i8) -> i32 {
block0(v0: i8):
@@ -17,8 +23,14 @@ block0(v0: i8):
return v1
}
; check: uxtb w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxtb w0, w0
; Inst 1: ret
; }}
function %f_u_8_16(i8) -> i16 {
block0(v0: i8):
@@ -26,8 +38,14 @@ block0(v0: i8):
return v1
}
; check: uxtb w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxtb w0, w0
; Inst 1: ret
; }}
function %f_s_8_64(i8) -> i64 {
block0(v0: i8):
@@ -35,8 +53,14 @@ block0(v0: i8):
return v1
}
; check: sxtb x0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtb x0, w0
; Inst 1: ret
; }}
function %f_s_8_32(i8) -> i32 {
block0(v0: i8):
@@ -44,8 +68,14 @@ block0(v0: i8):
return v1
}
; check: sxtb w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtb w0, w0
; Inst 1: ret
; }}
function %f_s_8_16(i8) -> i16 {
block0(v0: i8):
@@ -53,8 +83,14 @@ block0(v0: i8):
return v1
}
; check: sxtb w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtb w0, w0
; Inst 1: ret
; }}
function %f_u_16_64(i16) -> i64 {
block0(v0: i16):
@@ -62,8 +98,14 @@ block0(v0: i16):
return v1
}
; check: uxth w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxth w0, w0
; Inst 1: ret
; }}
function %f_u_16_32(i16) -> i32 {
block0(v0: i16):
@@ -71,8 +113,14 @@ block0(v0: i16):
return v1
}
; check: uxth w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: uxth w0, w0
; Inst 1: ret
; }}
function %f_s_16_64(i16) -> i64 {
block0(v0: i16):
@@ -80,8 +128,14 @@ block0(v0: i16):
return v1
}
; check: sxth x0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxth x0, w0
; Inst 1: ret
; }}
function %f_s_16_32(i16) -> i32 {
block0(v0: i16):
@@ -89,8 +143,14 @@ block0(v0: i16):
return v1
}
; check: sxth w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxth w0, w0
; Inst 1: ret
; }}
function %f_u_32_64(i32) -> i64 {
block0(v0: i32):
@@ -98,8 +158,14 @@ block0(v0: i32):
return v1
}
; check: mov w0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: mov w0, w0
; Inst 1: ret
; }}
function %f_s_32_64(i32) -> i64 {
block0(v0: i32):
@@ -107,5 +173,12 @@ block0(v0: i32):
return v1
}
; check: sxtw x0, w0
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 2)
; Inst 0: sxtw x0, w0
; Inst 1: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64
function %amode_add(i64, i64) -> i64 {
@@ -6,29 +6,66 @@ block0(v0: i64, v1: i64):
v2 = iadd v0, v1
v3 = load.i64 v2
return v3
; check: movq 0(%rdi,%rsi,1), %rsi
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq 0(%rdi,%rsi,1), %rsi
; Inst 3: movq %rsi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}
function %amode_add_imm(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 42
v2 = iadd v0, v1
v3 = load.i64 v2
return v3
; check: movq 42(%rdi), %rsi
}
;; Same as above, but add operands have been reversed.
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq 42(%rdi), %rsi
; Inst 3: movq %rsi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}
function %amode_add_imm_order(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 42
v2 = iadd v1, v0
v3 = load.i64 v2
return v3
; check: movq 42(%rdi), %rsi
}
;; Make sure that uextend(cst) are ignored when the cst will naturally sign-extend.
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq 42(%rdi), %rsi
; Inst 3: movq %rsi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}
function %amode_add_uext_imm(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 42
@@ -36,5 +73,19 @@ block0(v0: i64):
v3 = iadd v2, v0
v4 = load.i64 v3
return v4
; check: movq 42(%rdi), %rsi
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq 42(%rdi), %rsi
; Inst 3: movq %rsi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}

View File

@@ -1,72 +1,102 @@
test compile
test compile precise-output
target x86_64
function %f0(b1, i32, i32) -> i32 {
; check: pushq %rbp
; nextln: movq %rsp, %rbp
block0(v0: b1, v1: i32, v2: i32):
v3 = select.i32 v0, v1, v2
; nextln: testb $$1, %dil
; nextln: cmovnzl %esi, %edx
return v3
; nextln: movq %rdx, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
}
function %f1(b1) -> i32 {
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: testb $1, %dil
; Inst 3: cmovnzl %esi, %edx
; Inst 4: movq %rdx, %rax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %f1(b1) -> i32 {
block0(v0: b1):
brnz v0, block1
jump block2
; nextln: testb $$1, %dil
; nextln: jnz label1; j label2
block1:
v1 = iconst.i32 1
return v1
; check: movl $$1, %eax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
block2:
v2 = iconst.i32 2
return v2
; check: movl $$2, %eax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
}
function %f2(b1) -> i32 {
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 4)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: testb $1, %dil
; Inst 3: jnz label1; j label2
; Block 1:
; (original IR block: block1)
; (instruction range: 4 .. 8)
; Inst 4: movl $1, %eax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; Block 2:
; (original IR block: block2)
; (instruction range: 8 .. 12)
; Inst 8: movl $2, %eax
; Inst 9: movq %rbp, %rsp
; Inst 10: popq %rbp
; Inst 11: ret
; }}
function %f2(b1) -> i32 {
block0(v0: b1):
brz v0, block1
jump block2
; nextln: testb $$1, %dil
; nextln: jz label1; j label2
block1:
v1 = iconst.i32 1
return v1
; check: movl $$1, %eax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
block2:
v2 = iconst.i32 2
return v2
; check: movl $$2, %eax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 4)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: testb $1, %dil
; Inst 3: jz label1; j label2
; Block 1:
; (original IR block: block1)
; (instruction range: 4 .. 8)
; Inst 4: movl $1, %eax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; Block 2:
; (original IR block: block2)
; (instruction range: 8 .. 12)
; Inst 8: movl $2, %eax
; Inst 9: movq %rbp, %rsp
; Inst 10: popq %rbp
; Inst 11: ret
; }}

View File

@@ -1,15 +1,23 @@
test compile
test compile precise-output
target x86_64
function %f(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
; check: pushq %rbp
; check: movq %rsp, %rbp
v2 = iadd v0, v1
; check: addl %esi, %edi
return v2
; check: movq %rdi, %rax
; check: movq %rbp, %rsp
; check: popq %rbp
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: addl %esi, %edi
; Inst 3: movq %rdi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64
function %f0(b8) -> b64 {
@@ -7,10 +7,17 @@ block0(v0: b8):
return v1
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movsbq %dil, %rsi
; nextln: movq %rsi, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movsbq %dil, %rsi
; Inst 3: movq %rsi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64
function %f0(i32, i32) -> i32 {
@@ -6,82 +6,167 @@ block0(v0: i32, v1: i32):
v2 = icmp eq v0, v1
brnz v2, block1
jump block2
; check: cmpl %esi, %edi
; nextln: jz label1; j label2
block1:
v3 = iconst.i32 1
; check: movl $$1, %eax
return v3
; check: ret
block2:
v4 = iconst.i32 2
; check: movl $$2, %eax
return v4
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 4)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: cmpl %esi, %edi
; Inst 3: jz label1; j label2
; Block 1:
; (original IR block: block1)
; (instruction range: 4 .. 8)
; Inst 4: movl $1, %eax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; Block 2:
; (original IR block: block2)
; (instruction range: 8 .. 12)
; Inst 8: movl $2, %eax
; Inst 9: movq %rbp, %rsp
; Inst 10: popq %rbp
; Inst 11: ret
; }}
function %f1(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = icmp eq v0, v1
brz v2, block1
jump block2
; check: cmpl %esi, %edi
; nextln: jnz label1; j label2
block1:
v3 = iconst.i32 1
; check: movl $$1, %eax
return v3
; check: ret
block2:
v4 = iconst.i32 2
; check: movl $$2, %eax
return v4
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 4)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: cmpl %esi, %edi
; Inst 3: jnz label1; j label2
; Block 1:
; (original IR block: block1)
; (instruction range: 4 .. 8)
; Inst 4: movl $1, %eax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; Block 2:
; (original IR block: block2)
; (instruction range: 8 .. 12)
; Inst 8: movl $2, %eax
; Inst 9: movq %rbp, %rsp
; Inst 10: popq %rbp
; Inst 11: ret
; }}
function %f2(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = ifcmp v0, v1
brif eq v2, block1
jump block2
; check: cmpl %esi, %edi
; nextln: jz label1; j label2
block1:
v3 = iconst.i32 1
; check: movl $$1, %eax
return v3
; check: ret
block2:
v4 = iconst.i32 2
; check: movl $$2, %eax
return v4
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 4)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: cmpl %esi, %edi
; Inst 3: jz label1; j label2
; Block 1:
; (original IR block: block1)
; (instruction range: 4 .. 8)
; Inst 4: movl $1, %eax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; Block 2:
; (original IR block: block2)
; (instruction range: 8 .. 12)
; Inst 8: movl $2, %eax
; Inst 9: movq %rbp, %rsp
; Inst 10: popq %rbp
; Inst 11: ret
; }}
function %f3(f32, f32) -> i32 {
block0(v0: f32, v1: f32):
v2 = ffcmp v0, v1
brff eq v2, block1
jump block2
; check: ucomiss %xmm1, %xmm0
; nextln: jp label2
; nextln: jnz label2; j label1
block1:
v3 = iconst.i32 1
; check: movl $$1, %eax
return v3
; check: ret
block2:
v4 = iconst.i32 2
; check: movl $$2, %eax
return v4
; check: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 2)
; (successor: Block 1)
; (instruction range: 0 .. 5)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: ucomiss %xmm1, %xmm0
; Inst 3: jp label2
; Inst 4: jnz label2; j label1
; Block 1:
; (original IR block: block1)
; (instruction range: 5 .. 9)
; Inst 5: movl $1, %eax
; Inst 6: movq %rbp, %rsp
; Inst 7: popq %rbp
; Inst 8: ret
; Block 2:
; (original IR block: block2)
; (instruction range: 9 .. 13)
; Inst 9: movl $2, %eax
; Inst 10: movq %rbp, %rsp
; Inst 11: popq %rbp
; Inst 12: ret
; }}

View File

@@ -1,76 +1,114 @@
test compile
test compile precise-output
target x86_64
;; system_v has first param in %rdi, fascall in %rcx
function %one_arg(i32) system_v {
;; system_v has first param in %rdi, fascall in %rcx
sig0 = (i32) windows_fastcall
block0(v0: i32):
; check: movq %rdi, %rcx
; nextln: call *%rdi
call_indirect sig0, v0(v0)
return
}
;; system_v has params in %rdi, %xmm0, fascall in %rcx, %xmm1
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: subq $32, %rsp
; Inst 3: virtual_sp_offset_adjust 32
; Inst 4: movq %rdi, %rcx
; Inst 5: call *%rdi
; Inst 6: addq $32, %rsp
; Inst 7: virtual_sp_offset_adjust -32
; Inst 8: movq %rbp, %rsp
; Inst 9: popq %rbp
; Inst 10: ret
; }}
function %two_args(i32, f32) system_v {
;; system_v has params in %rdi, %xmm0, fascall in %rcx, %xmm1
sig0 = (i32, f32) windows_fastcall
sig1 = (i32, f32) system_v
block0(v0: i32, v1: f32):
; check: movq %rdi, %rsi
; check: movaps %xmm0, %xmm6
; check: movq %rsi, %rcx
; nextln: movaps %xmm6, %xmm1
; nextln: call *%rsi
call_indirect sig0, v0(v0, v1)
; check: movq %rsi, %rdi
; nextln: movaps %xmm6, %xmm0
; nextln: call *%rsi
call_indirect sig1, v0(v0, v1)
return
}
;; fastcall preserves xmm6+, rbx, rbp, rdi, rsi, r12-r15
;; system_v preserves no xmm registers, rbx, rbp, r12-r15
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 17)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq %rdi, %rsi
; Inst 3: movaps %xmm0, %xmm6
; Inst 4: subq $32, %rsp
; Inst 5: virtual_sp_offset_adjust 32
; Inst 6: movq %rsi, %rcx
; Inst 7: movaps %xmm6, %xmm1
; Inst 8: call *%rsi
; Inst 9: addq $32, %rsp
; Inst 10: virtual_sp_offset_adjust -32
; Inst 11: movq %rsi, %rdi
; Inst 12: movaps %xmm6, %xmm0
; Inst 13: call *%rsi
; Inst 14: movq %rbp, %rsp
; Inst 15: popq %rbp
; Inst 16: ret
; }}
function %fastcall_to_systemv(i32) windows_fastcall {
;; fastcall preserves xmm6+, rbx, rbp, rdi, rsi, r12-r15
;; system_v preserves no xmm registers, rbx, rbp, r12-r15
sig0 = () system_v
block0(v0: i32):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$176, %rsp
; nextln: movdqu %xmm6, 0(%rsp)
; nextln: movdqu %xmm7, 16(%rsp)
; nextln: movdqu %xmm8, 32(%rsp)
; nextln: movdqu %xmm9, 48(%rsp)
; nextln: movdqu %xmm10, 64(%rsp)
; nextln: movdqu %xmm11, 80(%rsp)
; nextln: movdqu %xmm12, 96(%rsp)
; nextln: movdqu %xmm13, 112(%rsp)
; nextln: movdqu %xmm14, 128(%rsp)
; nextln: movdqu %xmm15, 144(%rsp)
; nextln: movq %rsi, 160(%rsp)
; nextln: movq %rdi, 168(%rsp)
; nextln: call *%rcx
; nextln: movdqu 0(%rsp), %xmm6
; nextln: movdqu 16(%rsp), %xmm7
; nextln: movdqu 32(%rsp), %xmm8
; nextln: movdqu 48(%rsp), %xmm9
; nextln: movdqu 64(%rsp), %xmm10
; nextln: movdqu 80(%rsp), %xmm11
; nextln: movdqu 96(%rsp), %xmm12
; nextln: movdqu 112(%rsp), %xmm13
; nextln: movdqu 128(%rsp), %xmm14
; nextln: movdqu 144(%rsp), %xmm15
; nextln: movq 160(%rsp), %rsi
; nextln: movq 168(%rsp), %rdi
; nextln: addq $$176, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0()
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 32)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: subq $176, %rsp
; Inst 3: movdqu %xmm6, 0(%rsp)
; Inst 4: movdqu %xmm7, 16(%rsp)
; Inst 5: movdqu %xmm8, 32(%rsp)
; Inst 6: movdqu %xmm9, 48(%rsp)
; Inst 7: movdqu %xmm10, 64(%rsp)
; Inst 8: movdqu %xmm11, 80(%rsp)
; Inst 9: movdqu %xmm12, 96(%rsp)
; Inst 10: movdqu %xmm13, 112(%rsp)
; Inst 11: movdqu %xmm14, 128(%rsp)
; Inst 12: movdqu %xmm15, 144(%rsp)
; Inst 13: movq %rsi, 160(%rsp)
; Inst 14: movq %rdi, 168(%rsp)
; Inst 15: call *%rcx
; Inst 16: movdqu 0(%rsp), %xmm6
; Inst 17: movdqu 16(%rsp), %xmm7
; Inst 18: movdqu 32(%rsp), %xmm8
; Inst 19: movdqu 48(%rsp), %xmm9
; Inst 20: movdqu 64(%rsp), %xmm10
; Inst 21: movdqu 80(%rsp), %xmm11
; Inst 22: movdqu 96(%rsp), %xmm12
; Inst 23: movdqu 112(%rsp), %xmm13
; Inst 24: movdqu 128(%rsp), %xmm14
; Inst 25: movdqu 144(%rsp), %xmm15
; Inst 26: movq 160(%rsp), %rsi
; Inst 27: movq 168(%rsp), %rdi
; Inst 28: addq $176, %rsp
; Inst 29: movq %rbp, %rsp
; Inst 30: popq %rbp
; Inst 31: ret
; }}
function %many_args(
;; rdi, rsi, rdx, rcx, r8, r9,
i64, i64, i64, i64, i64, i64,
@@ -91,50 +129,6 @@ block0(
v6: f64, v7: f64, v8:f64, v9:f64, v10:f64, v11:f64, v12:f64, v13:f64,
v14:i64, v15:i32, v16:f32, v17:f64
):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$32, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %r13, 8(%rsp)
; nextln: movq %r14, 16(%rsp)
; nextln: movq %rdx, %rax
; nextln: movq %rcx, %r10
; nextln: movq %r8, %r11
; nextln: movq %r9, %r12
; nextln: movq 16(%rbp), %r13
; nextln: movq 24(%rbp), %r14
; nextln: movss 32(%rbp), %xmm8
; nextln: movsd 40(%rbp), %xmm9
; nextln: subq $$144, %rsp
; nextln: virtual_sp_offset_adjust 144
; nextln: movq %rdi, %rcx
; nextln: movq %rsi, %rdx
; nextln: movq %rax, %r8
; nextln: movq %r10, %r9
; nextln: movq %r11, 32(%rsp)
; nextln: movq %r12, 40(%rsp)
; nextln: movsd %xmm0, 48(%rsp)
; nextln: movsd %xmm1, 56(%rsp)
; nextln: movsd %xmm2, 64(%rsp)
; nextln: movsd %xmm3, 72(%rsp)
; nextln: movsd %xmm4, 80(%rsp)
; nextln: movsd %xmm5, 88(%rsp)
; nextln: movsd %xmm6, 96(%rsp)
; nextln: movsd %xmm7, 104(%rsp)
; nextln: movq %r13, 112(%rsp)
; nextln: movl %r14d, 120(%rsp)
; nextln: movss %xmm8, 128(%rsp)
; nextln: movsd %xmm9, 136(%rsp)
; nextln: call *%rdi
; nextln: addq $$144, %rsp
; nextln: virtual_sp_offset_adjust -144
; nextln: movq 0(%rsp), %r12
; nextln: movq 8(%rsp), %r13
; nextln: movq 16(%rsp), %r14
; nextln: addq $$32, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0(
v0, v1, v2, v3,
v4, v5, v6, v7,
@@ -145,200 +139,325 @@ block0(
return
}
; rdi => rcx
; rsi => rdx
; rdx => r8
; rcx => r9
; r8 => stack
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 44)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: subq $32, %rsp
; Inst 3: movq %r12, 0(%rsp)
; Inst 4: movq %r13, 8(%rsp)
; Inst 5: movq %r14, 16(%rsp)
; Inst 6: movq %rdx, %rax
; Inst 7: movq %rcx, %r10
; Inst 8: movq %r8, %r11
; Inst 9: movq %r9, %r12
; Inst 10: movq 16(%rbp), %r13
; Inst 11: movq 24(%rbp), %r14
; Inst 12: movss 32(%rbp), %xmm8
; Inst 13: movsd 40(%rbp), %xmm9
; Inst 14: subq $144, %rsp
; Inst 15: virtual_sp_offset_adjust 144
; Inst 16: movq %rdi, %rcx
; Inst 17: movq %rsi, %rdx
; Inst 18: movq %rax, %r8
; Inst 19: movq %r10, %r9
; Inst 20: movq %r11, 32(%rsp)
; Inst 21: movq %r12, 40(%rsp)
; Inst 22: movsd %xmm0, 48(%rsp)
; Inst 23: movsd %xmm1, 56(%rsp)
; Inst 24: movsd %xmm2, 64(%rsp)
; Inst 25: movsd %xmm3, 72(%rsp)
; Inst 26: movsd %xmm4, 80(%rsp)
; Inst 27: movsd %xmm5, 88(%rsp)
; Inst 28: movsd %xmm6, 96(%rsp)
; Inst 29: movsd %xmm7, 104(%rsp)
; Inst 30: movq %r13, 112(%rsp)
; Inst 31: movl %r14d, 120(%rsp)
; Inst 32: movss %xmm8, 128(%rsp)
; Inst 33: movsd %xmm9, 136(%rsp)
; Inst 34: call *%rdi
; Inst 35: addq $144, %rsp
; Inst 36: virtual_sp_offset_adjust -144
; Inst 37: movq 0(%rsp), %r12
; Inst 38: movq 8(%rsp), %r13
; Inst 39: movq 16(%rsp), %r14
; Inst 40: addq $32, %rsp
; Inst 41: movq %rbp, %rsp
; Inst 42: popq %rbp
; Inst 43: ret
; }}
function %many_ints(i64, i64, i64, i64, i64) system_v {
;; rdi => rcx
;; rsi => rdx
;; rdx => r8
;; rcx => r9
;; r8 => stack
sig0 = (i64, i64, i64, i64, i64) windows_fastcall
block0(v0: i64, v1:i64, v2:i64, v3:i64, v4:i64):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movq %rdx, %rax
; nextln: movq %rcx, %r9
; nextln: movq %r8, %r10
; nextln: subq $$48, %rsp
; nextln: virtual_sp_offset_adjust 48
; nextln: movq %rdi, %rcx
; nextln: movq %rsi, %rdx
; nextln: movq %rax, %r8
; nextln: movq %r10, 32(%rsp)
; nextln: call *%rdi
; nextln: addq $$48, %rsp
; nextln: virtual_sp_offset_adjust -48
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0(v0, v1, v2, v3, v4)
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 17)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq %rdx, %rax
; Inst 3: movq %rcx, %r9
; Inst 4: movq %r8, %r10
; Inst 5: subq $48, %rsp
; Inst 6: virtual_sp_offset_adjust 48
; Inst 7: movq %rdi, %rcx
; Inst 8: movq %rsi, %rdx
; Inst 9: movq %rax, %r8
; Inst 10: movq %r10, 32(%rsp)
; Inst 11: call *%rdi
; Inst 12: addq $48, %rsp
; Inst 13: virtual_sp_offset_adjust -48
; Inst 14: movq %rbp, %rsp
; Inst 15: popq %rbp
; Inst 16: ret
; }}
function %many_args2(i32, f32, i64, f64, i32, i32, i32, f32, f64, f32, f64) system_v {
sig0 = (i32, f32, i64, f64, i32, i32, i32, f32, f64, f32, f64) windows_fastcall
block0(v0: i32, v1: f32, v2: i64, v3: f64, v4: i32, v5: i32, v6: i32, v7: f32, v8: f64, v9: f32, v10: f64):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movaps %xmm1, %xmm6
; nextln: movq %rcx, %rax
; nextln: movq %r8, %r9
; nextln: movaps %xmm3, %xmm7
; nextln: subq $$96, %rsp
; nextln: virtual_sp_offset_adjust 96
; nextln: movq %rdi, %rcx
; nextln: movaps %xmm0, %xmm1
; nextln: movq %rsi, %r8
; nextln: movaps %xmm6, %xmm3
; nextln: movl %edx, 32(%rsp)
; nextln: movl %eax, 40(%rsp)
; nextln: movl %r9d, 48(%rsp)
; nextln: movss %xmm2, 56(%rsp)
; nextln: movsd %xmm7, 64(%rsp)
; nextln: movss %xmm4, 72(%rsp)
; nextln: movsd %xmm5, 80(%rsp)
; nextln: call *%rdi
; nextln: addq $$96, %rsp
; nextln: virtual_sp_offset_adjust -96
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
call_indirect sig0, v0(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10)
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 25)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movaps %xmm1, %xmm6
; Inst 3: movq %rcx, %rax
; Inst 4: movq %r8, %r9
; Inst 5: movaps %xmm3, %xmm7
; Inst 6: subq $96, %rsp
; Inst 7: virtual_sp_offset_adjust 96
; Inst 8: movq %rdi, %rcx
; Inst 9: movaps %xmm0, %xmm1
; Inst 10: movq %rsi, %r8
; Inst 11: movaps %xmm6, %xmm3
; Inst 12: movl %edx, 32(%rsp)
; Inst 13: movl %eax, 40(%rsp)
; Inst 14: movl %r9d, 48(%rsp)
; Inst 15: movss %xmm2, 56(%rsp)
; Inst 16: movsd %xmm7, 64(%rsp)
; Inst 17: movss %xmm4, 72(%rsp)
; Inst 18: movsd %xmm5, 80(%rsp)
; Inst 19: call *%rdi
; Inst 20: addq $96, %rsp
; Inst 21: virtual_sp_offset_adjust -96
; Inst 22: movq %rbp, %rsp
; Inst 23: popq %rbp
; Inst 24: ret
; }}
function %wasmtime_mix1(i32) wasmtime_system_v {
sig0 = (i32) system_v
block0(v0: i32):
; check: movq %rdi, %rsi
; nextln: movq %rsi, %rdi
; nextln: call *%rsi
call_indirect sig0, v0(v0)
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq %rdi, %rsi
; Inst 3: movq %rsi, %rdi
; Inst 4: call *%rsi
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %wasmtime_mix2(i32) system_v {
sig0 = (i32) wasmtime_system_v
block0(v0: i32):
; check: movq %rdi, %rsi
; nextln: movq %rsi, %rdi
; nextln: call *%rsi
call_indirect sig0, v0(v0)
return
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq %rdi, %rsi
; Inst 3: movq %rsi, %rdi
; Inst 4: call *%rsi
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %wasmtime_mix2() -> i32, i32 system_v {
sig0 = () -> i32, i32 wasmtime_system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movl $$1, %esi
; nextln: subq $$16, %rsp
; nextln: virtual_sp_offset_adjust 16
; nextln: lea 0(%rsp), %rdi
; nextln: call *%rsi
; nextln: movq 0(%rsp), %rsi
; nextln: addq $$16, %rsp
; nextln: virtual_sp_offset_adjust -16
; nextln: movq %rsi, %rdx
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v2 = iconst.i32 1
v0, v1 = call_indirect sig0, v2()
return v0, v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 14)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movl $1, %esi
; Inst 3: subq $16, %rsp
; Inst 4: virtual_sp_offset_adjust 16
; Inst 5: lea 0(%rsp), %rdi
; Inst 6: call *%rsi
; Inst 7: movq 0(%rsp), %rsi
; Inst 8: addq $16, %rsp
; Inst 9: virtual_sp_offset_adjust -16
; Inst 10: movq %rsi, %rdx
; Inst 11: movq %rbp, %rsp
; Inst 12: popq %rbp
; Inst 13: ret
; }}
function %wasmtime_mix3() -> i32, i32 wasmtime_system_v {
sig0 = () -> i32, i32 system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdi, %r12
; nextln: movl $$1, %esi
; nextln: call *%rsi
; nextln: movl %edx, 0(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v2 = iconst.i32 1
v0, v1 = call_indirect sig0, v2()
return v0, v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 13)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: subq $16, %rsp
; Inst 3: movq %r12, 0(%rsp)
; Inst 4: movq %rdi, %r12
; Inst 5: movl $1, %esi
; Inst 6: call *%rsi
; Inst 7: movl %edx, 0(%r12)
; Inst 8: movq 0(%rsp), %r12
; Inst 9: addq $16, %rsp
; Inst 10: movq %rbp, %rsp
; Inst 11: popq %rbp
; Inst 12: ret
; }}
function %wasmtime_mix4() -> i32, i64, i32 wasmtime_system_v {
sig0 = () -> i32, i64, i32 system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdi, %r12
; nextln: movl $$1, %esi
; nextln: subq $$16, %rsp
; nextln: virtual_sp_offset_adjust 16
; nextln: lea 0(%rsp), %rdi
; nextln: call *%rsi
; nextln: movq 0(%rsp), %rsi
; nextln: addq $$16, %rsp
; nextln: virtual_sp_offset_adjust -16
; nextln: movq %rdx, 0(%r12)
; nextln: movl %esi, 8(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v3 = iconst.i32 1
v0, v1, v2 = call_indirect sig0, v3()
return v0, v1, v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 20)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: subq $16, %rsp
; Inst 3: movq %r12, 0(%rsp)
; Inst 4: movq %rdi, %r12
; Inst 5: movl $1, %esi
; Inst 6: subq $16, %rsp
; Inst 7: virtual_sp_offset_adjust 16
; Inst 8: lea 0(%rsp), %rdi
; Inst 9: call *%rsi
; Inst 10: movq 0(%rsp), %rsi
; Inst 11: addq $16, %rsp
; Inst 12: virtual_sp_offset_adjust -16
; Inst 13: movq %rdx, 0(%r12)
; Inst 14: movl %esi, 8(%r12)
; Inst 15: movq 0(%rsp), %r12
; Inst 16: addq $16, %rsp
; Inst 17: movq %rbp, %rsp
; Inst 18: popq %rbp
; Inst 19: ret
; }}
function %wasmtime_mix5() -> f32, i64, i32, f32 wasmtime_system_v {
sig0 = () -> f32, i64, i32, f32 system_v
block0:
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdi, %r12
; nextln: movl $$1, %esi
; nextln: call *%rsi
; nextln: movq %rax, 0(%r12)
; nextln: movl %edx, 8(%r12)
; nextln: movss %xmm1, 12(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v5 = iconst.i32 1
v0, v1, v2, v3 = call_indirect sig0, v5()
return v0, v1, v2, v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 15)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: subq $16, %rsp
; Inst 3: movq %r12, 0(%rsp)
; Inst 4: movq %rdi, %r12
; Inst 5: movl $1, %esi
; Inst 6: call *%rsi
; Inst 7: movq %rax, 0(%r12)
; Inst 8: movl %edx, 8(%r12)
; Inst 9: movss %xmm1, 12(%r12)
; Inst 10: movq 0(%rsp), %r12
; Inst 11: addq $16, %rsp
; Inst 12: movq %rbp, %rsp
; Inst 13: popq %rbp
; Inst 14: ret
; }}
function %wasmtime_mix6(f32, i64, i32, f32) -> f32, i64, i32, f32 wasmtime_system_v {
sig0 = (f32, i64, i32, f32) -> f32, i64, i32, f32 system_v
block0(v0: f32, v1: i64, v2: i32, v3: f32):
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdx, %r12
; nextln: movl $$1, %eax
; nextln: call *%rax
; nextln: movq %rax, 0(%r12)
; nextln: movl %edx, 8(%r12)
; nextln: movss %xmm1, 12(%r12)
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
v4 = iconst.i32 1
v5, v6, v7, v8 = call_indirect sig0, v4(v0, v1, v2, v3)
return v5, v6, v7, v8
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 15)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: subq $16, %rsp
; Inst 3: movq %r12, 0(%rsp)
; Inst 4: movq %rdx, %r12
; Inst 5: movl $1, %eax
; Inst 6: call *%rax
; Inst 7: movq %rax, 0(%r12)
; Inst 8: movl %edx, 8(%r12)
; Inst 9: movss %xmm1, 12(%r12)
; Inst 10: movq 0(%rsp), %r12
; Inst 11: addq $16, %rsp
; Inst 12: movq %rbp, %rsp
; Inst 13: popq %rbp
; Inst 14: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64 has_lzcnt
function %clz(i64) -> i64 {
@@ -7,13 +7,19 @@ block0(v0: i64):
return v1
}
; check: pushq %rbp
; check: movq %rsp, %rbp
; check: lzcntq %rdi, %rsi
; check: movq %rsi, %rax
; check: movq %rbp, %rsp
; check: popq %rbp
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: lzcntq %rdi, %rsi
; Inst 3: movq %rsi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}
function %clz(i32) -> i32 {
block0(v0: i32):
@@ -21,10 +27,17 @@ block0(v0: i32):
return v1
}
; check: pushq %rbp
; check: movq %rsp, %rbp
; check: lzcntl %edi, %esi
; check: movq %rsi, %rax
; check: movq %rbp, %rsp
; check: popq %rbp
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: lzcntl %edi, %esi
; Inst 3: movq %rsi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}

View File

@@ -1,48 +1,65 @@
test compile
test compile precise-output
target x86_64
function %f0(i64, i64) -> i64, i64 {
block0(v0: i64, v1: i64):
v2 = load.i64 v1
; check: movq 0(%rsi), %rax
v3 = icmp eq v0, v2
v4 = bint.i64 v3
; nextln: cmpq %rax, %rdi
; nextln: setz %cl
; nextln: andq $$1, %rcx
v5 = select.i64 v3, v0, v1
; nextln: cmpq %rax, %rdi
; nextln: cmovzq %rdi, %rsi
return v4, v5
; nextln: movq %rcx, %rax
; nextln: movq %rsi, %rdx
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 13)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq 0(%rsi), %rax
; Inst 3: cmpq %rax, %rdi
; Inst 4: setz %cl
; Inst 5: andq $1, %rcx
; Inst 6: cmpq %rax, %rdi
; Inst 7: cmovzq %rdi, %rsi
; Inst 8: movq %rcx, %rax
; Inst 9: movq %rsi, %rdx
; Inst 10: movq %rbp, %rsp
; Inst 11: popq %rbp
; Inst 12: ret
; }}
function %f1(f64, i64) -> i64, f64 {
block0(v0: f64, v1: i64):
v2 = load.f64 v1
; check: movsd 0(%rdi), %xmm1
v3 = fcmp eq v0, v2
v4 = bint.i64 v3
; nextln: ucomisd %xmm1, %xmm0
; nextln: setnp %dil
; nextln: setz %sil
; nextln: andl %edi, %esi
; nextln: andq $$1, %rsi
v5 = select.f64 v3, v0, v0
; nextln: ucomisd %xmm1, %xmm0
; nextln: movaps %xmm0, %xmm1
; nextln: jnp $$next; movsd %xmm0, %xmm1; $$next:
; nextln: jz $$next; movsd %xmm0, %xmm1; $$next:
return v4, v5
; nextln: movq %rsi, %rax
; nextln: movaps %xmm1, %xmm0
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 17)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movsd 0(%rdi), %xmm1
; Inst 3: ucomisd %xmm1, %xmm0
; Inst 4: setnp %dil
; Inst 5: setz %sil
; Inst 6: andl %edi, %esi
; Inst 7: andq $1, %rsi
; Inst 8: ucomisd %xmm1, %xmm0
; Inst 9: movaps %xmm0, %xmm1
; Inst 10: jnp $next; movsd %xmm0, %xmm1; $next:
; Inst 11: jz $next; movsd %xmm0, %xmm1; $next:
; Inst 12: movq %rsi, %rax
; Inst 13: movaps %xmm1, %xmm0
; Inst 14: movq %rbp, %rsp
; Inst 15: popq %rbp
; Inst 16: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64 has_bmi1
function %ctz(i64) -> i64 {
@@ -7,13 +7,19 @@ block0(v0: i64):
return v1
}
; check: pushq %rbp
; check: movq %rsp, %rbp
; check: tzcntq %rdi, %rsi
; check: movq %rsi, %rax
; check: movq %rbp, %rsp
; check: popq %rbp
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: tzcntq %rdi, %rsi
; Inst 3: movq %rsi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}
function %ctz(i32) -> i32 {
block0(v0: i32):
@@ -21,10 +27,17 @@ block0(v0: i32):
return v1
}
; check: pushq %rbp
; check: movq %rsp, %rbp
; check: tzcntl %edi, %esi
; check: movq %rsi, %rax
; check: movq %rbp, %rsp
; check: popq %rbp
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: tzcntl %edi, %esi
; Inst 3: movq %rsi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set enable_llvm_abi_extensions=true
set unwind_info=true
target x86_64
@@ -8,89 +8,124 @@ block0(v0: i64, v1: i64, v2: i64, v3: i64):
return v0
}
; check: pushq %rbp
; nextln: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; nextln: movq %rsp, %rbp
; nextln: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; nextln: movq %rcx, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; Inst 2: movq %rsp, %rbp
; Inst 3: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; Inst 4: movq %rcx, %rax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %f1(i64, i64, i64, i64) -> i64 windows_fastcall {
block0(v0: i64, v1: i64, v2: i64, v3: i64):
return v1
}
; check: pushq %rbp
; nextln: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; nextln: movq %rsp, %rbp
; nextln: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; nextln: movq %rdx, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; Inst 2: movq %rsp, %rbp
; Inst 3: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; Inst 4: movq %rdx, %rax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %f2(i64, i64, i64, i64) -> i64 windows_fastcall {
block0(v0: i64, v1: i64, v2: i64, v3: i64):
return v2
}
; check: pushq %rbp
; nextln: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; nextln: movq %rsp, %rbp
; nextln: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; nextln: movq %r8, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; Inst 2: movq %rsp, %rbp
; Inst 3: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; Inst 4: movq %r8, %rax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %f3(i64, i64, i64, i64) -> i64 windows_fastcall {
block0(v0: i64, v1: i64, v2: i64, v3: i64):
return v3
}
; check: pushq %rbp
; nextln: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; nextln: movq %rsp, %rbp
; nextln: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; nextln: movq %r9, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; Inst 2: movq %rsp, %rbp
; Inst 3: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; Inst 4: movq %r9, %rax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %f4(i64, i64, f64, i64) -> f64 windows_fastcall {
block0(v0: i64, v1: i64, v2: f64, v3: i64):
return v2
}
; check: pushq %rbp
; nextln: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; nextln: movq %rsp, %rbp
; nextln: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; nextln: movaps %xmm2, %xmm0
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; Inst 2: movq %rsp, %rbp
; Inst 3: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; Inst 4: movaps %xmm2, %xmm0
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %f5(i64, i64, f64, i64) -> i64 windows_fastcall {
block0(v0: i64, v1: i64, v2: f64, v3: i64):
return v3
}
; check: pushq %rbp
; nextln: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; nextln: movq %rsp, %rbp
; nextln: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; nextln: movq %r9, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; Inst 2: movq %rsp, %rbp
; Inst 3: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 0 }
; Inst 4: movq %r9, %rax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %f6(i64, i64, i64, i64, i64, i64) -> i64 windows_fastcall {
block0(v0: i64, v1: i64, v2: i64, v3: i64, v4: i64, v5: i64):
return v5
}
;; This is truly odd (because of the regalloc ordering), but it works. Note
;; that we're spilling and using rsi, which is a callee-save in fastcall, because
@@ -101,52 +136,61 @@ block0(v0: i64, v1: i64, v2: i64, v3: i64, v4: i64, v5: i64):
;; first and so have to spill it (and consequently don't coalesce).
;;
;; TODO(#2704): fix regalloc's register priority ordering!
}
; check: pushq %rbp
; nextln: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; nextln: movq %rsp, %rbp
; nextln: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 16 }
; nextln: subq $$16, %rsp
; nextln: movq %rsi, 0(%rsp)
; nextln: unwind SaveReg { clobber_offset: 0, reg: r16J }
; nextln: movq 48(%rbp), %rsi
; nextln: movq 56(%rbp), %rsi
; nextln: movq %rsi, %rax
; nextln: movq 0(%rsp), %rsi
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 15)
; Inst 0: pushq %rbp
; Inst 1: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; Inst 2: movq %rsp, %rbp
; Inst 3: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 16 }
; Inst 4: subq $16, %rsp
; Inst 5: movq %rsi, 0(%rsp)
; Inst 6: unwind SaveReg { clobber_offset: 0, reg: r16J }
; Inst 7: movq 48(%rbp), %rsi
; Inst 8: movq 56(%rbp), %rsi
; Inst 9: movq %rsi, %rax
; Inst 10: movq 0(%rsp), %rsi
; Inst 11: addq $16, %rsp
; Inst 12: movq %rbp, %rsp
; Inst 13: popq %rbp
; Inst 14: ret
; }}
function %f7(i128, i64, i128, i128) -> i128 windows_fastcall {
block0(v0: i128, v1: i64, v2: i128, v3: i128):
return v3
}
;; Again, terrible regalloc behavior. The important part is that `v3` comes
;; from [rbp+56] and [rbp+64], i.e., the second and third non-shadow
;; stack slot.
; check: pushq %rbp
; nextln: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; nextln: movq %rsp, %rbp
; nextln: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 16 }
; nextln: subq $$16, %rsp
; nextln: movq %rsi, 0(%rsp)
; nextln: unwind SaveReg { clobber_offset: 0, reg: r16J }
; nextln: movq %rdi, 8(%rsp)
; nextln: unwind SaveReg { clobber_offset: 8, reg: r17J }
; nextln: movq 48(%rbp), %rsi
; nextln: movq 56(%rbp), %rsi
; nextln: movq 64(%rbp), %rdi
; nextln: movq %rsi, %rax
; nextln: movq %rdi, %rdx
; nextln: movq 0(%rsp), %rsi
; nextln: movq 8(%rsp), %rdi
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 20)
; Inst 0: pushq %rbp
; Inst 1: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; Inst 2: movq %rsp, %rbp
; Inst 3: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 16 }
; Inst 4: subq $16, %rsp
; Inst 5: movq %rsi, 0(%rsp)
; Inst 6: unwind SaveReg { clobber_offset: 0, reg: r16J }
; Inst 7: movq %rdi, 8(%rsp)
; Inst 8: unwind SaveReg { clobber_offset: 8, reg: r17J }
; Inst 9: movq 48(%rbp), %rsi
; Inst 10: movq 56(%rbp), %rsi
; Inst 11: movq 64(%rbp), %rdi
; Inst 12: movq %rsi, %rax
; Inst 13: movq %rdi, %rdx
; Inst 14: movq 0(%rsp), %rsi
; Inst 15: movq 8(%rsp), %rdi
; Inst 16: addq $16, %rsp
; Inst 17: movq %rbp, %rsp
; Inst 18: popq %rbp
; Inst 19: ret
; }}
function %f8(i64) -> i64 windows_fastcall {
sig0 = (i64, i64, f64, f64, i64, i64) -> i64 windows_fastcall
@@ -158,31 +202,37 @@ block0(v0: i64):
return v2
}
; check: pushq %rbp
; nextln: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; nextln: movq %rsp, %rbp
; nextln: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 16 }
; nextln: subq $$16, %rsp
; nextln: movq %rsi, 0(%rsp)
; nextln: unwind SaveReg { clobber_offset: 0, reg: r16J }
; nextln: movq %rcx, %rsi
; nextln: cvtsi2sd %rsi, %xmm3
; nextln: subq $$48, %rsp
; nextln: virtual_sp_offset_adjust 48
; nextln: movq %rsi, %rcx
; nextln: movq %rsi, %rdx
; nextln: movaps %xmm3, %xmm2
; nextln: movq %rsi, 32(%rsp)
; nextln: movq %rsi, 40(%rsp)
; nextln: load_ext_name %g+0, %rsi
; nextln: call *%rsi
; nextln: addq $$48, %rsp
; nextln: virtual_sp_offset_adjust -48
; nextln: movq 0(%rsp), %rsi
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 25)
; Inst 0: pushq %rbp
; Inst 1: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; Inst 2: movq %rsp, %rbp
; Inst 3: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 16 }
; Inst 4: subq $16, %rsp
; Inst 5: movq %rsi, 0(%rsp)
; Inst 6: unwind SaveReg { clobber_offset: 0, reg: r16J }
; Inst 7: movq %rcx, %rsi
; Inst 8: cvtsi2sd %rsi, %xmm3
; Inst 9: subq $48, %rsp
; Inst 10: virtual_sp_offset_adjust 48
; Inst 11: movq %rsi, %rcx
; Inst 12: movq %rsi, %rdx
; Inst 13: movaps %xmm3, %xmm2
; Inst 14: movq %rsi, 32(%rsp)
; Inst 15: movq %rsi, 40(%rsp)
; Inst 16: load_ext_name %g+0, %rsi
; Inst 17: call *%rsi
; Inst 18: addq $48, %rsp
; Inst 19: virtual_sp_offset_adjust -48
; Inst 20: movq 0(%rsp), %rsi
; Inst 21: addq $16, %rsp
; Inst 22: movq %rbp, %rsp
; Inst 23: popq %rbp
; Inst 24: ret
; }}
function %f9(i64) -> f64 windows_fastcall {
block0(v0: i64):
@@ -234,91 +284,97 @@ block0(v0: i64):
return v39
}
; check: pushq %rbp
; nextln: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; nextln: movq %rsp, %rbp
; nextln: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 160 }
; nextln: subq $$224, %rsp
; nextln: movdqu %xmm6, 64(%rsp)
; nextln: unwind SaveReg { clobber_offset: 0, reg: r6V }
; nextln: movdqu %xmm7, 80(%rsp)
; nextln: unwind SaveReg { clobber_offset: 16, reg: r7V }
; nextln: movdqu %xmm8, 96(%rsp)
; nextln: unwind SaveReg { clobber_offset: 32, reg: r8V }
; nextln: movdqu %xmm9, 112(%rsp)
; nextln: unwind SaveReg { clobber_offset: 48, reg: r9V }
; nextln: movdqu %xmm10, 128(%rsp)
; nextln: unwind SaveReg { clobber_offset: 64, reg: r10V }
; nextln: movdqu %xmm11, 144(%rsp)
; nextln: unwind SaveReg { clobber_offset: 80, reg: r11V }
; nextln: movdqu %xmm12, 160(%rsp)
; nextln: unwind SaveReg { clobber_offset: 96, reg: r12V }
; nextln: movdqu %xmm13, 176(%rsp)
; nextln: unwind SaveReg { clobber_offset: 112, reg: r13V }
; nextln: movdqu %xmm14, 192(%rsp)
; nextln: unwind SaveReg { clobber_offset: 128, reg: r14V }
; nextln: movdqu %xmm15, 208(%rsp)
; nextln: unwind SaveReg { clobber_offset: 144, reg: r15V }
; nextln: movsd 0(%rcx), %xmm4
; nextln: movsd 8(%rcx), %xmm1
; nextln: movsd 16(%rcx), %xmm0
; nextln: movdqu %xmm0, rsp(32 + virtual offset)
; nextln: movsd 24(%rcx), %xmm3
; nextln: movsd 32(%rcx), %xmm0
; nextln: movdqu %xmm0, rsp(48 + virtual offset)
; nextln: movsd 40(%rcx), %xmm5
; nextln: movsd 48(%rcx), %xmm6
; nextln: movsd 56(%rcx), %xmm7
; nextln: movsd 64(%rcx), %xmm8
; nextln: movsd 72(%rcx), %xmm9
; nextln: movsd 80(%rcx), %xmm10
; nextln: movsd 88(%rcx), %xmm11
; nextln: movsd 96(%rcx), %xmm12
; nextln: movsd 104(%rcx), %xmm13
; nextln: movsd 112(%rcx), %xmm14
; nextln: movsd 120(%rcx), %xmm15
; nextln: movsd 128(%rcx), %xmm0
; nextln: movdqu %xmm0, rsp(0 + virtual offset)
; nextln: movsd 136(%rcx), %xmm0
; nextln: movsd 144(%rcx), %xmm2
; nextln: movdqu %xmm2, rsp(16 + virtual offset)
; nextln: movsd 152(%rcx), %xmm2
; nextln: addsd %xmm1, %xmm4
; nextln: movdqu rsp(32 + virtual offset), %xmm1
; nextln: addsd %xmm3, %xmm1
; nextln: movdqu rsp(48 + virtual offset), %xmm3
; nextln: addsd %xmm5, %xmm3
; nextln: addsd %xmm7, %xmm6
; nextln: addsd %xmm9, %xmm8
; nextln: addsd %xmm11, %xmm10
; nextln: addsd %xmm13, %xmm12
; nextln: addsd %xmm15, %xmm14
; nextln: movdqu rsp(0 + virtual offset), %xmm5
; nextln: addsd %xmm0, %xmm5
; nextln: movdqu rsp(16 + virtual offset), %xmm0
; nextln: addsd %xmm2, %xmm0
; nextln: addsd %xmm1, %xmm4
; nextln: addsd %xmm6, %xmm3
; nextln: addsd %xmm10, %xmm8
; nextln: addsd %xmm14, %xmm12
; nextln: addsd %xmm0, %xmm5
; nextln: addsd %xmm3, %xmm4
; nextln: addsd %xmm12, %xmm8
; nextln: addsd %xmm8, %xmm4
; nextln: addsd %xmm5, %xmm4
; nextln: movaps %xmm4, %xmm0
; nextln: movdqu 64(%rsp), %xmm6
; nextln: movdqu 80(%rsp), %xmm7
; nextln: movdqu 96(%rsp), %xmm8
; nextln: movdqu 112(%rsp), %xmm9
; nextln: movdqu 128(%rsp), %xmm10
; nextln: movdqu 144(%rsp), %xmm11
; nextln: movdqu 160(%rsp), %xmm12
; nextln: movdqu 176(%rsp), %xmm13
; nextln: movdqu 192(%rsp), %xmm14
; nextln: movdqu 208(%rsp), %xmm15
; nextln: addq $$224, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 87)
; Inst 0: pushq %rbp
; Inst 1: unwind PushFrameRegs { offset_upward_to_caller_sp: 16 }
; Inst 2: movq %rsp, %rbp
; Inst 3: unwind DefineNewFrame { offset_upward_to_caller_sp: 16, offset_downward_to_clobbers: 160 }
; Inst 4: subq $224, %rsp
; Inst 5: movdqu %xmm6, 64(%rsp)
; Inst 6: unwind SaveReg { clobber_offset: 0, reg: r6V }
; Inst 7: movdqu %xmm7, 80(%rsp)
; Inst 8: unwind SaveReg { clobber_offset: 16, reg: r7V }
; Inst 9: movdqu %xmm8, 96(%rsp)
; Inst 10: unwind SaveReg { clobber_offset: 32, reg: r8V }
; Inst 11: movdqu %xmm9, 112(%rsp)
; Inst 12: unwind SaveReg { clobber_offset: 48, reg: r9V }
; Inst 13: movdqu %xmm10, 128(%rsp)
; Inst 14: unwind SaveReg { clobber_offset: 64, reg: r10V }
; Inst 15: movdqu %xmm11, 144(%rsp)
; Inst 16: unwind SaveReg { clobber_offset: 80, reg: r11V }
; Inst 17: movdqu %xmm12, 160(%rsp)
; Inst 18: unwind SaveReg { clobber_offset: 96, reg: r12V }
; Inst 19: movdqu %xmm13, 176(%rsp)
; Inst 20: unwind SaveReg { clobber_offset: 112, reg: r13V }
; Inst 21: movdqu %xmm14, 192(%rsp)
; Inst 22: unwind SaveReg { clobber_offset: 128, reg: r14V }
; Inst 23: movdqu %xmm15, 208(%rsp)
; Inst 24: unwind SaveReg { clobber_offset: 144, reg: r15V }
; Inst 25: movsd 0(%rcx), %xmm4
; Inst 26: movsd 8(%rcx), %xmm1
; Inst 27: movsd 16(%rcx), %xmm0
; Inst 28: movdqu %xmm0, rsp(32 + virtual offset)
; Inst 29: movsd 24(%rcx), %xmm3
; Inst 30: movsd 32(%rcx), %xmm0
; Inst 31: movdqu %xmm0, rsp(48 + virtual offset)
; Inst 32: movsd 40(%rcx), %xmm5
; Inst 33: movsd 48(%rcx), %xmm6
; Inst 34: movsd 56(%rcx), %xmm7
; Inst 35: movsd 64(%rcx), %xmm8
; Inst 36: movsd 72(%rcx), %xmm9
; Inst 37: movsd 80(%rcx), %xmm10
; Inst 38: movsd 88(%rcx), %xmm11
; Inst 39: movsd 96(%rcx), %xmm12
; Inst 40: movsd 104(%rcx), %xmm13
; Inst 41: movsd 112(%rcx), %xmm14
; Inst 42: movsd 120(%rcx), %xmm15
; Inst 43: movsd 128(%rcx), %xmm0
; Inst 44: movdqu %xmm0, rsp(0 + virtual offset)
; Inst 45: movsd 136(%rcx), %xmm0
; Inst 46: movsd 144(%rcx), %xmm2
; Inst 47: movdqu %xmm2, rsp(16 + virtual offset)
; Inst 48: movsd 152(%rcx), %xmm2
; Inst 49: addsd %xmm1, %xmm4
; Inst 50: movdqu rsp(32 + virtual offset), %xmm1
; Inst 51: addsd %xmm3, %xmm1
; Inst 52: movdqu rsp(48 + virtual offset), %xmm3
; Inst 53: addsd %xmm5, %xmm3
; Inst 54: addsd %xmm7, %xmm6
; Inst 55: addsd %xmm9, %xmm8
; Inst 56: addsd %xmm11, %xmm10
; Inst 57: addsd %xmm13, %xmm12
; Inst 58: addsd %xmm15, %xmm14
; Inst 59: movdqu rsp(0 + virtual offset), %xmm5
; Inst 60: addsd %xmm0, %xmm5
; Inst 61: movdqu rsp(16 + virtual offset), %xmm0
; Inst 62: addsd %xmm2, %xmm0
; Inst 63: addsd %xmm1, %xmm4
; Inst 64: addsd %xmm6, %xmm3
; Inst 65: addsd %xmm10, %xmm8
; Inst 66: addsd %xmm14, %xmm12
; Inst 67: addsd %xmm0, %xmm5
; Inst 68: addsd %xmm3, %xmm4
; Inst 69: addsd %xmm12, %xmm8
; Inst 70: addsd %xmm8, %xmm4
; Inst 71: addsd %xmm5, %xmm4
; Inst 72: movaps %xmm4, %xmm0
; Inst 73: movdqu 64(%rsp), %xmm6
; Inst 74: movdqu 80(%rsp), %xmm7
; Inst 75: movdqu 96(%rsp), %xmm8
; Inst 76: movdqu 112(%rsp), %xmm9
; Inst 77: movdqu 128(%rsp), %xmm10
; Inst 78: movdqu 144(%rsp), %xmm11
; Inst 79: movdqu 160(%rsp), %xmm12
; Inst 80: movdqu 176(%rsp), %xmm13
; Inst 81: movdqu 192(%rsp), %xmm14
; Inst 82: movdqu 208(%rsp), %xmm15
; Inst 83: addq $224, %rsp
; Inst 84: movq %rbp, %rsp
; Inst 85: popq %rbp
; Inst 86: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64
function %f(f64) -> f64 {
@@ -6,11 +6,22 @@ block0(v0: f64):
v1 = fabs.f64 v0
return v1
}
; check: movabsq $$9223372036854775807, %rsi
; nextln: movq %rsi, %xmm1
; nextln: andpd %xmm0, %xmm1
; nextln: movaps %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 9)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movabsq $9223372036854775807, %rsi
; Inst 3: movq %rsi, %xmm1
; Inst 4: andpd %xmm0, %xmm1
; Inst 5: movaps %xmm1, %xmm0
; Inst 6: movq %rbp, %rsp
; Inst 7: popq %rbp
; Inst 8: ret
; }}
function %f(i64) -> f64 {
block0(v0: i64):
@@ -18,8 +29,21 @@ block0(v0: i64):
v2 = fabs.f64 v1
return v2
}
; check: movsd 0(%rdi), %xmm0
; nextln: movabsq $$9223372036854775807, %rsi
; nextln: movq %rsi, %xmm1
; nextln: andpd %xmm0, %xmm1
; nextln: movaps %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 10)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movsd 0(%rdi), %xmm0
; Inst 3: movabsq $9223372036854775807, %rsi
; Inst 4: movq %rsi, %xmm1
; Inst 5: andpd %xmm0, %xmm1
; Inst 6: movaps %xmm1, %xmm0
; Inst 7: movq %rbp, %rsp
; Inst 8: popq %rbp
; Inst 9: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64
function %f(i32, i64 vmctx) -> i64 {
@@ -10,14 +10,39 @@ function %f(i32, i64 vmctx) -> i64 {
block0(v0: i32, v1: i64):
v2 = heap_addr.i64 heap0, v0, 0x8000
; check: movl %edi, %ecx
; nextln: movq 8(%rsi), %rdi
; nextln: movq %rcx, %rax
; nextln: addq $$32768, %rax
; nextln: jnb ; ud2 heap_oob ;
; nextln: cmpq %rdi, %rax
; nextln: jbe label1; j label2
; check: Block 1:
return v2
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 9)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movl %edi, %ecx
; Inst 3: movq 8(%rsi), %rdi
; Inst 4: movq %rcx, %rax
; Inst 5: addq $32768, %rax
; Inst 6: jnb ; ud2 heap_oob ;
; Inst 7: cmpq %rdi, %rax
; Inst 8: jbe label1; j label2
; Block 1:
; (original IR block: block2)
; (instruction range: 9 .. 17)
; Inst 9: addq 0(%rsi), %rcx
; Inst 10: xorq %rsi, %rsi
; Inst 11: cmpq %rdi, %rax
; Inst 12: cmovnbeq %rsi, %rcx
; Inst 13: movq %rcx, %rax
; Inst 14: movq %rbp, %rsp
; Inst 15: popq %rbp
; Inst 16: ret
; Block 2:
; (original IR block: block1)
; (instruction range: 17 .. 18)
; Inst 17: ud2 heap_oob
; }}

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64
function %f(i64) -> i32 {
@@ -16,16 +16,30 @@ block2:
return v3
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: cmpq %rsp, %rdi
; nextln: jnbe label1; j label2
; check: xorl %eax, %eax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; check: movl $$1, %eax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (successor: Block 1)
; (successor: Block 2)
; (instruction range: 0 .. 4)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: cmpq %rsp, %rdi
; Inst 3: jnbe label1; j label2
; Block 1:
; (original IR block: block1)
; (instruction range: 4 .. 8)
; Inst 4: xorl %eax, %eax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; Block 2:
; (original IR block: block2)
; (instruction range: 8 .. 12)
; Inst 8: movl $1, %eax
; Inst 9: movq %rbp, %rsp
; Inst 10: popq %rbp
; Inst 11: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set enable_simd
target x86_64 skylake
@@ -12,9 +12,16 @@ block0(v0: i32x4):
v3 = raw_bitcast.b8x16 v2
return v3
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq %rbp, %rsp
; Inst 3: popq %rbp
; Inst 4: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64 has_popcnt has_sse42
function %popcnt(i64) -> i64 {
@@ -7,13 +7,19 @@ block0(v0: i64):
return v1
}
; check: pushq %rbp
; check: movq %rsp, %rbp
; check: popcntq %rdi, %rsi
; check: movq %rsi, %rax
; check: movq %rbp, %rsp
; check: popq %rbp
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: popcntq %rdi, %rsi
; Inst 3: movq %rsi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}
function %popcnt(i32) -> i32 {
block0(v0: i32):
@@ -21,10 +27,17 @@ block0(v0: i32):
return v1
}
; check: pushq %rbp
; check: movq %rsp, %rbp
; check: popcntl %edi, %esi
; check: movq %rsi, %rax
; check: movq %rbp, %rsp
; check: popq %rbp
; check: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: popcntl %edi, %esi
; Inst 3: movq %rsi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}

View File

@@ -1,110 +1,153 @@
test compile
test compile precise-output
target x86_64
function %popcnt64(i64) -> i64 {
block0(v0: i64):
v1 = popcnt v0
; check: movq %rdi, %rsi
; nextln: shrq $$1, %rsi
; nextln: movabsq $$8608480567731124087, %rax
; nextln: andq %rax, %rsi
; nextln: subq %rsi, %rdi
; nextln: shrq $$1, %rsi
; nextln: andq %rax, %rsi
; nextln: subq %rsi, %rdi
; nextln: shrq $$1, %rsi
; nextln: andq %rax, %rsi
; nextln: subq %rsi, %rdi
; nextln: movq %rdi, %rsi
; nextln: shrq $$4, %rsi
; nextln: addq %rdi, %rsi
; nextln: movabsq $$1085102592571150095, %rdi
; nextln: andq %rdi, %rsi
; nextln: movabsq $$72340172838076673, %rdi
; nextln: imulq %rdi, %rsi
; nextln: shrq $$56, %rsi
; nextln: movq %rsi, %rax
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 25)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq %rdi, %rsi
; Inst 3: shrq $1, %rsi
; Inst 4: movabsq $8608480567731124087, %rax
; Inst 5: andq %rax, %rsi
; Inst 6: subq %rsi, %rdi
; Inst 7: shrq $1, %rsi
; Inst 8: andq %rax, %rsi
; Inst 9: subq %rsi, %rdi
; Inst 10: shrq $1, %rsi
; Inst 11: andq %rax, %rsi
; Inst 12: subq %rsi, %rdi
; Inst 13: movq %rdi, %rsi
; Inst 14: shrq $4, %rsi
; Inst 15: addq %rdi, %rsi
; Inst 16: movabsq $1085102592571150095, %rdi
; Inst 17: andq %rdi, %rsi
; Inst 18: movabsq $72340172838076673, %rdi
; Inst 19: imulq %rdi, %rsi
; Inst 20: shrq $56, %rsi
; Inst 21: movq %rsi, %rax
; Inst 22: movq %rbp, %rsp
; Inst 23: popq %rbp
; Inst 24: ret
; }}
function %popcnt64load(i64) -> i64 {
block0(v0: i64):
v1 = load.i64 v0
v2 = popcnt v1
return v2
; check: movq 0(%rdi), %rdi
; nextln: movq %rdi, %rsi
; nextln: shrq $$1, %rsi
; nextln: movabsq $$8608480567731124087, %rax
; nextln: andq %rax, %rsi
; nextln: subq %rsi, %rdi
; nextln: shrq $$1, %rsi
; nextln: andq %rax, %rsi
; nextln: subq %rsi, %rdi
; nextln: shrq $$1, %rsi
; nextln: andq %rax, %rsi
; nextln: subq %rsi, %rdi
; nextln: movq %rdi, %rsi
; nextln: shrq $$4, %rsi
; nextln: addq %rdi, %rsi
; nextln: movabsq $$1085102592571150095, %rdi
; nextln: andq %rdi, %rsi
; nextln: movabsq $$72340172838076673, %rdi
; nextln: imulq %rdi, %rsi
; nextln: shrq $$56, %rsi
; nextln: movq %rsi, %rax
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 26)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq 0(%rdi), %rdi
; Inst 3: movq %rdi, %rsi
; Inst 4: shrq $1, %rsi
; Inst 5: movabsq $8608480567731124087, %rax
; Inst 6: andq %rax, %rsi
; Inst 7: subq %rsi, %rdi
; Inst 8: shrq $1, %rsi
; Inst 9: andq %rax, %rsi
; Inst 10: subq %rsi, %rdi
; Inst 11: shrq $1, %rsi
; Inst 12: andq %rax, %rsi
; Inst 13: subq %rsi, %rdi
; Inst 14: movq %rdi, %rsi
; Inst 15: shrq $4, %rsi
; Inst 16: addq %rdi, %rsi
; Inst 17: movabsq $1085102592571150095, %rdi
; Inst 18: andq %rdi, %rsi
; Inst 19: movabsq $72340172838076673, %rdi
; Inst 20: imulq %rdi, %rsi
; Inst 21: shrq $56, %rsi
; Inst 22: movq %rsi, %rax
; Inst 23: movq %rbp, %rsp
; Inst 24: popq %rbp
; Inst 25: ret
; }}
function %popcnt32(i32) -> i32 {
block0(v0: i32):
v1 = popcnt v0
return v1
; check: movq %rdi, %rsi
; nextln: shrl $$1, %esi
; nextln: andl $$2004318071, %esi
; nextln: subl %esi, %edi
; nextln: shrl $$1, %esi
; nextln: andl $$2004318071, %esi
; nextln: subl %esi, %edi
; nextln: shrl $$1, %esi
; nextln: andl $$2004318071, %esi
; nextln: subl %esi, %edi
; nextln: movq %rdi, %rsi
; nextln: shrl $$4, %esi
; nextln: addl %edi, %esi
; nextln: andl $$252645135, %esi
; nextln: imull $$16843009, %esi
; nextln: shrl $$24, %esi
; nextln: movq %rsi, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 22)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq %rdi, %rsi
; Inst 3: shrl $1, %esi
; Inst 4: andl $2004318071, %esi
; Inst 5: subl %esi, %edi
; Inst 6: shrl $1, %esi
; Inst 7: andl $2004318071, %esi
; Inst 8: subl %esi, %edi
; Inst 9: shrl $1, %esi
; Inst 10: andl $2004318071, %esi
; Inst 11: subl %esi, %edi
; Inst 12: movq %rdi, %rsi
; Inst 13: shrl $4, %esi
; Inst 14: addl %edi, %esi
; Inst 15: andl $252645135, %esi
; Inst 16: imull $16843009, %esi
; Inst 17: shrl $24, %esi
; Inst 18: movq %rsi, %rax
; Inst 19: movq %rbp, %rsp
; Inst 20: popq %rbp
; Inst 21: ret
; }}
function %popcnt32load(i64) -> i32 {
block0(v0: i64):
v1 = load.i32 v0
v2 = popcnt v1
return v2
; check: movl 0(%rdi), %edi
; nextln: movq %rdi, %rsi
; nextln: shrl $$1, %esi
; nextln: andl $$2004318071, %esi
; nextln: subl %esi, %edi
; nextln: shrl $$1, %esi
; nextln: andl $$2004318071, %esi
; nextln: subl %esi, %edi
; nextln: shrl $$1, %esi
; nextln: andl $$2004318071, %esi
; nextln: subl %esi, %edi
; nextln: movq %rdi, %rsi
; nextln: shrl $$4, %esi
; nextln: addl %edi, %esi
; nextln: andl $$252645135, %esi
; nextln: imull $$16843009, %esi
; nextln: shrl $$24, %esi
; nextln: movq %rsi, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 23)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movl 0(%rdi), %edi
; Inst 3: movq %rdi, %rsi
; Inst 4: shrl $1, %esi
; Inst 5: andl $2004318071, %esi
; Inst 6: subl %esi, %edi
; Inst 7: shrl $1, %esi
; Inst 8: andl $2004318071, %esi
; Inst 9: subl %esi, %edi
; Inst 10: shrl $1, %esi
; Inst 11: andl $2004318071, %esi
; Inst 12: subl %esi, %edi
; Inst 13: movq %rdi, %rsi
; Inst 14: shrl $4, %esi
; Inst 15: addl %edi, %esi
; Inst 16: andl $252645135, %esi
; Inst 17: imull $16843009, %esi
; Inst 18: shrl $24, %esi
; Inst 19: movq %rsi, %rax
; Inst 20: movq %rbp, %rsp
; Inst 21: popq %rbp
; Inst 22: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set enable_probestack=true
target x86_64
@@ -10,7 +10,21 @@ block0:
return v1
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movl $$100000, %eax
; nextln: call LibCall(Probestack)
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movl $100000, %eax
; Inst 3: call LibCall(Probestack)
; Inst 4: subq $100000, %rsp
; Inst 5: lea rsp(0 + virtual offset), %rsi
; Inst 6: movq %rsi, %rax
; Inst 7: addq $100000, %rsp
; Inst 8: movq %rbp, %rsp
; Inst 9: popq %rbp
; Inst 10: ret
; }}

View File

@@ -1,29 +1,30 @@
test compile
test compile precise-output
set enable_llvm_abi_extensions=true
target x86_64
function %f0(i32, i128, i128) -> i128 {
; check: pushq %rbp
; nextln: movq %rsp, %rbp
block0(v0: i32, v1: i128, v2: i128):
v3 = iconst.i32 42
v4 = icmp.i32 eq v0, v3
; nextln: movl $$42, %eax
; nextln: cmpl %eax, %edi
v5 = select.i128 v4, v1, v2
; nextln: cmovzq %rsi, %rcx
; nextln: cmovzq %rdx, %r8
return v5
; nextln: movq %rcx, %rax
; nextln: movq %r8, %rdx
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movl $42, %eax
; Inst 3: cmpl %eax, %edi
; Inst 4: cmovzq %rsi, %rcx
; Inst 5: cmovzq %rdx, %r8
; Inst 6: movq %rcx, %rax
; Inst 7: movq %r8, %rdx
; Inst 8: movq %rbp, %rsp
; Inst 9: popq %rbp
; Inst 10: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set enable_simd
target x86_64 skylake
@@ -7,81 +7,171 @@ block0(v0: f32x4, v1: f32x4):
v2 = band v0, v1
return v2
}
; check: andps
; not: andpd
; not: pand
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: andps %xmm1, %xmm0
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}
function %band_f64x2(f64x2, f64x2) -> f64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = band v0, v1
return v2
}
; check: andpd
; not: andps
; not: pand
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: andpd %xmm1, %xmm0
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}
function %band_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = band v0, v1
return v2
}
; check: pand
; not: andps
; not: andpd
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: pand %xmm1, %xmm0
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}
function %bor_f32x4(f32x4, f32x4) -> f32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = bor v0, v1
return v2
}
; check: orps
; not: orpd
; not: por
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: orps %xmm1, %xmm0
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}
function %bor_f64x2(f64x2, f64x2) -> f64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = bor v0, v1
return v2
}
; check: orpd
; not: orps
; not: por
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: orpd %xmm1, %xmm0
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}
function %bor_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = bor v0, v1
return v2
}
; check: por
; not: orps
; not: orpd
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: por %xmm1, %xmm0
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}
function %bxor_f32x4(f32x4, f32x4) -> f32x4 {
block0(v0: f32x4, v1: f32x4):
v2 = bxor v0, v1
return v2
}
; check: xorps
; not: xorpd
; not: pxor
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: xorps %xmm1, %xmm0
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}
function %bxor_f64x2(f64x2, f64x2) -> f64x2 {
block0(v0: f64x2, v1: f64x2):
v2 = bxor v0, v1
return v2
}
; check: xorpd
; not: xorps
; not: pxor
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: xorpd %xmm1, %xmm0
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}
function %bxor_i32x4(i32x4, i32x4) -> i32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = bxor v0, v1
return v2
}
; check: pxor
; not: xorps
; not: xorpd
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: pxor %xmm1, %xmm0
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}
function %bitselect_i16x8() -> i16x8 {
block0:
@@ -91,42 +181,84 @@ block0:
v3 = bitselect v0, v1, v2
return v3
}
; check: pand %xmm0, %xmm1
; nextln: pandn %xmm2, %xmm0
; nextln: por %xmm1, %xmm0
; not: movdqa
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: load_const VCodeConstant(0), %xmm0
; Inst 3: load_const VCodeConstant(0), %xmm1
; Inst 4: load_const VCodeConstant(0), %xmm2
; Inst 5: pand %xmm0, %xmm1
; Inst 6: pandn %xmm2, %xmm0
; Inst 7: por %xmm1, %xmm0
; Inst 8: movq %rbp, %rsp
; Inst 9: popq %rbp
; Inst 10: ret
; }}
function %vselect_i16x8(b16x8, i16x8, i16x8) -> i16x8 {
block0(v0: b16x8, v1: i16x8, v2: i16x8):
v3 = vselect v0, v1, v2
return v3
}
; check: pblendvb
; not: blendvps
; not: blendvpd
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: pblendvb %xmm1, %xmm2
; Inst 3: movdqa %xmm2, %xmm0
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}
function %vselect_f32x4(b32x4, f32x4, f32x4) -> f32x4 {
block0(v0: b32x4, v1: f32x4, v2: f32x4):
v3 = vselect v0, v1, v2
return v3
}
; check: blendvps
; not: pblendvb
; not: blendvpd
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: blendvps %xmm1, %xmm2
; Inst 3: movdqa %xmm2, %xmm0
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}
function %vselect_f64x2(b64x2, f64x2, f64x2) -> f64x2 {
block0(v0: b64x2, v1: f64x2, v2: f64x2):
v3 = vselect v0, v1, v2
return v3
}
; check: blendvpd
; not: pblendvb
; not: blendvps
; 8x16 shifts: these lower to complex sequences of instructions
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: blendvpd %xmm1, %xmm2
; Inst 3: movdqa %xmm2, %xmm0
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}
function %ishl_i8x16(i32) -> i8x16 {
block0(v0: i32):
@@ -134,12 +266,25 @@ block0(v0: i32):
v2 = ishl v1, v0
return v2
}
; check: movd %edi, %xmm1
; nextln: psllw %xmm1, %xmm0
; nextln: lea const(VCodeConstant(0)), %rsi
; nextln: shlq $$4, %rdi
; nextln: movdqu 0(%rsi,%rdi,1), %xmm1
; nextln: pand %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 12)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: load_const VCodeConstant(1), %xmm0
; Inst 3: movd %edi, %xmm1
; Inst 4: psllw %xmm1, %xmm0
; Inst 5: lea const(VCodeConstant(0)), %rsi
; Inst 6: shlq $4, %rdi
; Inst 7: movdqu 0(%rsi,%rdi,1), %xmm1
; Inst 8: pand %xmm1, %xmm0
; Inst 9: movq %rbp, %rsp
; Inst 10: popq %rbp
; Inst 11: ret
; }}
function %ushr_i8x16_imm() -> i8x16 {
block0:
@@ -148,10 +293,22 @@ block0:
v2 = ushr v1, v0
return v2
}
; check: load_const VCodeConstant(1), %xmm0
; nextln: psrlw $$1, %xmm0
; nextln: movdqu const(VCodeConstant(0)), %xmm1
; nextln: pand %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 9)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: load_const VCodeConstant(1), %xmm0
; Inst 3: psrlw $1, %xmm0
; Inst 4: movdqu const(VCodeConstant(0)), %xmm1
; Inst 5: pand %xmm1, %xmm0
; Inst 6: movq %rbp, %rsp
; Inst 7: popq %rbp
; Inst 8: ret
; }}
function %sshr_i8x16(i32) -> i8x16 {
block0(v0: i32):
@@ -159,42 +316,79 @@ block0(v0: i32):
v2 = sshr v1, v0
return v2
}
; check: addl $$8, %edi
; nextln: movd %edi, %xmm2
; nextln: movdqa %xmm0, %xmm1
; nextln: punpcklbw %xmm1, %xmm1
; nextln: psraw %xmm2, %xmm1
; nextln: punpckhbw %xmm0, %xmm0
; nextln: psraw %xmm2, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 15)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: load_const VCodeConstant(0), %xmm0
; Inst 3: addl $8, %edi
; Inst 4: movd %edi, %xmm2
; Inst 5: movdqa %xmm0, %xmm1
; Inst 6: punpcklbw %xmm1, %xmm1
; Inst 7: psraw %xmm2, %xmm1
; Inst 8: punpckhbw %xmm0, %xmm0
; Inst 9: psraw %xmm2, %xmm0
; Inst 10: packsswb %xmm0, %xmm1
; Inst 11: movdqa %xmm1, %xmm0
; Inst 12: movq %rbp, %rsp
; Inst 13: popq %rbp
; Inst 14: ret
; }}
function %sshr_i8x16_imm(i8x16, i32) -> i8x16 {
block0(v0: i8x16, v1: i32):
v2 = sshr_imm v0, 3
return v2
}
; check: movdqa %xmm0, %xmm1
; nextln: movdqa %xmm1, %xmm0
; nextln: punpcklbw %xmm0, %xmm0
; nextln: psraw $$11, %xmm0
; nextln: punpckhbw %xmm1, %xmm1
; nextln: psraw $$11, %xmm1
; nextln: packsswb %xmm1, %xmm0
; i16x4 arithmetic shifts: x86 does not have a instruction for this
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 12)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movdqa %xmm0, %xmm1
; Inst 3: movdqa %xmm1, %xmm0
; Inst 4: punpcklbw %xmm0, %xmm0
; Inst 5: psraw $11, %xmm0
; Inst 6: punpckhbw %xmm1, %xmm1
; Inst 7: psraw $11, %xmm1
; Inst 8: packsswb %xmm1, %xmm0
; Inst 9: movq %rbp, %rsp
; Inst 10: popq %rbp
; Inst 11: ret
; }}
function %sshr_i64x2(i64x2, i32) -> i64x2 {
block0(v0: i64x2, v1: i32):
v2 = sshr v0, v1
return v2
}
; check: pextrd.w $$0, %xmm0, %rsi
; nextln: pextrd.w $$1, %xmm0, %rax
; nextln: movq %rdi, %rcx
; nextln: sarq %cl, %rsi
; nextln: movq %rdi, %rcx
; nextln: sarq %cl, %rax
; nextln: pinsrd.w $$0, %rsi, %xmm1
; nextln: pinsrd.w $$1, %rax, %xmm1
; nextln: movdqa %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 15)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movdqa %xmm0, %xmm1
; Inst 3: pextrd.w $0, %xmm0, %rsi
; Inst 4: pextrd.w $1, %xmm0, %rax
; Inst 5: movq %rdi, %rcx
; Inst 6: sarq %cl, %rsi
; Inst 7: movq %rdi, %rcx
; Inst 8: sarq %cl, %rax
; Inst 9: pinsrd.w $0, %rsi, %xmm1
; Inst 10: pinsrd.w $1, %rax, %xmm1
; Inst 11: movdqa %xmm1, %xmm0
; Inst 12: movq %rbp, %rsp
; Inst 13: popq %rbp
; Inst 14: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set enable_simd
target x86_64 skylake
@@ -7,32 +7,81 @@ block0(v0: i32x4, v1: i32x4):
v2 = icmp ne v0, v1
return v2
}
; check: pcmpeqd %xmm1, %xmm0
; nextln: pcmpeqd %xmm1, %xmm1
; nextln: pxor %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: pcmpeqd %xmm1, %xmm0
; Inst 3: pcmpeqd %xmm1, %xmm1
; Inst 4: pxor %xmm1, %xmm0
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %icmp_ugt_i32x4(i32x4, i32x4) -> b32x4 {
block0(v0: i32x4, v1: i32x4):
v2 = icmp ugt v0, v1
return v2
}
; check: pmaxud %xmm1, %xmm0
; nextln: pcmpeqd %xmm1, %xmm0
; nextln: pcmpeqd %xmm1, %xmm1
; nextln: pxor %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 9)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: pmaxud %xmm1, %xmm0
; Inst 3: pcmpeqd %xmm1, %xmm0
; Inst 4: pcmpeqd %xmm1, %xmm1
; Inst 5: pxor %xmm1, %xmm0
; Inst 6: movq %rbp, %rsp
; Inst 7: popq %rbp
; Inst 8: ret
; }}
function %icmp_sge_i16x8(i16x8, i16x8) -> b16x8 {
block0(v0: i16x8, v1: i16x8):
v2 = icmp sge v0, v1
return v2
}
; check: pminsw %xmm1, %xmm0
; nextln: pcmpeqw %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: pminsw %xmm1, %xmm0
; Inst 3: pcmpeqw %xmm1, %xmm0
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}
function %icmp_uge_i8x16(i8x16, i8x16) -> b8x16 {
block0(v0: i8x16, v1: i8x16):
v2 = icmp uge v0, v1
return v2
}
; check: pminub %xmm1, %xmm0
; nextln: pcmpeqb %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: pminub %xmm1, %xmm0
; Inst 3: pcmpeqb %xmm1, %xmm0
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set enable_simd
target x86_64 has_ssse3 has_sse41
@@ -8,31 +8,50 @@ function %shuffle_different_ssa_values() -> i8x16 {
block0:
v0 = vconst.i8x16 0x00
v1 = vconst.i8x16 0x01
v2 = shuffle v0, v1, 0x11000000000000000000000000000000 ; pick the second lane of v1, the rest use the first lane of v0
v2 = shuffle v0, v1, 0x11000000000000000000000000000000 ;; pick the second lane of v1, the rest use the first lane of v0
return v2
}
; check: load_const VCodeConstant(3), %xmm1
; nextln: load_const VCodeConstant(2), %xmm0
; nextln: load_const VCodeConstant(0), %xmm2
; nextln: pshufb %xmm2, %xmm1
; nextln: load_const VCodeConstant(1), %xmm2
; nextln: pshufb %xmm2, %xmm0
; nextln: orps %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 12)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: load_const VCodeConstant(3), %xmm1
; Inst 3: load_const VCodeConstant(2), %xmm0
; Inst 4: load_const VCodeConstant(0), %xmm2
; Inst 5: pshufb %xmm2, %xmm1
; Inst 6: load_const VCodeConstant(1), %xmm2
; Inst 7: pshufb %xmm2, %xmm0
; Inst 8: orps %xmm1, %xmm0
; Inst 9: movq %rbp, %rsp
; Inst 10: popq %rbp
; Inst 11: ret
; }}
function %shuffle_same_ssa_value() -> i8x16 {
block0:
v1 = vconst.i8x16 0x01
v2 = shuffle v1, v1, 0x13000000000000000000000000000000 ; pick the fourth lane of v1 and the rest from the first lane of v1
v2 = shuffle v1, v1, 0x13000000000000000000000000000000 ;; pick the fourth lane of v1 and the rest from the first lane of v1
return v2
}
; check: load_const VCodeConstant(1), %xmm0
; nextln: load_const VCodeConstant(0), %xmm1
; nextln: pshufb %xmm1, %xmm0
;; swizzle
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: load_const VCodeConstant(1), %xmm0
; Inst 3: load_const VCodeConstant(0), %xmm1
; Inst 4: pshufb %xmm1, %xmm0
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %swizzle() -> i8x16 {
block0:
@@ -41,26 +60,46 @@ block0:
v2 = swizzle.i8x16 v0, v1
return v2
}
; check: load_const VCodeConstant(1), %xmm1
; nextln: load_const VCodeConstant(1), %xmm0
; nextln: load_const VCodeConstant(0), %xmm2
; nextln: paddusb %xmm2, %xmm0
; nextln: pshufb %xmm0, %xmm1
; nextln: movdqa %xmm1, %xmm0
;; splat
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: load_const VCodeConstant(1), %xmm1
; Inst 3: load_const VCodeConstant(1), %xmm0
; Inst 4: load_const VCodeConstant(0), %xmm2
; Inst 5: paddusb %xmm2, %xmm0
; Inst 6: pshufb %xmm0, %xmm1
; Inst 7: movdqa %xmm1, %xmm0
; Inst 8: movq %rbp, %rsp
; Inst 9: popq %rbp
; Inst 10: ret
; }}
function %splat_i8(i8) -> i8x16 {
block0(v0: i8):
v1 = splat.i8x16 v0
return v1
}
; check: uninit %xmm0
; nextln: pinsrb $$0, %rdi, %xmm0
; nextln: pxor %xmm1, %xmm1
; nextln: pshufb %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 9)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: uninit %xmm0
; Inst 3: pinsrb $0, %rdi, %xmm0
; Inst 4: pxor %xmm1, %xmm1
; Inst 5: pshufb %xmm1, %xmm0
; Inst 6: movq %rbp, %rsp
; Inst 7: popq %rbp
; Inst 8: ret
; }}
function %splat_b16() -> b16x8 {
block0:
@@ -68,56 +107,121 @@ block0:
v1 = splat.b16x8 v0
return v1
}
; check: uninit %xmm0
; nextln: pinsrw $$0, %rsi, %xmm0
; nextln: pinsrw $$1, %rsi, %xmm0
; nextln: pshufd $$0, %xmm0, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 10)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movl $65535, %esi
; Inst 3: uninit %xmm0
; Inst 4: pinsrw $0, %rsi, %xmm0
; Inst 5: pinsrw $1, %rsi, %xmm0
; Inst 6: pshufd $0, %xmm0, %xmm0
; Inst 7: movq %rbp, %rsp
; Inst 8: popq %rbp
; Inst 9: ret
; }}
function %splat_i32(i32) -> i32x4 {
block0(v0: i32):
v1 = splat.i32x4 v0
return v1
}
; check: uninit %xmm0
; nextln: pinsrd $$0, %rdi, %xmm0
; nextln: pshufd $$0, %xmm0, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: uninit %xmm0
; Inst 3: pinsrd $0, %rdi, %xmm0
; Inst 4: pshufd $0, %xmm0, %xmm0
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %splat_f64(f64) -> f64x2 {
block0(v0: f64):
v1 = splat.f64x2 v0
return v1
}
; check: uninit %xmm1
; nextln: movsd %xmm0, %xmm1
; nextln: movlhps %xmm0, %xmm1
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 9)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: uninit %xmm1
; Inst 3: movsd %xmm0, %xmm1
; Inst 4: movlhps %xmm0, %xmm1
; Inst 5: movdqa %xmm1, %xmm0
; Inst 6: movq %rbp, %rsp
; Inst 7: popq %rbp
; Inst 8: ret
; }}
;; load*_zero
; Verify that a `load` followed by a `scalar_to_vector` (the CLIF translation of `load32_zero`) is
; lowered to a single MOVSS instruction.
function %load32_zero_coalesced(i64) -> i32x4 {
block0(v0: i64):
v1 = load.i32 v0
v2 = scalar_to_vector.i32x4 v1
; check: movss 0(%rdi), %xmm0
return v2
}
;; Verify that `scalar_to_vector` (used by `load32_zero`), lowers as expected.
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movss 0(%rdi), %xmm0
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}
function %load32_zero_int(i32) -> i32x4 {
block0(v0: i32):
v1 = scalar_to_vector.i32x4 v0
; check: movd %edi, %xmm0
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movd %edi, %xmm0
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}
function %load32_zero_float(f32) -> f32x4 {
block0(v0: f32):
v1 = scalar_to_vector.f32x4 v0
; regex: MOV=movap*
; check: pushq
; not: $MOV
; check: ret
return v1
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq %rbp, %rsp
; Inst 3: popq %rbp
; Inst 4: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set enable_simd
target x86_64 skylake
@@ -7,23 +7,62 @@ block0(v0: b32x4):
v1 = bnot v0
return v1
}
; check: pcmpeqd %xmm1, %xmm1
; nextln: pxor %xmm1, %xmm0
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: pcmpeqd %xmm1, %xmm1
; Inst 3: pxor %xmm1, %xmm0
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}
function %vany_true_b32x4(b32x4) -> b1 {
block0(v0: b32x4):
v1 = vany_true v0
return v1
}
; check: ptest %xmm0, %xmm0
; nextln: setnz %sil
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: ptest %xmm0, %xmm0
; Inst 3: setnz %sil
; Inst 4: movq %rsi, %rax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function %vall_true_i64x2(i64x2) -> b1 {
block0(v0: i64x2):
v1 = vall_true v0
return v1
}
; check: pxor %xmm1, %xmm1
; nextln: pcmpeqq %xmm0, %xmm1
; nextln: ptest %xmm1, %xmm1
; nextln: setz %sil
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 10)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: pxor %xmm1, %xmm1
; Inst 3: pcmpeqq %xmm0, %xmm1
; Inst 4: ptest %xmm1, %xmm1
; Inst 5: setz %sil
; Inst 6: movq %rsi, %rax
; Inst 7: movq %rbp, %rsp
; Inst 8: popq %rbp
; Inst 9: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64
function u0:0(i64 sarg(64)) -> i8 system_v {
@@ -7,14 +7,20 @@ block0(v0: i64):
return v1
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: lea 16(%rbp), %rsi
; nextln: movzbq 0(%rsi), %rsi
; nextln: movq %rsi, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: lea 16(%rbp), %rsi
; Inst 3: movzbq 0(%rsi), %rsi
; Inst 4: movq %rsi, %rax
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}
function u0:1(i64 sarg(64), i64) -> i8 system_v {
block0(v0: i64, v1: i64):
@@ -24,16 +30,22 @@ block0(v0: i64, v1: i64):
return v4
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: lea 16(%rbp), %rsi
; nextln: movzbq 0(%rdi), %rdi
; nextln: movzbq 0(%rsi), %rsi
; nextln: addl %esi, %edi
; nextln: movq %rdi, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 10)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: lea 16(%rbp), %rsi
; Inst 3: movzbq 0(%rdi), %rdi
; Inst 4: movzbq 0(%rsi), %rsi
; Inst 5: addl %esi, %edi
; Inst 6: movq %rdi, %rax
; Inst 7: movq %rbp, %rsp
; Inst 8: popq %rbp
; Inst 9: ret
; }}
function u0:2(i64) -> i8 system_v {
fn1 = colocated u0:0(i64 sarg(64)) -> i8 system_v
@@ -43,21 +55,27 @@ block0(v0: i64):
return v1
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movq %rdi, %rsi
; nextln: subq $$64, %rsp
; nextln: virtual_sp_offset_adjust 64
; nextln: lea 0(%rsp), %rdi
; nextln: movl $$64, %edx
; nextln: load_ext_name %Memcpy+0, %rcx
; nextln: call *%rcx
; nextln: call User { namespace: 0, index: 0 }
; nextln: addq $$64, %rsp
; nextln: virtual_sp_offset_adjust -64
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 15)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq %rdi, %rsi
; Inst 3: subq $64, %rsp
; Inst 4: virtual_sp_offset_adjust 64
; Inst 5: lea 0(%rsp), %rdi
; Inst 6: movl $64, %edx
; Inst 7: load_ext_name %Memcpy+0, %rcx
; Inst 8: call *%rcx
; Inst 9: call User { namespace: 0, index: 0 }
; Inst 10: addq $64, %rsp
; Inst 11: virtual_sp_offset_adjust -64
; Inst 12: movq %rbp, %rsp
; Inst 13: popq %rbp
; Inst 14: ret
; }}
function u0:3(i64, i64) -> i8 system_v {
fn1 = colocated u0:0(i64, i64 sarg(64)) -> i8 system_v
@@ -67,26 +85,32 @@ block0(v0: i64, v1: i64):
return v2
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %rdi, %r12
; nextln: subq $$64, %rsp
; nextln: virtual_sp_offset_adjust 64
; nextln: lea 0(%rsp), %rdi
; nextln: movl $$64, %edx
; nextln: load_ext_name %Memcpy+0, %rcx
; nextln: call *%rcx
; nextln: movq %r12, %rdi
; nextln: call User { namespace: 0, index: 0 }
; nextln: addq $$64, %rsp
; nextln: virtual_sp_offset_adjust -64
; nextln: movq 0(%rsp), %r12
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 20)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: subq $16, %rsp
; Inst 3: movq %r12, 0(%rsp)
; Inst 4: movq %rdi, %r12
; Inst 5: subq $64, %rsp
; Inst 6: virtual_sp_offset_adjust 64
; Inst 7: lea 0(%rsp), %rdi
; Inst 8: movl $64, %edx
; Inst 9: load_ext_name %Memcpy+0, %rcx
; Inst 10: call *%rcx
; Inst 11: movq %r12, %rdi
; Inst 12: call User { namespace: 0, index: 0 }
; Inst 13: addq $64, %rsp
; Inst 14: virtual_sp_offset_adjust -64
; Inst 15: movq 0(%rsp), %r12
; Inst 16: addq $16, %rsp
; Inst 17: movq %rbp, %rsp
; Inst 18: popq %rbp
; Inst 19: ret
; }}
function u0:4(i64 sarg(128), i64 sarg(64)) -> i8 system_v {
block0(v0: i64, v1: i64):
@@ -96,16 +120,23 @@ block0(v0: i64, v1: i64):
return v4
}
; check: movq %rsp, %rbp
; nextln: lea 16(%rbp), %rsi
; nextln: lea 144(%rbp), %rdi
; nextln: movzbq 0(%rsi), %rsi
; nextln: movzbq 0(%rdi), %rdi
; nextln: addl %edi, %esi
; nextln: movq %rsi, %rax
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 11)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: lea 16(%rbp), %rsi
; Inst 3: lea 144(%rbp), %rdi
; Inst 4: movzbq 0(%rsi), %rsi
; Inst 5: movzbq 0(%rdi), %rdi
; Inst 6: addl %edi, %esi
; Inst 7: movq %rsi, %rax
; Inst 8: movq %rbp, %rsp
; Inst 9: popq %rbp
; Inst 10: ret
; }}
function u0:5(i64, i64, i64) -> i8 system_v {
fn1 = colocated u0:0(i64, i64 sarg(128), i64 sarg(64)) -> i8 system_v
@@ -115,30 +146,38 @@ block0(v0: i64, v1: i64, v2: i64):
return v3
}
; check: movq %rsp, %rbp
; nextln: subq $$16, %rsp
; nextln: movq %r12, 0(%rsp)
; nextln: movq %r13, 8(%rsp)
; nextln: movq %rdi, %r12
; nextln: movq %rdx, %r13
; nextln: subq $$192, %rsp
; nextln: virtual_sp_offset_adjust 192
; nextln: lea 0(%rsp), %rdi
; nextln: movl $$128, %edx
; nextln: load_ext_name %Memcpy+0, %rcx
; nextln: call *%rcx
; nextln: lea 128(%rsp), %rdi
; nextln: movq %r13, %rsi
; nextln: movl $$64, %edx
; nextln: load_ext_name %Memcpy+0, %rcx
; nextln: call *%rcx
; nextln: movq %r12, %rdi
; nextln: call User { namespace: 0, index: 0 }
; nextln: addq $$192, %rsp
; nextln: virtual_sp_offset_adjust -192
; nextln: movq 0(%rsp), %r12
; nextln: movq 8(%rsp), %r13
; nextln: addq $$16, %rsp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 28)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: subq $16, %rsp
; Inst 3: movq %r12, 0(%rsp)
; Inst 4: movq %r13, 8(%rsp)
; Inst 5: movq %rdi, %r12
; Inst 6: movq %rdx, %r13
; Inst 7: subq $192, %rsp
; Inst 8: virtual_sp_offset_adjust 192
; Inst 9: lea 0(%rsp), %rdi
; Inst 10: movl $128, %edx
; Inst 11: load_ext_name %Memcpy+0, %rcx
; Inst 12: call *%rcx
; Inst 13: lea 128(%rsp), %rdi
; Inst 14: movq %r13, %rsi
; Inst 15: movl $64, %edx
; Inst 16: load_ext_name %Memcpy+0, %rcx
; Inst 17: call *%rcx
; Inst 18: movq %r12, %rdi
; Inst 19: call User { namespace: 0, index: 0 }
; Inst 20: addq $192, %rsp
; Inst 21: virtual_sp_offset_adjust -192
; Inst 22: movq 0(%rsp), %r12
; Inst 23: movq 8(%rsp), %r13
; Inst 24: addq $16, %rsp
; Inst 25: movq %rbp, %rsp
; Inst 26: popq %rbp
; Inst 27: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64
function %f0(i64 sret) {
@@ -8,11 +8,18 @@ block0(v0: i64):
return
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movq %rdi, %rax
; nextln: movl $$42, %esi
; nextln: movq %rsi, 0(%rdi)
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 8)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq %rdi, %rax
; Inst 3: movl $42, %esi
; Inst 4: movq %rsi, 0(%rdi)
; Inst 5: movq %rbp, %rsp
; Inst 6: popq %rbp
; Inst 7: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
set tls_model=elf_gd
target x86_64
@@ -10,9 +10,16 @@ block0(v0: i32):
return v1
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: elf_tls_get_addr User { namespace: 1, index: 0 }
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 6)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: elf_tls_get_addr User { namespace: 1, index: 0 }
; Inst 3: movq %rbp, %rsp
; Inst 4: popq %rbp
; Inst 5: ret
; }}

View File

@@ -1,16 +1,24 @@
test compile
test compile precise-output
target x86_64
function %elide_uextend_add(i32, i32) -> i64 {
block0(v0: i32, v1: i32):
; check: pushq %rbp
; check: movq %rsp, %rbp
v2 = iadd v0, v1
; check: addl %esi, %edi
v3 = uextend.i64 v2
; check: movq %rdi, %rax
; check: movq %rbp, %rsp
; check: popq %rbp
; check: ret
return v3
}
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 7)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: addl %esi, %edi
; Inst 3: movq %rdi, %rax
; Inst 4: movq %rbp, %rsp
; Inst 5: popq %rbp
; Inst 6: ret
; }}

View File

@@ -1,4 +1,4 @@
test compile
test compile precise-output
target x86_64
;; From: https://github.com/bytecodealliance/wasmtime/issues/2670
@@ -13,8 +13,15 @@ block1:
trap unreachable
}
; check: pushq %rbp
; nextln: movq %rsp, %rbp
; nextln: movq %rbp, %rsp
; nextln: popq %rbp
; nextln: ret
; VCode_ShowWithRRU {{
; Entry block: 0
; Block 0:
; (original IR block: block0)
; (instruction range: 0 .. 5)
; Inst 0: pushq %rbp
; Inst 1: movq %rsp, %rbp
; Inst 2: movq %rbp, %rsp
; Inst 3: popq %rbp
; Inst 4: ret
; }}

View File

@@ -121,6 +121,7 @@ fn check_precise_output(text: &str, context: &Context) -> Result<()> {
.details
.comments
.iter()
.filter(|c| !c.text.starts_with(";;"))
.map(|c| c.text.strip_prefix("; ").unwrap_or(c.text))
.collect::<Vec<_>>();