refactor: move all 'filetests/vcode' tests to 'filetests/isa'

Andrew Brown
2020-09-28 15:16:23 -07:00
parent 452d854855
commit b43f4a464a
34 changed files with 0 additions and 0 deletions


@@ -0,0 +1,271 @@
test compile
target aarch64
function %f0(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
v2 = uextend.i64 v1
v3 = load_complex.i32 v0+v2
return v3
}
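; Note: the zero-extend feeding the address is folded into the load's
; extended-register addressing mode (UXTW) rather than emitted separately.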
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr w0, [x0, w1, UXTW]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f2(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
v2 = uextend.i64 v1
v3 = load_complex.i32 v2+v0
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr w0, [x0, w1, UXTW]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f3(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = load_complex.i32 v0+v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr w0, [x0, w1, SXTW]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f4(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = load_complex.i32 v2+v0
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr w0, [x0, w1, SXTW]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f5(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = iadd.i64 v0, v2
v4 = load.i32 v3
return v4
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr w0, [x0, w1, SXTW]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f6(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = iadd.i64 v2, v0
v4 = load.i32 v3
return v4
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr w0, [x0, w1, SXTW]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f7(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = uextend.i64 v0
v3 = uextend.i64 v1
v4 = iadd.i64 v2, v3
v5 = load.i32 v4
return v5
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: ldr w0, [x0, w1, UXTW]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f8(i64, i32) -> i32 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = iconst.i64 32
v4 = iadd.i64 v2, v3
v5 = iadd.i64 v4, v0
v6 = iadd.i64 v5, v5
v7 = load.i32 v6+4
return v7
}
; v6+4 = 2*v5 + 4 = 2*v4 + 2*v0 + 4 = 2*v2 + 2*v3 + 2*v0 + 4
; = 2*sextend($x1) + 2*$x0 + 68
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x2, x0, #68
; nextln: add x0, x2, x0
; nextln: add x0, x0, x1, SXTW
; nextln: ldr w0, [x0, w1, SXTW]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f9(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
v3 = iconst.i64 48
v4 = iadd.i64 v0, v1
v5 = iadd.i64 v4, v2
v6 = iadd.i64 v5, v3
v7 = load.i32 v6
return v7
}
; v6 = $x0 + $x1 + $x2 + 48
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x0, x0, x2
; nextln: add x0, x0, x1
; nextln: ldur w0, [x0, #48]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f10(i64, i64, i64) -> i32 {
block0(v0: i64, v1: i64, v2: i64):
v3 = iconst.i64 4100
v4 = iadd.i64 v0, v1
v5 = iadd.i64 v4, v2
v6 = iadd.i64 v5, v3
v7 = load.i32 v6
return v7
}
; v6 = $x0 + $x1 + $x2 + 4100
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x3, #4100
; nextln: add x1, x3, x1
; nextln: add x1, x1, x2
; nextln: ldr w0, [x1, x0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f10() -> i32 {
block0:
v1 = iconst.i64 1234
v2 = load.i32 v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #1234
; nextln: ldr w0, [x0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f11(i64) -> i32 {
block0(v0: i64):
v1 = iconst.i64 8388608 ; Imm12: 0x800 << 12
v2 = iadd.i64 v0, v1
v3 = load.i32 v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x0, x0, #8388608
; nextln: ldr w0, [x0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f12(i64) -> i32 {
block0(v0: i64):
v1 = iconst.i64 -4
v2 = iadd.i64 v0, v1
v3 = load.i32 v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub x0, x0, #4
; nextln: ldr w0, [x0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f13(i64) -> i32 {
block0(v0: i64):
v1 = iconst.i64 1000000000
v2 = iadd.i64 v0, v1
v3 = load.i32 v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w1, #51712
; nextln: movk w1, #15258, LSL #16
; nextln: add x0, x1, x0
; nextln: ldr w0, [x0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f14(i32) -> i32 {
block0(v0: i32):
v1 = sextend.i64 v0
v2 = load.i32 v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x0, w0
; nextln: ldr w0, [x0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f15(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = sextend.i64 v0
v3 = sextend.i64 v1
v4 = iadd.i64 v2, v3
v5 = load.i32 v4
return v5
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x0, w0
; nextln: ldr w0, [x0, w1, SXTW]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,424 @@
test compile
target aarch64
function %f1(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iadd.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f2(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = isub.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f3(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = imul.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: madd x0, x0, x1, xzr
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = umulhi.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: umulh x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f5(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = smulhi.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: smulh x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f6(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = sdiv.i64 v0, v1
return v2
}
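; Note: the cbnz skips a udf (divide-by-zero trap); the adds/ccmp/b.vc
; sequence then traps only when x0 == INT64_MIN and x1 == -1 (signed overflow).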
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sdiv x2, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: adds xzr, x1, #1
; nextln: ccmp x0, #1, #nzcv, eq
; nextln: b.vc 8 ; udf
; nextln: mov x0, x2
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f7(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 2
v2 = sdiv.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x2, #2
; nextln: sdiv x1, x0, x2
; nextln: cbnz x2, 8 ; udf
; nextln: adds xzr, x2, #1
; nextln: ccmp x0, #1, #nzcv, eq
; nextln: b.vc 8 ; udf
; nextln: mov x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = udiv.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: udiv x0, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f9(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 2
v2 = udiv.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x1, #2
; nextln: udiv x0, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f10(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = srem.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sdiv x2, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: msub x0, x2, x1, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f11(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = urem.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: udiv x2, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: msub x0, x2, x1, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f12(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = sdiv.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x3, w0
; nextln: sxtw x2, w1
; nextln: sdiv x0, x3, x2
; nextln: cbnz x2, 8 ; udf
; nextln: adds wzr, w2, #1
; nextln: ccmp w3, #1, #nzcv, eq
; nextln: b.vc 8 ; udf
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f13(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 2
v2 = sdiv.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x0, w0
; nextln: movz x1, #2
; nextln: sxtw x2, w1
; nextln: sdiv x1, x0, x2
; nextln: cbnz x2, 8 ; udf
; nextln: adds wzr, w2, #1
; nextln: ccmp w0, #1, #nzcv, eq
; nextln: b.vc 8 ; udf
; nextln: mov x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f14(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = udiv.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: mov w1, w1
; nextln: udiv x0, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f15(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 2
v2 = udiv.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: movz x1, #2
; nextln: udiv x0, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f16(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = srem.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x0, w0
; nextln: sxtw x1, w1
; nextln: sdiv x2, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: msub x0, x2, x1, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = urem.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: mov w1, w1
; nextln: udiv x2, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: msub x0, x2, x1, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f18(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = band.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: and x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f19(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bor.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: orr x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f20(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bxor.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: eor x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f21(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = band_not.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: bic x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f22(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bor_not.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: orn x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f23(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bxor_not.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: eon x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f24(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bnot.i64 v0
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: orn x0, xzr, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f25(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = iconst.i32 53
v3 = ishl.i32 v0, v2
v4 = isub.i32 v1, v3
return v4
}
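; The shift amount 53 is masked to the i32 width (53 & 31 = 21), and the
; shift is folded into the subtract's shifted-register form.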
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub w0, w1, w0, LSL 21
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f26(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 -1
v2 = iadd.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub w0, w0, #1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f27(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 -1
v2 = isub.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add w0, w0, #1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f28(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 -1
v2 = isub.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x0, x0, #1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,14 @@
test compile
target aarch64
function %f(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
; check: stp fp, lr, [sp, #-16]!
; check: mov fp, sp
v2 = iadd v0, v1
; check: add w0, w0, w1
return v2
; check: mov sp, fp
; check: ldp fp, lr, [sp], #16
; check: ret
}


@@ -0,0 +1,326 @@
test compile
target aarch64
function %a(i8) -> i8 {
block0(v0: i8):
v1 = bitrev v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: lsr w0, w0, #24
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %a(i16) -> i16 {
block0(v0: i16):
v1 = bitrev v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: lsr w0, w0, #16
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %a(i32) -> i32 {
block0(v0: i32):
v1 = bitrev v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %a(i64) -> i64 {
block0(v0: i64):
v1 = bitrev v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %b(i8) -> i8 {
block0(v0: i8):
v1 = clz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %b(i16) -> i16 {
block0(v0: i16):
v1 = clz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %b(i32) -> i32 {
block0(v0: i32):
v1 = clz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %b(i64) -> i64 {
block0(v0: i64):
v1 = clz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: clz x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %c(i8) -> i8 {
block0(v0: i8):
v1 = cls v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: cls w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %c(i16) -> i16 {
block0(v0: i16):
v1 = cls v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: cls w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %c(i32) -> i32 {
block0(v0: i32):
v1 = cls v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: cls w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %c(i64) -> i64 {
block0(v0: i64):
v1 = cls v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: cls x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i8) -> i8 {
block0(v0: i8):
v1 = ctz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: lsr w0, w0, #24
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i16) -> i16 {
block0(v0: i16):
v1 = ctz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: lsr w0, w0, #16
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i32) -> i32 {
block0(v0: i32):
v1 = ctz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i64) -> i64 {
block0(v0: i64):
v1 = ctz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit x0, x0
; nextln: clz x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i64) -> i64 {
block0(v0: i64):
v1 = popcnt v0
return v1
}
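; SWAR popcount: the immediates below are 0x5555555555555555,
; 0x3333333333333333 and 0x0f0f0f0f0f0f0f0f printed in decimal; the final
; add/LSL chain multiplies by 0x0101010101010101 so the count lands in the
; top byte before the lsr #56.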
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr x1, x0, #1
; nextln: and x1, x1, #6148914691236517205
; nextln: sub x1, x0, x1
; nextln: and x0, x1, #3689348814741910323
; nextln: lsr x1, x1, #2
; nextln: and x1, x1, #3689348814741910323
; nextln: add x0, x1, x0
; nextln: add x0, x0, x0, LSR 4
; nextln: and x0, x0, #1085102592571150095
; nextln: add x0, x0, x0, LSL 8
; nextln: add x0, x0, x0, LSL 16
; nextln: add x0, x0, x0, LSL 32
; nextln: lsr x0, x0, #56
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i32) -> i32 {
block0(v0: i32):
v1 = popcnt v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: lsr w1, w0, #1
; nextln: and x1, x1, #6148914691236517205
; nextln: sub x1, x0, x1
; nextln: and x0, x1, #3689348814741910323
; nextln: lsr x1, x1, #2
; nextln: and x1, x1, #3689348814741910323
; nextln: add x0, x1, x0
; nextln: add x0, x0, x0, LSR 4
; nextln: and x0, x0, #1085102592571150095
; nextln: add x0, x0, x0, LSL 8
; nextln: add x0, x0, x0, LSL 16
; nextln: add x0, x0, x0, LSL 32
; nextln: lsr x0, x0, #56
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i16) -> i16 {
block0(v0: i16):
v1 = popcnt v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth x0, w0
; nextln: lsr w1, w0, #1
; nextln: and x1, x1, #6148914691236517205
; nextln: sub x1, x0, x1
; nextln: and x0, x1, #3689348814741910323
; nextln: lsr x1, x1, #2
; nextln: and x1, x1, #3689348814741910323
; nextln: add x0, x1, x0
; nextln: add x0, x0, x0, LSR 4
; nextln: and x0, x0, #1085102592571150095
; nextln: add x0, x0, x0, LSL 8
; nextln: add x0, x0, x0, LSL 16
; nextln: add x0, x0, x0, LSL 32
; nextln: lsr x0, x0, #56
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i8) -> i8 {
block0(v0: i8):
v1 = popcnt v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb x0, w0
; nextln: lsr w1, w0, #1
; nextln: and x1, x1, #6148914691236517205
; nextln: sub x1, x0, x1
; nextln: and x0, x1, #3689348814741910323
; nextln: lsr x1, x1, #2
; nextln: and x1, x1, #3689348814741910323
; nextln: add x0, x1, x0
; nextln: add x0, x0, x0, LSR 4
; nextln: and x0, x0, #1085102592571150095
; nextln: add x0, x0, x0, LSL 8
; nextln: add x0, x0, x0, LSL 16
; nextln: add x0, x0, x0, LSL 32
; nextln: lsr x0, x0, #56
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,16 @@
test compile
target aarch64
function %f(i64, i64) -> i64 {
sig0 = (i64) -> i64
block0(v0: i64, v1: i64):
v2 = call_indirect.i64 sig0, v1(v0)
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: blr x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,132 @@
test compile
target aarch64
function %f1(i64) -> i64 {
fn0 = %g(i64) -> i64
block0(v0: i64):
v1 = call fn0(v0)
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr x1, 8 ; b 12 ; data
; nextln: blr x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f2(i32) -> i64 {
fn0 = %g(i32 uext) -> i64
block0(v0: i32):
v1 = call fn0(v0)
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: ldr x1, 8 ; b 12 ; data
; nextln: blr x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f3(i32) -> i32 uext {
block0(v0: i32):
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f4(i32) -> i64 {
fn0 = %g(i32 sext) -> i64
block0(v0: i32):
v1 = call fn0(v0)
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x0, w0
; nextln: ldr x1, 8 ; b 12 ; data
; nextln: blr x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f5(i32) -> i32 sext {
block0(v0: i32):
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f6(i8) -> i64 {
fn0 = %g(i32, i32, i32, i32, i32, i32, i32, i32, i8 sext) -> i64
block0(v0: i8):
v1 = iconst.i32 42
v2 = call fn0(v1, v1, v1, v1, v1, v1, v1, v1, v0)
return v2
}
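; The ninth argument overflows the x0-x7 register set, so 16 bytes of
; argument space are reserved and the sign-extended i8 is stored at [sp].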
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov x8, x0
; nextln: sub sp, sp, #16
; nextln: virtual_sp_offset_adjust 16
; nextln: movz x0, #42
; nextln: movz x1, #42
; nextln: movz x2, #42
; nextln: movz x3, #42
; nextln: movz x4, #42
; nextln: movz x5, #42
; nextln: movz x6, #42
; nextln: movz x7, #42
; nextln: sxtb x8, w8
; nextln: stur x8, [sp]
; nextln: ldr x8, 8 ; b 12 ; data
; nextln: blr x8
; nextln: add sp, sp, #16
; nextln: virtual_sp_offset_adjust -16
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f7(i8) -> i32, i32, i32, i32, i32, i32, i32, i32, i8 sext {
block0(v0: i8):
v1 = iconst.i32 42
return v1, v1, v1, v1, v1, v1, v1, v1, v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov x9, x0
; nextln: mov x8, x1
; nextln: movz x0, #42
; nextln: movz x1, #42
; nextln: movz x2, #42
; nextln: movz x3, #42
; nextln: movz x4, #42
; nextln: movz x5, #42
; nextln: movz x6, #42
; nextln: movz x7, #42
; nextln: sxtb x9, w9
; nextln: stur x9, [x8]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,67 @@
test compile
target aarch64
function %f(i64, i64) -> b1 {
block0(v0: i64, v1: i64):
v2 = icmp eq v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, x0, x1
; nextln: cset x0, eq
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ifcmp v0, v1
brif eq v2, block1
jump block2
block1:
v4 = iconst.i64 1
return v4
block2:
v5 = iconst.i64 2
return v5
}
; check: Block 0:
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, x0, x1
; nextln: b.eq label1 ; b label2
; check: Block 1:
; check: movz x0, #1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; check: Block 2:
; check: movz x0, #2
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ifcmp v0, v1
brif eq v2, block1
jump block1
block1:
v4 = iconst.i64 1
return v4
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, x0, x1
; check: Block 1:
; check: movz x0, #1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,54 @@
test compile
target aarch64
function %f(i8, i64, i64) -> i64 {
block0(v0: i8, v1: i64, v2: i64):
v3 = iconst.i8 42
v4 = ifcmp v0, v3
v5 = selectif.i64 eq v4, v1, v2
return v5
}
; check: subs wzr
; check: csel x0, $(=x[0-9]+, x[0-9]+), eq
function %g(i8) -> b1 {
block0(v0: i8):
v3 = iconst.i8 42
v4 = ifcmp v0, v3
v5 = trueif eq v4
return v5
}
; check: subs wzr
; check: cset x0, eq
function %h(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
v3 = bitselect.i8 v0, v1, v2
return v3
}
; check: and
; nextln: bic
; nextln: orr
function %i(b1, i8, i8) -> i8 {
block0(v0: b1, v1: i8, v2: i8):
v3 = select.i8 v0, v1, v2
return v3
}
; check: subs wzr
; nextln: csel
function %i(i32, i8, i8) -> i8 {
block0(v0: i32, v1: i8, v2: i8):
v3 = iconst.i32 42
v4 = icmp.i32 eq v0, v3
v5 = select.i8 v4, v1, v2
return v5
}
; check: subs wzr, w0, #42
; nextln: csel x0, x1, x2, eq


@@ -0,0 +1,254 @@
test compile
target aarch64
function %f() -> b8 {
block0:
v0 = bconst.b8 true
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> b16 {
block0:
v0 = bconst.b16 false
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #65535
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff0000
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #65535, LSL #16
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff00000000
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #65535, LSL #32
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff000000000000
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #65535, LSL #48
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffffffffffffffff
return v0
}
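; movn writes the bitwise NOT of its shifted 16-bit immediate, so all-ones
; takes a single movn x0, #0.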
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffffffffffff0000
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #65535
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffffffff0000ffff
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #65535, LSL #16
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff0000ffffffff
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #65535, LSL #32
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0x0000ffffffffffff
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #65535, LSL #48
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xf34bf0a31212003a ; random digits
return v0
}
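; Materialized 16 bits at a time: 58 = 0x003a, 4626 = 0x1212,
; 61603 = 0xf0a3, 62283 = 0xf34b.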
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #58
; nextln: movk x0, #4626, LSL #16
; nextln: movk x0, #61603, LSL #32
; nextln: movk x0, #62283, LSL #48
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0x12e900001ef40000 ; random digits with 2 clear half words
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #7924, LSL #16
; nextln: movk x0, #4841, LSL #48
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0x12e9ffff1ef4ffff ; random digits with 2 full half words
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #57611, LSL #16
; nextln: movk x0, #4841, LSL #48
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i32 {
block0:
v0 = iconst.i32 -1
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: orr x0, xzr, #4294967295
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i32 {
block0:
v0 = iconst.i32 0xfffffff7
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn w0, #8
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xfffffff7
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn w0, #8
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xfffffffffffffff7
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #8
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,18 @@
test compile
target aarch64
function %f(i8) -> i64 {
block0(v0: i8):
v1 = sextend.i64 v0
v2 = iconst.i64 42
v3 = iadd.i64 v2, v1
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x1, #42
; nextln: add x0, x1, x0, SXTB
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,134 @@
test compile
target aarch64
function u0:0(i8) -> f32 {
block0(v0: i8):
; check: stp fp, lr, [sp, #-16]!
; check: mov fp, sp
v1 = fcvt_from_uint.f32 v0
; check: uxtb w0, w0
; check: ucvtf s0, w0
return v1
; check: mov sp, fp
; check: ldp fp, lr, [sp], #16
; check: ret
}
function u0:0(i8) -> f64 {
block0(v0: i8):
; check: stp fp, lr, [sp, #-16]!
; check: mov fp, sp
v1 = fcvt_from_uint.f64 v0
; check: uxtb w0, w0
; check: ucvtf d0, w0
return v1
; check: mov sp, fp
; check: ldp fp, lr, [sp], #16
; check: ret
}
function u0:0(i16) -> f32 {
block0(v0: i16):
; check: stp fp, lr, [sp, #-16]!
; check: mov fp, sp
v1 = fcvt_from_uint.f32 v0
; check: uxth w0, w0
; check: ucvtf s0, w0
return v1
; check: mov sp, fp
; check: ldp fp, lr, [sp], #16
; check: ret
}
function u0:0(i16) -> f64 {
block0(v0: i16):
; check: stp fp, lr, [sp, #-16]!
; check: mov fp, sp
v1 = fcvt_from_uint.f64 v0
; check: uxth w0, w0
; check: ucvtf d0, w0
return v1
; check: mov sp, fp
; check: ldp fp, lr, [sp], #16
; check: ret
}
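; For the narrow fcvt_to_uint cases below, the lowering first traps on NaN
; (fcmp + b.vc), then traps unless -1 < input < 2^width, before the fcvtzu.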
function u0:0(f32) -> i8 {
block0(v0: f32):
; check: stp fp, lr, [sp, #-16]!
; check: mov fp, sp
v1 = fcvt_to_uint.i8 v0
; check: fcmp s0, s0
; check: b.vc 8 ; udf
; check: ldr s1, pc+8 ; b 8 ; data.f32 -1
; check: fcmp s0, s1
; check: b.gt 8 ; udf
; check: ldr s1, pc+8 ; b 8 ; data.f32 256
; check: fcmp s0, s1
; check: b.mi 8 ; udf
; check: fcvtzu w0, s0
return v1
; check: mov sp, fp
; check: ldp fp, lr, [sp], #16
; check: ret
}
function u0:0(f64) -> i8 {
block0(v0: f64):
; check: stp fp, lr, [sp, #-16]!
; check: mov fp, sp
v1 = fcvt_to_uint.i8 v0
; check: fcmp d0, d0
; check: b.vc 8 ; udf
; check: ldr d1, pc+8 ; b 12 ; data.f64 -1
; check: fcmp d0, d1
; check: b.gt 8 ; udf
; check: ldr d1, pc+8 ; b 12 ; data.f64 256
; check: fcmp d0, d1
; check: b.mi 8 ; udf
; check: fcvtzu w0, d0
return v1
; check: mov sp, fp
; check: ldp fp, lr, [sp], #16
; check: ret
}
function u0:0(f32) -> i16 {
block0(v0: f32):
; check: stp fp, lr, [sp, #-16]!
; check: mov fp, sp
v1 = fcvt_to_uint.i16 v0
; check: fcmp s0, s0
; check: b.vc 8 ; udf
; check: ldr s1, pc+8 ; b 8 ; data.f32 -1
; check: fcmp s0, s1
; check: b.gt 8 ; udf
; check: ldr s1, pc+8 ; b 8 ; data.f32 65536
; check: fcmp s0, s1
; check: b.mi 8 ; udf
; check: fcvtzu w0, s0
return v1
; check: mov sp, fp
; check: ldp fp, lr, [sp], #16
; check: ret
}
function u0:0(f64) -> i16 {
block0(v0: f64):
; check: stp fp, lr, [sp, #-16]!
; check: mov fp, sp
v1 = fcvt_to_uint.i16 v0
; check: fcmp d0, d0
; check: b.vc 8 ; udf
; check: ldr d1, pc+8 ; b 12 ; data.f64 -1
; check: fcmp d0, d1
; check: b.gt 8 ; udf
; check: ldr d1, pc+8 ; b 12 ; data.f64 65536
; check: fcmp d0, d1
; check: b.mi 8 ; udf
; check: fcvtzu w0, d0
return v1
; check: mov sp, fp
; check: ldp fp, lr, [sp], #16
; check: ret
}


@@ -0,0 +1,846 @@
test compile
target aarch64
function %f1(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fadd v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fadd s0, s0, s1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f2(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fadd v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fadd d0, d0, d1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f3(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fsub v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fsub s0, s0, s1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f4(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fsub v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fsub d0, d0, d1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f5(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fmul v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fmul s0, s0, s1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f6(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fmul v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fmul d0, d0, d1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f7(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fdiv v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fdiv s0, s0, s1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f8(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fdiv v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fdiv d0, d0, d1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f9(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fmin v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fmin s0, s0, s1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f10(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fmin v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fmin d0, d0, d1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f11(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fmax v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fmax s0, s0, s1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f12(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fmax v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fmax d0, d0, d1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f13(f32) -> f32 {
block0(v0: f32):
v1 = sqrt v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fsqrt s0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f15(f64) -> f64 {
block0(v0: f64):
v1 = sqrt v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fsqrt d0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f16(f32) -> f32 {
block0(v0: f32):
v1 = fabs v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fabs s0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f17(f64) -> f64 {
block0(v0: f64):
v1 = fabs v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fabs d0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f18(f32) -> f32 {
block0(v0: f32):
v1 = fneg v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fneg s0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f19(f64) -> f64 {
block0(v0: f64):
v1 = fneg v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fneg d0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f20(f32) -> f64 {
block0(v0: f32):
v1 = fpromote.f64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fcvt d0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f21(f64) -> f32 {
block0(v0: f64):
v1 = fdemote.f32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fcvt s0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f22(f32) -> f32 {
block0(v0: f32):
v1 = ceil v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: frintp s0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f22(f64) -> f64 {
block0(v0: f64):
v1 = ceil v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: frintp d0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f23(f32) -> f32 {
block0(v0: f32):
v1 = floor v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: frintm s0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f24(f64) -> f64 {
block0(v0: f64):
v1 = floor v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: frintm d0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f25(f32) -> f32 {
block0(v0: f32):
v1 = trunc v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: frintz s0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f26(f64) -> f64 {
block0(v0: f64):
v1 = trunc v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: frintz d0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f27(f32) -> f32 {
block0(v0: f32):
v1 = nearest v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: frintn s0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f28(f64) -> f64 {
block0(v0: f64):
v1 = nearest v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: frintn d0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f29(f32, f32, f32) -> f32 {
block0(v0: f32, v1: f32, v2: f32):
v3 = fma v0, v1, v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fmadd s0, s0, s1, s2
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f30(f64, f64, f64) -> f64 {
block0(v0: f64, v1: f64, v2: f64):
v3 = fma v0, v1, v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fmadd d0, d0, d1, d2
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f31(f32, f32) -> f32 {
block0(v0: f32, v1: f32):
v2 = fcopysign v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ushr v1.2s, v1.2s, #31
; nextln: sli v0.2s, v1.2s, #31
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f32(f64, f64) -> f64 {
block0(v0: f64, v1: f64):
v2 = fcopysign v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ushr d1, d1, #63
; nextln: sli d0, d1, #63
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f33(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_uint.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fcmp s0, s0
; nextln: b.vc 8 ; udf
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 -1
; nextln: fcmp s0, s1
; nextln: b.gt 8 ; udf
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 4294967300
; nextln: fcmp s0, s1
; nextln: b.mi 8 ; udf
; nextln: fcvtzu w0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f34(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_sint.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fcmp s0, s0
; nextln: b.vc 8 ; udf
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 -2147483600
; nextln: fcmp s0, s1
; nextln: b.ge 8 ; udf
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 2147483600
; nextln: fcmp s0, s1
; nextln: b.mi 8 ; udf
; nextln: fcvtzs w0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f35(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_uint.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fcmp s0, s0
; nextln: b.vc 8 ; udf
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 -1
; nextln: fcmp s0, s1
; nextln: b.gt 8 ; udf
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 18446744000000000000
; nextln: fcmp s0, s1
; nextln: b.mi 8 ; udf
; nextln: fcvtzu x0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f36(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_sint.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fcmp s0, s0
; nextln: b.vc 8 ; udf
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 -9223372000000000000
; nextln: fcmp s0, s1
; nextln: b.ge 8 ; udf
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 9223372000000000000
; nextln: fcmp s0, s1
; nextln: b.mi 8 ; udf
; nextln: fcvtzs x0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f37(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_uint.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fcmp d0, d0
; nextln: b.vc 8 ; udf
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 -1
; nextln: fcmp d0, d1
; nextln: b.gt 8 ; udf
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 4294967296
; nextln: fcmp d0, d1
; nextln: b.mi 8 ; udf
; nextln: fcvtzu w0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f38(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_sint.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fcmp d0, d0
; nextln: b.vc 8 ; udf
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 -2147483649
; nextln: fcmp d0, d1
; nextln: b.gt 8 ; udf
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 2147483648
; nextln: fcmp d0, d1
; nextln: b.mi 8 ; udf
; nextln: fcvtzs w0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f39(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_uint.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fcmp d0, d0
; nextln: b.vc 8 ; udf
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 -1
; nextln: fcmp d0, d1
; nextln: b.gt 8 ; udf
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 18446744073709552000
; nextln: fcmp d0, d1
; nextln: b.mi 8 ; udf
; nextln: fcvtzu x0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f40(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_sint.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fcmp d0, d0
; nextln: b.vc 8 ; udf
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 -9223372036854776000
; nextln: fcmp d0, d1
; nextln: b.ge 8 ; udf
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 9223372036854776000
; nextln: fcmp d0, d1
; nextln: b.mi 8 ; udf
; nextln: fcvtzs x0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f41(i32) -> f32 {
block0(v0: i32):
v1 = fcvt_from_uint.f32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ucvtf s0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f42(i32) -> f32 {
block0(v0: i32):
v1 = fcvt_from_sint.f32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: scvtf s0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f43(i64) -> f32 {
block0(v0: i64):
v1 = fcvt_from_uint.f32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ucvtf s0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f44(i64) -> f32 {
block0(v0: i64):
v1 = fcvt_from_sint.f32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: scvtf s0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f45(i32) -> f64 {
block0(v0: i32):
v1 = fcvt_from_uint.f64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ucvtf d0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f46(i32) -> f64 {
block0(v0: i32):
v1 = fcvt_from_sint.f64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: scvtf d0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f47(i64) -> f64 {
block0(v0: i64):
v1 = fcvt_from_uint.f64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ucvtf d0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f48(i64) -> f64 {
block0(v0: i64):
v1 = fcvt_from_sint.f64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: scvtf d0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f49(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_uint_sat.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 4294967300
; nextln: fmin s2, s0, s1
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 0
; nextln: fmax s2, s2, s1
; nextln: fcmp s0, s0
; nextln: fcsel s0, s1, s2, ne
; nextln: fcvtzu w0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f50(f32) -> i32 {
block0(v0: f32):
v1 = fcvt_to_sint_sat.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 2147483600
; nextln: fmin s1, s0, s1
; nextln: ldr s2, pc+8 ; b 8 ; data.f32 -2147483600
; nextln: fmax s1, s1, s2
; nextln: ldr s2, pc+8 ; b 8 ; data.f32 0
; nextln: fcmp s0, s0
; nextln: fcsel s0, s2, s1, ne
; nextln: fcvtzs w0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f51(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_uint_sat.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 18446744000000000000
; nextln: fmin s2, s0, s1
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 0
; nextln: fmax s2, s2, s1
; nextln: fcmp s0, s0
; nextln: fcsel s0, s1, s2, ne
; nextln: fcvtzu x0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f52(f32) -> i64 {
block0(v0: f32):
v1 = fcvt_to_sint_sat.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr s1, pc+8 ; b 8 ; data.f32 9223372000000000000
; nextln: fmin s1, s0, s1
; nextln: ldr s2, pc+8 ; b 8 ; data.f32 -9223372000000000000
; nextln: fmax s1, s1, s2
; nextln: ldr s2, pc+8 ; b 8 ; data.f32 0
; nextln: fcmp s0, s0
; nextln: fcsel s0, s2, s1, ne
; nextln: fcvtzs x0, s0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f53(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_uint_sat.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 4294967295
; nextln: fmin d2, d0, d1
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 0
; nextln: fmax d2, d2, d1
; nextln: fcmp d0, d0
; nextln: fcsel d0, d1, d2, ne
; nextln: fcvtzu w0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f54(f64) -> i32 {
block0(v0: f64):
v1 = fcvt_to_sint_sat.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 2147483647
; nextln: fmin d1, d0, d1
; nextln: ldr d2, pc+8 ; b 12 ; data.f64 -2147483648
; nextln: fmax d1, d1, d2
; nextln: ldr d2, pc+8 ; b 12 ; data.f64 0
; nextln: fcmp d0, d0
; nextln: fcsel d0, d2, d1, ne
; nextln: fcvtzs w0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f55(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_uint_sat.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 18446744073709552000
; nextln: fmin d2, d0, d1
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 0
; nextln: fmax d2, d2, d1
; nextln: fcmp d0, d0
; nextln: fcsel d0, d1, d2, ne
; nextln: fcvtzu x0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f56(f64) -> i64 {
block0(v0: f64):
v1 = fcvt_to_sint_sat.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr d1, pc+8 ; b 12 ; data.f64 9223372036854776000
; nextln: fmin d1, d0, d1
; nextln: ldr d2, pc+8 ; b 12 ; data.f64 -9223372036854776000
; nextln: fmax d1, d1, d2
; nextln: ldr d2, pc+8 ; b 12 ; data.f64 0
; nextln: fcmp d0, d0
; nextln: fcsel d0, d2, d1, ne
; nextln: fcvtzs x0, d0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,56 @@
test compile
set enable_heap_access_spectre_mitigation=true
target aarch64
function %dynamic_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
gv1 = load.i32 notrap aligned gv0
heap0 = dynamic gv0, bound gv1, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0
return v2
}
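; With the Spectre mitigation enabled, the bounds check appears twice: once
; as a trapping branch, and again feeding a csel that clamps the computed
; address to 0 on the out-of-bounds path.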
; check: Block 0:
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr w2, [x0]
; nextln: add w2, w2, #0
; nextln: subs wzr, w1, w2
; nextln: b.ls label1 ; b label2
; check: Block 1:
; check: add x0, x0, x1, UXTW
; nextln: subs wzr, w1, w2
; nextln: movz x1, #0
; nextln: csel x0, x1, x0, hi
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; check: Block 2:
; check: udf
function %static_heap_check(i64 vmctx, i32) -> i64 {
gv0 = vmctx
heap0 = static gv0, bound 0x1_0000, offset_guard 0x1000, index_type i32
block0(v0: i64, v1: i32):
v2 = heap_addr.i64 heap0, v1, 0
return v2
}
; check: Block 0:
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs wzr, w1, #65536
; nextln: b.ls label1 ; b label2
; check: Block 1:
; check: add x0, x0, x1, UXTW
; nextln: subs wzr, w1, #65536
; nextln: movz x1, #0
; nextln: csel x0, x1, x0, hi
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; check: Block 2:
; check: udf


@@ -0,0 +1,32 @@
; Test that `put_input_in_rse` doesn't try to put the input of the `iconst` into a register, which
; would result in an out-of-bounds panic. (#2147)
test compile
target aarch64
function u0:0() -> i8 system_v {
block0:
v0 = iconst.i16 0xddcc
v1 = icmp.i16 ne v0, v0
v2 = bint.i8 v1
return v2
}
; check: VCode_ShowWithRRU {{
; nextln: Entry block: 0
; nextln: Block 0:
; nextln: (original IR block: block0)
; nextln: (instruction range: 0 .. 11)
; nextln: Inst 0: stp fp, lr, [sp, #-16]!
; nextln: Inst 1: mov fp, sp
; nextln: Inst 2: movz x0, #56780
; nextln: Inst 3: uxth w0, w0
; nextln: Inst 4: movz x1, #56780
; nextln: Inst 5: subs wzr, w0, w1, UXTH
; nextln: Inst 6: cset x0, ne
; nextln: Inst 7: and w0, w0, #1
; nextln: Inst 8: mov sp, fp
; nextln: Inst 9: ldp fp, lr, [sp], #16
; nextln: Inst 10: ret
; nextln: }}


@@ -0,0 +1,43 @@
test compile
target aarch64
function %f(i64) -> i64 {
jt0 = jump_table [block1, block2, block3]
block0(v0: i64):
br_table v0, block4, jt0
block1:
v1 = iconst.i64 1
jump block5(v1)
block2:
v2 = iconst.i64 2
jump block5(v2)
block3:
v3 = iconst.i64 3
jump block5(v3)
block4:
v4 = iconst.i64 4
jump block5(v4)
block5(v5: i64):
v6 = iadd.i64 v0, v5
return v6
}
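; The index is bounds-checked against the table size (3 entries) first;
; out-of-range indices branch to the default block before the compound
; adr/ldrsw/br jump-table sequence runs.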
; check: subs wzr, w0, #3
; nextln: b.hs label1 ; adr x1, pc+16 ; ldrsw x2, [x1, x0, LSL 2] ; add x1, x1, x2 ; br x1 ; jt_entries
; check: movz x1, #1
; nextln: b
; check: movz x1, #2
; nextln: b
; check: movz x1, #3
; check: add x0, x0, x1


@@ -0,0 +1,18 @@
test compile
target aarch64
;; Test default (non-SpiderMonkey) ABI.
function %f() -> i64, i64 {
block1:
v0 = iconst.i64 1
v1 = iconst.i64 2
return v0, v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #1
; nextln: movz x1, #2
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,69 @@
test compile
target aarch64
function %add8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = iadd.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %add16(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = iadd.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %add32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = iadd.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %add32_8(i32, i8) -> i32 {
block0(v0: i32, v1: i8):
v2 = sextend.i32 v1
v3 = iadd.i32 v0, v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add w0, w0, w1, SXTB
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %add64_32(i64, i32) -> i64 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = iadd.i64 v0, v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x0, x0, x1, SXTW
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,117 @@
test compile
target aarch64
function %f0(r64) -> r64 {
block0(v0: r64):
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f1(r64) -> b1 {
block0(v0: r64):
v1 = is_null v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, x0, #0
; nextln: cset x0, eq
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f2(r64) -> b1 {
block0(v0: r64):
v1 = is_invalid v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: adds xzr, x0, #1
; nextln: cset x0, eq
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f3() -> r64 {
block0:
v0 = null.r64
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f4(r64, r64) -> r64, r64, r64 {
fn0 = %f(r64) -> b1
ss0 = explicit_slot 8
block0(v0: r64, v1: r64):
v2 = call fn0(v0)
stack_store.r64 v0, ss0
brz v2, block1(v1, v0)
jump block2(v0, v1)
block1(v3: r64, v4: r64):
jump block3(v3, v4)
block2(v5: r64, v6: r64):
jump block3(v5, v6)
block3(v7: r64, v8: r64):
v9 = stack_load.r64 ss0
return v7, v8, v9
}
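; The live reference-typed values are spilled around the call so the
; safepoint metadata can record their stack slots (S0-S2).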
; check: Block 0:
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #32
; nextln: stp x19, x20, [sp, #-16]!
; nextln: virtual_sp_offset_adjust 16
; nextln: mov x19, x0
; nextln: mov x20, x1
; nextln: mov x0, x19
; nextln: ldr x1, 8 ; b 12 ; data
; nextln: stur x0, [sp, #24]
; nextln: stur x19, [sp, #32]
; nextln: stur x20, [sp, #40]
; nextln: (safepoint: slots [S0, S1, S2]
; nextln: blr x1
; nextln: ldur x19, [sp, #32]
; nextln: ldur x20, [sp, #40]
; nextln: add x1, sp, #16
; nextln: str x19, [x1]
; nextln: and w0, w0, #1
; nextln: cbz x0, label1 ; b label3
; check: Block 1:
; check: b label2
; check: Block 2:
; check: mov x0, x20
; nextln: b label5
; check: Block 3:
; check: b label4
; check: Block 4:
; check: mov x0, x19
; nextln: mov x19, x20
; nextln: b label5
; check: Block 5:
; check: add x1, sp, #16
; nextln: ldr x1, [x1]
; nextln: mov x2, x1
; nextln: mov x1, x19
; nextln: ldp x19, x20, [sp], #16
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,36 @@
test compile
target aarch64
function %uaddsat64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = uadd_sat.i64 v0, v1
return v2
}
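; There is no scalar integer saturating add, so the operands are moved to
; the vector unit, saturated with uqadd, and moved back.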
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fmov d0, x0
; nextln: fmov d1, x1
; nextln: uqadd d0, d0, d1
; nextln: mov x0, v0.d[0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %uaddsat8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = uadd_sat.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb x0, w0
; nextln: uxtb x1, w1
; nextln: fmov d0, x0
; nextln: fmov d1, x1
; nextln: uqadd d0, d0, d1
; nextln: mov x0, v0.d[0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret


@@ -0,0 +1,31 @@
test compile
target aarch64
function %f(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 3
v2 = ishl.i64 v0, v1
v3 = iadd.i64 v0, v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x0, x0, x0, LSL 3
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 53
v2 = ishl.i32 v0, v1
return v2
}
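; shift amounts are masked to the type width: 53 & 31 = 21.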
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, #21
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,436 @@
test compile
target aarch64
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ROR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %f0(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = rotr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f1(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = rotr.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f2(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = rotr.i16 v0, v1
return v2
}
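; i16 has no native rotate: the result is built as (x >> n) | (x << (16 - n)) on the zero-extended value.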
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: and w1, w1, #15
; nextln: sub w2, w1, #16
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f3(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = rotr.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: and w1, w1, #7
; nextln: sub w2, w1, #8
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ROL, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = rotl.i64 v0, v1
return v2
}
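; rotl is lowered as rotr of the negated amount: rotl(x, n) == rotr(x, -n mod 64).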
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub x1, xzr, x1
; nextln: ror x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f5(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = rotl.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub w1, wzr, w1
; nextln: ror w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f6(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = rotl.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: sub w1, wzr, w1
; nextln: and w1, w1, #15
; nextln: sub w2, w1, #16
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f7(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = rotl.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: sub w1, wzr, w1
; nextln: and w1, w1, #7
; nextln: sub w2, w1, #8
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; LSR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ushr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f9(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = ushr.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f10(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = ushr.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: lsr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f11(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = ushr.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: lsr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; LSL, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %f12(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ishl.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f13(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = ishl.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f14(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = ishl.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f15(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = ishl.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ASR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %f16(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = sshr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = sshr.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f18(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = sshr.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxth w0, w0
; nextln: asr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f19(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = sshr.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtb w0, w0
; nextln: asr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; immediate forms
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %f20(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = rotr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f21(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = rotl.i64 v0, v1
return v2
}
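; the immediate form folds the negation: rotl by 17 becomes ror by 64 - 17 = 47.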
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror x0, x0, #47
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f22(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 17
v2 = rotl.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror w0, w0, #15
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f23(i16) -> i16 {
block0(v0: i16):
v1 = iconst.i32 10
v2 = rotl.i16 v0, v1
return v2
}
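; rotl.i16 by 10 becomes (x << 10) | (x >> 6) on the zero-extended value.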
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: lsr w1, w0, #6
; nextln: lsl w0, w0, #10
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f24(i8) -> i8 {
block0(v0: i8):
v1 = iconst.i32 3
v2 = rotl.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: lsr w1, w0, #5
; nextln: lsl w0, w0, #3
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f25(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = ushr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f26(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = sshr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f27(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = ishl.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,183 @@
test compile
target aarch64
function %foo() {
block0:
return
}
function %stack_limit_leaf_zero(i64 stack_limit) {
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %stack_limit_gv_leaf_zero(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %stack_limit_call_zero(i64 stack_limit) {
fn0 = %foo()
block0(v0: i64):
call fn0()
return
}
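; even with a zero-sized frame, a function that makes calls still gets a stack-limit check.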
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, sp, x0
; nextln: b.hs 8 ; udf
; nextln: ldr x0
; nextln: blr x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %stack_limit_gv_call_zero(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
fn0 = %foo()
block0(v0: i64):
call fn0()
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: ldr x0
; nextln: blr x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %stack_limit(i64 stack_limit) {
ss0 = explicit_slot 168
block0(v0: i64):
return
}
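; sp is checked against stack_limit plus the 176-byte frame (168 rounded up to 16-byte alignment).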
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x16, x0, #176
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #176
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %huge_stack_limit(i64 stack_limit) {
ss0 = explicit_slot 400000
block0(v0: i64):
return
}
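; 400000 = (6 << 16) + 6784 is materialized with movz/movk; sp is first checked against the raw limit, presumably to guard the add against overflow.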
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, sp, x0
; nextln: b.hs 8 ; udf
; nextln: movz w17, #6784
; nextln: movk w17, #6, LSL #16
; nextln: add x16, x0, x17, UXTX
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %limit_preamble(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
ss0 = explicit_slot 20
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: add x16, x16, #32
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #32
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %limit_preamble_huge(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0
gv2 = load.i64 notrap aligned gv1+4
stack_limit = gv2
ss0 = explicit_slot 400000
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldur x16, [x0]
; nextln: ldur x16, [x16, #4]
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w17, #6784
; nextln: movk w17, #6, LSL #16
; nextln: add x16, x16, x17, UXTX
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: movz w16, #6784
; nextln: movk w16, #6, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %limit_preamble_huge_offset(i64 vmctx) {
gv0 = vmctx
gv1 = load.i64 notrap aligned gv0+400000
stack_limit = gv1
ss0 = explicit_slot 20
block0(v0: i64):
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #6784 ; movk w16, #6, LSL #16 ; add x16, x0, x16, UXTX ; ldr x16, [x16]
; nextln: add x16, x16, #32
; nextln: subs xzr, sp, x16
; nextln: b.hs 8 ; udf
; nextln: sub sp, sp, #32
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,271 @@
test compile
target aarch64
function %stack_addr_small() -> i64 {
ss0 = explicit_slot 8
block0:
v0 = stack_addr.i64 ss0
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #16
; nextln: mov x0, sp
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %stack_addr_big() -> i64 {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8
block0:
v0 = stack_addr.i64 ss0
return v0
}
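; the frame is 100000 + 8 = 100008 bytes, aligned up to 100016 = (1 << 16) + 34480, hence the movz/movk pair.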
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov x0, sp
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; FIXME: don't use stack_addr legalization for stack_load and stack_store
function %stack_load_small() -> i64 {
ss0 = explicit_slot 8
block0:
v0 = stack_load.i64 ss0
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #16
; nextln: mov x0, sp
; nextln: ldr x0, [x0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %stack_load_big() -> i64 {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8
block0:
v0 = stack_load.i64 ss0
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov x0, sp
; nextln: ldr x0, [x0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %stack_store_small(i64) {
ss0 = explicit_slot 8
block0(v0: i64):
stack_store.i64 v0, ss0
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub sp, sp, #16
; nextln: mov x1, sp
; nextln: str x0, [x1]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %stack_store_big(i64) {
ss0 = explicit_slot 100000
ss1 = explicit_slot 8
block0(v0: i64):
stack_store.i64 v0, ss0
return
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz w16, #34480
; nextln: movk w16, #1, LSL #16
; nextln: sub sp, sp, x16, UXTX
; nextln: mov x1, sp
; nextln: str x0, [x1]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; Force a b1 to be spilled into a slot at an SP offset between 0x100 and
; 0x1fff, to exercise the scaled addressing mode.
function %b1_spill_slot(b1) -> b1, i64 {
ss0 = explicit_slot 1000
block0(v0: b1):
v1 = iconst.i64 1
v2 = iconst.i64 2
v3 = iconst.i64 3
v4 = iconst.i64 4
v5 = iconst.i64 5
v6 = iconst.i64 6
v7 = iconst.i64 7
v8 = iconst.i64 8
v9 = iconst.i64 9
v10 = iconst.i64 10
v11 = iconst.i64 11
v12 = iconst.i64 12
v13 = iconst.i64 13
v14 = iconst.i64 14
v15 = iconst.i64 15
v16 = iconst.i64 16
v17 = iconst.i64 17
v18 = iconst.i64 18
v19 = iconst.i64 19
v20 = iconst.i64 20
v21 = iconst.i64 21
v22 = iconst.i64 22
v23 = iconst.i64 23
v24 = iconst.i64 24
v25 = iconst.i64 25
v26 = iconst.i64 26
v27 = iconst.i64 27
v28 = iconst.i64 28
v29 = iconst.i64 29
v30 = iconst.i64 30
v31 = iconst.i64 31
v32 = iconst.i64 32
v33 = iconst.i64 33
v34 = iconst.i64 34
v35 = iconst.i64 35
v36 = iconst.i64 36
v37 = iconst.i64 37
v38 = iconst.i64 38
v39 = iconst.i64 39
v40 = iconst.i64 30
v41 = iconst.i64 31
v42 = iconst.i64 32
v43 = iconst.i64 33
v44 = iconst.i64 34
v45 = iconst.i64 35
v46 = iconst.i64 36
v47 = iconst.i64 37
v48 = iconst.i64 38
v49 = iconst.i64 39
v50 = iconst.i64 30
v51 = iconst.i64 31
v52 = iconst.i64 32
v53 = iconst.i64 33
v54 = iconst.i64 34
v55 = iconst.i64 35
v56 = iconst.i64 36
v57 = iconst.i64 37
v58 = iconst.i64 38
v59 = iconst.i64 39
v60 = iconst.i64 30
v61 = iconst.i64 31
v62 = iconst.i64 32
v63 = iconst.i64 33
v64 = iconst.i64 34
v65 = iconst.i64 35
v66 = iconst.i64 36
v67 = iconst.i64 37
v68 = iconst.i64 38
v69 = iconst.i64 39
v70 = iadd.i64 v1, v2
v71 = iadd.i64 v3, v4
v72 = iadd.i64 v5, v6
v73 = iadd.i64 v7, v8
v74 = iadd.i64 v9, v10
v75 = iadd.i64 v11, v12
v76 = iadd.i64 v13, v14
v77 = iadd.i64 v15, v16
v78 = iadd.i64 v17, v18
v79 = iadd.i64 v19, v20
v80 = iadd.i64 v21, v22
v81 = iadd.i64 v23, v24
v82 = iadd.i64 v25, v26
v83 = iadd.i64 v27, v28
v84 = iadd.i64 v29, v30
v85 = iadd.i64 v31, v32
v86 = iadd.i64 v33, v34
v87 = iadd.i64 v35, v36
v88 = iadd.i64 v37, v38
v89 = iadd.i64 v39, v40
v90 = iadd.i64 v41, v42
v91 = iadd.i64 v43, v44
v92 = iadd.i64 v45, v46
v93 = iadd.i64 v47, v48
v94 = iadd.i64 v49, v50
v95 = iadd.i64 v51, v52
v96 = iadd.i64 v53, v54
v97 = iadd.i64 v55, v56
v98 = iadd.i64 v57, v58
v99 = iadd.i64 v59, v60
v100 = iadd.i64 v61, v62
v101 = iadd.i64 v63, v64
v102 = iadd.i64 v65, v66
v103 = iadd.i64 v67, v68
v104 = iadd.i64 v69, v70
v105 = iadd.i64 v71, v72
v106 = iadd.i64 v73, v74
v107 = iadd.i64 v75, v76
v108 = iadd.i64 v77, v78
v109 = iadd.i64 v79, v80
v110 = iadd.i64 v81, v82
v111 = iadd.i64 v83, v84
v112 = iadd.i64 v85, v86
v113 = iadd.i64 v87, v88
v114 = iadd.i64 v89, v90
v115 = iadd.i64 v91, v92
v116 = iadd.i64 v93, v94
v117 = iadd.i64 v95, v96
v118 = iadd.i64 v97, v98
v119 = iadd.i64 v99, v100
v120 = iadd.i64 v101, v102
v121 = iadd.i64 v103, v104
v122 = iadd.i64 v105, v106
v123 = iadd.i64 v107, v108
v124 = iadd.i64 v109, v110
v125 = iadd.i64 v111, v112
v126 = iadd.i64 v113, v114
v127 = iadd.i64 v115, v116
v128 = iadd.i64 v117, v118
v129 = iadd.i64 v119, v120
v130 = iadd.i64 v121, v122
v131 = iadd.i64 v123, v124
v132 = iadd.i64 v125, v126
v133 = iadd.i64 v127, v128
v134 = iadd.i64 v129, v130
v135 = iadd.i64 v131, v132
v136 = iadd.i64 v133, v134
v137 = iadd.i64 v135, v136
return v0, v137
}

@@ -0,0 +1,17 @@
test compile
target aarch64
function %f() -> i64 {
gv0 = symbol %my_global
block0:
v0 = symbol_value.i64 gv0
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,28 @@
test compile
target aarch64
function %f() {
block0:
trap user0
}
; check: udf
function %g(i64) {
block0(v0: i64):
v1 = iconst.i64 42
v2 = ifcmp v0, v1
trapif eq v2, user0
return
}
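; trapif becomes a compare plus a conditional branch over an inline udf.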
; check: subs xzr, x0, #42
; nextln: b.ne 8 ; udf
function %h() {
block0:
debugtrap
return
}
; check: brk #0

@@ -0,0 +1,158 @@
test compile
target aarch64
function %f_u_8_64(i8) -> i64 {
block0(v0: i8):
v1 = uextend.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb x0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_u_8_32(i8) -> i32 {
block0(v0: i8):
v1 = uextend.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_u_8_16(i8) -> i16 {
block0(v0: i8):
v1 = uextend.i16 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_8_64(i8) -> i64 {
block0(v0: i8):
v1 = sextend.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtb x0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_8_32(i8) -> i32 {
block0(v0: i8):
v1 = sextend.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtb w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_8_16(i8) -> i16 {
block0(v0: i8):
v1 = sextend.i16 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtb w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_u_16_64(i16) -> i64 {
block0(v0: i16):
v1 = uextend.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth x0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_u_16_32(i16) -> i32 {
block0(v0: i16):
v1 = uextend.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_16_64(i16) -> i64 {
block0(v0: i16):
v1 = sextend.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxth x0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_16_32(i16) -> i32 {
block0(v0: i16):
v1 = sextend.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxth w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_u_32_64(i32) -> i64 {
block0(v0: i32):
v1 = uextend.i64 v0
return v1
}
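; writing a w register implicitly zeroes the upper 32 bits, so a plain 32-bit mov suffices.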
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_32_64(i32) -> i64 {
block0(v0: i32):
v1 = sextend.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,270 @@
test compile
target arm
feature "experimental_arm32"
function %iadd(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = iadd v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: add r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %iadd(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = iadd v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: add r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %iadd(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = iadd v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: add r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %sadd_sat(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = sadd_sat v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: qadd r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %isub(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = isub v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: sub r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ssub_sat(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = ssub_sat v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: qsub r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ineg(i32) -> i32 {
block0(v0: i32):
v1 = ineg v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: rsb r0, r0, #0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %imul(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = imul v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mul r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %umulhi(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = umulhi v0, v1
return v2
}
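; umull writes the 64-bit product to a register pair: the low half goes to r1, the high half to r0.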
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: umull r1, r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %smulhi(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = smulhi v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: smull r1, r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %udiv(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = udiv v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: udiv r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %sdiv(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = sdiv v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: sdiv r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %iadd_flags(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2, v3 = iadd_ifcout v0, v1
v4, v5 = iadd_ifcarry v1, v2, v3
v6 = iadd_ifcin v1, v4, v5
return v6
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: adds r0, r0, r1
; nextln: adcs r0, r1, r0
; nextln: adc r0, r1, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %isub_flags(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2, v3 = isub_ifbout v0, v1
v4, v5 = isub_ifborrow v1, v2, v3
v6 = isub_ifbin v1, v4, v5
return v6
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: subs r0, r0, r1
; nextln: sbcs r0, r1, r0
; nextln: sbc r0, r1, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %band(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = band v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: and r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %bor(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bor v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: orr r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %bxor(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bxor v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: eor r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %bnot(i32) -> i32 {
block0(v0: i32):
v1 = bnot v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mvn r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %band_not(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = band_not v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: bic r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %bor_not(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = bor_not v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: orn r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr

@@ -0,0 +1,133 @@
test compile
target arm
feature "experimental_arm32"
function %bitrev_i8(i8) -> i8 {
block0(v0: i8):
v1 = bitrev v0
return v1
}
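; rbit reverses all 32 bits, so the byte is first shifted to the top of the register.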
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mov r0, r0, lsl #24
; nextln: rbit r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %bitrev_i16(i16) -> i16 {
block0(v0: i16):
v1 = bitrev v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mov r0, r0, lsl #16
; nextln: rbit r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %bitrev_i32(i32) -> i32 {
block0(v0: i32):
v1 = bitrev v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: rbit r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %clz_i8(i8) -> i8 {
block0(v0: i8):
v1 = clz v0
return v1
}
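; clz counts over the full 32-bit register; subtracting 24 corrects for the 8-bit width.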
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: uxtb r0, r0
; nextln: clz r0, r0
; nextln: sub r0, r0, #24
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %clz_i16(i16) -> i16 {
block0(v0: i16):
v1 = clz v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: uxth r0, r0
; nextln: clz r0, r0
; nextln: sub r0, r0, #16
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %clz_i32(i32) -> i32 {
block0(v0: i32):
v1 = clz v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: clz r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ctz_i8(i8) -> i8 {
block0(v0: i8):
v1 = ctz v0
return v1
}
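; ctz is computed as clz of the bit-reversed value.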
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: uxtb r0, r0
; nextln: rbit r0, r0
; nextln: clz r0, r0
; nextln: sub r0, r0, #24
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ctz_i16(i16) -> i16 {
block0(v0: i16):
v1 = ctz v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: uxth r0, r0
; nextln: rbit r0, r0
; nextln: clz r0, r0
; nextln: sub r0, r0, #16
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ctz_i32(i32) -> i32 {
block0(v0: i32):
v1 = ctz v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: rbit r0, r0
; nextln: clz r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr

@@ -0,0 +1,61 @@
test compile
target arm
feature "experimental_arm32"
function %icmp(i32, i32) -> b1 {
block0(v0: i32, v1: i32):
v2 = icmp eq v0, v1
return v2
}
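; the flags are turned into a boolean with an it-block: ite eq selects #1 on equal, #0 otherwise.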
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: cmp r0, r1
; nextln: ite eq ; mov r0, #1 ; mov r0, #0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ifcmp_trueif(i32, i32) -> b1 {
block0(v0: i32, v1: i32):
v2 = ifcmp v0, v1
v3 = trueif eq v2
return v3
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: cmp r0, r1
; nextln: ite eq ; mov r0, #1 ; mov r0, #0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %select(i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32):
v3 = select v0, v1, v2
return v3
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: cmp r0, #0
; nextln: ite ne ; mov r0, r1 ; mov r0, r2
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %selectif(i32, i32, i32, i32) -> i32 {
block0(v0: i32, v1: i32, v2: i32, v3: i32):
v4 = ifcmp v0, v1
v5 = selectif.i32 eq v4, v2, v3
return v5
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: cmp r0, r1
; nextln: ite eq ; mov r0, r2 ; mov r0, r3
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr

@@ -0,0 +1,108 @@
test compile
target arm
feature "experimental_arm32"
function %b1() -> b1 {
block0:
v0 = bconst.b1 true
return v0
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mov r0, #1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %b8() -> b8 {
block0:
v0 = bconst.b8 false
return v0
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mov r0, #0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %b16() -> b16 {
block0:
v0 = bconst.b16 true
return v0
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mov r0, #1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %b32() -> b32 {
block0:
v0 = bconst.b32 false
return v0
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mov r0, #0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %i8() -> i8 {
block0:
v0 = iconst.i8 0xff
return v0
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mov r0, #255
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %i16() -> i16 {
block0:
v0 = iconst.i16 0xffff
return v0
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mov r0, #65535
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %f() -> i32 {
block0:
v0 = iconst.i32 0xffff
return v0
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mov r0, #65535
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %f() -> i32 {
block0:
v0 = iconst.i32 0xffffffff
return v0
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mov r0, #65535
; nextln: movt r0, #65535
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr

@@ -0,0 +1,132 @@
test compile
target arm
feature "experimental_arm32"
function %brnz(b1) -> i32 {
block0(v0: b1):
brnz v0, block1
jump block2
block1:
v1 = iconst.i32 1
return v1
block2:
v2 = iconst.i32 2
return v2
}
; check: Block 0:
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: and r0, r0, #1
; nextln: cmp r0, #0
; nextln: bne label1 ; b label2
; check: Block 1:
; check: mov r0, #1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
; check: Block 2:
; check: mov r0, #2
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %brz(b1) -> i32 {
block0(v0: b1):
brz v0, block1
jump block2
block1:
v1 = iconst.i32 1
return v1
block2:
v2 = iconst.i32 2
return v2
}
; check: Block 0:
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: and r0, r0, #1
; nextln: cmp r0, #0
; nextln: beq label1 ; b label2
; check: Block 1:
; check: mov r0, #1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
; check: Block 2:
; check: mov r0, #2
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %trap() {
block0:
trap user0
}
; check: udf #0
function %trapif(i32, i32) {
block0(v0: i32, v1: i32):
v2 = ifcmp v0, v1
trapif eq v2, user0
return
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: cmp r0, r1
; nextln: bne 2 ; udf #0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %debugtrap() {
block0:
debugtrap
return
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: bkpt #0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %call(i32) -> i32 {
fn0 = %f(i32) -> i32
block0(v0: i32):
v1 = call fn0(v0)
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: ldr r1, [pc, #4] ; b 4 ; data
; nextln: blx r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %call_indirect(i32, i32) -> i32 {
sig0 = (i32) -> i32
block0(v0: i32, v1: i32):
v2 = call_indirect.i32 sig0, v1(v0)
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: blx r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr

@@ -0,0 +1,81 @@
test compile
target arm
feature "experimental_arm32"
function %uextend_i8_i32(i8) -> i32 {
block0(v0: i8):
v1 = uextend.i32 v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: uxtb r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %uextend_i8_i16(i8) -> i16 {
block0(v0: i8):
v1 = uextend.i16 v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: uxtb r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %uextend_i16_i32(i16) -> i32 {
block0(v0: i16):
v1 = uextend.i32 v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: uxth r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %sextend_i8_i32(i8) -> i32 {
block0(v0: i8):
v1 = sextend.i32 v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: sxtb r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %sextend_i8_i16(i8) -> i16 {
block0(v0: i8):
v1 = sextend.i16 v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: sxtb r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %sextend_i16_i32(i16) -> i32 {
block0(v0: i16):
v1 = sextend.i32 v0
return v1
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: sxth r0, r0
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr

@@ -0,0 +1,48 @@
test compile
target arm
feature "experimental_arm32"
function %args(i32) -> i32 {
sig0 = (i32, i32, i32, i32) -> i32
block0(v0: i32):
v1 = iconst.i32 1
v2 = iconst.i32 2
v3 = iconst.i32 3
v4 = iconst.i32 4
v5 = call_indirect.i32 sig0, v0(v1, v2, v3, v4)
return v5
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: push {r4, ip}
; nextln: virtual_sp_offset_adjust 8
; nextln: mov r4, r0
; nextln: mov r0, #1
; nextln: mov r1, #2
; nextln: mov r2, #3
; nextln: mov r3, #4
; nextln: blx r4
; nextln: pop {r4, ip}
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %multi_return() -> i32, i32, i32, i32 {
block0:
v0 = iconst.i32 1
v1 = iconst.i32 2
v2 = iconst.i32 3
v3 = iconst.i32 4
return v0, v1, v2, v3
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: mov r0, #1
; nextln: mov r1, #2
; nextln: mov r2, #3
; nextln: mov r3, #4
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr

@@ -0,0 +1,158 @@
test compile
target arm
feature "experimental_arm32"
function %ishl_i8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = ishl v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: uxtb r1, r1
; nextln: lsl r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ishl_i16(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = ishl v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: uxth r1, r1
; nextln: lsl r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ishl_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = ishl v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: lsl r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ushr_i8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = ushr v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: uxtb r0, r0
; nextln: uxtb r1, r1
; nextln: lsr r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ushr_i16(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = ushr v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: uxth r0, r0
; nextln: uxth r1, r1
; nextln: lsr r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ushr_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = ushr v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: lsr r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %sshr_i8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = sshr v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: sxtb r0, r0
; nextln: uxtb r1, r1
; nextln: asr r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %sshr_i16(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = sshr v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: sxth r0, r0
; nextln: uxth r1, r1
; nextln: asr r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %sshr_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = sshr v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: asr r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %ror_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = rotr v0, v1
return v2
}
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: ror r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr
function %rotl_i32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = rotl v0, v1
return v2
}
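; arm has no rotate-left: rotl(x, n) is lowered as ror(x, 32 - (n & 31)).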
; check: push {fp, lr}
; nextln: mov fp, sp
; nextln: and r1, r1, #31
; nextln: rsb r1, r1, #32
; nextln: ror r0, r0, r1
; nextln: mov sp, fp
; nextln: pop {fp, lr}
; nextln: bx lr