ARM64 backend, part 11 / 11: filetests for ARM64 VCode.

This patch, the last in the series, adds the filetests for the new ARM64
backend. The filetests cover most of the opcodes, except for the recently
added floating-point support.
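
As a quick orientation to the format: each filetest declares a test mode
and target architecture, gives a CLIF function, and asserts on the
pretty-printed VCode with filecheck directives. A `check:` directive scans
forward for a matching line; `nextln:` must match the line immediately
following the previous match. A minimal sketch (the same shape as the
first test below):

test vcode arch=arm64
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iadd.i64 v0, v1
return v2
}
; check: add x0, x0, x1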

This patch contains code written by Julian Seward <jseward@acm.org> and
Benjamin Bouvier <public@benj.me>, originally developed on a side-branch
before rebasing and condensing into this patch series. See the `arm64`
branch at `https://github.com/cfallin/wasmtime` for original development
history.

This patch also contains code written by Joey Gouly
<joey.gouly@arm.com> and contributed to the above branch. These
contributions are "Copyright (c) 2020, Arm Limited."

Co-authored-by: Julian Seward <jseward@acm.org>
Co-authored-by: Benjamin Bouvier <public@benj.me>
Co-authored-by: Joey Gouly <joey.gouly@arm.com>
Author: Chris Fallin
Date:   2020-04-09 14:02:19 -07:00
Parent: 402303f67a
Commit: 3de504c24c

17 changed files with 1544 additions and 0 deletions

@@ -0,0 +1,242 @@
test vcode arch=arm64
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iadd.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = isub.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = imul.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: madd x0, x0, x1, xzr
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = umulhi.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: umulh x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = smulhi.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: smulh x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = sdiv.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sdiv x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 2
v2 = sdiv.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x1, #2
; nextln: sdiv x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = udiv.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: udiv x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 2
v2 = udiv.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x1, #2
; nextln: udiv x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
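;; There is no integer remainder instruction on ARM64: the srem/urem tests
;; below expect a division followed by msub (rem = n - quotient * d).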
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = srem.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sdiv x2, x0, x1
; nextln: msub x0, x2, x1, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = urem.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: udiv x2, x0, x1
; nextln: msub x0, x2, x1, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = band.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: and x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bor.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: orr x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bxor.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: eor x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = band_not.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: bic x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bor_not.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: orn x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bxor_not.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: eon x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
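;; bnot has no dedicated instruction; it lowers to orn with the zero
;; register (the mvn alias): ~x == xzr | ~x.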
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = bnot.i64 v0
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: orn x0, xzr, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,13 @@
test vcode arch=arm64
function %f(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
; check: stp fp, lr, [sp, #-16]!
; check: mov fp, sp
v2 = iadd v0, v1
; check: add w0, w0, w1
return v2
; check: mov sp, fp
; check: ldp fp, lr, [sp], #16
; check: ret
}

@@ -0,0 +1,157 @@
test vcode arch=arm64
function %a(i32) -> i32 {
block0(v0: i32):
v1 = bitrev v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %a(i64) -> i64 {
block0(v0: i64):
v1 = bitrev v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %b(i32) -> i32 {
block0(v0: i32):
v1 = clz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %b(i64) -> i64 {
block0(v0: i64):
v1 = clz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: clz x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %c(i32) -> i32 {
block0(v0: i32):
v1 = cls v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: cls w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %c(i64) -> i64 {
block0(v0: i64):
v1 = cls v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: cls x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i32) -> i32 {
block0(v0: i32):
v1 = ctz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i64) -> i64 {
block0(v0: i64):
v1 = ctz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit x0, x0
; nextln: clz x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
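;; popcnt expands to the branch-free SWAR sequence: subtract the shifted
;; 0x55... mask to get 2-bit counts, fold into 4-bit counts with the
;; 0x33... masks, fold to byte counts with the 0x0f... mask, then the
;; shifted adds multiply by 0x0101... so the total lands in the top byte,
;; extracted by the final lsr #56.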
function %d(i64) -> i64 {
block0(v0: i64):
v1 = popcnt v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr x1, x0, #1
; nextln: and x1, x1, #6148914691236517205
; nextln: sub x1, x0, x1
; nextln: and x0, x1, #3689348814741910323
; nextln: lsr x1, x1, #2
; nextln: and x1, x1, #3689348814741910323
; nextln: add x0, x1, x0
; nextln: add x0, x0, x0, LSR 4
; nextln: and x0, x0, #1085102592571150095
; nextln: add x0, x0, x0, LSL 8
; nextln: add x0, x0, x0, LSL 16
; nextln: add x0, x0, x0, LSL 32
; nextln: lsr x0, x0, #56
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i32) -> i32 {
block0(v0: i32):
v1 = popcnt v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr w1, w0, #1
; nextln: and x1, x1, #6148914691236517205
; nextln: sub x1, x0, x1
; nextln: and x0, x1, #3689348814741910323
; nextln: lsr x1, x1, #2
; nextln: and x1, x1, #3689348814741910323
; nextln: add x0, x1, x0
; nextln: add x0, x0, x0, LSR 4
; nextln: and x0, x0, #1085102592571150095
; nextln: add x0, x0, x0, LSL 8
; nextln: add x0, x0, x0, LSL 16
; nextln: add x0, x0, x0, LSL 32
; nextln: lsr x0, x0, #56
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,15 @@
test vcode arch=arm64
function %f(i64, i64) -> i64 {
sig0 = (i64) -> i64
block0(v0: i64, v1: i64):
v2 = call_indirect.i64 sig0, v1(v0)
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: blr x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,16 @@
test vcode arch=arm64
function %f(i64) -> i64 {
fn0 = %g(i64) -> i64
block0(v0: i64):
v1 = call fn0(v0)
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: bl 0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,65 @@
test vcode arch=arm64
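;; icmp materializes its boolean result with a flags-setting subs followed
;; by cset; a brif consuming an ifcmp instead fuses into subs + b.<cond>.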
function %f(i64, i64) -> b1 {
block0(v0: i64, v1: i64):
v2 = icmp eq v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, x0, x1
; nextln: cset x0, eq
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ifcmp v0, v1
brif eq v2, block1
jump block2
block1:
v4 = iconst.i64 1
return v4
block2:
v5 = iconst.i64 2
return v5
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, x0, x1
; nextln: b.eq 20
; check: Block 0:
; check: movz x0, #2
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; check: Block 1:
; check: movz x0, #1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ifcmp v0, v1
brif eq v2, block1
jump block1
block1:
v4 = iconst.i64 1
return v4
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: subs xzr, x0, x1
; check: Block 0:
; check: movz x0, #1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,42 @@
test vcode arch=arm64
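;; The conditional ops below all set flags with subs and then pick results
;; with csel (selectif/select) or cset (trueif); bitselect is pure bitwise
;; logic (and/bic/orr).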
function %f(i8, i64, i64) -> i64 {
block0(v0: i8, v1: i64, v2: i64):
v3 = iconst.i8 42
v4 = ifcmp v0, v3
v5 = selectif.i64 eq v4, v1, v2
return v5
}
; check: subs wzr
; check: csel x0, $(=x[0-9]+, x[0-9]+), eq
function %g(i8) -> b1 {
block0(v0: i8):
v3 = iconst.i8 42
v4 = ifcmp v0, v3
v5 = trueif eq v4
return v5
}
; check: subs wzr
; check: cset x0, eq
function %h(i8, i8, i8) -> i8 {
block0(v0: i8, v1: i8, v2: i8):
v3 = bitselect.i8 v0, v1, v2
return v3
}
; check: and
; nextln: bic
; nextln: orr
function %i(b1, i8, i8) -> i8 {
block0(v0: b1, v1: i8, v2: i8):
v3 = select.i8 v0, v1, v2
return v3
}
; check: subs wzr
; nextln: csel

@@ -0,0 +1,175 @@
test vcode arch=arm64
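;; iconst materialization strategy: movz for a single non-zero halfword
;; (optionally shifted), movn for mostly-ones patterns, and movk to patch
;; any remaining halfwords of a general 64-bit constant.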
function %f() -> i64 {
block0:
v0 = iconst.i64 0
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #65535
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff0000
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #65535, LSL #16
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff00000000
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #65535, LSL #32
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff000000000000
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #65535, LSL #48
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffffffffffffffff
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffffffffffff0000
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #65535
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffffffff0000ffff
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #65535, LSL #16
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xffff0000ffffffff
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #65535, LSL #32
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0x0000ffffffffffff
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #65535, LSL #48
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0xf34bf0a31212003a ; random digits
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #58
; nextln: movk x0, #4626, LSL #16
; nextln: movk x0, #61603, LSL #32
; nextln: movk x0, #62283, LSL #48
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0x12e900001ef40000 ; random digits with 2 clear half words
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x0, #7924, LSL #16
; nextln: movk x0, #4841, LSL #48
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f() -> i64 {
block0:
v0 = iconst.i64 0x12e9ffff1ef4ffff ; random digits with 2 full half words
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movn x0, #57611, LSL #16
; nextln: movk x0, #4841, LSL #48
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,17 @@
test vcode arch=arm64
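;; The sextend is not emitted separately: it merges into the add's
;; extended-register operand form (SXTB).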
function %f(i8) -> i64 {
block0(v0: i8):
v1 = sextend.i64 v0
v2 = iconst.i64 42
v3 = iadd.i64 v2, v1
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x1, #42
; nextln: add x0, x1, x0, SXTB
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,43 @@
test vcode arch=arm64
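;; br_table lowers to a bounds check (subs + b.hs to the default block),
;; then an inline table of 32-bit offsets: adr takes the table base, ldrsw
;; loads the sign-extended entry, and br jumps to base + entry.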
function %f(i64) -> i64 {
jt0 = jump_table [block1, block2, block3]
block0(v0: i64):
br_table v0, block4, jt0
block1:
v1 = iconst.i64 1
jump block5(v1)
block2:
v2 = iconst.i64 2
jump block5(v2)
block3:
v3 = iconst.i64 3
jump block5(v3)
block4:
v4 = iconst.i64 4
jump block5(v4)
block5(v5: i64):
v6 = iadd.i64 v0, v5
return v6
}
; check: subs wzr, w0, #3
; nextln: b.hs
; nextln: adr x2, pc+16 ; ldrsw x1, [x2, x0, LSL 2] ; add x2, x2, x1 ; br x2 ; jt_entries
; check: movz x1, #3
; nextln: b
; check: movz x1, #2
; nextln: b
; check: movz x1, #1
; check: add x0, x0, x1

@@ -0,0 +1,68 @@
test vcode arch=arm64
function %add8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = iadd.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %add16(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = iadd.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %add32(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = iadd.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %add32_8(i32, i8) -> i32 {
block0(v0: i32, v1: i8):
v2 = sextend.i32 v1
v3 = iadd.i32 v0, v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add w0, w0, w1, SXTB
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %add64_32(i64, i32) -> i64 {
block0(v0: i64, v1: i32):
v2 = sextend.i64 v1
v3 = iadd.i64 v0, v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x0, x0, x1, SXTW
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,35 @@
test vcode arch=arm64
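;; ARM64 has no scalar saturating add, so uadd_sat goes through the vector
;; unit: move operands into vector registers, saturate with uqadd, move
;; the result back. Types narrower than 64 bits are zero-extended first.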
function %uaddsat64(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = uadd_sat.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov v0.d[0], x0
; nextln: mov v1.d[0], x1
; nextln: uqadd d0, d0, d1
; nextln: mov x0, v0.d[0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %uaddsat8(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = uadd_sat.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb x0, w0
; nextln: uxtb x1, w1
; nextln: mov v0.d[0], x0
; nextln: mov v1.d[0], x1
; nextln: uqadd d0, d0, d1
; nextln: mov x0, v0.d[0]
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,16 @@
test vcode arch=arm64
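;; The constant ishl feeding the iadd merges into the add's
;; shifted-register form (LSL 3), so no separate shift is emitted.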
function %f(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i64 3
v2 = ishl.i64 v0, v1
v3 = iadd.i64 v0, v2
return v3
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x0, x0, x0, LSL 3
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,439 @@
test vcode arch=arm64
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ROR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
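;; 64- and 32-bit rotr map directly to ror. 16- and 8-bit rotates (further
;; below) have no machine encoding: the input is zero-extended and the
;; rotate is synthesized from lsr/lsl/orr.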
function %f0(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = rotr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f1(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = rotr.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f2(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = rotr.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: sub w2, w1, #16
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f3(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = rotr.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: sub w2, w1, #8
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ROL, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
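;; rotl has no direct encoding; it is built as
;; (x << amt) | (x >> (width - amt)), with the complement amount computed
;; by the two leading sub instructions.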
function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = rotl.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub w2, w1, #64
; nextln: sub w2, wzr, w2
; nextln: lsl x1, x0, x1
; nextln: lsr x0, x0, x2
; nextln: orr x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f5(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = rotl.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub w2, w1, #32
; nextln: sub w2, wzr, w2
; nextln: lsl w1, w0, w1
; nextln: lsr w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f6(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = rotl.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: sub w2, w1, #16
; nextln: sub w2, wzr, w2
; nextln: lsl w1, w0, w1
; nextln: lsr w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f7(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = rotl.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: sub w2, w1, #8
; nextln: sub w2, wzr, w2
; nextln: lsl w1, w0, w1
; nextln: lsr w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; LSR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ushr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f9(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = ushr.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f10(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = ushr.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: lsr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f11(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = ushr.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: lsr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; LSL, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %f12(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = ishl.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f13(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = ishl.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f14(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = ishl.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f15(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = ishl.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ASR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %f16(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = sshr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
v2 = sshr.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f18(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
v2 = sshr.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxth w0, w0
; nextln: asr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f19(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
v2 = sshr.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtb w0, w0
; nextln: asr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; immediate forms
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
function %f20(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = rotr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f21(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = rotl.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl x1, x0, #17
; nextln: lsr x0, x0, #47
; nextln: orr x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f22(i32) -> i32 {
block0(v0: i32):
v1 = iconst.i32 17
v2 = rotl.i32 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w1, w0, #17
; nextln: lsr w0, w0, #15
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f23(i16) -> i16 {
block0(v0: i16):
v1 = iconst.i32 10
v2 = rotl.i16 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: lsl w1, w0, #10
; nextln: lsr w0, w0, #6
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f24(i8) -> i8 {
block0(v0: i8):
v1 = iconst.i32 3
v2 = rotl.i8 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: lsl w1, w0, #3
; nextln: lsr w0, w0, #5
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f25(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = ushr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f26(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = sshr.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f27(i64) -> i64 {
block0(v0: i64):
v1 = iconst.i32 17
v2 = ishl.i64 v0, v1
return v2
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,16 @@
test vcode arch=arm64
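;; The symbol address is loaded from an inline literal: ldr from a nearby
;; pc-relative offset, a branch over the 8-byte data word, and the data
;; itself, which a relocation fills in with the address of %my_global.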
function %f() -> i64 {
gv0 = symbol %my_global
block0:
v0 = symbol_value.i64 gv0
return v0
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ldr x0, 8 ; b 12 ; data
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

@@ -0,0 +1,28 @@
test vcode arch=arm64
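;; trap lowers to udf; trapif emits an inverted conditional branch that
;; skips the udf; debugtrap lowers to brk #0.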
function %f() {
block0:
trap user0
}
; check: udf
function %g(i64) {
block0(v0: i64):
v1 = iconst.i64 42
v2 = ifcmp v0, v1
trapif eq v2, user0
return
}
; check: subs xzr, x0, #42
; nextln: b.ne 8
; nextln: udf
function %h() {
block0:
debugtrap
return
}
; check: brk #0

@@ -0,0 +1,157 @@
test vcode arch=arm64
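;; Zero/sign extensions map to uxtb/uxth/sxtb/sxth/sxtw as appropriate.
;; The 32-to-64-bit zero-extend is just mov w0, w0, because writes to a w
;; register already zero the upper 32 bits.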
function %f_u_8_64(i8) -> i64 {
block0(v0: i8):
v1 = uextend.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb x0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_u_8_32(i8) -> i32 {
block0(v0: i8):
v1 = uextend.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_u_8_16(i8) -> i16 {
block0(v0: i8):
v1 = uextend.i16 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_8_64(i8) -> i64 {
block0(v0: i8):
v1 = sextend.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtb x0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_8_32(i8) -> i32 {
block0(v0: i8):
v1 = sextend.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtb w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_8_16(i8) -> i16 {
block0(v0: i8):
v1 = sextend.i16 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtb w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_u_16_64(i16) -> i64 {
block0(v0: i16):
v1 = uextend.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth x0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_u_16_32(i16) -> i32 {
block0(v0: i16):
v1 = uextend.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_16_64(i16) -> i64 {
block0(v0: i16):
v1 = sextend.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxth x0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_16_32(i16) -> i32 {
block0(v0: i16):
v1 = sextend.i32 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxth w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_u_32_64(i32) -> i64 {
block0(v0: i32):
v1 = uextend.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %f_s_32_64(i32) -> i64 {
block0(v0: i32):
v1 = sextend.i64 v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret