I hadn't realized before that the filetest backend for `test vcode` is doing essentially what `compile` is doing, but for new (`MachInst`) backends: it is just getting a disassembly and running it through filecheck. There's no reason not to reuse `test compile` for the AArch64 tests as well. This was motivated by the desire to have "this IR compiles successfully" tests work on both x86 and AArch64. It seems this should work fine by adding multiple `target` directives when a test case should be compile-tested on multiple architectures.
327 lines
6.2 KiB
Plaintext
327 lines
6.2 KiB
Plaintext
test compile
target aarch64
; bitrev.i8: rbit reverses the full 32-bit register, so the reversed bits
; land in the top byte and must be shifted down by 24.
function %a(i8) -> i8 {
block0(v0: i8):
    v1 = bitrev v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: lsr w0, w0, #24
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; bitrev.i16: rbit reverses the full 32-bit register, so the reversed bits
; land in the top half and must be shifted down by 16.
function %a(i16) -> i16 {
block0(v0: i16):
    v1 = bitrev v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: lsr w0, w0, #16
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; bitrev.i32: a single 32-bit rbit suffices, no correction shift needed.
function %a(i32) -> i32 {
block0(v0: i32):
    v1 = bitrev v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; bitrev.i64: a single 64-bit rbit suffices.
function %a(i64) -> i64 {
block0(v0: i64):
    v1 = bitrev v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; clz.i8: the narrow value is zero-extended (uxtb) before the 32-bit clz
; so that any stale upper bits do not affect the count.
function %b(i8) -> i8 {
block0(v0: i8):
    v1 = clz v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; clz.i16: zero-extend with uxth, then 32-bit clz.
function %b(i16) -> i16 {
block0(v0: i16):
    v1 = clz v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; clz.i32: direct single-instruction lowering.
function %b(i32) -> i32 {
block0(v0: i32):
    v1 = clz v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; clz.i64: direct single-instruction lowering on the 64-bit register.
function %b(i64) -> i64 {
block0(v0: i64):
    v1 = clz v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: clz x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; cls.i8: zero-extend with uxtb, then 32-bit cls.
function %c(i8) -> i8 {
block0(v0: i8):
    v1 = cls v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: cls w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; cls.i16: zero-extend with uxth, then 32-bit cls.
function %c(i16) -> i16 {
block0(v0: i16):
    v1 = cls v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: cls w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; cls.i32: direct single-instruction lowering.
function %c(i32) -> i32 {
block0(v0: i32):
    v1 = cls v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: cls w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; cls.i64: direct single-instruction lowering on the 64-bit register.
function %c(i64) -> i64 {
block0(v0: i64):
    v1 = cls v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: cls x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; ctz.i8: lowered as bit-reverse followed by count-leading-zeros; the
; lsr #24 moves the reversed byte down so clz counts the right bits.
function %d(i8) -> i8 {
block0(v0: i8):
    v1 = ctz v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: lsr w0, w0, #24
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; ctz.i16: rbit + lsr #16 + clz (same scheme as the i8 case).
function %d(i16) -> i16 {
block0(v0: i16):
    v1 = ctz v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: lsr w0, w0, #16
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; ctz.i32: rbit + clz, no correction shift needed at full register width.
function %d(i32) -> i32 {
block0(v0: i32):
    v1 = ctz v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit w0, w0
; nextln: clz w0, w0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; ctz.i64: 64-bit rbit + clz.
function %d(i64) -> i64 {
block0(v0: i64):
    v1 = ctz v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit x0, x0
; nextln: clz x0, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; popcnt.i64: lowered to a branch-free SWAR bit-manipulation sequence
; (pairwise sums, then nibble/byte folds, final lsr #56 extracts the count).
function %d(i64) -> i64 {
block0(v0: i64):
    v1 = popcnt v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr x1, x0, #1
; nextln: and x1, x1, #6148914691236517205
; nextln: sub x1, x0, x1
; nextln: and x0, x1, #3689348814741910323
; nextln: lsr x1, x1, #2
; nextln: and x1, x1, #3689348814741910323
; nextln: add x0, x1, x0
; nextln: add x0, x0, x0, LSR 4
; nextln: and x0, x0, #1085102592571150095
; nextln: add x0, x0, x0, LSL 8
; nextln: add x0, x0, x0, LSL 16
; nextln: add x0, x0, x0, LSL 32
; nextln: lsr x0, x0, #56
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; popcnt.i32: zero the upper 32 bits (mov w0, w0), then reuse the 64-bit
; SWAR popcount sequence.
function %d(i32) -> i32 {
block0(v0: i32):
    v1 = popcnt v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: lsr w1, w0, #1
; nextln: and x1, x1, #6148914691236517205
; nextln: sub x1, x0, x1
; nextln: and x0, x1, #3689348814741910323
; nextln: lsr x1, x1, #2
; nextln: and x1, x1, #3689348814741910323
; nextln: add x0, x1, x0
; nextln: add x0, x0, x0, LSR 4
; nextln: and x0, x0, #1085102592571150095
; nextln: add x0, x0, x0, LSL 8
; nextln: add x0, x0, x0, LSL 16
; nextln: add x0, x0, x0, LSL 32
; nextln: lsr x0, x0, #56
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; popcnt.i16: zero-extend with uxth, then the 64-bit SWAR popcount sequence.
function %d(i16) -> i16 {
block0(v0: i16):
    v1 = popcnt v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth x0, w0
; nextln: lsr w1, w0, #1
; nextln: and x1, x1, #6148914691236517205
; nextln: sub x1, x0, x1
; nextln: and x0, x1, #3689348814741910323
; nextln: lsr x1, x1, #2
; nextln: and x1, x1, #3689348814741910323
; nextln: add x0, x1, x0
; nextln: add x0, x0, x0, LSR 4
; nextln: and x0, x0, #1085102592571150095
; nextln: add x0, x0, x0, LSL 8
; nextln: add x0, x0, x0, LSL 16
; nextln: add x0, x0, x0, LSL 32
; nextln: lsr x0, x0, #56
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
; popcnt.i8: zero-extend with uxtb, then the 64-bit SWAR popcount sequence.
function %d(i8) -> i8 {
block0(v0: i8):
    v1 = popcnt v0
    return v1
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb x0, w0
; nextln: lsr w1, w0, #1
; nextln: and x1, x1, #6148914691236517205
; nextln: sub x1, x0, x1
; nextln: and x0, x1, #3689348814741910323
; nextln: lsr x1, x1, #2
; nextln: and x1, x1, #3689348814741910323
; nextln: add x0, x1, x0
; nextln: add x0, x0, x0, LSR 4
; nextln: and x0, x0, #1085102592571150095
; nextln: add x0, x0, x0, LSL 8
; nextln: add x0, x0, x0, LSL 16
; nextln: add x0, x0, x0, LSL 32
; nextln: lsr x0, x0, #56
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret