test compile
set unwind_info=false
target aarch64

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ROR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

function %i128_rotr(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
  v2 = rotr.i128 v0, v1
  return v2
}

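;; Expected lowering: there is no 128-bit rotate instruction, so the rotate
;; is built from 64-bit shifts and ORs of both halves; `ands xzr, x2, #64`
;; tests whether the amount crosses the 64-bit boundary and the csels pick
;; the matching halves.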
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x3, #128
; nextln: sub x5, x3, x2
; nextln: orn w4, wzr, w2
; nextln: lsl x6, x1, #1
; nextln: lsr x3, x0, x2
; nextln: lsl x6, x6, x4
; nextln: lsr x4, x1, x2
; nextln: ands xzr, x2, #64
; nextln: orr x2, x3, x6
; nextln: csel x3, xzr, x4, ne
; nextln: csel x4, x4, x2, ne
; nextln: orn w2, wzr, w5
; nextln: lsr x6, x0, #1
; nextln: lsl x1, x1, x5
; nextln: lsr x2, x6, x2
; nextln: lsl x0, x0, x5
; nextln: ands xzr, x5, #64
; nextln: orr x1, x1, x2
; nextln: csel x1, x0, x1, ne
; nextln: csel x0, xzr, x0, ne
; nextln: orr x0, x0, x4
; nextln: orr x1, x1, x3
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f0(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = rotr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror x0, x0, x1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f1(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = rotr.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

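;; i16/i8 rotates have no direct `ror` form: the value is zero-extended and
;; the rotate is assembled from a right shift by the masked amount and a
;; left shift by (type width - amount), ORed together.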
function %f2(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
  v2 = rotr.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: and w1, w1, #15
; nextln: sub w2, w1, #16
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f3(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
  v2 = rotr.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: and w1, w1, #7
; nextln: sub w2, w1, #8
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ROL, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

function %i128_rotl(i128, i128) -> i128 {
block0(v0: i128, v1: i128):
  v2 = rotl.i128 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x3, #128
; nextln: sub x5, x3, x2
; nextln: orn w4, wzr, w2
; nextln: lsr x6, x0, #1
; nextln: lsl x3, x1, x2
; nextln: lsr x6, x6, x4
; nextln: lsl x4, x0, x2
; nextln: ands xzr, x2, #64
; nextln: orr x2, x3, x6
; nextln: csel x3, x4, x2, ne
; nextln: csel x4, xzr, x4, ne
; nextln: orn w2, wzr, w5
; nextln: lsl x6, x1, #1
; nextln: lsr x0, x0, x5
; nextln: lsl x2, x6, x2
; nextln: lsr x1, x1, x5
; nextln: ands xzr, x5, #64
; nextln: orr x2, x0, x2
; nextln: csel x0, xzr, x1, ne
; nextln: csel x1, x1, x2, ne
; nextln: orr x1, x1, x4
; nextln: orr x0, x0, x3
; nextln: mov x2, x0
; nextln: mov x0, x1
; nextln: mov x1, x2
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

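;; For register-sized types there is no rotate-left instruction; rotl is
;; lowered as a rotate-right by the negated amount.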
function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = rotl.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub x1, xzr, x1
; nextln: ror x0, x0, x1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f5(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = rotl.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub w1, wzr, w1
; nextln: ror w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f6(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
  v2 = rotl.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: sub w1, wzr, w1
; nextln: and w1, w1, #15
; nextln: sub w2, w1, #16
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f7(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
  v2 = rotl.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: sub w1, wzr, w1
; nextln: and w1, w1, #7
; nextln: sub w2, w1, #8
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; LSR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = ushr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr x0, x0, x1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f9(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = ushr.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

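;; Narrow (i16/i8) logical right shifts zero-extend the value first so the
;; bits shifted down into the result are zero.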
function %f10(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
  v2 = ushr.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: lsr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f11(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
  v2 = ushr.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: lsr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; LSL, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

function %f12(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = ishl.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl x0, x0, x1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f13(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = ishl.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

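;; Narrow left shifts need no extension: bits above the narrow type cannot
;; affect the in-range result bits, so a plain 32-bit lsl suffices.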
function %f14(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
  v2 = ishl.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f15(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
  v2 = ishl.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ASR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

function %f16(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = sshr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr x0, x0, x1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = sshr.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

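;; Narrow arithmetic right shifts sign-extend the value first so the sign
;; bit is replicated into the shifted-in positions.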
function %f18(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
  v2 = sshr.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxth w0, w0
; nextln: asr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f19(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
  v2 = sshr.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtb w0, w0
; nextln: asr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; immediate forms
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

function %f20(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i32 17
  v2 = rotr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror x0, x0, #17
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

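;; A rotate-left by an immediate becomes a rotate-right by (width - amount),
;; so rotl by 17 checks as `ror #47` for i64 and `ror #15` for i32.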
function %f21(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i32 17
  v2 = rotl.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror x0, x0, #47
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f22(i32) -> i32 {
block0(v0: i32):
  v1 = iconst.i32 17
  v2 = rotl.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror w0, w0, #15
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

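;; Narrow immediate rotates are again two shifts: rotl.i16 by 10 is checked
;; as `lsr #6` (16 - 10) plus `lsl #10`, ORed together.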
function %f23(i16) -> i16 {
block0(v0: i16):
  v1 = iconst.i32 10
  v2 = rotl.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: lsr w1, w0, #6
; nextln: lsl w0, w0, #10
; nextln: orr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f24(i8) -> i8 {
block0(v0: i8):
  v1 = iconst.i32 3
  v2 = rotl.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: lsr w1, w0, #5
; nextln: lsl w0, w0, #3
; nextln: orr w0, w0, w1
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f25(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i32 17
  v2 = ushr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr x0, x0, #17
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f26(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i32 17
  v2 = sshr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr x0, x0, #17
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f27(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i32 17
  v2 = ishl.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl x0, x0, #17
; nextln: ldp fp, lr, [sp], #16
; nextln: ret