;; Given an integer size N, a left rotation of K places is the same as a
;; right rotation of N - K places; this means right rotations can be used to
;; implement left rotations too. Cranelift's rotation semantics are inherited
;; from WebAssembly, which means the rotation count is truncated modulo the
;; operand's bit size. Note that the AArch64 ROR instruction has the same
;; semantics when both input operands are registers.
test vcode
target aarch64
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ROR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; rotr.i64 maps directly to ROR with a register rotation count.
function %f0(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = rotr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; rotr.i32 maps directly to the 32-bit ROR form.
function %f1(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = rotr.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; rotr.i16 has no single-instruction form: the value is zero-extended,
;; the count is masked to the bit size, and the rotation is built from a
;; right shift ORed with a compensating left shift.
function %f2(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
  v2 = rotr.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: and w1, w1, #15
; nextln: sub w2, w1, #16
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; rotr.i8: same shift-pair expansion as i16, with an 8-bit mask.
function %f3(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
  v2 = rotr.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: and w1, w1, #7
; nextln: sub w2, w1, #8
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ROL, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; rotl.i64: a left rotation by K is a right rotation by N - K, so the
;; count is negated and ROR is used.
function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = rotl.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub x1, xzr, x1
; nextln: ror x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; rotl.i32: negate the count, then 32-bit ROR.
function %f5(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = rotl.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub w1, wzr, w1
; nextln: ror w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; rotl.i16: negate the count, then the same shift-pair expansion as the
;; narrow rotr cases.
function %f6(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
  v2 = rotl.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: sub w1, wzr, w1
; nextln: and w1, w1, #15
; nextln: sub w2, w1, #16
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; rotl.i8: negate the count, then the shift-pair expansion with an
;; 8-bit mask.
function %f7(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
  v2 = rotl.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: sub w1, wzr, w1
; nextln: and w1, w1, #7
; nextln: sub w2, w1, #8
; nextln: sub w2, wzr, w2
; nextln: lsr w1, w0, w1
; nextln: lsl w0, w0, w2
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; LSR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; ushr.i64 maps directly to LSR with a register count.
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = ushr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; ushr.i32 maps directly to the 32-bit LSR form.
function %f9(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = ushr.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; ushr.i16: the input is zero-extended first so the vacated high bits
;; shift in as zeros.
function %f10(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
  v2 = ushr.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: lsr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; ushr.i8: zero-extend, then shift.
function %f11(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
  v2 = ushr.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: lsr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; LSL, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; ishl.i64 maps directly to LSL with a register count.
function %f12(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = ishl.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; ishl.i32 maps directly to the 32-bit LSL form.
function %f13(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = ishl.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; ishl.i16: no extension is needed for left shifts, since the low bits
;; that matter are unaffected by the value of the high bits.
function %f14(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
  v2 = ishl.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; ishl.i8: as with i16, no extension is needed for a left shift.
function %f15(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
  v2 = ishl.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ASR, variable
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; sshr.i64 maps directly to ASR with a register count.
function %f16(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = sshr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; sshr.i32 maps directly to the 32-bit ASR form.
function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = sshr.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; sshr.i16: the input is sign-extended first so the vacated high bits
;; shift in as copies of the sign bit.
function %f18(i16, i16) -> i16 {
block0(v0: i16, v1: i16):
  v2 = sshr.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxth w0, w0
; nextln: asr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; sshr.i8: sign-extend, then shift.
function %f19(i8, i8) -> i8 {
block0(v0: i8, v1: i8):
  v2 = sshr.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtb w0, w0
; nextln: asr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; immediate forms
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; rotr.i64 with a constant count uses the immediate form of ROR.
function %f20(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i32 17
  v2 = rotr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; rotl.i64 by a constant K becomes ROR by N - K: 64 - 17 = 47.
function %f21(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i32 17
  v2 = rotl.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror x0, x0, #47
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; rotl.i32 by a constant K becomes ROR by N - K: 32 - 17 = 15.
function %f22(i32) -> i32 {
block0(v0: i32):
  v1 = iconst.i32 17
  v2 = rotl.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: ror w0, w0, #15
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; rotl.i16 by a constant: zero-extend, then combine a right shift by
;; 16 - 10 = 6 with a left shift by 10.
function %f23(i16) -> i16 {
block0(v0: i16):
  v1 = iconst.i32 10
  v2 = rotl.i16 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxth w0, w0
; nextln: lsr w1, w0, #6
; nextln: lsl w0, w0, #10
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; rotl.i8 by a constant: zero-extend, then combine a right shift by
;; 8 - 3 = 5 with a left shift by 3.
function %f24(i8) -> i8 {
block0(v0: i8):
  v1 = iconst.i32 3
  v2 = rotl.i8 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: uxtb w0, w0
; nextln: lsr w1, w0, #5
; nextln: lsl w0, w0, #3
; nextln: orr w0, w0, w1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; ushr.i64 with a constant count uses the immediate form of LSR.
function %f25(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i32 17
  v2 = ushr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsr x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; sshr.i64 with a constant count uses the immediate form of ASR.
function %f26(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i32 17
  v2 = sshr.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: asr x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

;; ishl.i64 with a constant count uses the immediate form of LSL.
function %f27(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i32 17
  v2 = ishl.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: lsl x0, x0, #17
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret