We previously fixed a bug in which constant shift amounts were not masked modulo the number of bits in the operand; however, we did not fix the analogous case for shifts folded into the second register argument of ALU instructions that support an integrated shift. This failure to mask resulted in illegal instructions being generated, e.g. in https://bugzilla.mozilla.org/show_bug.cgi?id=1653502. This PR fixes the issue by masking the amount, as the shift semantics require.
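For context, the fix is the same modulo-type-width reduction already applied to plain shift amounts, now also applied when a shift is folded into a shifted-register ALU operand. A minimal standalone sketch of the reduction (the helper name here is hypothetical, not the actual Cranelift lowering code):

```rust
/// Hypothetical sketch: AArch64 shifted-register operands can only encode
/// amounts in 0..bits-1, so the IR amount must be reduced modulo the operand
/// width (always a power of two) before being encoded.
fn masked_shift_amount(amount: u64, ty_bits: u64) -> u64 {
    amount & (ty_bits - 1)
}

fn main() {
    // 53 & 31 == 21, which is why %f25 below expects `sub w0, w1, w0, LSL 21`.
    assert_eq!(masked_shift_amount(53, 32), 21);
}
```

The updated test file follows; %f25 at the end is the regression test for this case.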
test compile
target aarch64

function %f1(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = iadd.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: add x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f2(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = isub.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f3(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = imul.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: madd x0, x0, x1, xzr
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f4(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = umulhi.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: umulh x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f5(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = smulhi.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: smulh x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
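; %f6 and %f7 check the sdiv trap sequence: cbnz guards against division by
; zero, and the adds/ccmp/b.vc sequence traps on INT_MIN / -1 overflow.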
function %f6(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = sdiv.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sdiv x2, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: adds xzr, x1, #1
; nextln: ccmp x0, #1, #nzcv, eq
; nextln: b.vc 8 ; udf
; nextln: mov x0, x2
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f7(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i64 2
  v2 = sdiv.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x2, #2
; nextln: sdiv x1, x0, x2
; nextln: cbnz x2, 8 ; udf
; nextln: adds xzr, x2, #1
; nextln: ccmp x0, #1, #nzcv, eq
; nextln: b.vc 8 ; udf
; nextln: mov x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
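; Unsigned division cannot overflow, so udiv only needs the divide-by-zero guard.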
function %f8(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = udiv.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: udiv x0, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f9(i64) -> i64 {
block0(v0: i64):
  v1 = iconst.i64 2
  v2 = udiv.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: movz x1, #2
; nextln: udiv x0, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
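; There is no remainder instruction on AArch64, so srem/urem lower to a divide
; followed by msub (remainder = dividend - quotient * divisor).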
function %f10(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = srem.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sdiv x2, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: msub x0, x2, x1, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f11(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = urem.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: udiv x2, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: msub x0, x2, x1, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
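; The 32-bit variants first widen the operands to 64 bits: sxtw for the signed
; cases, and a 32-bit mov (which implicitly zeroes the upper bits) for the
; unsigned ones.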
function %f12(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = sdiv.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x3, w0
; nextln: sxtw x2, w1
; nextln: sdiv x0, x3, x2
; nextln: cbnz x2, 8 ; udf
; nextln: adds wzr, w2, #1
; nextln: ccmp w3, #1, #nzcv, eq
; nextln: b.vc 8 ; udf
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f13(i32) -> i32 {
block0(v0: i32):
  v1 = iconst.i32 2
  v2 = sdiv.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x0, w0
; nextln: movz x1, #2
; nextln: sxtw x2, w1
; nextln: sdiv x1, x0, x2
; nextln: cbnz x2, 8 ; udf
; nextln: adds wzr, w2, #1
; nextln: ccmp w0, #1, #nzcv, eq
; nextln: b.vc 8 ; udf
; nextln: mov x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f14(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = udiv.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: mov w1, w1
; nextln: udiv x0, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f15(i32) -> i32 {
block0(v0: i32):
  v1 = iconst.i32 2
  v2 = udiv.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: movz x1, #2
; nextln: udiv x0, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f16(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = srem.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sxtw x0, w0
; nextln: sxtw x1, w1
; nextln: sdiv x2, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: msub x0, x2, x1, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f17(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = urem.i32 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: mov w0, w0
; nextln: mov w1, w1
; nextln: udiv x2, x0, x1
; nextln: cbnz x1, 8 ; udf
; nextln: msub x0, x2, x1, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
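; The bitwise ops map directly onto the AArch64 logical instructions, including
; the inverted forms (bic, orn, eon); bnot lowers to orn against the zero register.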
function %f18(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = band.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: and x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f19(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = bor.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: orr x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f20(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = bxor.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: eor x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f21(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = band_not.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: bic x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f22(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = bor_not.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: orn x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f23(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = bxor_not.i64 v0, v1
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: eon x0, x0, x1
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret

function %f24(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
  v2 = bnot.i64 v0
  return v2
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: orn x0, xzr, x0
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
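; Regression test for the masking fix described above: the ishl.i32 amount of 53
; must be reduced modulo 32 to 21 when folded into the isub's second operand.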
function %f25(i32, i32) -> i32 {
block0(v0: i32, v1: i32):
  v2 = iconst.i32 53
  v3 = ishl.i32 v0, v2
  v4 = isub.i32 v1, v3
  return v4
}

; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: sub w0, w1, w0, LSL 21
; nextln: mov sp, fp
; nextln: ldp fp, lr, [sp], #16
; nextln: ret