Merge pull request #2990 from afonso360/aarch64-bitops-count

aarch64: Add i128 bit counting instructions
This commit is contained in:
Chris Fallin
2021-06-16 10:25:45 -07:00
committed by GitHub
7 changed files with 412 additions and 82 deletions

View File

@@ -1471,6 +1471,50 @@ pub(crate) fn emit_shr_i128<C: LowerCtx<I = Inst>>(
}); });
} }
/// Emits the instruction sequence that counts the leading zero bits of an
/// i128 value held in two 64-bit register halves.
///
/// `src` is the input as (lo, hi) halves; `dst` receives the 128-bit result.
/// The count is at most 128, so it always fits in the low half and the high
/// half of `dst` is cleared at the end.
pub(crate) fn emit_clz_i128<C: LowerCtx<I = Inst>>(
    ctx: &mut C,
    src: ValueRegs<Reg>,
    dst: ValueRegs<Writable<Reg>>,
) {
    let src_lo = src.regs()[0];
    let src_hi = src.regs()[1];
    let dst_lo = dst.regs()[0];
    let dst_hi = dst.regs()[1];
    // clz dst_hi, src_hi
    // clz dst_lo, src_lo
    // lsr tmp, dst_hi, #6
    // madd dst_lo, dst_lo, tmp, dst_hi
    // mov dst_hi, 0
    //
    // The result is clz(src_hi), plus clz(src_lo) only when the high half is
    // entirely zero. `lsr tmp, dst_hi, #6` is 1 exactly when clz(src_hi) == 64
    // (i.e. src_hi == 0) and 0 otherwise, so the `madd`
    // (dst_lo = dst_lo * tmp + dst_hi) selects between dst_hi alone and
    // dst_lo + dst_hi without a branch.
    let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
    ctx.emit(Inst::BitRR {
        rd: dst_hi,
        rn: src_hi,
        op: BitOp::Clz64,
    });
    ctx.emit(Inst::BitRR {
        rd: dst_lo,
        rn: src_lo,
        op: BitOp::Clz64,
    });
    ctx.emit(Inst::AluRRImmShift {
        alu_op: ALUOp::Lsr64,
        rd: tmp,
        rn: dst_hi.to_reg(),
        immshift: ImmShift::maybe_from_u64(6).unwrap(),
    });
    ctx.emit(Inst::AluRRRR {
        alu_op: ALUOp3::MAdd64,
        rd: dst_lo,
        rn: dst_lo.to_reg(),
        rm: tmp.to_reg(),
        ra: dst_hi.to_reg(),
    });
    // Leading-zero count of an i128 never exceeds 128, so the high half of
    // the result is always zero.
    lower_constant_u64(ctx, dst_hi, 0);
}
//============================================================================= //=============================================================================
// Lowering-backend trait implementation. // Lowering-backend trait implementation.

View File

@@ -1027,24 +1027,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
} }
Opcode::Bitrev | Opcode::Clz | Opcode::Cls | Opcode::Ctz => { Opcode::Bitrev | Opcode::Clz | Opcode::Cls | Opcode::Ctz => {
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let needs_zext = match op {
Opcode::Bitrev | Opcode::Ctz => false,
Opcode::Clz | Opcode::Cls => true,
_ => unreachable!(),
};
let ty = ty.unwrap(); let ty = ty.unwrap();
let narrow_mode = if needs_zext && ty_bits(ty) == 64 {
NarrowValueMode::ZeroExtend64
} else if needs_zext {
NarrowValueMode::ZeroExtend32
} else {
NarrowValueMode::None
};
let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
let op_ty = match ty { let op_ty = match ty {
I8 | I16 | I32 => I32, I8 | I16 | I32 => I32,
I64 => I64, I64 | I128 => I64,
_ => panic!("Unsupported type for Bitrev/Clz/Cls"), _ => panic!("Unsupported type for Bitrev/Clz/Cls"),
}; };
let bitop = match op { let bitop = match op {
@@ -1052,62 +1038,197 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
Opcode::Ctz => BitOp::from((Opcode::Bitrev, op_ty)), Opcode::Ctz => BitOp::from((Opcode::Bitrev, op_ty)),
_ => unreachable!(), _ => unreachable!(),
}; };
ctx.emit(Inst::BitRR { rd, rn, op: bitop });
// Both bitrev and ctz use a bit-reverse (rbit) instruction; ctz to reduce the problem if ty == I128 {
// to a clz, and bitrev as the main operation. let out_regs = get_output_reg(ctx, outputs[0]);
if op == Opcode::Bitrev || op == Opcode::Ctz { let in_regs = put_input_in_regs(ctx, inputs[0]);
// Reversing an n-bit value (n < 32) with a 32-bit bitrev instruction will place
// the reversed result in the highest n bits, so we need to shift them down into let in_lo = in_regs.regs()[0];
// place. let in_hi = in_regs.regs()[1];
let right_shift = match ty { let out_lo = out_regs.regs()[0];
I8 => Some(24), let out_hi = out_regs.regs()[1];
I16 => Some(16),
I32 => None, if op == Opcode::Bitrev || op == Opcode::Ctz {
I64 => None, ctx.emit(Inst::BitRR {
_ => panic!("Unsupported type for Bitrev"), rd: out_hi,
}; rn: in_lo,
if let Some(s) = right_shift { op: bitop,
ctx.emit(Inst::AluRRImmShift { });
alu_op: ALUOp::Lsr32, ctx.emit(Inst::BitRR {
rd, rd: out_lo,
rn: rd.to_reg(), rn: in_hi,
immshift: ImmShift::maybe_from_u64(s).unwrap(), op: bitop,
}); });
} }
}
if op == Opcode::Ctz { if op == Opcode::Ctz {
ctx.emit(Inst::BitRR { // We have reduced the problem to a clz by reversing the inputs previously
op: BitOp::from((Opcode::Clz, op_ty)), emit_clz_i128(ctx, out_regs.map(|r| r.to_reg()), out_regs);
rd, } else if op == Opcode::Clz {
rn: rd.to_reg(), emit_clz_i128(ctx, in_regs, out_regs);
}); } else if op == Opcode::Cls {
// cls out_hi, in_hi
// cls out_lo, in_lo
// eon sign_eq, in_hi, in_lo
// lsr sign_eq, sign_eq, #63
// madd out_lo, out_lo, sign_eq, sign_eq
// cmp out_hi, #63
// csel out_lo, out_lo, xzr, eq
// add out_lo, out_lo, out_hi
// mov out_hi, 0
let sign_eq = ctx.alloc_tmp(I64).only_reg().unwrap();
let xzr = writable_zero_reg();
ctx.emit(Inst::BitRR {
rd: out_lo,
rn: in_lo,
op: bitop,
});
ctx.emit(Inst::BitRR {
rd: out_hi,
rn: in_hi,
op: bitop,
});
ctx.emit(Inst::AluRRR {
alu_op: ALUOp::EorNot64,
rd: sign_eq,
rn: in_hi,
rm: in_lo,
});
ctx.emit(Inst::AluRRImmShift {
alu_op: ALUOp::Lsr64,
rd: sign_eq,
rn: sign_eq.to_reg(),
immshift: ImmShift::maybe_from_u64(63).unwrap(),
});
ctx.emit(Inst::AluRRRR {
alu_op: ALUOp3::MAdd64,
rd: out_lo,
rn: out_lo.to_reg(),
rm: sign_eq.to_reg(),
ra: sign_eq.to_reg(),
});
ctx.emit(Inst::AluRRImm12 {
alu_op: ALUOp::SubS64,
rd: xzr,
rn: out_hi.to_reg(),
imm12: Imm12::maybe_from_u64(63).unwrap(),
});
ctx.emit(Inst::CSel {
cond: Cond::Eq,
rd: out_lo,
rn: out_lo.to_reg(),
rm: xzr.to_reg(),
});
ctx.emit(Inst::AluRRR {
alu_op: ALUOp::Add64,
rd: out_lo,
rn: out_lo.to_reg(),
rm: out_hi.to_reg(),
});
lower_constant_u64(ctx, out_hi, 0);
}
} else {
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
let needs_zext = match op {
Opcode::Bitrev | Opcode::Ctz => false,
Opcode::Clz | Opcode::Cls => true,
_ => unreachable!(),
};
let narrow_mode = if needs_zext && ty_bits(ty) == 64 {
NarrowValueMode::ZeroExtend64
} else if needs_zext {
NarrowValueMode::ZeroExtend32
} else {
NarrowValueMode::None
};
let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
ctx.emit(Inst::BitRR { rd, rn, op: bitop });
// Both bitrev and ctz use a bit-reverse (rbit) instruction; ctz to reduce the problem
// to a clz, and bitrev as the main operation.
if op == Opcode::Bitrev || op == Opcode::Ctz {
// Reversing an n-bit value (n < 32) with a 32-bit bitrev instruction will place
// the reversed result in the highest n bits, so we need to shift them down into
// place.
let right_shift = match ty {
I8 => Some(24),
I16 => Some(16),
I32 => None,
I64 => None,
_ => panic!("Unsupported type for Bitrev"),
};
if let Some(s) = right_shift {
ctx.emit(Inst::AluRRImmShift {
alu_op: ALUOp::Lsr32,
rd,
rn: rd.to_reg(),
immshift: ImmShift::maybe_from_u64(s).unwrap(),
});
}
}
if op == Opcode::Ctz {
ctx.emit(Inst::BitRR {
op: BitOp::from((Opcode::Clz, op_ty)),
rd,
rn: rd.to_reg(),
});
}
} }
} }
Opcode::Popcnt => { Opcode::Popcnt => {
let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap(); let out_regs = get_output_reg(ctx, outputs[0]);
let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None); let in_regs = put_input_in_regs(ctx, inputs[0]);
let ty = ty.unwrap(); let ty = ty.unwrap();
let size = ScalarSize::from_operand_size(OperandSize::from_ty(ty)); let size = if ty == I128 {
ScalarSize::Size64
} else {
ScalarSize::from_operand_size(OperandSize::from_ty(ty))
};
let vec_size = if ty == I128 {
VectorSize::Size8x16
} else {
VectorSize::Size8x8
};
let tmp = ctx.alloc_tmp(I8X16).only_reg().unwrap(); let tmp = ctx.alloc_tmp(I8X16).only_reg().unwrap();
// fmov tmp, rn // fmov tmp, in_lo
// cnt tmp.8b, tmp.8b // if ty == i128:
// addp tmp.8b, tmp.8b, tmp.8b / addv tmp, tmp.8b / (no instruction for 8-bit inputs) // mov tmp.d[1], in_hi
// umov rd, tmp.b[0] //
// cnt tmp.16b, tmp.16b / cnt tmp.8b, tmp.8b
// addv tmp, tmp.16b / addv tmp, tmp.8b / addp tmp.8b, tmp.8b, tmp.8b / (no instruction for 8-bit inputs)
//
// umov out_lo, tmp.b[0]
// if ty == i128:
// mov out_hi, 0
ctx.emit(Inst::MovToFpu { ctx.emit(Inst::MovToFpu {
rd: tmp, rd: tmp,
rn: rn, rn: in_regs.regs()[0],
size, size,
}); });
if ty == I128 {
ctx.emit(Inst::MovToVec {
rd: tmp,
rn: in_regs.regs()[1],
idx: 1,
size: VectorSize::Size64x2,
});
}
ctx.emit(Inst::VecMisc { ctx.emit(Inst::VecMisc {
op: VecMisc2::Cnt, op: VecMisc2::Cnt,
rd: tmp, rd: tmp,
rn: tmp.to_reg(), rn: tmp.to_reg(),
size: VectorSize::Size8x8, size: vec_size,
}); });
match ScalarSize::from_ty(ty) { match ScalarSize::from_ty(ty) {
@@ -1122,23 +1243,25 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
size: VectorSize::Size8x8, size: VectorSize::Size8x8,
}); });
} }
ScalarSize::Size32 | ScalarSize::Size64 => { ScalarSize::Size32 | ScalarSize::Size64 | ScalarSize::Size128 => {
ctx.emit(Inst::VecLanes { ctx.emit(Inst::VecLanes {
op: VecLanesOp::Addv, op: VecLanesOp::Addv,
rd: tmp, rd: tmp,
rn: tmp.to_reg(), rn: tmp.to_reg(),
size: VectorSize::Size8x8, size: vec_size,
}); });
} }
sz => panic!("Unexpected scalar FP operand size: {:?}", sz),
} }
ctx.emit(Inst::MovFromVec { ctx.emit(Inst::MovFromVec {
rd, rd: out_regs.regs()[0],
rn: tmp.to_reg(), rn: tmp.to_reg(),
idx: 0, idx: 0,
size: VectorSize::Size8x16, size: VectorSize::Size8x16,
}); });
if ty == I128 {
lower_constant_u64(ctx, out_regs.regs()[1], 0);
}
} }
Opcode::Load Opcode::Load

View File

@@ -52,6 +52,19 @@ block0(v0: i64):
; nextln: ldp fp, lr, [sp], #16 ; nextln: ldp fp, lr, [sp], #16
; nextln: ret ; nextln: ret
function %a(i128) -> i128 {
block0(v0: i128):
v1 = bitrev v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit x2, x0
; nextln: rbit x0, x1
; nextln: mov x1, x2
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %b(i8) -> i8 { function %b(i8) -> i8 {
block0(v0: i8): block0(v0: i8):
@@ -103,6 +116,22 @@ block0(v0: i64):
; nextln: ldp fp, lr, [sp], #16 ; nextln: ldp fp, lr, [sp], #16
; nextln: ret ; nextln: ret
function %b(i128) -> i128 {
block0(v0: i128):
v1 = clz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: clz x1, x1
; nextln: clz x0, x0
; nextln: lsr x2, x1, #6
; nextln: madd x0, x0, x2, x1
; nextln: movz x1, #0
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %c(i8) -> i8 { function %c(i8) -> i8 {
block0(v0: i8): block0(v0: i8):
v1 = cls v0 v1 = cls v0
@@ -153,6 +182,26 @@ block0(v0: i64):
; nextln: ldp fp, lr, [sp], #16 ; nextln: ldp fp, lr, [sp], #16
; nextln: ret ; nextln: ret
function %c(i128) -> i128 {
block0(v0: i128):
v1 = cls v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: cls x2, x0
; nextln: cls x3, x1
; nextln: eon x0, x1, x0
; nextln: lsr x0, x0, #63
; nextln: madd x0, x2, x0, x0
; nextln: subs xzr, x3, #63
; nextln: csel x0, x0, xzr, eq
; nextln: add x0, x0, x3
; nextln: movz x1, #0
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i8) -> i8 { function %d(i8) -> i8 {
block0(v0: i8): block0(v0: i8):
v1 = ctz v0 v1 = ctz v0
@@ -207,6 +256,42 @@ block0(v0: i64):
; nextln: ldp fp, lr, [sp], #16 ; nextln: ldp fp, lr, [sp], #16
; nextln: ret ; nextln: ret
function %d(i128) -> i128 {
block0(v0: i128):
v1 = ctz v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: rbit x0, x0
; nextln: rbit x1, x1
; nextln: clz x0, x0
; nextln: clz x1, x1
; nextln: lsr x2, x0, #6
; nextln: madd x0, x1, x2, x0
; nextln: movz x1, #0
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i128) -> i128 {
block0(v0: i128):
v1 = popcnt v0
return v1
}
; check: stp fp, lr, [sp, #-16]!
; nextln: mov fp, sp
; nextln: fmov d0, x0
; nextln: mov v0.d[1], x1
; nextln: cnt v0.16b, v0.16b
; nextln: addv b0, v0.16b
; nextln: umov w0, v0.b[0]
; nextln: movz x1, #0
; nextln: ldp fp, lr, [sp], #16
; nextln: ret
function %d(i64) -> i64 { function %d(i64) -> i64 {
block0(v0: i64): block0(v0: i64):
v1 = popcnt v0 v1 = popcnt v0

View File

@@ -0,0 +1,24 @@
test run
target aarch64
; TODO: Move this test into i128-bitops-count.clif when x86_64 supports it
function %cls_i128(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconcat v0, v1
v3 = cls v2
v4, v5 = isplit v3
v6 = iadd v4, v5
return v6
}
; run: %cls_i128(0x00000000_00000000, 0x00000000_00000000) == 127
; run: %cls_i128(0xFFFFFFFF_FFFFFFFF, 0x00000000_00000000) == 63
; run: %cls_i128(0x00000000_00000000, 0xFFFFFFFF_FFFFFFFF) == 63
; run: %cls_i128(0xFFFFFFFF_FFFFFFFF, 0xFFFFFFFF_FFFFFFFF) == 127
; run: %cls_i128(0xFFFFFFFF_FFFFFFFF, 0x7FFFFFFF_FFFFFFFF) == 0
; run: %cls_i128(0xFFFFFFFF_FFFFFFFF, 0x3FFFFFFF_FFFFFFFF) == 1
; run: %cls_i128(0x7FFFFFFF_FFFFFFFF, 0xFFFFFFFF_FFFFFFFF) == 63
; run: %cls_i128(0x80000000_00000000, 0xC0000000_00000000) == 1
; run: %cls_i128(0x00000000_00000000, 0xC0000000_00000000) == 1
; run: %cls_i128(0x80000000_00000000, 0x80000000_00000000) == 0

View File

@@ -0,0 +1,63 @@
test run
target aarch64
; target s390x TODO: Not yet implemented on s390x
target x86_64 machinst
function %ctz_i128(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconcat v0, v1
v3 = ctz v2
v4, v5 = isplit v3
v6 = iadd v4, v5
return v6
}
; run: %ctz_i128(0x00000000_00000000, 0x00000000_00000000) == 128
; run: %ctz_i128(0xFFFFFFFF_FFFFFFFF, 0x00000000_00000000) == 0
; run: %ctz_i128(0x00000000_00000000, 0xFFFFFFFF_FFFFFFFF) == 64
; run: %ctz_i128(0xFFFFFFFF_FFFFFFFF, 0xFFFFFFFF_FFFFFFFF) == 0
; run: %ctz_i128(0xFFFFFFFF_00000000, 0xF0000000_00000000) == 32
; run: %ctz_i128(0xF0000000_00000000, 0xFF000000_00000000) == 60
; run: %ctz_i128(0x00000001_00000000, 0x00000000_00000000) == 32
; run: %ctz_i128(0x00000000_00000000, 0x00000001_00000000) == 96
; run: %ctz_i128(0x00000000_00010000, 0x00000001_00000000) == 16
; run: %ctz_i128(0x00000000_00010000, 0x00000000_00000000) == 16
function %clz_i128(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconcat v0, v1
v3 = clz v2
v4, v5 = isplit v3
v6 = iadd v4, v5
return v6
}
; run: %clz_i128(0x00000000_00000000, 0x00000000_00000000) == 128
; run: %clz_i128(0xFFFFFFFF_FFFFFFFF, 0x00000000_00000000) == 64
; run: %clz_i128(0x00000000_00000000, 0xFFFFFFFF_FFFFFFFF) == 0
; run: %clz_i128(0xFFFFFFFF_FFFFFFFF, 0xFFFFFFFF_FFFFFFFF) == 0
; run: %clz_i128(0xFFFFFFFF_FFFFFFFF, 0x40000000_00000000) == 1
; run: %clz_i128(0xFFFFFFFF_FFFFFFFF, 0x20000000_00000000) == 2
; run: %clz_i128(0x00000000_00000000, 0x00000000_80000000) == 32
; run: %clz_i128(0x00000000_00000000, 0x00000001_00000000) == 31
; run: %clz_i128(0x00000000_00010000, 0x00000001_00000000) == 31
; run: %clz_i128(0x00000000_00010000, 0x00000000_00000000) == 111
function %popcnt_i128(i64, i64) -> i64 {
block0(v0: i64, v1: i64):
v2 = iconcat v0, v1
v3 = popcnt v2
v4, v5 = isplit v3
v6 = iadd v4, v5
return v6
}
; run: %popcnt_i128(0x00000000_00000000, 0x00000000_00000000) == 0
; run: %popcnt_i128(0xFFFFFFFF_FFFFFFFF, 0x00000000_00000000) == 64
; run: %popcnt_i128(0x00000000_00000000, 0xFFFFFFFF_FFFFFFFF) == 64
; run: %popcnt_i128(0xFFFFFFFF_FFFFFFFF, 0xFFFFFFFF_FFFFFFFF) == 128
; run: %popcnt_i128(0x55555555_55555555, 0x55555555_55555555) == 64
; run: %popcnt_i128(0xC0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE) == 96

View File

@@ -1,27 +0,0 @@
test run
; target s390x TODO: Not yet implemented on s390x
target x86_64 machinst
function %ctz(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = iconcat v0, v1
v3 = ctz.i128 v2
v4 = ireduce.i8 v3
return v4
}
; run: %ctz(0x00000000_00000000, 0x00000001_00000000) == 96
; run: %ctz(0x00000000_00010000, 0x00000001_00000000) == 16
; run: %ctz(0x00000000_00010000, 0x00000000_00000000) == 16
; run: %ctz(0x00000000_00000000, 0x00000000_00000000) == 128
function %clz(i64, i64) -> i8 {
block0(v0: i64, v1: i64):
v2 = iconcat v0, v1
v3 = clz.i128 v2
v4 = ireduce.i8 v3
return v4
}
; run: %clz(0x00000000_00000000, 0x00000001_00000000) == 31
; run: %clz(0x00000000_00010000, 0x00000001_00000000) == 31
; run: %clz(0x00000000_00010000, 0x00000000_00000000) == 111
; run: %clz(0x00000000_00000000, 0x00000000_00000000) == 128

View File

@@ -133,3 +133,21 @@ return v7, v8
; run: %bxor_not_i128(0x01234567_89ABCDEF, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210, 0x01234567_89ABCDEF) == [0, 0] ; run: %bxor_not_i128(0x01234567_89ABCDEF, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210, 0x01234567_89ABCDEF) == [0, 0]
; run: %bxor_not_i128(0x8FA50A64_8FA50A64, 0x9440A07D_9440A07D, 0xB0A51B75_B0A51B75, 0xB575A07D_B575A07D) == [0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF] ; run: %bxor_not_i128(0x8FA50A64_8FA50A64, 0x9440A07D_9440A07D, 0xB0A51B75_B0A51B75, 0xB575A07D_B575A07D) == [0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF]
function %bitrev_i128(i64, i64) -> i64, i64 {
block0(v0: i64, v1: i64):
v2 = iconcat v0, v1
v3 = bitrev v2
v4, v5 = isplit v3
return v4, v5
}
; run: %bitrev_i128(0, 0) == [0, 0]
; run: %bitrev_i128(-1, -1) == [-1, -1]
; run: %bitrev_i128(-1, 0) == [0, -1]
; run: %bitrev_i128(0, -1) == [-1, 0]
; run: %bitrev_i128(0x00000000_00000000, 0x80000000_00000000) == [1, 0]
; run: %bitrev_i128(0x01234567_89ABCDEF, 0xFEDCBA98_76543210) == [0x084C2A6E_195D3B7F, 0xF7B3D591_E6A2C480]
; run: %bitrev_i128(0xC0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE) == [0x7777FF03_FFFF537B, 0xFFFF537B_7777FF03]