From 09fec151eba4cde38967eb6044dd668d31c6e661 Mon Sep 17 00:00:00 2001
From: Afonso Bordado
Date: Wed, 2 Jun 2021 17:35:02 +0100
Subject: [PATCH 1/2] aarch64: Add popcnt for i128 values

---
 .../codegen/src/isa/aarch64/lower_inst.rs     | 55 ++++++++++++++-----
 .../filetests/isa/aarch64/bitops.clif         | 18 ++++++
 ...itops-misc.clif => i128-bitops-count.clif} | 17 ++++++
 3 files changed, 77 insertions(+), 13 deletions(-)
 rename cranelift/filetests/filetests/runtests/{i128-bitops-misc.clif => i128-bitops-count.clif} (65%)

diff --git a/cranelift/codegen/src/isa/aarch64/lower_inst.rs b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
index 690eacc298..ae9fd9b4d7 100644
--- a/cranelift/codegen/src/isa/aarch64/lower_inst.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
@@ -1087,27 +1087,54 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }
 
         Opcode::Popcnt => {
-            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
-            let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
+            let out_regs = get_output_reg(ctx, outputs[0]);
+            let in_regs = put_input_in_regs(ctx, inputs[0]);
             let ty = ty.unwrap();
-            let size = ScalarSize::from_operand_size(OperandSize::from_ty(ty));
+            let size = if ty == I128 {
+                ScalarSize::Size64
+            } else {
+                ScalarSize::from_operand_size(OperandSize::from_ty(ty))
+            };
+
+            let vec_size = if ty == I128 {
+                VectorSize::Size8x16
+            } else {
+                VectorSize::Size8x8
+            };
+
             let tmp = ctx.alloc_tmp(I8X16).only_reg().unwrap();
 
-            // fmov tmp, rn
-            // cnt tmp.8b, tmp.8b
-            // addp tmp.8b, tmp.8b, tmp.8b / addv tmp, tmp.8b / (no instruction for 8-bit inputs)
-            // umov rd, tmp.b[0]
+            // fmov tmp, in_lo
+            // if ty == i128:
+            //     mov tmp.d[1], in_hi
+            //
+            // cnt tmp.16b, tmp.16b / cnt tmp.8b, tmp.8b
+            // addv tmp, tmp.16b / addv tmp, tmp.8b / addp tmp.8b, tmp.8b, tmp.8b / (no instruction for 8-bit inputs)
+            //
+            // umov out_lo, tmp.b[0]
+            // if ty == i128:
+            //     mov out_hi, 0
 
             ctx.emit(Inst::MovToFpu {
                 rd: tmp,
-                rn: rn,
+                rn: in_regs.regs()[0],
                 size,
             });
+
+            if ty == I128 {
+                ctx.emit(Inst::MovToVec {
+                    rd: tmp,
+                    rn: in_regs.regs()[1],
+                    idx: 1,
+                    size: VectorSize::Size64x2,
+                });
+            }
+
             ctx.emit(Inst::VecMisc {
                 op: VecMisc2::Cnt,
                 rd: tmp,
                 rn: tmp.to_reg(),
-                size: VectorSize::Size8x8,
+                size: vec_size,
             });
 
             match ScalarSize::from_ty(ty) {
@@ -1122,23 +1149,25 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                         size: VectorSize::Size8x8,
                     });
                 }
-                ScalarSize::Size32 | ScalarSize::Size64 => {
+                ScalarSize::Size32 | ScalarSize::Size64 | ScalarSize::Size128 => {
                     ctx.emit(Inst::VecLanes {
                         op: VecLanesOp::Addv,
                         rd: tmp,
                         rn: tmp.to_reg(),
-                        size: VectorSize::Size8x8,
+                        size: vec_size,
                     });
                 }
-                sz => panic!("Unexpected scalar FP operand size: {:?}", sz),
             }
 
             ctx.emit(Inst::MovFromVec {
-                rd,
+                rd: out_regs.regs()[0],
                 rn: tmp.to_reg(),
                 idx: 0,
                 size: VectorSize::Size8x16,
             });
+
+            if ty == I128 {
+                lower_constant_u64(ctx, out_regs.regs()[1], 0);
+            }
         }
 
         Opcode::Load
diff --git a/cranelift/filetests/filetests/isa/aarch64/bitops.clif b/cranelift/filetests/filetests/isa/aarch64/bitops.clif
index 215a207dda..eaddadb37c 100644
--- a/cranelift/filetests/filetests/isa/aarch64/bitops.clif
+++ b/cranelift/filetests/filetests/isa/aarch64/bitops.clif
@@ -207,6 +207,24 @@ block0(v0: i64):
 ; nextln: ldp fp, lr, [sp], #16
 ; nextln: ret
 
+function %d(i128) -> i128 {
+block0(v0: i128):
+    v1 = popcnt v0
+    return v1
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: fmov d0, x0
+; nextln: mov v0.d[1], x1
+; nextln: cnt v0.16b, v0.16b
+; nextln: addv b0, v0.16b
+; nextln: umov w0, v0.b[0]
+; nextln: movz x1, #0
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+
 function %d(i64) -> i64 {
 block0(v0: i64):
     v1 = popcnt v0
diff --git a/cranelift/filetests/filetests/runtests/i128-bitops-misc.clif b/cranelift/filetests/filetests/runtests/i128-bitops-count.clif
similarity index 65%
rename from cranelift/filetests/filetests/runtests/i128-bitops-misc.clif
rename to cranelift/filetests/filetests/runtests/i128-bitops-count.clif
index ec55510e5d..c701b0911f 100644
--- a/cranelift/filetests/filetests/runtests/i128-bitops-misc.clif
+++ b/cranelift/filetests/filetests/runtests/i128-bitops-count.clif
@@ -25,3 +25,20 @@ block0(v0: i64, v1: i64):
 ; run: %clz(0x00000000_00010000, 0x00000001_00000000) == 31
 ; run: %clz(0x00000000_00010000, 0x00000000_00000000) == 111
 ; run: %clz(0x00000000_00000000, 0x00000000_00000000) == 128
+
+function %popcnt_i128(i64, i64) -> i64 {
+block0(v0: i64, v1: i64):
+    v2 = iconcat v0, v1
+
+    v3 = popcnt v2
+
+    v4, v5 = isplit v3
+    v6 = iadd v4, v5
+    return v6
+}
+; run: %popcnt_i128(0, 0) == 0
+; run: %popcnt_i128(-1, 0) == 64
+; run: %popcnt_i128(0, -1) == 64
+; run: %popcnt_i128(-1, -1) == 128
+; run: %popcnt_i128(0x55555555_55555555, 0x55555555_55555555) == 64
+; run: %popcnt_i128(0xC0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE) == 96

From 9fc89d23163b6872aa51f6820a7c2c2e639e3744 Mon Sep 17 00:00:00 2001
From: Afonso Bordado
Date: Thu, 3 Jun 2021 16:31:45 +0100
Subject: [PATCH 2/2] aarch64: Add bitrev,clz,cls,ctz for i128 values

---
 cranelift/codegen/src/isa/aarch64/lower.rs    |  44 +++++
 .../codegen/src/isa/aarch64/lower_inst.rs     | 178 +++++++++++++-----
 .../filetests/isa/aarch64/bitops.clif         |  67 +++++++
 .../filetests/runtests/i128-bitops-cls.clif   |  24 +++
 .../filetests/runtests/i128-bitops-count.clif |  61 +++---
 .../filetests/runtests/i128-bitops.clif       |  18 ++
 6 files changed, 329 insertions(+), 63 deletions(-)
 create mode 100644 cranelift/filetests/filetests/runtests/i128-bitops-cls.clif

diff --git a/cranelift/codegen/src/isa/aarch64/lower.rs b/cranelift/codegen/src/isa/aarch64/lower.rs
index bdece7311d..3130b03b4c 100644
--- a/cranelift/codegen/src/isa/aarch64/lower.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower.rs
@@ -1471,6 +1471,50 @@ pub(crate) fn emit_shr_i128<C: LowerCtx<I = Inst>>(
     });
 }
 
+pub(crate) fn emit_clz_i128<C: LowerCtx<I = Inst>>(
+    ctx: &mut C,
+    src: ValueRegs<Reg>,
+    dst: ValueRegs<Writable<Reg>>,
+) {
+    let src_lo = src.regs()[0];
+    let src_hi = src.regs()[1];
+    let dst_lo = dst.regs()[0];
+    let dst_hi = dst.regs()[1];
+
+    // clz dst_hi, src_hi
+    // clz dst_lo, src_lo
+    // lsr tmp, dst_hi, #6
+    // madd dst_lo, dst_lo, tmp, dst_hi
+    // mov dst_hi, 0
+    //
+    // clz(src_hi) is 64 exactly when the high half is all zeroes, so the
+    // lsr #6 yields 1 in that case and 0 otherwise; the madd then adds
+    // clz(src_lo) only when the zero run crosses into the low half.
+
+    let tmp = ctx.alloc_tmp(I64).only_reg().unwrap();
+
+    ctx.emit(Inst::BitRR {
+        rd: dst_hi,
+        rn: src_hi,
+        op: BitOp::Clz64,
+    });
+    ctx.emit(Inst::BitRR {
+        rd: dst_lo,
+        rn: src_lo,
+        op: BitOp::Clz64,
+    });
+    ctx.emit(Inst::AluRRImmShift {
+        alu_op: ALUOp::Lsr64,
+        rd: tmp,
+        rn: dst_hi.to_reg(),
+        immshift: ImmShift::maybe_from_u64(6).unwrap(),
+    });
+    ctx.emit(Inst::AluRRRR {
+        alu_op: ALUOp3::MAdd64,
+        rd: dst_lo,
+        rn: dst_lo.to_reg(),
+        rm: tmp.to_reg(),
+        ra: dst_hi.to_reg(),
+    });
+    lower_constant_u64(ctx, dst_hi, 0);
+}
+
 //=============================================================================
 // Lowering-backend trait implementation.
diff --git a/cranelift/codegen/src/isa/aarch64/lower_inst.rs b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
index ae9fd9b4d7..da4501b811 100644
--- a/cranelift/codegen/src/isa/aarch64/lower_inst.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
@@ -1027,24 +1027,10 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }
 
         Opcode::Bitrev | Opcode::Clz | Opcode::Cls | Opcode::Ctz => {
-            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
-            let needs_zext = match op {
-                Opcode::Bitrev | Opcode::Ctz => false,
-                Opcode::Clz | Opcode::Cls => true,
-                _ => unreachable!(),
-            };
             let ty = ty.unwrap();
-            let narrow_mode = if needs_zext && ty_bits(ty) == 64 {
-                NarrowValueMode::ZeroExtend64
-            } else if needs_zext {
-                NarrowValueMode::ZeroExtend32
-            } else {
-                NarrowValueMode::None
-            };
-            let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
             let op_ty = match ty {
                 I8 | I16 | I32 => I32,
-                I64 => I64,
+                I64 | I128 => I64,
                 _ => panic!("Unsupported type for Bitrev/Clz/Cls"),
             };
             let bitop = match op {
@@ -1052,37 +1038,145 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 Opcode::Ctz => BitOp::from((Opcode::Bitrev, op_ty)),
                 _ => unreachable!(),
             };
-            ctx.emit(Inst::BitRR { rd, rn, op: bitop });
 
-            // Both bitrev and ctz use a bit-reverse (rbit) instruction; ctz to reduce the problem
-            // to a clz, and bitrev as the main operation.
-            if op == Opcode::Bitrev || op == Opcode::Ctz {
-                // Reversing an n-bit value (n < 32) with a 32-bit bitrev instruction will place
-                // the reversed result in the highest n bits, so we need to shift them down into
-                // place.
-                let right_shift = match ty {
-                    I8 => Some(24),
-                    I16 => Some(16),
-                    I32 => None,
-                    I64 => None,
-                    _ => panic!("Unsupported type for Bitrev"),
-                };
-                if let Some(s) = right_shift {
-                    ctx.emit(Inst::AluRRImmShift {
-                        alu_op: ALUOp::Lsr32,
-                        rd,
-                        rn: rd.to_reg(),
-                        immshift: ImmShift::maybe_from_u64(s).unwrap(),
+            if ty == I128 {
+                let out_regs = get_output_reg(ctx, outputs[0]);
+                let in_regs = put_input_in_regs(ctx, inputs[0]);
+
+                let in_lo = in_regs.regs()[0];
+                let in_hi = in_regs.regs()[1];
+                let out_lo = out_regs.regs()[0];
+                let out_hi = out_regs.regs()[1];
+
+                if op == Opcode::Bitrev || op == Opcode::Ctz {
+                    // Reverse each half and swap the halves.
+                    ctx.emit(Inst::BitRR {
+                        rd: out_hi,
+                        rn: in_lo,
+                        op: bitop,
+                    });
+                    ctx.emit(Inst::BitRR {
+                        rd: out_lo,
+                        rn: in_hi,
+                        op: bitop,
                     });
                 }
-            }
 
-            if op == Opcode::Ctz {
-                ctx.emit(Inst::BitRR {
-                    op: BitOp::from((Opcode::Clz, op_ty)),
-                    rd,
-                    rn: rd.to_reg(),
-                });
+                if op == Opcode::Ctz {
+                    // We have reduced the problem to a clz by reversing the inputs previously.
+                    emit_clz_i128(ctx, out_regs.map(|r| r.to_reg()), out_regs);
+                } else if op == Opcode::Clz {
+                    emit_clz_i128(ctx, in_regs, out_regs);
+                } else if op == Opcode::Cls {
+                    // cls out_lo, in_lo
+                    // cls out_hi, in_hi
+                    // eon sign_eq, in_hi, in_lo
+                    // lsr sign_eq, sign_eq, #63
+                    // madd out_lo, out_lo, sign_eq, sign_eq
+                    // cmp out_hi, #63
+                    // csel out_lo, out_lo, xzr, eq
+                    // add out_lo, out_lo, out_hi
+                    // mov out_hi, 0
+                    //
+                    // sign_eq is 1 iff the sign bits of the two halves match. The low
+                    // half contributes cls(in_lo) + 1 extra bits, but only if its sign
+                    // bit continues the run (sign_eq == 1) and the high half consists
+                    // entirely of sign bits (cls(in_hi) == 63).
+
+                    let sign_eq = ctx.alloc_tmp(I64).only_reg().unwrap();
+                    let xzr = writable_zero_reg();
+
+                    ctx.emit(Inst::BitRR {
+                        rd: out_lo,
+                        rn: in_lo,
+                        op: bitop,
+                    });
+                    ctx.emit(Inst::BitRR {
+                        rd: out_hi,
+                        rn: in_hi,
+                        op: bitop,
+                    });
+                    ctx.emit(Inst::AluRRR {
+                        alu_op: ALUOp::EorNot64,
+                        rd: sign_eq,
+                        rn: in_hi,
+                        rm: in_lo,
+                    });
+                    ctx.emit(Inst::AluRRImmShift {
+                        alu_op: ALUOp::Lsr64,
+                        rd: sign_eq,
+                        rn: sign_eq.to_reg(),
+                        immshift: ImmShift::maybe_from_u64(63).unwrap(),
+                    });
+                    ctx.emit(Inst::AluRRRR {
+                        alu_op: ALUOp3::MAdd64,
+                        rd: out_lo,
+                        rn: out_lo.to_reg(),
+                        rm: sign_eq.to_reg(),
+                        ra: sign_eq.to_reg(),
+                    });
+                    ctx.emit(Inst::AluRRImm12 {
+                        alu_op: ALUOp::SubS64,
+                        rd: xzr,
+                        rn: out_hi.to_reg(),
+                        imm12: Imm12::maybe_from_u64(63).unwrap(),
+                    });
+                    ctx.emit(Inst::CSel {
+                        cond: Cond::Eq,
+                        rd: out_lo,
+                        rn: out_lo.to_reg(),
+                        rm: xzr.to_reg(),
+                    });
+                    ctx.emit(Inst::AluRRR {
+                        alu_op: ALUOp::Add64,
+                        rd: out_lo,
+                        rn: out_lo.to_reg(),
+                        rm: out_hi.to_reg(),
+                    });
+                    lower_constant_u64(ctx, out_hi, 0);
+                }
+            } else {
+                let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
+                let needs_zext = match op {
+                    Opcode::Bitrev | Opcode::Ctz => false,
+                    Opcode::Clz | Opcode::Cls => true,
+                    _ => unreachable!(),
+                };
+                let narrow_mode = if needs_zext && ty_bits(ty) == 64 {
+                    NarrowValueMode::ZeroExtend64
+                } else if needs_zext {
+                    NarrowValueMode::ZeroExtend32
+                } else {
+                    NarrowValueMode::None
+                };
+                let rn = put_input_in_reg(ctx, inputs[0], narrow_mode);
+
+                ctx.emit(Inst::BitRR { rd, rn, op: bitop });
+
+                // Both bitrev and ctz use a bit-reverse (rbit) instruction; ctz to reduce the problem
+                // to a clz, and bitrev as the main operation.
+                if op == Opcode::Bitrev || op == Opcode::Ctz {
+                    // Reversing an n-bit value (n < 32) with a 32-bit bitrev instruction will place
+                    // the reversed result in the highest n bits, so we need to shift them down into
+                    // place.
+                    let right_shift = match ty {
+                        I8 => Some(24),
+                        I16 => Some(16),
+                        I32 => None,
+                        I64 => None,
+                        _ => panic!("Unsupported type for Bitrev"),
+                    };
+                    if let Some(s) = right_shift {
+                        ctx.emit(Inst::AluRRImmShift {
+                            alu_op: ALUOp::Lsr32,
+                            rd,
+                            rn: rd.to_reg(),
+                            immshift: ImmShift::maybe_from_u64(s).unwrap(),
+                        });
+                    }
+                }
+
+                if op == Opcode::Ctz {
+                    ctx.emit(Inst::BitRR {
+                        op: BitOp::from((Opcode::Clz, op_ty)),
+                        rd,
+                        rn: rd.to_reg(),
+                    });
+                }
             }
         }
diff --git a/cranelift/filetests/filetests/isa/aarch64/bitops.clif b/cranelift/filetests/filetests/isa/aarch64/bitops.clif
index eaddadb37c..18c77e62ae 100644
--- a/cranelift/filetests/filetests/isa/aarch64/bitops.clif
+++ b/cranelift/filetests/filetests/isa/aarch64/bitops.clif
@@ -52,6 +52,19 @@ block0(v0: i64):
 ; nextln: ldp fp, lr, [sp], #16
 ; nextln: ret
 
+function %a(i128) -> i128 {
+block0(v0: i128):
+    v1 = bitrev v0
+    return v1
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: rbit x2, x0
+; nextln: rbit x0, x1
+; nextln: mov x1, x2
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
 
 function %b(i8) -> i8 {
 block0(v0: i8):
@@ -103,6 +116,22 @@ block0(v0: i64):
 ; nextln: ldp fp, lr, [sp], #16
 ; nextln: ret
 
+function %b(i128) -> i128 {
+block0(v0: i128):
+    v1 = clz v0
+    return v1
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: clz x1, x1
+; nextln: clz x0, x0
+; nextln: lsr x2, x1, #6
+; nextln: madd x0, x0, x2, x1
+; nextln: movz x1, #0
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
 function %c(i8) -> i8 {
 block0(v0: i8):
     v1 = cls v0
@@ -153,6 +182,26 @@ block0(v0: i64):
 ; nextln: ldp fp, lr, [sp], #16
 ; nextln: ret
 
+function %c(i128) -> i128 {
+block0(v0: i128):
+    v1 = cls v0
+    return v1
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp +; nextln: cls x2, x0 +; nextln: cls x3, x1 +; nextln: eon x0, x1, x0 +; nextln: lsr x0, x0, #63 +; nextln: madd x0, x2, x0, x0 +; nextln: subs xzr, x3, #63 +; nextln: csel x0, x0, xzr, eq +; nextln: add x0, x0, x3 +; nextln: movz x1, #0 +; nextln: ldp fp, lr, [sp], #16 +; nextln: ret + function %d(i8) -> i8 { block0(v0: i8): v1 = ctz v0 @@ -207,6 +256,24 @@ block0(v0: i64): ; nextln: ldp fp, lr, [sp], #16 ; nextln: ret +function %d(i128) -> i128 { +block0(v0: i128): + v1 = ctz v0 + return v1 +} + +; check: stp fp, lr, [sp, #-16]! +; nextln: mov fp, sp +; nextln: rbit x0, x0 +; nextln: rbit x1, x1 +; nextln: clz x0, x0 +; nextln: clz x1, x1 +; nextln: lsr x2, x0, #6 +; nextln: madd x0, x1, x2, x0 +; nextln: movz x1, #0 +; nextln: ldp fp, lr, [sp], #16 +; nextln: ret + function %d(i128) -> i128 { block0(v0: i128): v1 = popcnt v0 diff --git a/cranelift/filetests/filetests/runtests/i128-bitops-cls.clif b/cranelift/filetests/filetests/runtests/i128-bitops-cls.clif new file mode 100644 index 0000000000..14c82ceec4 --- /dev/null +++ b/cranelift/filetests/filetests/runtests/i128-bitops-cls.clif @@ -0,0 +1,24 @@ +test run +target aarch64 + +; TODO: Move this test into i128-bitops-count.clif when x86_64 supports it +function %cls_i128(i64, i64) -> i64 { +block0(v0: i64, v1: i64): + v2 = iconcat v0, v1 + + v3 = cls v2 + + v4, v5 = isplit v3 + v6 = iadd v4, v5 + return v6 +} +; run: %cls_i128(0x00000000_00000000, 0x00000000_00000000) == 127 +; run: %cls_i128(0xFFFFFFFF_FFFFFFFF, 0x00000000_00000000) == 63 +; run: %cls_i128(0x00000000_00000000, 0xFFFFFFFF_FFFFFFFF) == 63 +; run: %cls_i128(0xFFFFFFFF_FFFFFFFF, 0xFFFFFFFF_FFFFFFFF) == 127 +; run: %cls_i128(0xFFFFFFFF_FFFFFFFF, 0x7FFFFFFF_FFFFFFFF) == 0 +; run: %cls_i128(0xFFFFFFFF_FFFFFFFF, 0x3FFFFFFF_FFFFFFFF) == 1 +; run: %cls_i128(0x7FFFFFFF_FFFFFFFF, 0xFFFFFFFF_FFFFFFFF) == 63 +; run: %cls_i128(0x80000000_00000000, 0xC0000000_00000000) == 1 +; run: %cls_i128(0x00000000_00000000, 0xC0000000_00000000) == 1 +; run: %cls_i128(0x80000000_00000000, 0x80000000_00000000) == 0 diff --git a/cranelift/filetests/filetests/runtests/i128-bitops-count.clif b/cranelift/filetests/filetests/runtests/i128-bitops-count.clif index c701b0911f..6c0f23dd1f 100644 --- a/cranelift/filetests/filetests/runtests/i128-bitops-count.clif +++ b/cranelift/filetests/filetests/runtests/i128-bitops-count.clif @@ -1,30 +1,49 @@ test run +target aarch64 ; target s390x TODO: Not yet implemented on s390x target x86_64 machinst -function %ctz(i64, i64) -> i8 { +function %ctz_i128(i64, i64) -> i64 { block0(v0: i64, v1: i64): v2 = iconcat v0, v1 - v3 = ctz.i128 v2 - v4 = ireduce.i8 v3 - return v4 -} -; run: %ctz(0x00000000_00000000, 0x00000001_00000000) == 96 -; run: %ctz(0x00000000_00010000, 0x00000001_00000000) == 16 -; run: %ctz(0x00000000_00010000, 0x00000000_00000000) == 16 -; run: %ctz(0x00000000_00000000, 0x00000000_00000000) == 128 -function %clz(i64, i64) -> i8 { + v3 = ctz v2 + + v4, v5 = isplit v3 + v6 = iadd v4, v5 + return v6 +} +; run: %ctz_i128(0x00000000_00000000, 0x00000000_00000000) == 128 +; run: %ctz_i128(0xFFFFFFFF_FFFFFFFF, 0x00000000_00000000) == 0 +; run: %ctz_i128(0x00000000_00000000, 0xFFFFFFFF_FFFFFFFF) == 64 +; run: %ctz_i128(0xFFFFFFFF_FFFFFFFF, 0xFFFFFFFF_FFFFFFFF) == 0 +; run: %ctz_i128(0xFFFFFFFF_00000000, 0xF0000000_00000000) == 32 +; run: %ctz_i128(0xF0000000_00000000, 0xFF000000_00000000) == 60 +; run: %ctz_i128(0x00000001_00000000, 0x00000000_00000000) == 32 +; run: %ctz_i128(0x00000000_00000000, 0x00000001_00000000) == 96 +; run: 
%ctz_i128(0x00000000_00010000, 0x00000001_00000000) == 16 +; run: %ctz_i128(0x00000000_00010000, 0x00000000_00000000) == 16 + +function %clz_i128(i64, i64) -> i64 { block0(v0: i64, v1: i64): v2 = iconcat v0, v1 - v3 = clz.i128 v2 - v4 = ireduce.i8 v3 - return v4 + + v3 = clz v2 + + v4, v5 = isplit v3 + v6 = iadd v4, v5 + return v6 } -; run: %clz(0x00000000_00000000, 0x00000001_00000000) == 31 -; run: %clz(0x00000000_00010000, 0x00000001_00000000) == 31 -; run: %clz(0x00000000_00010000, 0x00000000_00000000) == 111 -; run: %clz(0x00000000_00000000, 0x00000000_00000000) == 128 +; run: %clz_i128(0x00000000_00000000, 0x00000000_00000000) == 128 +; run: %clz_i128(0xFFFFFFFF_FFFFFFFF, 0x00000000_00000000) == 64 +; run: %clz_i128(0x00000000_00000000, 0xFFFFFFFF_FFFFFFFF) == 0 +; run: %clz_i128(0xFFFFFFFF_FFFFFFFF, 0xFFFFFFFF_FFFFFFFF) == 0 +; run: %clz_i128(0xFFFFFFFF_FFFFFFFF, 0x40000000_00000000) == 1 +; run: %clz_i128(0xFFFFFFFF_FFFFFFFF, 0x20000000_00000000) == 2 +; run: %clz_i128(0x00000000_00000000, 0x00000000_80000000) == 32 +; run: %clz_i128(0x00000000_00000000, 0x00000001_00000000) == 31 +; run: %clz_i128(0x00000000_00010000, 0x00000001_00000000) == 31 +; run: %clz_i128(0x00000000_00010000, 0x00000000_00000000) == 111 function %popcnt_i128(i64, i64) -> i64 { block0(v0: i64, v1: i64): @@ -36,9 +55,9 @@ block0(v0: i64, v1: i64): v6 = iadd v4, v5 return v6 } -; run: %popcnt_i128(0, 0) == 0 -; run: %popcnt_i128(-1, 0) == 64 -; run: %popcnt_i128(0, -1) == 64 -; run: %popcnt_i128(-1, -1) == 128 +; run: %popcnt_i128(0x00000000_00000000, 0x00000000_00000000) == 0 +; run: %popcnt_i128(0xFFFFFFFF_FFFFFFFF, 0x00000000_00000000) == 64 +; run: %popcnt_i128(0x00000000_00000000, 0xFFFFFFFF_FFFFFFFF) == 64 +; run: %popcnt_i128(0xFFFFFFFF_FFFFFFFF, 0xFFFFFFFF_FFFFFFFF) == 128 ; run: %popcnt_i128(0x55555555_55555555, 0x55555555_55555555) == 64 ; run: %popcnt_i128(0xC0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE) == 96 diff --git a/cranelift/filetests/filetests/runtests/i128-bitops.clif b/cranelift/filetests/filetests/runtests/i128-bitops.clif index 5a1139f1d0..1723186e6b 100644 --- a/cranelift/filetests/filetests/runtests/i128-bitops.clif +++ b/cranelift/filetests/filetests/runtests/i128-bitops.clif @@ -133,3 +133,21 @@ return v7, v8 ; run: %bxor_not_i128(0x01234567_89ABCDEF, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210, 0x01234567_89ABCDEF) == [0, 0] ; run: %bxor_not_i128(0x8FA50A64_8FA50A64, 0x9440A07D_9440A07D, 0xB0A51B75_B0A51B75, 0xB575A07D_B575A07D) == [0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF] + + +function %bitrev_i128(i64, i64) -> i64, i64 { +block0(v0: i64, v1: i64): + v2 = iconcat v0, v1 + + v3 = bitrev v2 + + v4, v5 = isplit v3 + return v4, v5 +} +; run: %bitrev_i128(0, 0) == [0, 0] +; run: %bitrev_i128(-1, -1) == [-1, -1] +; run: %bitrev_i128(-1, 0) == [0, -1] +; run: %bitrev_i128(0, -1) == [-1, 0] +; run: %bitrev_i128(0x00000000_00000000, 0x80000000_00000000) == [1, 0] +; run: %bitrev_i128(0x01234567_89ABCDEF, 0xFEDCBA98_76543210) == [0x084C2A6E_195D3B7F, 0xF7B3D591_E6A2C480] +; run: %bitrev_i128(0xC0FFEEEE_DECAFFFF, 0xDECAFFFF_C0FFEEEE) == [0x7777FF03_FFFF537B, 0xFFFF537B_7777FF03]
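
Editor's addendum, not part of the patch series: the lowerings above rest on a few 64-bit identities that are easy to sanity-check on the host. The standalone Rust sketch below mirrors the popcnt, clz, and ctz sequences (including the lsr #6 / madd trick from emit_clz_i128) and asserts them against Rust's native u128 operations as the reference; the file and helper names are illustrative, not from the Cranelift tree.

// check_unsigned_counts.rs: rustc check_unsigned_counts.rs && ./check_unsigned_counts

// Mirrors emit_clz_i128: clz(hi) >> 6 is 1 exactly when hi == 0
// (clz(hi) == 64), so the multiply-add includes clz(lo) only when the
// zero run crosses into the low half.
fn clz128(lo: u64, hi: u64) -> u64 {
    let clz_hi = u64::from(hi.leading_zeros());
    let clz_lo = u64::from(lo.leading_zeros());
    clz_lo * (clz_hi >> 6) + clz_hi
}

// Mirrors the ctz lowering: rbit both halves, swap them, then reuse clz.
fn ctz128(lo: u64, hi: u64) -> u64 {
    clz128(hi.reverse_bits(), lo.reverse_bits())
}

// Mirrors the popcnt lowering: cnt + addv over all 16 byte lanes is just
// popcnt(lo) + popcnt(hi).
fn popcnt128(lo: u64, hi: u64) -> u64 {
    u64::from(lo.count_ones() + hi.count_ones())
}

fn main() {
    // Includes the values exercised by the run tests above.
    let cases: &[(u64, u64)] = &[
        (0, 0),
        (u64::MAX, 0),
        (0, u64::MAX),
        (u64::MAX, u64::MAX),
        (0x0000_0000_0001_0000, 0x0000_0001_0000_0000),
        (0xC0FF_EEEE_DECA_FFFF, 0xDECA_FFFF_C0FF_EEEE),
    ];
    for &(lo, hi) in cases {
        let x = (u128::from(hi) << 64) | u128::from(lo);
        assert_eq!(clz128(lo, hi), u64::from(x.leading_zeros()));
        assert_eq!(ctz128(lo, hi), u64::from(x.trailing_zeros()));
        assert_eq!(popcnt128(lo, hi), u64::from(x.count_ones()));
    }
    println!("clz/ctz/popcnt identities hold");
}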
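
The cls sequence is the subtlest of the four, so it gets a companion check under the same assumptions; the reference applies the sign-run identity cls(x) = clz(x ^ (x >> 1)) - 1 to the full 128 bits.

// check_cls.rs: rustc check_cls.rs && ./check_cls

// aarch64 cls of one 64-bit half: leading bits equal to the sign bit,
// with the sign bit itself excluded; cls(0) == cls(-1) == 63.
fn cls64(x: i64) -> u64 {
    u64::from((x ^ (x >> 1)).leading_zeros()) - 1
}

// Mirrors the emitted cls i128 sequence: sign_eq (eon + lsr #63) is 1 iff
// the two halves' sign bits match; the low half then contributes
// cls(lo) + 1 bits, kept only when the high half is entirely sign bits
// (cls(hi) == 63, the cmp #63 / csel pair).
fn cls128(lo: u64, hi: u64) -> u64 {
    let sign_eq = !(hi ^ lo) >> 63;
    let lo_part = cls64(lo as i64) * sign_eq + sign_eq;
    let kept = if cls64(hi as i64) == 63 { lo_part } else { 0 };
    kept + cls64(hi as i64)
}

fn main() {
    // Includes the values exercised by i128-bitops-cls.clif above.
    let cases: &[(u64, u64)] = &[
        (0, 0),
        (u64::MAX, 0),
        (0, u64::MAX),
        (u64::MAX, u64::MAX),
        (u64::MAX, 0x7FFF_FFFF_FFFF_FFFF),
        (0x8000_0000_0000_0000, 0xC000_0000_0000_0000),
        (0x8000_0000_0000_0000, 0x8000_0000_0000_0000),
    ];
    for &(lo, hi) in cases {
        let x = ((u128::from(hi) << 64) | u128::from(lo)) as i128;
        let expected = u64::from((x ^ (x >> 1)).leading_zeros()) - 1;
        assert_eq!(cls128(lo, hi), expected);
    }
    println!("cls identity holds");
}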