From b1475f32a61583c889baa83acee9c24a23ef19e6 Mon Sep 17 00:00:00 2001
From: Afonso Bordado
Date: Sun, 30 May 2021 16:50:22 +0100
Subject: [PATCH] aarch64: Add ishl,ushr,sshr for i128 values

---
 cranelift/codegen/src/isa/aarch64/lower.rs  | 205 ++++++++++++++++++
 .../codegen/src/isa/aarch64/lower_inst.rs   |  22 +-
 .../filetests/isa/aarch64/bitops.clif       | 132 ++++++++++-
 .../filetests/runtests/i128-arithmetic.clif |   2 +-
 4 files changed, 357 insertions(+), 4 deletions(-)

diff --git a/cranelift/codegen/src/isa/aarch64/lower.rs b/cranelift/codegen/src/isa/aarch64/lower.rs
index d07311159e..bdece7311d 100644
--- a/cranelift/codegen/src/isa/aarch64/lower.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower.rs
@@ -1266,6 +1266,211 @@ pub(crate) fn lower_load<C: LowerCtx<I = Inst>, F: FnMut(&mut C, Writable<Reg>,
     f(ctx, rd, elem_ty, mem);
 }
 
+pub(crate) fn emit_shl_i128<C: LowerCtx<I = Inst>>(
+    ctx: &mut C,
+    src: ValueRegs<Reg>,
+    dst: ValueRegs<Writable<Reg>>,
+    amt: Reg,
+) {
+    let src_lo = src.regs()[0];
+    let src_hi = src.regs()[1];
+    let dst_lo = dst.regs()[0];
+    let dst_hi = dst.regs()[1];
+
+    // mvn inv_amt, amt
+    // lsr tmp1, src_lo, #1
+    // lsl tmp2, src_hi, amt
+    // lsr tmp1, tmp1, inv_amt
+    // lsl tmp3, src_lo, amt
+    // tst amt, #0x40
+    // orr tmp2, tmp2, tmp1
+    // csel dst_hi, tmp3, tmp2, ne
+    // csel dst_lo, xzr, tmp3, ne
+
+    let xzr = writable_zero_reg();
+    let inv_amt = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp1 = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp2 = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp3 = ctx.alloc_tmp(I64).only_reg().unwrap();
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::OrrNot32,
+        rd: inv_amt,
+        rn: xzr.to_reg(),
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRImmShift {
+        alu_op: ALUOp::Lsr64,
+        rd: tmp1,
+        rn: src_lo,
+        immshift: ImmShift::maybe_from_u64(1).unwrap(),
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Lsl64,
+        rd: tmp2,
+        rn: src_hi,
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Lsr64,
+        rd: tmp1,
+        rn: tmp1.to_reg(),
+        rm: inv_amt.to_reg(),
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Lsl64,
+        rd: tmp3,
+        rn: src_lo,
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRImmLogic {
+        alu_op: ALUOp::AndS64,
+        rd: xzr,
+        rn: amt,
+        imml: ImmLogic::maybe_from_u64(64, I64).unwrap(),
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Orr64,
+        rd: tmp2,
+        rn: tmp2.to_reg(),
+        rm: tmp1.to_reg(),
+    });
+
+    ctx.emit(Inst::CSel {
+        cond: Cond::Ne,
+        rd: dst_hi,
+        rn: tmp3.to_reg(),
+        rm: tmp2.to_reg(),
+    });
+
+    ctx.emit(Inst::CSel {
+        cond: Cond::Ne,
+        rd: dst_lo,
+        rn: xzr.to_reg(),
+        rm: tmp3.to_reg(),
+    });
+}
+
+pub(crate) fn emit_shr_i128<C: LowerCtx<I = Inst>>(
+    ctx: &mut C,
+    src: ValueRegs<Reg>,
+    dst: ValueRegs<Writable<Reg>>,
+    amt: Reg,
+    is_signed: bool,
+) {
+    let src_lo = src.regs()[0];
+    let src_hi = src.regs()[1];
+    let dst_lo = dst.regs()[0];
+    let dst_hi = dst.regs()[1];
+
+    // mvn inv_amt, amt
+    // lsl tmp1, src_hi, #1
+    // lsr tmp2, src_lo, amt
+    // lsl tmp1, tmp1, inv_amt
+    // lsr/asr tmp3, src_hi, amt
+    // tst amt, #0x40
+    // orr tmp2, tmp2, tmp1
+    //
+    // if signed:
+    //     asr tmp4, src_hi, #63
+    //     csel dst_hi, tmp4, tmp3, ne
+    // else:
+    //     csel dst_hi, xzr, tmp3, ne
+    //
+    // csel dst_lo, tmp3, tmp2, ne
+
+    let xzr = writable_zero_reg();
+    let inv_amt = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp1 = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp2 = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp3 = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp4 = ctx.alloc_tmp(I64).only_reg().unwrap();
+
+    let shift_op = if is_signed {
+        ALUOp::Asr64
+    } else {
+        ALUOp::Lsr64
+    };
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::OrrNot32,
+        rd: inv_amt,
+        rn: xzr.to_reg(),
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRImmShift {
+        alu_op: ALUOp::Lsl64,
+        rd: tmp1,
+        rn: src_hi,
+        immshift: ImmShift::maybe_from_u64(1).unwrap(),
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Lsr64,
+        rd: tmp2,
+        rn: src_lo,
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Lsl64,
+        rd: tmp1,
+        rn: tmp1.to_reg(),
+        rm: inv_amt.to_reg(),
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: shift_op,
+        rd: tmp3,
+        rn: src_hi,
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRImmLogic {
+        alu_op: ALUOp::AndS64,
+        rd: xzr,
+        rn: amt,
+        imml: ImmLogic::maybe_from_u64(64, I64).unwrap(),
+    });
+
+    if is_signed {
+        ctx.emit(Inst::AluRRImmShift {
+            alu_op: ALUOp::Asr64,
+            rd: tmp4,
+            rn: src_hi,
+            immshift: ImmShift::maybe_from_u64(63).unwrap(),
+        });
+    }
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Orr64,
+        rd: tmp2,
+        rn: tmp2.to_reg(),
+        rm: tmp1.to_reg(),
+    });
+
+    ctx.emit(Inst::CSel {
+        cond: Cond::Ne,
+        rd: dst_hi,
+        rn: if is_signed { tmp4 } else { xzr }.to_reg(),
+        rm: tmp3.to_reg(),
+    });
+
+    ctx.emit(Inst::CSel {
+        cond: Cond::Ne,
+        rd: dst_lo,
+        rn: tmp3.to_reg(),
+        rm: tmp2.to_reg(),
+    });
+}
+
 //=============================================================================
 // Lowering-backend trait implementation.
diff --git a/cranelift/codegen/src/isa/aarch64/lower_inst.rs b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
index a18d24deae..10e8d2e9ac 100644
--- a/cranelift/codegen/src/isa/aarch64/lower_inst.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
@@ -768,9 +768,26 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }
 
         Opcode::Ishl | Opcode::Ushr | Opcode::Sshr => {
+            let out_regs = get_output_reg(ctx, outputs[0]);
             let ty = ty.unwrap();
-            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
-            if !ty.is_vector() {
+            if ty == I128 {
+                // TODO: We can use immlogic here
+                let src = put_input_in_regs(ctx, inputs[0]);
+                // We can ignore the top half of the shift amount register
+                let amt = put_input_in_regs(ctx, inputs[1]).regs()[0];
+
+                match op {
+                    Opcode::Ishl => emit_shl_i128(ctx, src, out_regs, amt),
+                    Opcode::Ushr => {
+                        emit_shr_i128(ctx, src, out_regs, amt, /* is_signed = */ false)
+                    }
+                    Opcode::Sshr => {
+                        emit_shr_i128(ctx, src, out_regs, amt, /* is_signed = */ true)
+                    }
+                    _ => unreachable!(),
+                };
+            } else if !ty.is_vector() {
+                let rd = out_regs.only_reg().unwrap();
                 let size = OperandSize::from_bits(ty_bits(ty));
                 let narrow_mode = match (op, size) {
                     (Opcode::Ishl, _) => NarrowValueMode::None,
@@ -790,6 +807,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 };
                 ctx.emit(alu_inst_immshift(alu_op, rd, rn, rm));
             } else {
+                let rd = out_regs.only_reg().unwrap();
                 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
                 let size = VectorSize::from_ty(ty);
                 let (alu_op, is_right_shift) = match op {
diff --git a/cranelift/filetests/filetests/isa/aarch64/bitops.clif b/cranelift/filetests/filetests/isa/aarch64/bitops.clif
index 32e7fe7f04..215a207dda 100644
--- a/cranelift/filetests/filetests/isa/aarch64/bitops.clif
+++ b/cranelift/filetests/filetests/isa/aarch64/bitops.clif
@@ -383,4 +383,134 @@ block0(v0: i128, v1: i128):
 ; nextln: eon x0, x0, x2
 ; nextln: eon x1, x1, x3
 ; nextln: ldp fp, lr, [sp], #16
-; nextln: ret
\ No newline at end of file
+; nextln: ret
+
+
+function %ishl_i128_i8(i128, i8) -> i128 {
+block0(v0: i128, v1: i8):
+    v2 = ishl.i128 v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orn w3, wzr, w2
+; nextln: lsr x4, x0, #1
+; nextln: lsl x1, x1, x2
+; nextln: lsr x3, x4, x3
+; nextln: lsl x0, x0, x2
+; nextln: ands xzr, x2, #64
+; nextln: orr x1, x1, x3
+; nextln: csel x1, x0, x1, ne
+; nextln: csel x0, xzr, x0, ne
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+
+function %ishl_i128_i128(i128, i128) -> i128 {
+block0(v0: i128, v1: i128):
+    v2 = ishl.i128 v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orn w3, wzr, w2
+; nextln: lsr x4, x0, #1
+; nextln: lsl x1, x1, x2
+; nextln: lsr x3, x4, x3
+; nextln: lsl x0, x0, x2
+; nextln: ands xzr, x2, #64
+; nextln: orr x1, x1, x3
+; nextln: csel x1, x0, x1, ne
+; nextln: csel x0, xzr, x0, ne
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+
+function %ushr_i128_i8(i128, i8) -> i128 {
+block0(v0: i128, v1: i8):
+    v2 = ushr.i128 v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orn w3, wzr, w2
+; nextln: lsl x4, x1, #1
+; nextln: lsr x0, x0, x2
+; nextln: lsl x3, x4, x3
+; nextln: lsr x1, x1, x2
+; nextln: ands xzr, x2, #64
+; nextln: orr x0, x0, x3
+; nextln: csel x2, xzr, x1, ne
+; nextln: csel x0, x1, x0, ne
+; nextln: mov x1, x2
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+
+function %ushr_i128_i128(i128, i128) -> i128 {
+block0(v0: i128, v1: i128):
+    v2 = ushr.i128 v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orn w3, wzr, w2
+; nextln: lsl x4, x1, #1
+; nextln: lsr x0, x0, x2
+; nextln: lsl x3, x4, x3
+; nextln: lsr x1, x1, x2
+; nextln: ands xzr, x2, #64
+; nextln: orr x0, x0, x3
+; nextln: csel x2, xzr, x1, ne
+; nextln: csel x0, x1, x0, ne
+; nextln: mov x1, x2
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+
+function %sshr_i128_i8(i128, i8) -> i128 {
+block0(v0: i128, v1: i8):
+    v2 = sshr.i128 v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orn w3, wzr, w2
+; nextln: lsl x4, x1, #1
+; nextln: lsr x0, x0, x2
+; nextln: lsl x4, x4, x3
+; nextln: asr x3, x1, x2
+; nextln: ands xzr, x2, #64
+; nextln: asr x1, x1, #63
+; nextln: orr x0, x0, x4
+; nextln: csel x1, x1, x3, ne
+; nextln: csel x0, x3, x0, ne
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+
+function %sshr_i128_i128(i128, i128) -> i128 {
+block0(v0: i128, v1: i128):
+    v2 = sshr.i128 v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orn w3, wzr, w2
+; nextln: lsl x4, x1, #1
+; nextln: lsr x0, x0, x2
+; nextln: lsl x4, x4, x3
+; nextln: asr x3, x1, x2
+; nextln: ands xzr, x2, #64
+; nextln: asr x1, x1, #63
+; nextln: orr x0, x0, x4
+; nextln: csel x1, x1, x3, ne
+; nextln: csel x0, x3, x0, ne
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
diff --git a/cranelift/filetests/filetests/runtests/i128-arithmetic.clif b/cranelift/filetests/filetests/runtests/i128-arithmetic.clif
index efa31ae42d..f78bdb7ed7 100644
--- a/cranelift/filetests/filetests/runtests/i128-arithmetic.clif
+++ b/cranelift/filetests/filetests/runtests/i128-arithmetic.clif
@@ -1,5 +1,5 @@
 test run
-; target aarch64 TODO: Not yet implemented on aarch64
+target aarch64
 ; target s390x TODO: Not yet implemented on s390x
 target x86_64 machinst
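---
Note, not part of the patch: the csel-based selection in emit_shl_i128 and
emit_shr_i128 is easy to misread, so below is a standalone Rust sketch of the
two emitted sequences, checked against Rust's native 128-bit shifts. The names
shl_i128/shr_i128 and the test harness are illustrative only and do not exist
in Cranelift. The model assumes what the lowering relies on: AArch64 variable
shifts by register use the amount modulo 64, so bit 6 of the amount (the
tst amt, #0x40) is all that distinguishes the shift-by-64-or-more case that
the csels select.

    // Model of emit_shl_i128: compute both candidate results with 64-bit
    // operations, then select on bit 6 of the amount, exactly as the csels do.
    fn shl_i128(lo: u64, hi: u64, amt: u32) -> (u64, u64) {
        let s = amt & 63;            // lsl/lsr by register take amt mod 64
        let inv = !amt & 63;         // mvn inv_amt, amt (low 6 bits matter)
        let tmp1 = (lo >> 1) >> inv; // bits of lo that carry into hi
        let tmp2 = (hi << s) | tmp1; // lsl tmp2, src_hi, amt; orr
        let tmp3 = lo << s;          // lsl tmp3, src_lo, amt
        if amt & 0x40 != 0 {
            (0, tmp3)                // amt >= 64: lo lands entirely in hi
        } else {
            (tmp3, tmp2)
        }
    }

    // Model of emit_shr_i128: lsr when unsigned, asr when signed; the
    // high result is zero- or sign-filled once the amount reaches 64.
    fn shr_i128(lo: u64, hi: u64, amt: u32, signed: bool) -> (u64, u64) {
        let s = amt & 63;
        let inv = !amt & 63;
        let tmp1 = (hi << 1) << inv; // bits of hi that carry into lo
        let tmp2 = (lo >> s) | tmp1;
        let tmp3 = if signed { ((hi as i64) >> s) as u64 } else { hi >> s };
        let sign = ((hi as i64) >> 63) as u64; // asr tmp4, src_hi, #63
        if amt & 0x40 != 0 {
            (tmp3, if signed { sign } else { 0 })
        } else {
            (tmp2, tmp3)
        }
    }

    // Exhaustively compare the models against u128/i128 shifts for all
    // amounts 0..128 over a few representative bit patterns.
    fn main() {
        let vals = [0u128, 1, u64::MAX as u128, 1u128 << 64, u128::MAX,
                    0x0123_4567_89ab_cdef_fedc_ba98_7654_3210];
        for &v in &vals {
            let (lo, hi) = (v as u64, (v >> 64) as u64);
            for amt in 0..128u32 {
                let (l, h) = shl_i128(lo, hi, amt);
                assert_eq!(((h as u128) << 64) | l as u128, v << amt);
                let (l, h) = shr_i128(lo, hi, amt, false);
                assert_eq!(((h as u128) << 64) | l as u128, v >> amt);
                let (l, h) = shr_i128(lo, hi, amt, true);
                assert_eq!(((h as u128) << 64) | l as u128,
                           ((v as i128) >> amt) as u128);
            }
        }
        println!("all 128-bit shift models match");
    }

Computing both candidates and selecting with csel keeps the emitted sequence
branch-free, which is why the lowering uses nine straight-line instructions
rather than a compare-and-branch on the shift amount.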