From c38a5e8b62040915b7d60f6a55e8fb5fab81d859 Mon Sep 17 00:00:00 2001
From: Afonso Bordado
Date: Wed, 26 May 2021 16:01:26 +0100
Subject: [PATCH 1/3] aarch64: Add basic i128 bit ops to the AArch64 backend

Currently we lower these as a two-instruction version of the same i64
ops, one per 64-bit half. ImmLogic doesn't support multiple register
inputs, so using it here is left as a TODO for future optimization.
---
 .../codegen/src/isa/aarch64/lower_inst.rs     |  54 ++++++-
 .../filetests/isa/aarch64/bitops.clif         |  91 ++++++++++++
 .../filetests/runtests/i128-bitops.clif       | 135 ++++++++++++++++++
 3 files changed, 275 insertions(+), 5 deletions(-)
 create mode 100644 cranelift/filetests/filetests/runtests/i128-bitops.clif

diff --git a/cranelift/codegen/src/isa/aarch64/lower_inst.rs b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
index c4384fa83a..a18d24deae 100644
--- a/cranelift/codegen/src/isa/aarch64/lower_inst.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
@@ -661,14 +661,31 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }
 
         Opcode::Bnot => {
-            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
+            let out_regs = get_output_reg(ctx, outputs[0]);
             let ty = ty.unwrap();
-            if !ty.is_vector() {
+            if ty == I128 {
+                // TODO: We can merge this block with the one below once we support immlogic here
+                let in_regs = put_input_in_regs(ctx, inputs[0]);
+                ctx.emit(Inst::AluRRR {
+                    alu_op: ALUOp::OrrNot64,
+                    rd: out_regs.regs()[0],
+                    rn: zero_reg(),
+                    rm: in_regs.regs()[0],
+                });
+                ctx.emit(Inst::AluRRR {
+                    alu_op: ALUOp::OrrNot64,
+                    rd: out_regs.regs()[1],
+                    rn: zero_reg(),
+                    rm: in_regs.regs()[1],
+                });
+            } else if !ty.is_vector() {
+                let rd = out_regs.only_reg().unwrap();
                 let rm = put_input_in_rs_immlogic(ctx, inputs[0], NarrowValueMode::None);
                 let alu_op = choose_32_64(ty, ALUOp::OrrNot32, ALUOp::OrrNot64);
                 // NOT rd, rm ==> ORR_NOT rd, zero, rm
                 ctx.emit(alu_inst_immlogic(alu_op, rd, zero_reg(), rm));
             } else {
+                let rd = out_regs.only_reg().unwrap();
                 let rm = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
                 ctx.emit(Inst::VecMisc {
                     op: VecMisc2::Not,
@@ -685,9 +702,36 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         | Opcode::BandNot
         | Opcode::BorNot
         | Opcode::BxorNot => {
-            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
+            let out_regs = get_output_reg(ctx, outputs[0]);
             let ty = ty.unwrap();
-            if !ty.is_vector() {
+            if ty == I128 {
+                // TODO: Support immlogic here
+                let lhs = put_input_in_regs(ctx, inputs[0]);
+                let rhs = put_input_in_regs(ctx, inputs[1]);
+                let alu_op = match op {
+                    Opcode::Band => ALUOp::And64,
+                    Opcode::Bor => ALUOp::Orr64,
+                    Opcode::Bxor => ALUOp::Eor64,
+                    Opcode::BandNot => ALUOp::AndNot64,
+                    Opcode::BorNot => ALUOp::OrrNot64,
+                    Opcode::BxorNot => ALUOp::EorNot64,
+                    _ => unreachable!(),
+                };
+
+                ctx.emit(Inst::AluRRR {
+                    alu_op,
+                    rd: out_regs.regs()[0],
+                    rn: lhs.regs()[0],
+                    rm: rhs.regs()[0],
+                });
+                ctx.emit(Inst::AluRRR {
+                    alu_op,
+                    rd: out_regs.regs()[1],
+                    rn: lhs.regs()[1],
+                    rm: rhs.regs()[1],
+                });
+            } else if !ty.is_vector() {
+                let rd = out_regs.only_reg().unwrap();
                 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
                 let rm = put_input_in_rs_immlogic(ctx, inputs[1], NarrowValueMode::None);
                 let alu_op = match op {
@@ -711,7 +755,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
 
                 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
                 let rm = put_input_in_reg(ctx, inputs[1], NarrowValueMode::None);
-                let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
+                let rd = out_regs.only_reg().unwrap();
 
                 ctx.emit(Inst::VecRRR {
                     alu_op,
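The reference semantics of this lowering are plain two-halves arithmetic: an
i128 value lives in a pair of 64-bit registers, and each bit op is the same
64-bit op applied to the low and high halves independently. A minimal sketch
in plain Rust (the function names and the (lo, hi) pair representation are
illustrative, not part of the patch):

    // Each i128 bit op decomposes into two independent i64 ops.
    fn bnot_i128(lo: u64, hi: u64) -> (u64, u64) {
        // NOT rd, rm is emitted as ORN rd, xzr, rm, since 0 | !x == !x.
        (!lo, !hi)
    }

    fn band_not_i128(a: (u64, u64), b: (u64, u64)) -> (u64, u64) {
        // BIC (ALUOp::AndNot64) computes rn & !rm, per half.
        (a.0 & !b.0, a.1 & !b.1)
    }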
diff --git a/cranelift/filetests/filetests/isa/aarch64/bitops.clif b/cranelift/filetests/filetests/isa/aarch64/bitops.clif
index e651be167a..32e7fe7f04 100644
--- a/cranelift/filetests/filetests/isa/aarch64/bitops.clif
+++ b/cranelift/filetests/filetests/isa/aarch64/bitops.clif
@@ -293,3 +293,94 @@ block0:
 ; nextln: sbfx w0, w0, #0, #1
 ; nextln: ldp fp, lr, [sp], #16
 ; nextln: ret
+
+function %bnot_i128(i128) -> i128 {
+block0(v0: i128):
+    v1 = bnot v0
+    return v1
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orn x0, xzr, x0
+; nextln: orn x1, xzr, x1
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+function %band_i128(i128, i128) -> i128 {
+block0(v0: i128, v1: i128):
+    v2 = band v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: and x0, x0, x2
+; nextln: and x1, x1, x3
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+function %bor_i128(i128, i128) -> i128 {
+block0(v0: i128, v1: i128):
+    v2 = bor v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orr x0, x0, x2
+; nextln: orr x1, x1, x3
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+function %bxor_i128(i128, i128) -> i128 {
+block0(v0: i128, v1: i128):
+    v2 = bxor v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: eor x0, x0, x2
+; nextln: eor x1, x1, x3
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+function %band_not_i128(i128, i128) -> i128 {
+block0(v0: i128, v1: i128):
+    v2 = band_not v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: bic x0, x0, x2
+; nextln: bic x1, x1, x3
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+function %bor_not_i128(i128, i128) -> i128 {
+block0(v0: i128, v1: i128):
+    v2 = bor_not v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orn x0, x0, x2
+; nextln: orn x1, x1, x3
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+function %bxor_not_i128(i128, i128) -> i128 {
+block0(v0: i128, v1: i128):
+    v2 = bxor_not v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: eon x0, x0, x2
+; nextln: eon x1, x1, x3
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
\ No newline at end of file
diff --git a/cranelift/filetests/filetests/runtests/i128-bitops.clif b/cranelift/filetests/filetests/runtests/i128-bitops.clif
new file mode 100644
index 0000000000..5a1139f1d0
--- /dev/null
+++ b/cranelift/filetests/filetests/runtests/i128-bitops.clif
@@ -0,0 +1,135 @@
+test run
+target aarch64
+; target s390x TODO: Not yet implemented on s390x
+; target x86_64 TODO: Not yet implemented on x86_64
+
+; i128 tests
+; TODO: Clean up these tests when we have native support for i128 immediates in CLIF's parser
+function %bnot_i128(i64, i64) -> i64, i64 {
+block0(v0: i64, v1: i64):
+    v2 = iconcat v0, v1
+
+    v3 = bnot v2
+
+    v4, v5 = isplit v3
+    return v4, v5
+}
+; run: %bnot_i128(0, 0) == [-1, -1]
+; run: %bnot_i128(-1, -1) == [0, 0]
+; run: %bnot_i128(-1, 0) == [0, -1]
+
+; run: %bnot_i128(0x3F001111_3F001111, 0x21350000_21350000) == [0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF]
+
+
+function %band_i128(i64, i64, i64, i64) -> i64, i64 {
+block0(v0: i64, v1: i64, v2: i64, v3: i64):
+    v4 = iconcat v0, v1
+    v5 = iconcat v2, v3
+
+    v6 = band v4, v5
+
+    v7, v8 = isplit v6
+    return v7, v8
+}
+; run: %band_i128(0, 0, 0, 0) == [0, 0]
+; run: %band_i128(-1, -1, 0, 0) == [0, 0]
+; run: %band_i128(-1, -1, -1, -1) == [-1, -1]
+; run: %band_i128(-1, -1, 0, -1) == [0, -1]
+
+; run: %band_i128(0x01234567_89ABCDEF, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210, 0x01234567_89ABCDEF) == [0, 0]
+; run: %band_i128(0xF1FFFEFE_F1FFFEFE, 0xFEEEFFFF_FEEEFFFF, 0xCEFFEFEF_CEFFEFEF, 0xDFDBFFFF_DFDBFFFF) == [0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF]
+
+
+function %bor_i128(i64, i64, i64, i64) -> i64, i64 {
+block0(v0: i64, v1: i64, v2: i64, v3: i64):
+    v4 = iconcat v0, v1
+    v5 = iconcat v2, v3
+
+    v6 = bor v4, v5
+
+    v7, v8 = isplit v6
+    return v7, v8
+}
+; run: %bor_i128(0, 0, 0, 0) == [0, 0]
+; run: %bor_i128(-1, -1, 0, 0) == [-1, -1]
+; run: %bor_i128(-1, -1, -1, -1) == [-1, -1]
+; run: %bor_i128(0, 0, 0, -1) == [0, -1]
+
+; run: %bor_i128(0x01234567_89ABCDEF, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210, 0x01234567_89ABCDEF) == [-1, -1]
+; run: %bor_i128(0x80AAAAAA_80AAAAAA, 0x8A8AAAAA_8A8AAAAA, 0x40554444_40554444, 0x54405555_54405555) == [0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF]
+
+
+function %bxor_i128(i64, i64, i64, i64) -> i64, i64 {
+block0(v0: i64, v1: i64, v2: i64, v3: i64):
+    v4 = iconcat v0, v1
+    v5 = iconcat v2, v3
+
+    v6 = bxor v4, v5
+
+    v7, v8 = isplit v6
+    return v7, v8
+}
+; run: %bxor_i128(0, 0, 0, 0) == [0, 0]
+; run: %bxor_i128(-1, -1, 0, 0) == [-1, -1]
+; run: %bxor_i128(-1, -1, -1, -1) == [0, 0]
+; run: %bxor_i128(-1, -1, 0, -1) == [-1, 0]
+
+; run: %bxor_i128(0x01234567_89ABCDEF, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210, 0x01234567_89ABCDEF) == [-1, -1]
+; run: %bxor_i128(0x8FA50A64_8FA50A64, 0x9440A07D_9440A07D, 0x4F5AE48A_4F5AE48A, 0x4A8A5F82_4A8A5F82) == [0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF]
+
+
+function %band_not_i128(i64, i64, i64, i64) -> i64, i64 {
+block0(v0: i64, v1: i64, v2: i64, v3: i64):
+    v4 = iconcat v0, v1
+    v5 = iconcat v2, v3
+
+    v6 = band_not v4, v5
+
+    v7, v8 = isplit v6
+    return v7, v8
+}
+; run: %band_not_i128(0, 0, 0, 0) == [0, 0]
+; run: %band_not_i128(-1, -1, 0, 0) == [-1, -1]
+; run: %band_not_i128(-1, -1, -1, -1) == [0, 0]
+; run: %band_not_i128(-1, -1, 0, -1) == [-1, 0]
+
+; run: %band_not_i128(0x01234567_89ABCDEF, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210, 0x01234567_89ABCDEF) == [0x01234567_89ABCDEF, 0xFEDCBA98_76543210]
+; run: %band_not_i128(0xF1FFFEFE_F1FFFEFE, 0xFEEEFFFF_FEEEFFFF, 0x31001010_31001010, 0x20240000_20240000) == [0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF]
+
+
+function %bor_not_i128(i64, i64, i64, i64) -> i64, i64 {
+block0(v0: i64, v1: i64, v2: i64, v3: i64):
+    v4 = iconcat v0, v1
+    v5 = iconcat v2, v3
+
+    v6 = bor_not v4, v5
+
+    v7, v8 = isplit v6
+    return v7, v8
+}
+; run: %bor_not_i128(0, 0, 0, 0) == [-1, -1]
+; run: %bor_not_i128(-1, -1, 0, 0) == [-1, -1]
+; run: %bor_not_i128(-1, -1, -1, -1) == [-1, -1]
+; run: %bor_not_i128(-1, 0, 0, -1) == [-1, 0]
+
+; run: %bor_not_i128(0x01234567_89ABCDEF, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210, 0x01234567_89ABCDEF) == [0x01234567_89ABCDEF, 0xFEDCBA98_76543210]
+; run: %bor_not_i128(0x80AAAAAA_80AAAAAA, 0x8A8AAAAA_8A8AAAAA, 0xBFAABBBB_BFAABBBB, 0xABBFAAAA_ABBFAAAA) == [0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF]
+
+
+function %bxor_not_i128(i64, i64, i64, i64) -> i64, i64 {
+block0(v0: i64, v1: i64, v2: i64, v3: i64):
+    v4 = iconcat v0, v1
+    v5 = iconcat v2, v3
+
+    v6 = bxor_not v4, v5
+
+    v7, v8 = isplit v6
+    return v7, v8
+}
+; run: %bxor_not_i128(0, 0, 0, 0) == [-1, -1]
+; run: %bxor_not_i128(-1, -1, 0, 0) == [0, 0]
+; run: %bxor_not_i128(-1, -1, -1, -1) == [-1, -1]
+; run: %bxor_not_i128(-1, -1, 0, -1) == [0, -1]
+
+; run: %bxor_not_i128(0x01234567_89ABCDEF, 0xFEDCBA98_76543210, 0xFEDCBA98_76543210, 0x01234567_89ABCDEF) == [0, 0]
+; run: %bxor_not_i128(0x8FA50A64_8FA50A64, 0x9440A07D_9440A07D, 0xB0A51B75_B0A51B75, 0xB575A07D_B575A07D) == [0xC0FFEEEE_C0FFEEEE, 0xDECAFFFF_DECAFFFF]
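The hex run lines above are constructed so both halves come out to the same
recognizable constants. A quick way to sanity-check them outside the CLIF
interpreter is Rust's native u64 ops (illustrative, not part of the test
suite):

    #[test]
    fn i128_bitops_expected_values() {
        // bnot, low half: !0x3F001111_3F001111 == 0xC0FFEEEE_C0FFEEEE
        assert_eq!(!0x3F001111_3F001111u64, 0xC0FFEEEE_C0FFEEEEu64);
        // band_not is a & !b; low half of the last band_not run line
        assert_eq!(
            0xF1FFFEFE_F1FFFEFEu64 & !0x31001010_31001010u64,
            0xC0FFEEEE_C0FFEEEEu64
        );
        // bxor_not is a ^ !b; low half of the last bxor_not run line
        assert_eq!(
            0x8FA50A64_8FA50A64u64 ^ !0xB0A51B75_B0A51B75u64,
            0xC0FFEEEE_C0FFEEEEu64
        );
    }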

From 2c4d1c0003954778e49f3e9dd3ec0cafc9d5c8dd Mon Sep 17 00:00:00 2001
From: Afonso Bordado
Date: Tue, 8 Jun 2021 10:51:57 +0100
Subject: [PATCH 2/3] aarch64: Add ands instruction encoding

---
 .../codegen/src/isa/aarch64/inst/emit.rs      |  6 ++
 .../src/isa/aarch64/inst/emit_tests.rs        | 68 +++++++++++++++++++
 cranelift/codegen/src/isa/aarch64/inst/mod.rs |  4 ++
 3 files changed, 78 insertions(+)

diff --git a/cranelift/codegen/src/isa/aarch64/inst/emit.rs b/cranelift/codegen/src/isa/aarch64/inst/emit.rs
index 60fedcd0d3..02027d48d4 100644
--- a/cranelift/codegen/src/isa/aarch64/inst/emit.rs
+++ b/cranelift/codegen/src/isa/aarch64/inst/emit.rs
@@ -607,6 +607,8 @@ impl MachInstEmit for Inst {
                     ALUOp::Orr64 => 0b10101010_000,
                     ALUOp::And32 => 0b00001010_000,
                     ALUOp::And64 => 0b10001010_000,
+                    ALUOp::AndS32 => 0b01101010_000,
+                    ALUOp::AndS64 => 0b11101010_000,
                     ALUOp::Eor32 => 0b01001010_000,
                     ALUOp::Eor64 => 0b11001010_000,
                     ALUOp::OrrNot32 => 0b00101010_001,
@@ -694,6 +696,8 @@ impl MachInstEmit for Inst {
                    ALUOp::Orr64 => (0b101_100100, false),
                    ALUOp::And32 => (0b000_100100, false),
                    ALUOp::And64 => (0b100_100100, false),
+                   ALUOp::AndS32 => (0b011_100100, false),
+                   ALUOp::AndS64 => (0b111_100100, false),
                    ALUOp::Eor32 => (0b010_100100, false),
                    ALUOp::Eor64 => (0b110_100100, false),
                    ALUOp::OrrNot32 => (0b001_100100, true),
@@ -763,6 +767,8 @@ impl MachInstEmit for Inst {
                     ALUOp::Orr64 => 0b101_01010000,
                     ALUOp::And32 => 0b000_01010000,
                     ALUOp::And64 => 0b100_01010000,
+                    ALUOp::AndS32 => 0b011_01010000,
+                    ALUOp::AndS64 => 0b111_01010000,
                     ALUOp::Eor32 => 0b010_01010000,
                     ALUOp::Eor64 => 0b110_01010000,
                     ALUOp::OrrNot32 => 0b001_01010001,
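The new table entries follow directly from the A64 logical instruction
layout: ANDS is AND with opc = 0b11 in bits [30:29]. As a worked example of
how the shifted-register entry above produces the "4100036A" test bytes in
the next file, here is the field packing in plain Rust (enc_logical_rrr is
an illustrative helper, not an API in this patch):

    // top11 fills bits [31:21] (sf, opc, 01010, shift, N); the register
    // fields follow below it.
    fn enc_logical_rrr(top11: u32, rm: u32, imm6: u32, rn: u32, rd: u32) -> u32 {
        (top11 << 21) | (rm << 16) | (imm6 << 10) | (rn << 5) | rd
    }

    #[test]
    fn ands_w1_w2_w3() {
        let word = enc_logical_rrr(0b01101010_000, 3, 0, 2, 1);
        // 0x6A030041, stored little-endian as the bytes "4100036A".
        assert_eq!(word, 0x6A030041);
    }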
diff --git a/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs b/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs
index 530269b201..b6fa2125ce 100644
--- a/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs
+++ b/cranelift/codegen/src/isa/aarch64/inst/emit_tests.rs
@@ -151,6 +151,26 @@ fn test_aarch64_binemit() {
         "A400068A",
         "and x4, x5, x6",
     ));
+    insns.push((
+        Inst::AluRRR {
+            alu_op: ALUOp::AndS32,
+            rd: writable_xreg(1),
+            rn: xreg(2),
+            rm: xreg(3),
+        },
+        "4100036A",
+        "ands w1, w2, w3",
+    ));
+    insns.push((
+        Inst::AluRRR {
+            alu_op: ALUOp::AndS64,
+            rd: writable_xreg(4),
+            rn: xreg(5),
+            rm: xreg(6),
+        },
+        "A40006EA",
+        "ands x4, x5, x6",
+    ));
     insns.push((
         Inst::AluRRR {
             alu_op: ALUOp::SubS32,
@@ -648,6 +668,34 @@ fn test_aarch64_binemit() {
         "6A5D0C8A",
         "and x10, x11, x12, LSL 23",
     ));
+    insns.push((
+        Inst::AluRRRShift {
+            alu_op: ALUOp::AndS32,
+            rd: writable_xreg(10),
+            rn: xreg(11),
+            rm: xreg(12),
+            shiftop: ShiftOpAndAmt::new(
+                ShiftOp::LSL,
+                ShiftOpShiftImm::maybe_from_shift(23).unwrap(),
+            ),
+        },
+        "6A5D0C6A",
+        "ands w10, w11, w12, LSL 23",
+    ));
+    insns.push((
+        Inst::AluRRRShift {
+            alu_op: ALUOp::AndS64,
+            rd: writable_xreg(10),
+            rn: xreg(11),
+            rm: xreg(12),
+            shiftop: ShiftOpAndAmt::new(
+                ShiftOp::LSL,
+                ShiftOpShiftImm::maybe_from_shift(23).unwrap(),
+            ),
+        },
+        "6A5D0CEA",
+        "ands x10, x11, x12, LSL 23",
+    ));
     insns.push((
         Inst::AluRRRShift {
            alu_op: ALUOp::Eor32,
@@ -1015,6 +1063,26 @@ fn test_aarch64_binemit() {
         "C7381592",
         "and x7, x6, #288221580125796352",
     ));
+    insns.push((
+        Inst::AluRRImmLogic {
+            alu_op: ALUOp::AndS32,
+            rd: writable_xreg(21),
+            rn: xreg(27),
+            imml: ImmLogic::maybe_from_u64(0x80003fff, I32).unwrap(),
+        },
+        "753B0172",
+        "ands w21, w27, #2147500031",
+    ));
+    insns.push((
+        Inst::AluRRImmLogic {
+            alu_op: ALUOp::AndS64,
+            rd: writable_xreg(7),
+            rn: xreg(6),
+            imml: ImmLogic::maybe_from_u64(0x3fff80003fff800, I64).unwrap(),
+        },
+        "C73815F2",
+        "ands x7, x6, #288221580125796352",
+    ));
     insns.push((
         Inst::AluRRImmLogic {
             alu_op: ALUOp::Orr32,
diff --git a/cranelift/codegen/src/isa/aarch64/inst/mod.rs b/cranelift/codegen/src/isa/aarch64/inst/mod.rs
index ecdf43c6ff..07cac9483b 100644
--- a/cranelift/codegen/src/isa/aarch64/inst/mod.rs
+++ b/cranelift/codegen/src/isa/aarch64/inst/mod.rs
@@ -52,6 +52,8 @@ pub enum ALUOp {
     OrrNot64,
     And32,
     And64,
+    AndS32,
+    AndS64,
     AndNot32,
     AndNot64,
     /// XOR (AArch64 calls this "EOR")
@@ -3186,6 +3188,8 @@ impl Inst {
                 ALUOp::Orr64 => ("orr", OperandSize::Size64),
                 ALUOp::And32 => ("and", OperandSize::Size32),
                 ALUOp::And64 => ("and", OperandSize::Size64),
+                ALUOp::AndS32 => ("ands", OperandSize::Size32),
+                ALUOp::AndS64 => ("ands", OperandSize::Size64),
                 ALUOp::Eor32 => ("eor", OperandSize::Size32),
                 ALUOp::Eor64 => ("eor", OperandSize::Size64),
                 ALUOp::AddS32 => ("adds", OperandSize::Size32),
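One consumer of this encoding is worth noting before the next patch: ANDS
with the zero register as destination is exactly the architectural TST
alias, i.e. it computes rn & operand purely for its NZCV flags. That is what
lets the i128 shift lowering that follows test bit 6 of the shift amount
without consuming a result register. Sketch of the flag condition, assuming
a 64-bit shift amount:

    // "ands xzr, amt, #64" == "tst amt, #0x40": NE holds iff amt & 64 != 0,
    // i.e. the shift amount is 64 or larger (mod 128).
    fn shift_crosses_half(amt: u64) -> bool {
        amt & 0x40 != 0
    }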

From b1475f32a61583c889baa83acee9c24a23ef19e6 Mon Sep 17 00:00:00 2001
From: Afonso Bordado
Date: Sun, 30 May 2021 16:50:22 +0100
Subject: [PATCH 3/3] aarch64: Add ishl,ushr,sshr for i128 values

---
 cranelift/codegen/src/isa/aarch64/lower.rs    | 205 ++++++++++++++++++
 .../codegen/src/isa/aarch64/lower_inst.rs     |  22 +-
 .../filetests/isa/aarch64/bitops.clif         | 132 ++++++++++-
 .../filetests/runtests/i128-arithmetic.clif   |   2 +-
 4 files changed, 357 insertions(+), 4 deletions(-)

diff --git a/cranelift/codegen/src/isa/aarch64/lower.rs b/cranelift/codegen/src/isa/aarch64/lower.rs
index d07311159e..bdece7311d 100644
--- a/cranelift/codegen/src/isa/aarch64/lower.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower.rs
@@ -1266,6 +1266,211 @@ pub(crate) fn lower_load<C: LowerCtx<I = Inst>, F: FnMut(&mut C, Writable<Reg>,
     f(ctx, rd, elem_ty, mem);
 }
 
+pub(crate) fn emit_shl_i128<C: LowerCtx<I = Inst>>(
+    ctx: &mut C,
+    src: ValueRegs<Reg>,
+    dst: ValueRegs<Writable<Reg>>,
+    amt: Reg,
+) {
+    let src_lo = src.regs()[0];
+    let src_hi = src.regs()[1];
+    let dst_lo = dst.regs()[0];
+    let dst_hi = dst.regs()[1];
+
+    // mvn inv_amt, amt
+    // lsr tmp1, src_lo, #1
+    // lsl tmp2, src_hi, amt
+    // lsr tmp1, tmp1, inv_amt
+    // lsl tmp3, src_lo, amt
+    // tst amt, #0x40
+    // orr tmp2, tmp2, tmp1
+    // csel dst_hi, tmp3, tmp2, ne
+    // csel dst_lo, xzr, tmp3, ne
+
+    let xzr = writable_zero_reg();
+    let inv_amt = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp1 = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp2 = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp3 = ctx.alloc_tmp(I64).only_reg().unwrap();
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::OrrNot32,
+        rd: inv_amt,
+        rn: xzr.to_reg(),
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRImmShift {
+        alu_op: ALUOp::Lsr64,
+        rd: tmp1,
+        rn: src_lo,
+        immshift: ImmShift::maybe_from_u64(1).unwrap(),
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Lsl64,
+        rd: tmp2,
+        rn: src_hi,
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Lsr64,
+        rd: tmp1,
+        rn: tmp1.to_reg(),
+        rm: inv_amt.to_reg(),
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Lsl64,
+        rd: tmp3,
+        rn: src_lo,
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRImmLogic {
+        alu_op: ALUOp::AndS64,
+        rd: xzr,
+        rn: amt,
+        imml: ImmLogic::maybe_from_u64(64, I64).unwrap(),
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Orr64,
+        rd: tmp2,
+        rn: tmp2.to_reg(),
+        rm: tmp1.to_reg(),
+    });
+
+    ctx.emit(Inst::CSel {
+        cond: Cond::Ne,
+        rd: dst_hi,
+        rn: tmp3.to_reg(),
+        rm: tmp2.to_reg(),
+    });
+
+    ctx.emit(Inst::CSel {
+        cond: Cond::Ne,
+        rd: dst_lo,
+        rn: xzr.to_reg(),
+        rm: tmp3.to_reg(),
+    });
+}
+
+pub(crate) fn emit_shr_i128<C: LowerCtx<I = Inst>>(
+    ctx: &mut C,
+    src: ValueRegs<Reg>,
+    dst: ValueRegs<Writable<Reg>>,
+    amt: Reg,
+    is_signed: bool,
+) {
+    let src_lo = src.regs()[0];
+    let src_hi = src.regs()[1];
+    let dst_lo = dst.regs()[0];
+    let dst_hi = dst.regs()[1];
+
+    // mvn inv_amt, amt
+    // lsl tmp1, src_hi, #1
+    // lsr tmp2, src_lo, amt
+    // lsl tmp1, tmp1, inv_amt
+    // lsr/asr tmp3, src_hi, amt
+    // tst amt, #0x40
+    // orr tmp2, tmp2, tmp1
+    //
+    // if signed:
+    //     asr tmp4, src_hi, #63
+    //     csel dst_hi, tmp4, tmp3, ne
+    // else:
+    //     csel dst_hi, xzr, tmp3, ne
+    //
+    // csel dst_lo, tmp3, tmp2, ne
+
+    let xzr = writable_zero_reg();
+    let inv_amt = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp1 = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp2 = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp3 = ctx.alloc_tmp(I64).only_reg().unwrap();
+    let tmp4 = ctx.alloc_tmp(I64).only_reg().unwrap();
+
+    let shift_op = if is_signed {
+        ALUOp::Asr64
+    } else {
+        ALUOp::Lsr64
+    };
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::OrrNot32,
+        rd: inv_amt,
+        rn: xzr.to_reg(),
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRImmShift {
+        alu_op: ALUOp::Lsl64,
+        rd: tmp1,
+        rn: src_hi,
+        immshift: ImmShift::maybe_from_u64(1).unwrap(),
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Lsr64,
+        rd: tmp2,
+        rn: src_lo,
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Lsl64,
+        rd: tmp1,
+        rn: tmp1.to_reg(),
+        rm: inv_amt.to_reg(),
+    });
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: shift_op,
+        rd: tmp3,
+        rn: src_hi,
+        rm: amt,
+    });
+
+    ctx.emit(Inst::AluRRImmLogic {
+        alu_op: ALUOp::AndS64,
+        rd: xzr,
+        rn: amt,
+        imml: ImmLogic::maybe_from_u64(64, I64).unwrap(),
+    });
+
+    if is_signed {
+        ctx.emit(Inst::AluRRImmShift {
+            alu_op: ALUOp::Asr64,
+            rd: tmp4,
+            rn: src_hi,
+            immshift: ImmShift::maybe_from_u64(63).unwrap(),
+        });
+    }
+
+    ctx.emit(Inst::AluRRR {
+        alu_op: ALUOp::Orr64,
+        rd: tmp2,
+        rn: tmp2.to_reg(),
+        rm: tmp1.to_reg(),
+    });
+
+    ctx.emit(Inst::CSel {
+        cond: Cond::Ne,
+        rd: dst_hi,
+        rn: if is_signed { tmp4 } else { xzr }.to_reg(),
+        rm: tmp3.to_reg(),
+    });
+
+    ctx.emit(Inst::CSel {
+        cond: Cond::Ne,
+        rd: dst_lo,
+        rn: tmp3.to_reg(),
+        rm: tmp2.to_reg(),
+    });
+}
+
 //=============================================================================
 // Lowering-backend trait implementation.
 
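As a cross-check of the branch-free sequence above, the same left shift can
be written as plain Rust over a (lo, hi) pair (an illustrative model, not
part of the patch); the two arms of the `if` correspond to the two CSELs,
and the ANDS-against-#64 is the `amt & 64` test:

    fn shl_i128(lo: u64, hi: u64, amt: u32) -> (u64, u64) {
        let amt = amt & 127;
        // (lo >> 1) >> (!amt & 63) equals lo >> (64 - amt) for amt in
        // 1..=63 and 0 for amt == 0: the low bits carried into the high
        // half. The 64-bit register shifts only observe amt[5:0].
        let carry = (lo >> 1) >> (!amt & 63);
        let hi_shifted = (hi << (amt & 63)) | carry;
        let lo_shifted = lo << (amt & 63);
        if amt & 64 != 0 {
            // Shift of 64..=127: the whole low half moves into the high half.
            (0, lo_shifted)
        } else {
            (lo_shifted, hi_shifted)
        }
    }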
diff --git a/cranelift/codegen/src/isa/aarch64/lower_inst.rs b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
index a18d24deae..10e8d2e9ac 100644
--- a/cranelift/codegen/src/isa/aarch64/lower_inst.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower_inst.rs
@@ -768,9 +768,26 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
         }
 
         Opcode::Ishl | Opcode::Ushr | Opcode::Sshr => {
+            let out_regs = get_output_reg(ctx, outputs[0]);
             let ty = ty.unwrap();
-            let rd = get_output_reg(ctx, outputs[0]).only_reg().unwrap();
-            if !ty.is_vector() {
+            if ty == I128 {
+                // TODO: We can use immlogic here
+                let src = put_input_in_regs(ctx, inputs[0]);
+                // We can ignore the top half of the shift amount register
+                let amt = put_input_in_regs(ctx, inputs[1]).regs()[0];
+
+                match op {
+                    Opcode::Ishl => emit_shl_i128(ctx, src, out_regs, amt),
+                    Opcode::Ushr => {
+                        emit_shr_i128(ctx, src, out_regs, amt, /* is_signed = */ false)
+                    }
+                    Opcode::Sshr => {
+                        emit_shr_i128(ctx, src, out_regs, amt, /* is_signed = */ true)
+                    }
+                    _ => unreachable!(),
+                };
+            } else if !ty.is_vector() {
+                let rd = out_regs.only_reg().unwrap();
                 let size = OperandSize::from_bits(ty_bits(ty));
                 let narrow_mode = match (op, size) {
                     (Opcode::Ishl, _) => NarrowValueMode::None,
@@ -790,6 +807,7 @@ pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
                 };
                 ctx.emit(alu_inst_immshift(alu_op, rd, rn, rm));
             } else {
+                let rd = out_regs.only_reg().unwrap();
                 let rn = put_input_in_reg(ctx, inputs[0], NarrowValueMode::None);
                 let size = VectorSize::from_ty(ty);
                 let (alu_op, is_right_shift) = match op {
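The right shifts dispatched above admit the same kind of reference model.
`ushr` is this with a logical shift and a zero high half in place of the
sign replication (illustrative model, not part of the patch):

    fn sshr_i128(lo: u64, hi: u64, amt: u32) -> (u64, u64) {
        let amt = amt & 127;
        // (hi << 1) << (!amt & 63) equals hi << (64 - amt) for amt in
        // 1..=63 and 0 for amt == 0: the high bits carried into the low half.
        let carry = (hi << 1) << (!amt & 63);
        let lo_shifted = (lo >> (amt & 63)) | carry;
        let hi_shifted = ((hi as i64) >> (amt & 63)) as u64;
        let sign_fill = ((hi as i64) >> 63) as u64; // tmp4 in the lowering
        if amt & 64 != 0 {
            // Shift of 64..=127: the high half, arithmetically shifted,
            // becomes the low half; the high half is all sign bits.
            (hi_shifted, sign_fill)
        } else {
            (lo_shifted, hi_shifted)
        }
    }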
+; nextln: mov fp, sp
+; nextln: orn w3, wzr, w2
+; nextln: lsl x4, x1, #1
+; nextln: lsr x0, x0, x2
+; nextln: lsl x3, x4, x3
+; nextln: lsr x1, x1, x2
+; nextln: ands xzr, x2, #64
+; nextln: orr x0, x0, x3
+; nextln: csel x2, xzr, x1, ne
+; nextln: csel x0, x1, x0, ne
+; nextln: mov x1, x2
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+
+function %ushr_i128_i128(i128, i128) -> i128 {
+block0(v0: i128, v1: i128):
+    v2 = ushr.i128 v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orn w3, wzr, w2
+; nextln: lsl x4, x1, #1
+; nextln: lsr x0, x0, x2
+; nextln: lsl x3, x4, x3
+; nextln: lsr x1, x1, x2
+; nextln: ands xzr, x2, #64
+; nextln: orr x0, x0, x3
+; nextln: csel x2, xzr, x1, ne
+; nextln: csel x0, x1, x0, ne
+; nextln: mov x1, x2
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+
+function %sshr_i128_i8(i128, i8) -> i128 {
+block0(v0: i128, v1: i8):
+    v2 = sshr.i128 v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orn w3, wzr, w2
+; nextln: lsl x4, x1, #1
+; nextln: lsr x0, x0, x2
+; nextln: lsl x4, x4, x3
+; nextln: asr x3, x1, x2
+; nextln: ands xzr, x2, #64
+; nextln: asr x1, x1, #63
+; nextln: orr x0, x0, x4
+; nextln: csel x1, x1, x3, ne
+; nextln: csel x0, x3, x0, ne
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
+
+
+function %sshr_i128_i128(i128, i128) -> i128 {
+block0(v0: i128, v1: i128):
+    v2 = sshr.i128 v0, v1
+    return v2
+}
+
+; check: stp fp, lr, [sp, #-16]!
+; nextln: mov fp, sp
+; nextln: orn w3, wzr, w2
+; nextln: lsl x4, x1, #1
+; nextln: lsr x0, x0, x2
+; nextln: lsl x4, x4, x3
+; nextln: asr x3, x1, x2
+; nextln: ands xzr, x2, #64
+; nextln: asr x1, x1, #63
+; nextln: orr x0, x0, x4
+; nextln: csel x1, x1, x3, ne
+; nextln: csel x0, x3, x0, ne
+; nextln: ldp fp, lr, [sp], #16
+; nextln: ret
diff --git a/cranelift/filetests/filetests/runtests/i128-arithmetic.clif b/cranelift/filetests/filetests/runtests/i128-arithmetic.clif
index efa31ae42d..f78bdb7ed7 100644
--- a/cranelift/filetests/filetests/runtests/i128-arithmetic.clif
+++ b/cranelift/filetests/filetests/runtests/i128-arithmetic.clif
@@ -1,5 +1,5 @@
 test run
-; target aarch64 TODO: Not yet implemented on aarch64
+target aarch64
 ; target s390x TODO: Not yet implemented on s390x
 target x86_64 machinst
 