diff --git a/cranelift/codegen/src/isa/aarch64/lower.isle b/cranelift/codegen/src/isa/aarch64/lower.isle
index aa90a3546e..01335205e0 100644
--- a/cranelift/codegen/src/isa/aarch64/lower.isle
+++ b/cranelift/codegen/src/isa/aarch64/lower.isle
@@ -30,26 +30,39 @@
 (rule (lower (has_type (fits_in_64 ty) (iadd x y)))
       (value_reg (alu_rrr (iadd_op ty) (put_in_reg x) (put_in_reg y))))
 
-;; Special case for when one operand is an immediate that fits in 12 bits.
+;; Special cases for when one operand is an immediate that fits in 12 bits.
 (rule (lower (has_type (fits_in_64 ty) (iadd x (imm12_from_value y))))
       (value_reg (alu_rr_imm12 (iadd_op ty) (put_in_reg x) y)))
 
-;; Same as the previous special case, except we can switch the addition to a
+(rule (lower (has_type (fits_in_64 ty) (iadd (imm12_from_value x) y)))
+      (value_reg (alu_rr_imm12 (iadd_op ty) (put_in_reg y) x)))
+
+;; Same as the previous special cases, except we can switch the addition to a
 ;; subtraction if the negated immediate fits in 12 bits.
 (rule (lower (has_type (fits_in_64 ty) (iadd x (imm12_from_negated_value y))))
       (value_reg (alu_rr_imm12 (isub_op ty) (put_in_reg x) y)))
 
-;; Special case for when we're adding an extended register where the extending
+(rule (lower (has_type (fits_in_64 ty) (iadd (imm12_from_negated_value x) y)))
+      (value_reg (alu_rr_imm12 (isub_op ty) (put_in_reg y) x)))
+
+;; Special cases for when we're adding an extended register where the extending
 ;; operation can get folded into the add itself.
 (rule (lower (has_type (fits_in_64 ty) (iadd x (extended_value_from_value y))))
       (value_reg (alu_rr_extend_reg (iadd_op ty) (put_in_reg x) y)))
 
-;; Special case for when we're adding the shift of a different
+(rule (lower (has_type (fits_in_64 ty) (iadd (extended_value_from_value x) y)))
+      (value_reg (alu_rr_extend_reg (iadd_op ty) (put_in_reg y) x)))
+
+;; Special cases for when we're adding the shift of a different
 ;; register by a constant amount and the shift can get folded into the add.
 (rule (lower (has_type (fits_in_64 ty)
                        (iadd x (def_inst (ishl y (def_inst (iconst (lshl_from_imm64 <ty> amt))))))))
       (value_reg (alu_rrr_shift (iadd_op ty) (put_in_reg x) (put_in_reg y) amt)))
+
+(rule (lower (has_type (fits_in_64 ty)
+                       (iadd (def_inst (ishl x (def_inst (iconst (lshl_from_imm64 <ty> amt))))) y)))
+      (value_reg (alu_rrr_shift (iadd_op ty) (put_in_reg y) (put_in_reg x) amt)))
[...]
diff --git a/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs b/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs
--- a/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs
+++ b/cranelift/codegen/src/isa/aarch64/lower/isle/generated_code.rs
@@ ... @@ pub fn constructor_lower<C: Context>(ctx: &mut C, arg0: Inst) -> Option<ValueRegs> {
                     let (pattern7_0, pattern7_1) =
                         C::unpack_value_array_2(ctx, &pattern5_1);
-                    // Rule at src/isa/aarch64/lower.isle line 81.
+                    // Rule at src/isa/aarch64/lower.isle line 94.
                     let expr0_0 = C::put_in_regs(ctx, pattern7_0);
                     let expr1_0: usize = 0;
                     let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0);
@@ -1293,7 +1293,7 @@ pub fn constructor_lower<C: Context>(ctx: &mut C, arg0: Inst) -> Option<ValueRegs> {
                     let (pattern7_0, pattern7_1) =
                         C::unpack_value_array_2(ctx, &pattern5_1);
-                    // Rule at src/isa/aarch64/lower.isle line 132.
+                    // Rule at src/isa/aarch64/lower.isle line 145.
                     let expr0_0 = C::put_in_regs(ctx, pattern7_0);
                     let expr1_0: usize = 0;
                     let expr2_0 = C::value_regs_get(ctx, expr0_0, expr1_0);
@@ -1373,7 +1373,7 @@ pub fn constructor_lower<C: Context>(ctx: &mut C, arg0: Inst) -> Option<ValueRegs> {
                     let (pattern7_0, pattern7_1) =
                         C::unpack_value_array_2(ctx, &pattern5_1);
-                    // Rule at src/isa/aarch64/lower.isle line 77.
+                    // Rule at src/isa/aarch64/lower.isle line 90.
                     let expr0_0 = VecALUOp::Add;
                     let expr1_0 = C::put_in_reg(ctx, pattern7_0);
                     let expr2_0 = C::put_in_reg(ctx, pattern7_1);
@@ -1385,7 +1385,7 @@ pub fn constructor_lower<C: Context>(ctx: &mut C, arg0: Inst) -> Option<ValueRegs> {
                     let (pattern7_0, pattern7_1) =
                         C::unpack_value_array_2(ctx, &pattern5_1);
-                    // Rule at src/isa/aarch64/lower.isle line 128.
+                    // Rule at src/isa/aarch64/lower.isle line 141.
                     let expr0_0 = VecALUOp::Sub;
                     let expr1_0 = C::put_in_reg(ctx, pattern7_0);
                     let expr2_0 = C::put_in_reg(ctx, pattern7_1);
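Note: the generated-code hunks above are mechanical; they only refresh the "Rule at ... line N" comments for rules whose source lines moved. The next hunk is the substantive change: the matcher for the new commutative patterns. As a sketch of what the new rule at line 37 enables, a hypothetical filetest like the following should now compile the immediate-on-the-left form down to a single add. The check lines here are an assumption modeled on the extend-op.clif update at the end of this patch, not output captured from this build:

function %iadd_imm_lhs(i64) -> i64 {
block0(v0: i64):
    v1 = iconst.i64 42
    v2 = iadd.i64 v1, v0
    return v2
}

; check: add x0, x0, #42
; nextln: ret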
@@ -1411,27 +1411,127 @@ pub fn constructor_lower<C: Context>(ctx: &mut C, arg0: Inst) -> Option<ValueRegs> {
                 &Opcode::Iadd => {
                     let (pattern7_0, pattern7_1) =
                         C::unpack_value_array_2(ctx, &pattern5_1);
+                    if let Some(pattern8_0) = C::def_inst(ctx, pattern7_0) {
+                        let pattern9_0 = C::inst_data(ctx, pattern8_0);
+                        match &pattern9_0 {
+                            &InstructionData::UnaryImm {
+                                opcode: ref pattern10_0,
+                                imm: pattern10_1,
+                            } => {
+                                if let &Opcode::Iconst = &pattern10_0 {
+                                    let pattern12_0 = C::u64_from_imm64(ctx, pattern10_1);
+                                    if let Some(pattern13_0) =
+                                        C::imm12_from_u64(ctx, pattern12_0)
+                                    {
+                                        // Rule at src/isa/aarch64/lower.isle line 37.
+                                        let expr0_0 = constructor_iadd_op(ctx, pattern3_0)?;
+                                        let expr1_0 = C::put_in_reg(ctx, pattern7_1);
+                                        let expr2_0 = constructor_alu_rr_imm12(
+                                            ctx,
+                                            &expr0_0,
+                                            expr1_0,
+                                            pattern13_0,
+                                        )?;
+                                        let expr3_0 = C::value_reg(ctx, expr2_0);
+                                        return Some(expr3_0);
+                                    }
+                                    if let Some(pattern13_0) =
+                                        C::imm12_from_negated_u64(ctx, pattern12_0)
+                                    {
+                                        // Rule at src/isa/aarch64/lower.isle line 45.
+                                        let expr0_0 = constructor_isub_op(ctx, pattern3_0)?;
+                                        let expr1_0 = C::put_in_reg(ctx, pattern7_1);
+                                        let expr2_0 = constructor_alu_rr_imm12(
+                                            ctx,
+                                            &expr0_0,
+                                            expr1_0,
+                                            pattern13_0,
+                                        )?;
+                                        let expr3_0 = C::value_reg(ctx, expr2_0);
+                                        return Some(expr3_0);
+                                    }
+                                }
+                            }
+                            &InstructionData::Binary {
+                                opcode: ref pattern10_0,
+                                args: ref pattern10_1,
+                            } => {
+                                match &pattern10_0 {
+                                    &Opcode::Imul => {
+                                        let (pattern12_0, pattern12_1) =
+                                            C::unpack_value_array_2(ctx, &pattern10_1);
+                                        // Rule at src/isa/aarch64/lower.isle line 70.
+                                        let expr0_0 = constructor_madd_op(ctx, pattern3_0)?;
+                                        let expr1_0 = C::put_in_reg(ctx, pattern12_0);
+                                        let expr2_0 = C::put_in_reg(ctx, pattern12_1);
+                                        let expr3_0 = C::put_in_reg(ctx, pattern7_1);
+                                        let expr4_0 = constructor_alu_rrrr(
+                                            ctx, &expr0_0, expr1_0, expr2_0, expr3_0,
+                                        )?;
+                                        let expr5_0 = C::value_reg(ctx, expr4_0);
+                                        return Some(expr5_0);
+                                    }
+                                    &Opcode::Ishl => {
+                                        let (pattern12_0, pattern12_1) =
+                                            C::unpack_value_array_2(ctx, &pattern10_1);
+                                        if let Some(pattern13_0) = C::def_inst(ctx, pattern12_1)
+                                        {
+                                            let pattern14_0 = C::inst_data(ctx, pattern13_0);
+                                            if let &InstructionData::UnaryImm {
+                                                opcode: ref pattern15_0,
+                                                imm: pattern15_1,
+                                            } = &pattern14_0
+                                            {
+                                                if let &Opcode::Iconst = &pattern15_0 {
+                                                    let closure17 = || {
+                                                        return Some(pattern3_0);
+                                                    };
+                                                    if let Some(pattern17_0) = closure17() {
+                                                        if let Some(pattern18_0) =
+                                                            C::lshl_from_imm64(
+                                                                ctx,
+                                                                pattern15_1,
+                                                                pattern17_0,
+                                                            )
+                                                        {
+                                                            // Rule at src/isa/aarch64/lower.isle line 62.
+                                                            let expr0_0 = constructor_iadd_op(
+                                                                ctx, pattern3_0,
+                                                            )?;
+                                                            let expr1_0 =
+                                                                C::put_in_reg(ctx, pattern7_1);
+                                                            let expr2_0 =
+                                                                C::put_in_reg(ctx, pattern12_0);
+                                                            let expr3_0 =
+                                                                constructor_alu_rrr_shift(
+                                                                    ctx,
+                                                                    &expr0_0,
+                                                                    expr1_0,
+                                                                    expr2_0,
+                                                                    pattern18_0,
+                                                                )?;
+                                                            let expr4_0 =
+                                                                C::value_reg(ctx, expr3_0);
+                                                            return Some(expr4_0);
+                                                        }
+                                                    }
+                                                }
+                                            }
+                                        }
+                                    }
+                                    _ => {}
+                                }
+                            }
+                            _ => {}
+                        }
+                    }
+                    if let Some(pattern8_0) = C::extended_value_from_value(ctx, pattern7_0) {
+                        // Rule at src/isa/aarch64/lower.isle line 53.
+                        let expr0_0 = constructor_iadd_op(ctx, pattern3_0)?;
+                        let expr1_0 = C::put_in_reg(ctx, pattern7_1);
+                        let expr2_0 =
+                            constructor_alu_rr_extend_reg(ctx, &expr0_0, expr1_0, &pattern8_0)?;
+                        let expr3_0 = C::value_reg(ctx, expr2_0);
+                        return Some(expr3_0);
+                    }
                     if let Some(pattern8_0) = C::def_inst(ctx, pattern7_1) {
                         let pattern9_0 = C::inst_data(ctx, pattern8_0);
                         match &pattern9_0 {
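The hunk above also contains the new commutative multiply-add pattern (the rule at line 70): an imul feeding the first operand of an iadd now folds into madd, just as it already did when feeding the second operand. A hypothetical filetest sketch for that rule follows; the expected madd form comes from the alu_rrrr/madd_op lowering in the generated code, while the exact register assignment in the check line is an assumption:

function %iadd_mul_lhs(i64, i64, i64) -> i64 {
block0(v0: i64, v1: i64, v2: i64):
    v3 = imul.i64 v0, v1
    v4 = iadd.i64 v3, v2
    return v4
}

; check: madd x0, x0, x1, x2
; nextln: ret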
@@ -1459,7 +1559,7 @@ pub fn constructor_lower<C: Context>(ctx: &mut C, arg0: Inst) -> Option<ValueRegs> {
                                     let (pattern12_0, pattern12_1) =
                                         C::unpack_value_array_2(ctx, &pattern10_1);
-                                    // Rule at src/isa/aarch64/lower.isle line 54.
+                                    // Rule at src/isa/aarch64/lower.isle line 67.
                                     let expr0_0 = constructor_madd_op(ctx, pattern3_0)?;
                                     let expr1_0 = C::put_in_reg(ctx, pattern12_0);
                                     let expr2_0 = C::put_in_reg(ctx, pattern12_1);
@@ -1515,7 +1615,7 @@ pub fn constructor_lower<C: Context>(ctx: &mut C, arg0: Inst) -> Option<ValueRegs> {
[...]
@@ ... @@
 pub fn constructor_iadd_op<C: Context>(ctx: &mut C, arg0: Type) -> Option<ALUOp> {
     let pattern0_0 = arg0;
     if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) {
-        // Rule at src/isa/aarch64/lower.isle line 62.
+        // Rule at src/isa/aarch64/lower.isle line 75.
         let expr0_0 = ALUOp::Add32;
         return Some(expr0_0);
     }
-    // Rule at src/isa/aarch64/lower.isle line 63.
+    // Rule at src/isa/aarch64/lower.isle line 76.
     let expr0_0 = ALUOp::Add64;
     return Some(expr0_0);
 }
@@ -1703,11 +1803,11 @@ pub fn constructor_iadd_op<C: Context>(ctx: &mut C, arg0: Type) -> Option<ALUOp>
 pub fn constructor_isub_op<C: Context>(ctx: &mut C, arg0: Type) -> Option<ALUOp> {
     let pattern0_0 = arg0;
     if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) {
-        // Rule at src/isa/aarch64/lower.isle line 67.
+        // Rule at src/isa/aarch64/lower.isle line 80.
         let expr0_0 = ALUOp::Sub32;
         return Some(expr0_0);
     }
-    // Rule at src/isa/aarch64/lower.isle line 68.
+    // Rule at src/isa/aarch64/lower.isle line 81.
     let expr0_0 = ALUOp::Sub64;
     return Some(expr0_0);
 }
@@ -1716,11 +1816,11 @@ pub fn constructor_isub_op<C: Context>(ctx: &mut C, arg0: Type) -> Option<ALUOp>
 pub fn constructor_madd_op<C: Context>(ctx: &mut C, arg0: Type) -> Option<ALUOp3> {
     let pattern0_0 = arg0;
     if let Some(pattern1_0) = C::fits_in_32(ctx, pattern0_0) {
-        // Rule at src/isa/aarch64/lower.isle line 72.
+        // Rule at src/isa/aarch64/lower.isle line 85.
        let expr0_0 = ALUOp3::MAdd32;
         return Some(expr0_0);
     }
-    // Rule at src/isa/aarch64/lower.isle line 73.
+    // Rule at src/isa/aarch64/lower.isle line 86.
     let expr0_0 = ALUOp3::MAdd64;
     return Some(expr0_0);
 }
diff --git a/cranelift/filetests/filetests/isa/aarch64/extend-op.clif b/cranelift/filetests/filetests/isa/aarch64/extend-op.clif
index 58735d5bf8..f8e25d7814 100644
--- a/cranelift/filetests/filetests/isa/aarch64/extend-op.clif
+++ b/cranelift/filetests/filetests/isa/aarch64/extend-op.clif
@@ -10,8 +10,19 @@ block0(v0: i8):
     return v3
 }
 
-; check: movz x1, #42
-; nextln: add x0, x1, x0, SXTB
+; check: sxtb x0, w0
+; nextln: add x0, x0, #42
+; nextln: ret
+
+
+function %f2(i8, i64) -> i64 {
+block0(v0: i8, v1: i64):
+    v2 = sextend.i64 v0
+    v3 = iadd.i64 v2, v1
+    return v3
+}
+
+; check: add x0, x1, x0, SXTB
 ; nextln: ret
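The updated test covers both sides of the change: %f1 (immediate on the left) now uses the plain immediate form of add instead of materializing the constant with movz, and the new %f2 checks that an extended register on the left still folds into the add. The negated-immediate variant (the rule at line 45) is not exercised by the test; a hypothetical sketch of it, with assumed check lines: a small negative constant on the left should now lower to sub with a 12-bit immediate.

function %iadd_negimm_lhs(i64) -> i64 {
block0(v0: i64):
    v1 = iconst.i64 -42
    v2 = iadd.i64 v1, v0
    return v2
}

; check: sub x0, x0, #42
; nextln: ret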