ISLE: remove all uses of argument polarity, and remove it from the language. (#4091)

This PR removes "argument polarity": the feature of ISLE extractors that lets them take
inputs aside from the value to be matched.

Cases that needed this expressivity have been subsumed by the if-let clauses added in #4072;
we can now finally remove this misfeature of the language, which has caused significant
confusion and has always felt like a bit of a hack.

This PR (i) removes the feature from the ISLE compiler; (ii) removes it from the reference
documentation; and (iii) refactors away all uses of the feature in our three existing
backends written in ISLE.
Author: Chris Fallin
Date: 2022-05-02 09:52:12 -07:00
Committed by: GitHub
Parent: c7e2c21bb2
Commit: 03793b71a7
21 changed files with 2123 additions and 2464 deletions
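
For readers unfamiliar with the feature, the refactoring below follows one mechanical pattern: an external extractor declared with a polarity annotation such as `(in out)` becomes a `pure` external constructor, and each rule that previously fed the extractor an input via the `<` pattern syntax instead binds the matched value and converts it in an `if-let` clause, which fails (and lets lowering fall through to the next rule) when the conversion does not apply. A minimal before/after sketch, assembled from the AArch64 `imm_logic_from_u64` helper in the first diff below (all names are taken from that diff):

;; Before: an extractor with argument polarity; `<$I64` passes an input
;; into the extractor from inside the pattern, and `n` binds its output.
(decl imm_logic_from_u64 (Type ImmLogic) u64)
(extern extractor imm_logic_from_u64 imm_logic_from_u64 (in out))
(rule (imm (integral_ty _ty) (imm_logic_from_u64 <$I64 n))
      (orr_imm $I64 (zero_reg) n))

;; After: a pure external constructor; the pattern simply binds the constant
;; `k`, and an if-let clause performs the fallible conversion.
(decl pure imm_logic_from_u64 (Type u64) ImmLogic)
(extern constructor imm_logic_from_u64 imm_logic_from_u64)
(rule (imm (integral_ty _ty) k)
      (if-let n (imm_logic_from_u64 $I64 k))
      (orr_imm $I64 (zero_reg) n))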


@@ -1290,14 +1290,14 @@
(decl move_wide_const_from_negated_u64 (MoveWideConst) u64)
(extern extractor move_wide_const_from_negated_u64 move_wide_const_from_negated_u64)
(decl imm_logic_from_u64 (Type ImmLogic) u64)
(extern extractor imm_logic_from_u64 imm_logic_from_u64 (in out))
(decl pure imm_logic_from_u64 (Type u64) ImmLogic)
(extern constructor imm_logic_from_u64 imm_logic_from_u64)
(decl imm_logic_from_imm64 (Type ImmLogic) Imm64)
(extern extractor imm_logic_from_imm64 imm_logic_from_imm64 (in out))
(decl pure imm_logic_from_imm64 (Type Imm64) ImmLogic)
(extern constructor imm_logic_from_imm64 imm_logic_from_imm64)
(decl imm_shift_from_imm64 (Type ImmShift) Imm64)
(extern extractor imm_shift_from_imm64 imm_shift_from_imm64 (in out))
(decl pure imm_shift_from_imm64 (Type Imm64) ImmShift)
(extern constructor imm_shift_from_imm64 imm_shift_from_imm64)
(decl imm_shift_from_u8 (u8) ImmShift)
(extern constructor imm_shift_from_u8 imm_shift_from_u8)
@@ -1317,8 +1317,8 @@
(decl imm12_from_negated_u64 (Imm12) u64)
(extern extractor imm12_from_negated_u64 imm12_from_negated_u64)
(decl lshl_from_imm64 (Type ShiftOpAndAmt) Imm64)
(extern extractor lshl_from_imm64 lshl_from_imm64 (in out))
(decl pure lshl_from_imm64 (Type Imm64) ShiftOpAndAmt)
(extern constructor lshl_from_imm64 lshl_from_imm64)
(decl integral_ty (Type) Type)
(extern extractor integral_ty integral_ty)
@@ -1330,13 +1330,13 @@
(decl imm12_from_value (Imm12) Value)
(extractor
(imm12_from_value n)
(def_inst (iconst (u64_from_imm64 (imm12_from_u64 n)))))
(iconst (u64_from_imm64 (imm12_from_u64 n))))
;; Same as `imm12_from_value`, but tries negating the constant value.
(decl imm12_from_negated_value (Imm12) Value)
(extractor
(imm12_from_negated_value n)
(def_inst (iconst (u64_from_imm64 (imm12_from_negated_u64 n)))))
(iconst (u64_from_imm64 (imm12_from_negated_u64 n))))
;; Helper type to represent a value and an extend operation fused together.
(type ExtendedValue extern (enum))
@@ -1877,7 +1877,8 @@
(movn n (OperandSize.Size64)))
;; Weird logical-instruction immediate in ORI using zero register
(rule (imm (integral_ty _ty) (imm_logic_from_u64 <$I64 n))
(rule (imm (integral_ty _ty) k)
(if-let n (imm_logic_from_u64 $I64 k))
(orr_imm $I64 (zero_reg) n))
(decl load_constant64_full (u64) Reg)
@@ -1978,29 +1979,35 @@
;; Base case of operating on registers.
(rule (alu_rs_imm_logic_commutative op ty x y)
(alu_rrr op ty (put_in_reg x) (put_in_reg y)))
(alu_rrr op ty x y))
;; Special cases for when one operand is a constant.
(rule (alu_rs_imm_logic_commutative op ty x (def_inst (iconst (imm_logic_from_imm64 <ty imm))))
(alu_rr_imm_logic op ty (put_in_reg x) imm))
(rule (alu_rs_imm_logic_commutative op ty (def_inst (iconst (imm_logic_from_imm64 <ty imm))) x)
(alu_rr_imm_logic op ty (put_in_reg x) imm))
(rule (alu_rs_imm_logic_commutative op ty x (iconst k))
(if-let imm (imm_logic_from_imm64 ty k))
(alu_rr_imm_logic op ty x imm))
(rule (alu_rs_imm_logic_commutative op ty (iconst k) x)
(if-let imm (imm_logic_from_imm64 ty k))
(alu_rr_imm_logic op ty x imm))
;; Special cases for when one operand is shifted left by a constant.
(rule (alu_rs_imm_logic_commutative op ty x (def_inst (ishl y (def_inst (iconst (lshl_from_imm64 <ty amt))))))
(alu_rrr_shift op ty (put_in_reg x) (put_in_reg y) amt))
(rule (alu_rs_imm_logic_commutative op ty (def_inst (ishl x (def_inst (iconst (lshl_from_imm64 <ty amt))))) y)
(alu_rrr_shift op ty (put_in_reg y) (put_in_reg x) amt))
(rule (alu_rs_imm_logic_commutative op ty x (ishl y (iconst k)))
(if-let amt (lshl_from_imm64 ty k))
(alu_rrr_shift op ty x y amt))
(rule (alu_rs_imm_logic_commutative op ty (ishl x (iconst k)) y)
(if-let amt (lshl_from_imm64 ty k))
(alu_rrr_shift op ty y x amt))
;; Same as `alu_rs_imm_logic_commutative` above, except that it doesn't require
;; that the operation is commutative.
(decl alu_rs_imm_logic (ALUOp Type Value Value) Reg)
(rule (alu_rs_imm_logic op ty x y)
(alu_rrr op ty (put_in_reg x) (put_in_reg y)))
(rule (alu_rs_imm_logic op ty x (def_inst (iconst (imm_logic_from_imm64 <ty imm))))
(alu_rr_imm_logic op ty (put_in_reg x) imm))
(rule (alu_rs_imm_logic op ty x (def_inst (ishl y (def_inst (iconst (lshl_from_imm64 <ty amt))))))
(alu_rrr_shift op ty (put_in_reg x) (put_in_reg y) amt))
(alu_rrr op ty x y))
(rule (alu_rs_imm_logic op ty x (iconst k))
(if-let imm (imm_logic_from_imm64 ty k))
(alu_rr_imm_logic op ty x imm))
(rule (alu_rs_imm_logic op ty x (ishl y (iconst k)))
(if-let amt (lshl_from_imm64 ty k))
(alu_rrr_shift op ty x y amt))
;; Helper for generating i128 bitops which simply do the same operation to the
;; hi/lo registers.


@@ -56,11 +56,13 @@
;; Special cases for when we're adding the shift of a different
;; register by a constant amount and the shift can get folded into the add.
(rule (lower (has_type (fits_in_64 ty)
(iadd x (ishl y (iconst (lshl_from_imm64 <ty amt))))))
(iadd x (ishl y (iconst k)))))
(if-let amt (lshl_from_imm64 ty k))
(add_shift ty x y amt))
(rule (lower (has_type (fits_in_64 ty)
(iadd (ishl x (iconst (lshl_from_imm64 <ty amt))) y)))
(iadd (ishl x (iconst k)) y)))
(if-let amt (lshl_from_imm64 ty k))
(add_shift ty y x amt))
;; Fold an `iadd` and `imul` combination into a `madd` instruction.
@@ -122,7 +124,8 @@
;; Finally a special case for when we're subtracting the shift of a different
;; register by a constant amount and the shift can get folded into the sub.
(rule (lower (has_type (fits_in_64 ty)
(isub x (ishl y (iconst (lshl_from_imm64 <ty amt))))))
(isub x (ishl y (iconst k)))))
(if-let amt (lshl_from_imm64 ty k))
(sub_shift ty x y amt))
;; vectors
@@ -568,7 +571,8 @@
;; Special case to use `orr_not_shift` if it's a `bnot` of a const-left-shifted
;; value.
(rule (lower (has_type (fits_in_64 ty)
(bnot (ishl x (iconst (lshl_from_imm64 <ty amt))))))
(bnot (ishl x (iconst k)))))
(if-let amt (lshl_from_imm64 ty k))
(orr_not_shift ty (zero_reg) x amt))
;; Implementation of `bnot` for `i128`.
@@ -737,7 +741,8 @@
;; Note that this rule explicitly has a higher priority than the others
;; to ensure it's attempted first, otherwise the type-based filters on the
;; previous rules seem to take priority over this rule.
(rule 1 (do_shift op ty x (iconst (imm_shift_from_imm64 <ty shift)))
(rule 1 (do_shift op ty x (iconst k))
(if-let shift (imm_shift_from_imm64 ty k))
(alu_rr_imm_shift op ty x shift))
;;;; Rules for `ushr` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -846,7 +851,8 @@
(small_rotr ty (put_in_reg_zext32 x) neg_shift)))
;; Specialization for the 8/16-bit case when the rotation amount is an immediate.
(rule (lower (has_type (fits_in_16 ty) (rotl x (iconst (imm_shift_from_imm64 <ty n)))))
(rule (lower (has_type (fits_in_16 ty) (rotl x (iconst k))))
(if-let n (imm_shift_from_imm64 ty k))
(small_rotr_imm ty (put_in_reg_zext32 x) (negate_imm_shift ty n)))
;; aarch64 doesn't have a left-rotate instruction, but a left rotation of K
@@ -868,11 +874,13 @@
(a64_rotr $I64 x neg_shift)))
;; Specialization for the 32-bit case when the rotation amount is an immediate.
(rule (lower (has_type $I32 (rotl x (iconst (imm_shift_from_imm64 <$I32 n)))))
(rule (lower (has_type $I32 (rotl x (iconst k))))
(if-let n (imm_shift_from_imm64 $I32 k))
(a64_rotr_imm $I32 x (negate_imm_shift $I32 n)))
;; Specialization for the 64-bit case when the rotation amount is an immediate.
(rule (lower (has_type $I64 (rotl x (iconst (imm_shift_from_imm64 <$I64 n)))))
(rule (lower (has_type $I64 (rotl x (iconst k))))
(if-let n (imm_shift_from_imm64 $I64 k))
(a64_rotr_imm $I64 x (negate_imm_shift $I64 n)))
(decl negate_imm_shift (Type ImmShift) ImmShift)
@@ -906,15 +914,18 @@
(a64_rotr $I64 x y))
;; Specialization for the 8/16-bit case when the rotation amount is an immediate.
(rule (lower (has_type (fits_in_16 ty) (rotr x (iconst (imm_shift_from_imm64 <ty n)))))
(rule (lower (has_type (fits_in_16 ty) (rotr x (iconst k))))
(if-let n (imm_shift_from_imm64 ty k))
(small_rotr_imm ty (put_in_reg_zext32 x) n))
;; Specialization for the 32-bit case when the rotation amount is an immediate.
(rule (lower (has_type $I32 (rotr x (iconst (imm_shift_from_imm64 <$I32 n)))))
(rule (lower (has_type $I32 (rotr x (iconst k))))
(if-let n (imm_shift_from_imm64 $I32 k))
(a64_rotr_imm $I32 x n))
;; Specialization for the 64-bit case when the rotation amount is an immediate.
(rule (lower (has_type $I64 (rotr x (iconst (imm_shift_from_imm64 <$I64 n)))))
(rule (lower (has_type $I64 (rotr x (iconst k))))
(if-let n (imm_shift_from_imm64 $I64 k))
(a64_rotr_imm $I64 x n))
;; For a < 32-bit rotate-right, we synthesize this as:


@@ -83,13 +83,13 @@ where
MoveWideConst::maybe_from_u64(!n)
}
fn imm_logic_from_u64(&mut self, n: u64, ty: Type) -> Option<ImmLogic> {
fn imm_logic_from_u64(&mut self, ty: Type, n: u64) -> Option<ImmLogic> {
let ty = if ty.bits() < 32 { I32 } else { ty };
ImmLogic::maybe_from_u64(n, ty)
}
fn imm_logic_from_imm64(&mut self, n: Imm64, ty: Type) -> Option<ImmLogic> {
self.imm_logic_from_u64(n.bits() as u64, ty)
fn imm_logic_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ImmLogic> {
self.imm_logic_from_u64(ty, n.bits() as u64)
}
fn imm12_from_u64(&mut self, n: u64) -> Option<Imm12> {
@@ -104,7 +104,7 @@ where
ImmShift::maybe_from_u64(n.into()).unwrap()
}
fn lshl_from_imm64(&mut self, n: Imm64, ty: Type) -> Option<ShiftOpAndAmt> {
fn lshl_from_imm64(&mut self, ty: Type, n: Imm64) -> Option<ShiftOpAndAmt> {
let shiftimm = ShiftOpShiftImm::maybe_from_shift(n.bits() as u64)?;
let shiftee_bits = ty_bits(ty);
if shiftee_bits <= std::u8::MAX as usize {
@@ -292,7 +292,7 @@ where
ImmLogic::maybe_from_u64(mask, I32).unwrap()
}
fn imm_shift_from_imm64(&mut self, val: Imm64, ty: Type) -> Option<ImmShift> {
fn imm_shift_from_imm64(&mut self, ty: Type, val: Imm64) -> Option<ImmShift> {
let imm_value = (val.bits() as u64) & ((ty.bits() - 1) as u64);
ImmShift::maybe_from_u64(imm_value)
}


@@ -1,4 +1,4 @@
src/clif.isle 443b34b797fc8ace
src/prelude.isle a7915a6b88310eb5
src/isa/aarch64/inst.isle a2c0ae729bfa24a8
src/isa/aarch64/lower.isle 15641ca7f0ac061a
src/isa/aarch64/inst.isle 21a43af20be377d2
src/isa/aarch64/lower.isle 75ad8450963e3829

File diff suppressed because it is too large.


@@ -937,14 +937,18 @@
;; Detect specific integer values
(decl i64_nonequal (i64 i64) i64)
(extern extractor i64_nonequal i64_nonequal (out in))
(decl pure i64_nonequal (i64 i64) i64)
(extern constructor i64_nonequal i64_nonequal)
(decl i64_nonzero (i64) i64)
(extractor (i64_nonzero val) (i64_nonequal val <0))
(decl pure i64_nonzero (i64) i64)
(rule (i64_nonzero x)
(if (i64_nonequal x 0))
x)
(decl i64_not_neg1 (i64) i64)
(extractor (i64_not_neg1 val) (i64_nonequal val <-1))
(decl pure i64_not_neg1 (i64) i64)
(rule (i64_not_neg1 x)
(if (i64_nonequal x -1))
x)
;; Integer type casts (with the rust `as` semantics).
@@ -1116,12 +1120,13 @@
;; Form the sum of two offset values, and check that the result is
;; a valid `MemArg::Symbol` offset (i.e. is even and fits into i32).
(decl memarg_symbol_offset_sum (i64 i32) i64)
(extern extractor memarg_symbol_offset_sum memarg_symbol_offset_sum (in out))
(decl pure memarg_symbol_offset_sum (i64 i64) i32)
(extern constructor memarg_symbol_offset_sum memarg_symbol_offset_sum)
;; Likewise, but just check a single offset value.
(decl memarg_symbol_offset (i32) i64)
(extractor (memarg_symbol_offset offset) (memarg_symbol_offset_sum <0 offset))
(decl pure memarg_symbol_offset (i64) i32)
(rule (memarg_symbol_offset x)
(memarg_symbol_offset_sum x 0))
;; Lower an address into a `MemArg`.
@@ -1130,29 +1135,33 @@
(rule (lower_address flags addr (i64_from_offset offset))
(memarg_reg_plus_off addr offset flags))
(rule (lower_address flags (def_inst (iadd x y)) (i64_from_offset 0))
(rule (lower_address flags (iadd x y) (i64_from_offset 0))
(memarg_reg_plus_reg x y flags))
(rule (lower_address flags
(def_inst (symbol_value (symbol_value_data name (reloc_distance_near) offset)))
(i64_from_offset (memarg_symbol_offset_sum <offset final_offset)))
(symbol_value (symbol_value_data name (reloc_distance_near) sym_offset))
(i64_from_offset offset))
(if-let final_offset (memarg_symbol_offset_sum offset sym_offset))
(memarg_symbol name final_offset flags))
;; Test whether a `load` address will be lowered to a `MemArg::Symbol`.
(decl load_sym (Inst) Inst)
(extractor (load_sym inst)
(and inst
(load _ (def_inst (symbol_value (symbol_value_data _ (reloc_distance_near) offset)))
(i64_from_offset (memarg_symbol_offset_sum <offset _)))))
(decl uload16_sym (Inst) Inst)
(extractor (uload16_sym inst)
(and inst
(uload16 _ (def_inst (symbol_value (symbol_value_data _ (reloc_distance_near) offset)))
(i64_from_offset (memarg_symbol_offset_sum <offset _)))))
(decl pure load_sym (Inst) Inst)
(rule (load_sym inst)
(if-let (load _ (symbol_value (symbol_value_data _ (reloc_distance_near) sym_offset))
(i64_from_offset load_offset))
inst)
(if (memarg_symbol_offset_sum sym_offset load_offset))
inst)
(decl pure uload16_sym (Inst) Inst)
(rule (uload16_sym inst)
(if-let (uload16 _ (symbol_value (symbol_value_data _ (reloc_distance_near) sym_offset))
(i64_from_offset load_offset))
inst)
(if (memarg_symbol_offset_sum sym_offset load_offset))
inst)
;; Helpers for stack-slot addresses ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -1170,11 +1179,11 @@
;; A value that is the result of a sign-extend from a 32-bit value.
(decl sext32_value (Value) Value)
(extractor (sext32_value x) (def_inst (sextend (and x (value_type $I32)))))
(extractor (sext32_value x) (sextend (and x (value_type $I32))))
;; A value that is the result of a zero-extend from a 32-bit value.
(decl zext32_value (Value) Value)
(extractor (zext32_value x) (def_inst (uextend (and x (value_type $I32)))))
(extractor (zext32_value x) (uextend (and x (value_type $I32))))
;; Helpers for sinkable loads ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -1777,8 +1786,8 @@
;; Similarly, because we cannot allocate temp registers, if an instruction
;; requires matching source and destination registers, this needs to be handled
;; by the user. Another helper to verify that constraint.
(decl same_reg (WritableReg) Reg)
(extern extractor same_reg same_reg (in))
(decl pure same_reg (WritableReg Reg) Reg)
(extern constructor same_reg same_reg)
;; Push a `MInst.AluRRR` instruction to a sequence.
(decl push_alu_reg (VecMInstBuilder ALUOp WritableReg Reg Reg) Reg)
@@ -1788,7 +1797,8 @@
;; Push a `MInst.AluRUImm32Shifted` instruction to a sequence.
(decl push_alu_uimm32shifted (VecMInstBuilder ALUOp WritableReg Reg UImm32Shifted) Reg)
(rule (push_alu_uimm32shifted ib op (real_reg dst) (same_reg <dst) imm)
(rule (push_alu_uimm32shifted ib op (real_reg dst) r imm)
(if (same_reg dst r))
(let ((_ Unit (inst_builder_push ib (MInst.AluRUImm32Shifted op dst imm))))
dst))
@@ -1801,7 +1811,8 @@
;; Push a `MInst.RxSBG` instruction to a sequence.
(decl push_rxsbg (VecMInstBuilder RxSBGOp WritableReg Reg Reg u8 u8 i8) Reg)
(rule (push_rxsbg ib op (real_reg dst) (same_reg <dst) src start_bit end_bit rotate_amt)
(rule (push_rxsbg ib op (real_reg dst) r src start_bit end_bit rotate_amt)
(if (same_reg dst r))
(let ((_ Unit (inst_builder_push ib
(MInst.RxSBG op dst src start_bit end_bit rotate_amt))))
dst))
@@ -2088,9 +2099,9 @@
(rule (emit_put_in_reg_zext32 dst (and (value_type (fits_in_16 ty)) (sinkable_load load)))
(emit_zext32_mem dst ty (sink_load load)))
(rule (emit_put_in_reg_zext32 dst val @ (value_type (fits_in_16 ty)))
(emit_zext32_reg dst ty (put_in_reg val)))
(emit_zext32_reg dst ty val))
(rule (emit_put_in_reg_zext32 dst val @ (value_type (ty_32_or_64 ty)))
(emit_mov ty dst (put_in_reg val)))
(emit_mov ty dst val))
;; Place `Value` into destination, sign-extending to 32 bits if smaller. (Non-SSA form.)
(decl emit_put_in_reg_sext32 (WritableReg Value) Unit)
@@ -2099,9 +2110,9 @@
(rule (emit_put_in_reg_sext32 dst (and (value_type (fits_in_16 ty)) (sinkable_load load)))
(emit_sext32_mem dst ty (sink_load load)))
(rule (emit_put_in_reg_sext32 dst val @ (value_type (fits_in_16 ty)))
(emit_sext32_reg dst ty (put_in_reg val)))
(emit_sext32_reg dst ty val))
(rule (emit_put_in_reg_sext32 dst val @ (value_type (ty_32_or_64 ty)))
(emit_mov ty dst (put_in_reg val)))
(emit_mov ty dst val))
;; Place `Value` into destination, zero-extending to 64 bits if smaller. (Non-SSA form.)
(decl emit_put_in_reg_zext64 (WritableReg Value) Unit)
@@ -2110,9 +2121,9 @@
(rule (emit_put_in_reg_zext64 dst (and (value_type (gpr32_ty ty)) (sinkable_load load)))
(emit_zext64_mem dst ty (sink_load load)))
(rule (emit_put_in_reg_zext64 dst val @ (value_type (gpr32_ty ty)))
(emit_zext64_reg dst ty (put_in_reg val)))
(emit_zext64_reg dst ty val))
(rule (emit_put_in_reg_zext64 dst val @ (value_type (gpr64_ty ty)))
(emit_mov ty dst (put_in_reg val)))
(emit_mov ty dst val))
;; Place `Value` into destination, sign-extending to 64 bits if smaller. (Non-SSA form.)
(decl emit_put_in_reg_sext64 (WritableReg Value) Unit)
@@ -2121,9 +2132,9 @@
(rule (emit_put_in_reg_sext64 dst (and (value_type (gpr32_ty ty)) (sinkable_load load)))
(emit_sext64_mem dst ty (sink_load load)))
(rule (emit_put_in_reg_sext64 dst val @ (value_type (gpr32_ty ty)))
(emit_sext64_reg dst ty (put_in_reg val)))
(emit_sext64_reg dst ty val))
(rule (emit_put_in_reg_sext64 dst val @ (value_type (gpr64_ty ty)))
(emit_mov ty dst (put_in_reg val)))
(emit_mov ty dst val))
;; Place `Value` into a register, zero-extending to 32 bits if smaller.
(decl put_in_reg_zext32 (Value) Reg)
@@ -2132,9 +2143,9 @@
(rule (put_in_reg_zext32 (and (value_type (fits_in_16 ty)) (sinkable_load load)))
(zext32_mem ty (sink_load load)))
(rule (put_in_reg_zext32 val @ (value_type (fits_in_16 ty)))
(zext32_reg ty (put_in_reg val)))
(zext32_reg ty val))
(rule (put_in_reg_zext32 val @ (value_type (ty_32_or_64 _ty)))
(put_in_reg val))
val)
;; Place `Value` into a register, sign-extending to 32 bits if smaller.
(decl put_in_reg_sext32 (Value) Reg)
@@ -2143,9 +2154,9 @@
(rule (put_in_reg_sext32 (and (value_type (fits_in_16 ty)) (sinkable_load load)))
(sext32_mem ty (sink_load load)))
(rule (put_in_reg_sext32 val @ (value_type (fits_in_16 ty)))
(sext32_reg ty (put_in_reg val)))
(sext32_reg ty val))
(rule (put_in_reg_sext32 val @ (value_type (ty_32_or_64 _ty)))
(put_in_reg val))
val)
;; Place `Value` into a register, zero-extending to 64 bits if smaller.
(decl put_in_reg_zext64 (Value) Reg)
@@ -2154,9 +2165,9 @@
(rule (put_in_reg_zext64 (and (value_type (gpr32_ty ty)) (sinkable_load load)))
(zext64_mem ty (sink_load load)))
(rule (put_in_reg_zext64 val @ (value_type (gpr32_ty ty)))
(zext64_reg ty (put_in_reg val)))
(zext64_reg ty val))
(rule (put_in_reg_zext64 val @ (value_type (gpr64_ty ty)))
(put_in_reg val))
val)
;; Place `Value` into a register, sign-extending to 64 bits if smaller.
(decl put_in_reg_sext64 (Value) Reg)
@@ -2165,9 +2176,9 @@
(rule (put_in_reg_sext64 (and (value_type (gpr32_ty ty)) (sinkable_load load)))
(sext64_mem ty (sink_load load)))
(rule (put_in_reg_sext64 val @ (value_type (gpr32_ty ty)))
(sext64_reg ty (put_in_reg val)))
(sext64_reg ty val))
(rule (put_in_reg_sext64 val @ (value_type (gpr64_ty ty)))
(put_in_reg val))
val)
;; Place `Value` into the low half of a register pair, zero-extending
;; to 32 bits if smaller. The high half is taken from the input.


@@ -341,7 +341,9 @@
;; If the `avoid_div_traps` flag is true, we perform the check explicitly.
;; This still can be omitted if the divisor is a non-zero immediate.
(decl zero_divisor_check_needed (Value) bool)
(rule (zero_divisor_check_needed (i64_from_value (i64_nonzero _))) $false)
(rule (zero_divisor_check_needed (i64_from_value x))
(if (i64_nonzero x))
$false)
(rule (zero_divisor_check_needed (value_type (allow_div_traps))) $false)
(rule (zero_divisor_check_needed _) $true)
@@ -422,7 +424,9 @@
;; minimum (signed) integer value is divided by -1, so if the divisor
;; is any immediate different from -1, the check can be omitted.
(decl div_overflow_check_needed (Value) bool)
(rule (div_overflow_check_needed (i64_from_value (i64_not_neg1 _))) $false)
(rule (div_overflow_check_needed (i64_from_value x))
(if (i64_not_neg1 x))
$false)
(rule (div_overflow_check_needed _) $true)
;; Perform the integer-overflow check if necessary. This implements:
@@ -1168,7 +1172,8 @@
;; Load the address of a symbol, target reachable via PC-relative instruction.
(rule (lower (symbol_value (symbol_value_data name (reloc_distance_near)
(memarg_symbol_offset offset))))
off)))
(if-let offset (memarg_symbol_offset off))
(load_addr (memarg_symbol name offset (memflags_trusted))))
;; Load the address of a symbol, general case.
@@ -1984,14 +1989,16 @@
;; Note that the ISA only provides instructions with a PC-relative memory
;; address here, so we need to check whether the sinkable load matches this.
(rule (icmpu_val $true x @ (value_type (fits_in_64 ty))
(sinkable_load_16 (load_sym y)))
(sinkable_load_16 ld))
(if-let y (load_sym ld))
(icmpu_mem_zext16 (ty_ext32 ty) (put_in_reg_zext32 x) (sink_load y)))
;; Compare (unsigned) a register and zero-extended memory.
;; Note that the ISA only provides instructions with a PC-relative memory
;; address here, so we need to check whether the sinkable load matches this.
(rule (icmpu_val $true x @ (value_type (fits_in_64 ty))
(sinkable_uload16 (uload16_sym y)))
(sinkable_uload16 ld))
(if-let y (uload16_sym ld))
(icmpu_mem_zext16 ty x (sink_uload16 y)))
(rule (icmpu_val $true x @ (value_type (fits_in_64 ty)) (sinkable_uload32 y))
(icmpu_mem_zext32 ty x (sink_uload32 y)))


@@ -486,9 +486,9 @@ where
}
#[inline]
fn same_reg(&mut self, src: Reg, dst: WritableReg) -> Option<()> {
fn same_reg(&mut self, dst: WritableReg, src: Reg) -> Option<Reg> {
if dst.to_reg() == src {
Some(())
Some(src)
} else {
None
}


@@ -1,4 +1,4 @@
src/clif.isle 443b34b797fc8ace
src/prelude.isle a7915a6b88310eb5
src/isa/s390x/inst.isle 8218bd9e8556446b
src/isa/s390x/lower.isle 6a8de81f8dc4e568
src/isa/s390x/inst.isle 36c2500563cdd4e6
src/isa/s390x/lower.isle e5c946ab8a265b77

File diff suppressed because it is too large.


@@ -793,8 +793,8 @@
;; A helper to both check that the `Imm64` and `Offset32` values sum to less
;; than 32-bits AND return this summed `u32` value. Also, the `Imm64` will be
;; zero-extended from `Type` up to 64 bits. This is useful for `to_amode`.
(decl sum_extend_fits_in_32_bits (Type Imm64 u32) Offset32)
(extern extractor sum_extend_fits_in_32_bits sum_extend_fits_in_32_bits (in in out))
(decl pure sum_extend_fits_in_32_bits (Type Imm64 Offset32) u32)
(extern constructor sum_extend_fits_in_32_bits sum_extend_fits_in_32_bits)
;; To generate an address for a memory access, we can pattern-match various CLIF
;; sub-trees to x64's complex addressing modes (`Amode`). In pseudo-code:
@@ -828,14 +828,18 @@
;; extractor to check that the offset and constant value (`c`, the in
;; parameter), when summed will fit into x64's 32-bit displacement, returned as
;; `sum` (the out parameter). The syntax for this could be improved (TODO).
(rule (to_amode flags (iadd (iconst c) base) _offset @ (sum_extend_fits_in_32_bits <$I64 <c sum))
(rule (to_amode flags (iadd (iconst c) base) offset)
(if-let sum (sum_extend_fits_in_32_bits $I64 c offset))
(amode_imm_reg_flags sum (put_in_gpr base) flags))
(rule (to_amode flags (iadd base (iconst c)) _offset @ (sum_extend_fits_in_32_bits <$I64 <c sum))
(rule (to_amode flags (iadd base (iconst c)) offset)
(if-let sum (sum_extend_fits_in_32_bits $I64 c offset))
(amode_imm_reg_flags sum (put_in_gpr base) flags))
;; ...matches (uextend(iconst c) ...); see notes above.
(rule (to_amode flags (iadd (has_type ty (uextend (iconst c))) base) _offset @ (sum_extend_fits_in_32_bits <ty <c sum))
(rule (to_amode flags (iadd (has_type ty (uextend (iconst c))) base) offset)
(if-let sum (sum_extend_fits_in_32_bits $I64 c offset))
(amode_imm_reg_flags sum (put_in_gpr base) flags))
(rule (to_amode flags (iadd base (has_type ty (uextend (iconst c)))) _offset @ (sum_extend_fits_in_32_bits <ty <c sum))
(rule (to_amode flags (iadd base (has_type ty (uextend (iconst c)))) offset)
(if-let sum (sum_extend_fits_in_32_bits $I64 c offset))
(amode_imm_reg_flags sum (put_in_gpr base) flags))
;; ...else only matches (iadd(a b))
(rule (to_amode flags (iadd base index) offset)


@@ -526,9 +526,9 @@ where
#[inline]
fn sum_extend_fits_in_32_bits(
&mut self,
offset: Offset32,
extend_from_ty: Type,
constant_value: Imm64,
offset: Offset32,
) -> Option<u32> {
let offset: i64 = offset.into();
let constant_value: u64 = constant_value.bits() as u64;


@@ -1,4 +1,4 @@
src/clif.isle 443b34b797fc8ace
src/prelude.isle a7915a6b88310eb5
src/isa/x64/inst.isle a63b8ede292f2e20
src/isa/x64/inst.isle 65f15f51eefe0ce3
src/isa/x64/lower.isle 4c567e9157f84afb

File diff suppressed because it is too large.