diff --git a/cranelift/codegen/meta/src/cdsl/ast.rs b/cranelift/codegen/meta/src/cdsl/ast.rs deleted file mode 100644 index f4f2afe273..0000000000 --- a/cranelift/codegen/meta/src/cdsl/ast.rs +++ /dev/null @@ -1,751 +0,0 @@ -use crate::cdsl::instructions::{InstSpec, Instruction, InstructionPredicate}; -use crate::cdsl::operands::{OperandKind, OperandKindFields}; -use crate::cdsl::types::ValueType; -use crate::cdsl::typevar::{TypeSetBuilder, TypeVar}; - -use cranelift_entity::{entity_impl, PrimaryMap, SparseMap, SparseMapValue}; - -use std::fmt; -use std::iter::IntoIterator; - -pub(crate) enum Expr { - Var(VarIndex), - Literal(Literal), -} - -impl Expr { - pub fn maybe_literal(&self) -> Option<&Literal> { - match &self { - Expr::Literal(lit) => Some(lit), - _ => None, - } - } - - pub fn maybe_var(&self) -> Option { - if let Expr::Var(var) = &self { - Some(*var) - } else { - None - } - } - - pub fn unwrap_var(&self) -> VarIndex { - self.maybe_var() - .expect("tried to unwrap a non-Var content in Expr::unwrap_var") - } - - pub fn to_rust_code(&self, var_pool: &VarPool) -> String { - match self { - Expr::Var(var_index) => var_pool.get(*var_index).to_rust_code(), - Expr::Literal(literal) => literal.to_rust_code(), - } - } -} - -/// An AST definition associates a set of variables with the values produced by an expression. -pub(crate) struct Def { - pub apply: Apply, - pub defined_vars: Vec, -} - -impl Def { - pub fn to_comment_string(&self, var_pool: &VarPool) -> String { - let results = self - .defined_vars - .iter() - .map(|&x| var_pool.get(x).name.as_str()) - .collect::>(); - - let results = if results.len() == 1 { - results[0].to_string() - } else { - format!("({})", results.join(", ")) - }; - - format!("{} := {}", results, self.apply.to_comment_string(var_pool)) - } -} - -pub(crate) struct DefPool { - pool: PrimaryMap, -} - -impl DefPool { - pub fn new() -> Self { - Self { - pool: PrimaryMap::new(), - } - } - pub fn get(&self, index: DefIndex) -> &Def { - self.pool.get(index).unwrap() - } - pub fn next_index(&self) -> DefIndex { - self.pool.next_key() - } - pub fn create_inst(&mut self, apply: Apply, defined_vars: Vec) -> DefIndex { - self.pool.push(Def { - apply, - defined_vars, - }) - } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub(crate) struct DefIndex(u32); -entity_impl!(DefIndex); - -/// A definition which would lead to generate a block creation. -#[derive(Clone)] -pub(crate) struct Block { - /// Instruction index after which the block entry is set. - pub location: DefIndex, - /// Variable holding the new created block. - pub name: VarIndex, -} - -pub(crate) struct BlockPool { - pool: SparseMap, -} - -impl SparseMapValue for Block { - fn key(&self) -> DefIndex { - self.location - } -} - -impl BlockPool { - pub fn new() -> Self { - Self { - pool: SparseMap::new(), - } - } - pub fn get(&self, index: DefIndex) -> Option<&Block> { - self.pool.get(index) - } - pub fn create_block(&mut self, name: VarIndex, location: DefIndex) { - if self.pool.contains_key(location) { - panic!("Attempt to insert 2 blocks after the same instruction") - } - self.pool.insert(Block { location, name }); - } - pub fn is_empty(&self) -> bool { - self.pool.is_empty() - } -} - -// Implement IntoIterator such that we can iterate over blocks which are in the block pool. 
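// Editorial aside, not part of the deleted file: the impl that follows simply forwards
// IntoIterator to the inner SparseMap. A minimal, self-contained sketch of the same
// delegation pattern on a hypothetical Vec-backed pool:
struct Pool<T> {
    items: Vec<T>,
}

impl<'a, T> IntoIterator for &'a Pool<T> {
    type Item = &'a T;
    type IntoIter = std::slice::Iter<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        // Borrowing iteration only; the pool itself is not consumed.
        self.items.iter()
    }
}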
-impl<'a> IntoIterator for &'a BlockPool { - type Item = <&'a SparseMap as IntoIterator>::Item; - type IntoIter = <&'a SparseMap as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.pool.into_iter() - } -} - -#[derive(Clone, Debug)] -pub(crate) enum Literal { - /// A value of an enumerated immediate operand. - /// - /// Some immediate operand kinds like `intcc` and `floatcc` have an enumerated range of values - /// corresponding to a Rust enum type. An `Enumerator` object is an AST leaf node representing one - /// of the values. - Enumerator { - rust_type: &'static str, - value: &'static str, - }, - - /// A bitwise value of an immediate operand, used for bitwise exact floating point constants. - Bits { rust_type: &'static str, value: u64 }, - - /// A value of an integer immediate operand. - Int(i64), - - /// A empty list of variable set of arguments. - EmptyVarArgs, -} - -impl Literal { - pub fn enumerator_for(kind: &OperandKind, value: &'static str) -> Self { - let value = match &kind.fields { - OperandKindFields::ImmEnum(values) => values.get(value).unwrap_or_else(|| { - panic!( - "nonexistent value '{}' in enumeration '{}'", - value, kind.rust_type - ) - }), - _ => panic!("enumerator is for enum values"), - }; - Literal::Enumerator { - rust_type: kind.rust_type, - value, - } - } - - pub fn bits(kind: &OperandKind, bits: u64) -> Self { - match kind.fields { - OperandKindFields::ImmValue => {} - _ => panic!("bits_of is for immediate scalar types"), - } - Literal::Bits { - rust_type: kind.rust_type, - value: bits, - } - } - - pub fn constant(kind: &OperandKind, value: i64) -> Self { - match kind.fields { - OperandKindFields::ImmValue => {} - _ => panic!("constant is for immediate scalar types"), - } - Literal::Int(value) - } - - pub fn empty_vararg() -> Self { - Literal::EmptyVarArgs - } - - pub fn to_rust_code(&self) -> String { - match self { - Literal::Enumerator { rust_type, value } => format!("{}::{}", rust_type, value), - Literal::Bits { rust_type, value } => format!("{}::with_bits({:#x})", rust_type, value), - Literal::Int(val) => val.to_string(), - Literal::EmptyVarArgs => "&[]".into(), - } - } -} - -#[derive(Clone, Copy, Debug)] -pub(crate) enum PatternPosition { - Source, - Destination, -} - -/// A free variable. -/// -/// When variables are used in `XForms` with source and destination patterns, they are classified -/// as follows: -/// -/// Input values: Uses in the source pattern with no preceding def. These may appear as inputs in -/// the destination pattern too, but no new inputs can be introduced. -/// -/// Output values: Variables that are defined in both the source and destination pattern. These -/// values may have uses outside the source pattern, and the destination pattern must compute the -/// same value. -/// -/// Intermediate values: Values that are defined in the source pattern, but not in the destination -/// pattern. These may have uses outside the source pattern, so the defining instruction can't be -/// deleted immediately. -/// -/// Temporary values are defined only in the destination pattern. -pub(crate) struct Var { - pub name: String, - - /// The `Def` defining this variable in a source pattern. - pub src_def: Option, - - /// The `Def` defining this variable in a destination pattern. - pub dst_def: Option, - - /// TypeVar representing the type of this variable. - type_var: Option, - - /// Is this the original type variable, or has it be redefined with set_typevar? 
- is_original_type_var: bool, -} - -impl Var { - fn new(name: String) -> Self { - Self { - name, - src_def: None, - dst_def: None, - type_var: None, - is_original_type_var: false, - } - } - - /// Is this an input value to the src pattern? - pub fn is_input(&self) -> bool { - self.src_def.is_none() && self.dst_def.is_none() - } - - /// Is this an output value, defined in both src and dst patterns? - pub fn is_output(&self) -> bool { - self.src_def.is_some() && self.dst_def.is_some() - } - - /// Is this an intermediate value, defined only in the src pattern? - pub fn is_intermediate(&self) -> bool { - self.src_def.is_some() && self.dst_def.is_none() - } - - /// Is this a temp value, defined only in the dst pattern? - pub fn is_temp(&self) -> bool { - self.src_def.is_none() && self.dst_def.is_some() - } - - /// Get the def of this variable according to the position. - pub fn get_def(&self, position: PatternPosition) -> Option { - match position { - PatternPosition::Source => self.src_def, - PatternPosition::Destination => self.dst_def, - } - } - - pub fn set_def(&mut self, position: PatternPosition, def: DefIndex) { - assert!( - self.get_def(position).is_none(), - "redefinition of variable {}", - self.name - ); - match position { - PatternPosition::Source => { - self.src_def = Some(def); - } - PatternPosition::Destination => { - self.dst_def = Some(def); - } - } - } - - /// Get the type variable representing the type of this variable. - pub fn get_or_create_typevar(&mut self) -> TypeVar { - match &self.type_var { - Some(tv) => tv.clone(), - None => { - // Create a new type var in which we allow all types. - let tv = TypeVar::new( - format!("typeof_{}", self.name), - format!("Type of the pattern variable {:?}", self), - TypeSetBuilder::all(), - ); - self.type_var = Some(tv.clone()); - self.is_original_type_var = true; - tv - } - } - } - pub fn get_typevar(&self) -> Option { - self.type_var.clone() - } - pub fn set_typevar(&mut self, tv: TypeVar) { - self.is_original_type_var = if let Some(previous_tv) = &self.type_var { - *previous_tv == tv - } else { - false - }; - self.type_var = Some(tv); - } - - /// Check if this variable has a free type variable. If not, the type of this variable is - /// computed from the type of another variable. 
- pub fn has_free_typevar(&self) -> bool { - match &self.type_var { - Some(tv) => tv.base.is_none() && self.is_original_type_var, - None => false, - } - } - - pub fn to_rust_code(&self) -> String { - self.name.clone() - } - fn rust_type(&self) -> String { - self.type_var.as_ref().unwrap().to_rust_code() - } -} - -impl fmt::Debug for Var { - fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { - fmt.write_fmt(format_args!( - "Var({}{}{})", - self.name, - if self.src_def.is_some() { ", src" } else { "" }, - if self.dst_def.is_some() { ", dst" } else { "" } - )) - } -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub(crate) struct VarIndex(u32); -entity_impl!(VarIndex); - -pub(crate) struct VarPool { - pool: PrimaryMap, -} - -impl VarPool { - pub fn new() -> Self { - Self { - pool: PrimaryMap::new(), - } - } - pub fn get(&self, index: VarIndex) -> &Var { - self.pool.get(index).unwrap() - } - pub fn get_mut(&mut self, index: VarIndex) -> &mut Var { - self.pool.get_mut(index).unwrap() - } - pub fn create(&mut self, name: impl Into) -> VarIndex { - self.pool.push(Var::new(name.into())) - } -} - -/// Contains constants created in the AST that must be inserted into the true [ConstantPool] when -/// the legalizer code is generated. The constant data is named in the order it is inserted; -/// inserting data using [insert] will avoid duplicates. -/// -/// [ConstantPool]: ../../../cranelift_codegen/ir/constant/struct.ConstantPool.html -/// [insert]: ConstPool::insert -pub(crate) struct ConstPool { - pool: Vec>, -} - -impl ConstPool { - /// Create an empty constant pool. - pub fn new() -> Self { - Self { pool: vec![] } - } - - /// Create a name for a constant from its position in the pool. - fn create_name(position: usize) -> String { - format!("const{}", position) - } - - /// Insert constant data into the pool, returning the name of the variable used to reference it. - /// This method will search for data that matches the new data and return the existing constant - /// name to avoid duplicates. - pub fn insert(&mut self, data: Vec) -> String { - let possible_position = self.pool.iter().position(|d| d == &data); - let position = if let Some(found_position) = possible_position { - found_position - } else { - let new_position = self.pool.len(); - self.pool.push(data); - new_position - }; - ConstPool::create_name(position) - } - - /// Iterate over the name/value pairs in the pool. - pub fn iter(&self) -> impl Iterator)> { - self.pool - .iter() - .enumerate() - .map(|(i, v)| (ConstPool::create_name(i), v)) - } -} - -/// Apply an instruction to arguments. -/// -/// An `Apply` AST expression is created by using function call syntax on instructions. This -/// applies to both bound and unbound polymorphic instructions. -pub(crate) struct Apply { - pub inst: Instruction, - pub args: Vec, - pub value_types: Vec, -} - -impl Apply { - pub fn new(target: InstSpec, args: Vec) -> Self { - let (inst, value_types) = match target { - InstSpec::Inst(inst) => (inst, Vec::new()), - InstSpec::Bound(bound_inst) => (bound_inst.inst, bound_inst.value_types), - }; - - // Apply should only operate on concrete value types, not "any". - let value_types = value_types - .into_iter() - .map(|vt| vt.expect()) - .collect(); - - // Basic check on number of arguments. - assert!( - inst.operands_in.len() == args.len(), - "incorrect number of arguments in instruction {}", - inst.name - ); - - // Check that the kinds of Literals arguments match the expected operand. 
- for &imm_index in &inst.imm_opnums { - let arg = &args[imm_index]; - if let Some(literal) = arg.maybe_literal() { - let op = &inst.operands_in[imm_index]; - match &op.kind.fields { - OperandKindFields::ImmEnum(values) => { - if let Literal::Enumerator { value, .. } = literal { - assert!( - values.iter().any(|(_key, v)| v == value), - "Nonexistent enum value '{}' passed to field of kind '{}' -- \ - did you use the right enum?", - value, - op.kind.rust_type - ); - } else { - panic!( - "Passed non-enum field value {:?} to field of kind {}", - literal, op.kind.rust_type - ); - } - } - OperandKindFields::ImmValue => match &literal { - Literal::Enumerator { value, .. } => panic!( - "Expected immediate value in immediate field of kind '{}', \ - obtained enum value '{}'", - op.kind.rust_type, value - ), - Literal::Bits { .. } | Literal::Int(_) | Literal::EmptyVarArgs => {} - }, - _ => { - panic!( - "Literal passed to non-literal field of kind {}", - op.kind.rust_type - ); - } - } - } - } - - Self { - inst, - args, - value_types, - } - } - - fn to_comment_string(&self, var_pool: &VarPool) -> String { - let args = self - .args - .iter() - .map(|arg| arg.to_rust_code(var_pool)) - .collect::>() - .join(", "); - - let mut inst_and_bound_types = vec![self.inst.name.to_string()]; - inst_and_bound_types.extend(self.value_types.iter().map(|vt| vt.to_string())); - let inst_name = inst_and_bound_types.join("."); - - format!("{}({})", inst_name, args) - } - - pub fn inst_predicate(&self, var_pool: &VarPool) -> InstructionPredicate { - let mut pred = InstructionPredicate::new(); - for (format_field, &op_num) in self - .inst - .format - .imm_fields - .iter() - .zip(self.inst.imm_opnums.iter()) - { - let arg = &self.args[op_num]; - if arg.maybe_var().is_some() { - // Ignore free variables for now. - continue; - } - pred = pred.and(InstructionPredicate::new_is_field_equal_ast( - &*self.inst.format, - format_field, - arg.to_rust_code(var_pool), - )); - } - - // Add checks for any bound secondary type variables. We can't check the controlling type - // variable this way since it may not appear as the type of an operand. - if self.value_types.len() > 1 { - let poly = self - .inst - .polymorphic_info - .as_ref() - .expect("must have polymorphic info if it has bounded types"); - for (bound_type, type_var) in - self.value_types[1..].iter().zip(poly.other_typevars.iter()) - { - pred = pred.and(InstructionPredicate::new_typevar_check( - &self.inst, type_var, bound_type, - )); - } - } - - pred - } - - /// Same as `inst_predicate()`, but also check the controlling type variable. - pub fn inst_predicate_with_ctrl_typevar(&self, var_pool: &VarPool) -> InstructionPredicate { - let mut pred = self.inst_predicate(var_pool); - - if !self.value_types.is_empty() { - let bound_type = &self.value_types[0]; - let poly = self.inst.polymorphic_info.as_ref().unwrap(); - let type_check = if poly.use_typevar_operand { - InstructionPredicate::new_typevar_check(&self.inst, &poly.ctrl_typevar, bound_type) - } else { - InstructionPredicate::new_ctrl_typevar_check(&bound_type) - }; - pred = pred.and(type_check); - } - - pred - } - - pub fn rust_builder(&self, defined_vars: &[VarIndex], var_pool: &VarPool) -> String { - let mut args = self - .args - .iter() - .map(|expr| expr.to_rust_code(var_pool)) - .collect::>() - .join(", "); - - // Do we need to pass an explicit type argument? 
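// Editorial aside, not part of the deleted file: rust_builder() prepends the controlling
// type only when it cannot be inferred from a type-var operand. A standalone sketch of
// that formatting decision, using a hypothetical helper and made-up argument strings:
fn builder_call(snake_name: &str, explicit_ctrl_type: Option<&str>, args: &[&str]) -> String {
    let mut args = args.join(", ");
    if let Some(ty) = explicit_ctrl_type {
        // No operand carries the controlling type variable, so pass the type explicitly.
        args = format!("{}, {}", ty, args);
    }
    format!("{}({})", snake_name, args)
}
// e.g. builder_call("iconst", Some("I32"), &["imm"]) yields "iconst(I32, imm)",
// while builder_call("iadd", None, &["x", "y"]) yields "iadd(x, y)".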
- if let Some(poly) = &self.inst.polymorphic_info { - if !poly.use_typevar_operand { - args = format!("{}, {}", var_pool.get(defined_vars[0]).rust_type(), args); - } - } - - format!("{}({})", self.inst.snake_name(), args) - } -} - -// Simple helpers for legalize actions construction. - -pub(crate) enum DummyExpr { - Var(DummyVar), - Literal(Literal), - Constant(DummyConstant), - Apply(InstSpec, Vec), - Block(DummyVar), -} - -#[derive(Clone)] -pub(crate) struct DummyVar { - pub name: String, -} - -impl Into for DummyVar { - fn into(self) -> DummyExpr { - DummyExpr::Var(self) - } -} -impl Into for Literal { - fn into(self) -> DummyExpr { - DummyExpr::Literal(self) - } -} - -#[derive(Clone)] -pub(crate) struct DummyConstant(pub(crate) Vec); - -impl Into for DummyConstant { - fn into(self) -> DummyExpr { - DummyExpr::Constant(self) - } -} - -pub(crate) fn var(name: &str) -> DummyVar { - DummyVar { - name: name.to_owned(), - } -} - -pub(crate) struct DummyDef { - pub expr: DummyExpr, - pub defined_vars: Vec, -} - -pub(crate) struct ExprBuilder { - expr: DummyExpr, -} - -impl ExprBuilder { - pub fn apply(inst: InstSpec, args: Vec) -> Self { - let expr = DummyExpr::Apply(inst, args); - Self { expr } - } - - pub fn assign_to(self, defined_vars: Vec) -> DummyDef { - DummyDef { - expr: self.expr, - defined_vars, - } - } - - pub fn block(name: DummyVar) -> Self { - let expr = DummyExpr::Block(name); - Self { expr } - } -} - -macro_rules! def_rhs { - // inst(a, b, c) - ($inst:ident($($src:expr),*)) => { - ExprBuilder::apply($inst.into(), vec![$($src.clone().into()),*]) - }; - - // inst.type(a, b, c) - ($inst:ident.$type:ident($($src:expr),*)) => { - ExprBuilder::apply($inst.bind($type).into(), vec![$($src.clone().into()),*]) - }; -} - -// Helper macro to define legalization recipes. -macro_rules! def { - // x = ... - ($dest:ident = $($tt:tt)*) => { - def_rhs!($($tt)*).assign_to(vec![$dest.clone()]) - }; - - // (x, y, ...) = ... - (($($dest:ident),*) = $($tt:tt)*) => { - def_rhs!($($tt)*).assign_to(vec![$($dest.clone()),*]) - }; - - // An instruction with no results. - ($($tt:tt)*) => { - def_rhs!($($tt)*).assign_to(Vec::new()) - } -} - -// Helper macro to define legalization recipes. -macro_rules! block { - // a basic block definition, splitting the current block in 2. 
- ($block: ident) => { - ExprBuilder::block($block).assign_to(Vec::new()) - }; -} - -#[cfg(test)] -mod tests { - use crate::cdsl::ast::ConstPool; - - #[test] - fn const_pool_returns_var_names() { - let mut c = ConstPool::new(); - assert_eq!(c.insert([0, 1, 2].to_vec()), "const0"); - assert_eq!(c.insert([1, 2, 3].to_vec()), "const1"); - } - - #[test] - fn const_pool_avoids_duplicates() { - let data = [0, 1, 2].to_vec(); - let mut c = ConstPool::new(); - assert_eq!(c.pool.len(), 0); - - assert_eq!(c.insert(data.clone()), "const0"); - assert_eq!(c.pool.len(), 1); - - assert_eq!(c.insert(data), "const0"); - assert_eq!(c.pool.len(), 1); - } - - #[test] - fn const_pool_iterates() { - let mut c = ConstPool::new(); - c.insert([0, 1, 2].to_vec()); - c.insert([3, 4, 5].to_vec()); - - let mut iter = c.iter(); - assert_eq!(iter.next(), Some(("const0".to_owned(), &vec![0, 1, 2]))); - assert_eq!(iter.next(), Some(("const1".to_owned(), &vec![3, 4, 5]))); - assert_eq!(iter.next(), None); - } -} diff --git a/cranelift/codegen/meta/src/cdsl/cpu_modes.rs b/cranelift/codegen/meta/src/cdsl/cpu_modes.rs deleted file mode 100644 index e42a27da2a..0000000000 --- a/cranelift/codegen/meta/src/cdsl/cpu_modes.rs +++ /dev/null @@ -1,30 +0,0 @@ -use std::collections::{HashMap, HashSet}; -use std::iter::FromIterator; - -use crate::cdsl::types::ValueType; -use crate::cdsl::xform::TransformGroupIndex; - -pub(crate) struct CpuMode { - pub name: &'static str, - default_legalize: Option, - monomorphic_legalize: Option, - typed_legalize: HashMap, -} - -impl CpuMode { - /// Returns a deterministically ordered, deduplicated list of TransformGroupIndex for the directly - /// reachable set of TransformGroup this TargetIsa uses. - pub fn direct_transform_groups(&self) -> Vec { - let mut set = HashSet::new(); - if let Some(i) = &self.default_legalize { - set.insert(*i); - } - if let Some(i) = &self.monomorphic_legalize { - set.insert(*i); - } - set.extend(self.typed_legalize.values().cloned()); - let mut ret = Vec::from_iter(set); - ret.sort(); - ret - } -} diff --git a/cranelift/codegen/meta/src/cdsl/instructions.rs b/cranelift/codegen/meta/src/cdsl/instructions.rs index f7258ea300..50b3a82cc1 100644 --- a/cranelift/codegen/meta/src/cdsl/instructions.rs +++ b/cranelift/codegen/meta/src/cdsl/instructions.rs @@ -5,7 +5,7 @@ use std::fmt::{Display, Error, Formatter}; use std::rc::Rc; use crate::cdsl::camel_case; -use crate::cdsl::formats::{FormatField, InstructionFormat}; +use crate::cdsl::formats::InstructionFormat; use crate::cdsl::operands::Operand; use crate::cdsl::type_inference::Constraint; use crate::cdsl::types::{LaneType, ReferenceType, ValueType}; @@ -21,46 +21,20 @@ pub(crate) type AllInstructions = PrimaryMap; pub(crate) struct InstructionGroupBuilder<'all_inst> { all_instructions: &'all_inst mut AllInstructions, - own_instructions: Vec, } impl<'all_inst> InstructionGroupBuilder<'all_inst> { pub fn new(all_instructions: &'all_inst mut AllInstructions) -> Self { Self { all_instructions, - own_instructions: Vec::new(), } } pub fn push(&mut self, builder: InstructionBuilder) { let opcode_number = OpcodeNumber(self.all_instructions.next_key().as_u32()); let inst = builder.build(opcode_number); - // Note this clone is cheap, since Instruction is a Rc<> wrapper for InstructionContent. 
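// Editorial aside, not part of the diff: `Instruction` is an `Rc<InstructionContent>`, so
// the clone removed below only bumped a reference count. A minimal illustration of why
// such clones are cheap (hypothetical helper):
use std::rc::Rc;

fn share<T>(value: T) -> (Rc<T>, Rc<T>) {
    let first = Rc::new(value);
    // `Rc::clone` copies the pointer and increments the count; `T` itself is not cloned.
    let second = Rc::clone(&first);
    (first, second)
}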
- self.own_instructions.push(inst.clone()); self.all_instructions.push(inst); } - - pub fn build(self) -> InstructionGroup { - InstructionGroup { - instructions: self.own_instructions, - } - } -} - -/// Every instruction must belong to exactly one instruction group. A given -/// target architecture can support instructions from multiple groups, and it -/// does not necessarily support all instructions in a group. -pub(crate) struct InstructionGroup { - instructions: Vec, -} - -impl InstructionGroup { - pub fn by_name(&self, name: &'static str) -> &Instruction { - self.instructions - .iter() - .find(|inst| inst.name == name) - .unwrap_or_else(|| panic!("instruction with name '{}' does not exist", name)) - } } /// Instructions can have parameters bound to them to specialize them for more specific encodings @@ -143,17 +117,6 @@ impl InstructionContent { &self.name } } - - pub fn all_typevars(&self) -> Vec<&TypeVar> { - match &self.polymorphic_info { - Some(poly) => { - let mut result = vec![&poly.ctrl_typevar]; - result.extend(&poly.other_typevars); - result - } - None => Vec::new(), - } - } } pub(crate) type Instruction = Rc; @@ -375,20 +338,6 @@ impl InstructionBuilder { } } -/// A thin wrapper like Option, but with more precise semantics. -#[derive(Clone)] -pub(crate) enum ValueTypeOrAny { - ValueType(ValueType), -} - -impl ValueTypeOrAny { - pub fn expect(self) -> ValueType { - match self { - ValueTypeOrAny::ValueType(vt) => vt, - } - } -} - /// An parameter used for binding instructions to specific types or values pub(crate) enum BindParameter { Lane(LaneType), @@ -439,7 +388,7 @@ impl Display for Immediate { #[derive(Clone)] pub(crate) struct BoundInstruction { pub inst: Instruction, - pub value_types: Vec, + pub value_types: Vec, pub immediate_values: Vec, } @@ -502,11 +451,11 @@ impl Bindable for BoundInstruction { match parameter.into() { BindParameter::Lane(lane_type) => modified .value_types - .push(ValueTypeOrAny::ValueType(lane_type.into())), + .push(lane_type.into()), BindParameter::Reference(reference_type) => { modified .value_types - .push(ValueTypeOrAny::ValueType(reference_type.into())); + .push(reference_type.into()); } } modified.verify_bindings().unwrap(); @@ -719,206 +668,6 @@ fn is_ctrl_typevar_candidate( Ok(other_typevars) } -#[derive(Clone, Hash, PartialEq, Eq)] -pub(crate) enum FormatPredicateKind { - /// Is the field member equal to the expected value (stored here)? - IsEqual(String), -} - -#[derive(Clone, Hash, PartialEq, Eq)] -pub(crate) struct FormatPredicateNode { - format_name: &'static str, - member_name: &'static str, - kind: FormatPredicateKind, -} - -impl FormatPredicateNode { - fn new_raw( - format: &InstructionFormat, - member_name: &'static str, - kind: FormatPredicateKind, - ) -> Self { - Self { - format_name: format.name, - member_name, - kind, - } - } - - fn rust_predicate(&self) -> String { - match &self.kind { - FormatPredicateKind::IsEqual(arg) => { - format!("predicates::is_equal({}, {})", self.member_name, arg) - } - } - } -} - -#[derive(Clone, Hash, PartialEq, Eq)] -pub(crate) enum TypePredicateNode { - /// Is the value argument (at the index designated by the first member) the same type as the - /// type name (second member)? - TypeVarCheck(usize, String), - - /// Is the controlling type variable the same type as the one designated by the type name - /// (only member)? 
- CtrlTypeVarCheck(String), -} - -impl TypePredicateNode { - fn rust_predicate(&self, func_str: &str) -> String { - match self { - TypePredicateNode::TypeVarCheck(index, value_type_name) => format!( - "{}.dfg.value_type(args[{}]) == {}", - func_str, index, value_type_name - ), - TypePredicateNode::CtrlTypeVarCheck(value_type_name) => { - format!("{}.dfg.ctrl_typevar(inst) == {}", func_str, value_type_name) - } - } - } -} - -/// A basic node in an instruction predicate: either an atom, or an AND of two conditions. -#[derive(Clone, Hash, PartialEq, Eq)] -pub(crate) enum InstructionPredicateNode { - FormatPredicate(FormatPredicateNode), - - TypePredicate(TypePredicateNode), - - /// An AND-combination of two or more other predicates. - And(Vec), -} - -impl InstructionPredicateNode { - fn rust_predicate(&self, func_str: &str) -> String { - match self { - InstructionPredicateNode::FormatPredicate(node) => node.rust_predicate(), - InstructionPredicateNode::TypePredicate(node) => node.rust_predicate(func_str), - InstructionPredicateNode::And(nodes) => nodes - .iter() - .map(|x| x.rust_predicate(func_str)) - .collect::>() - .join(" && "), - } - } -} - -#[derive(Clone, Hash, PartialEq, Eq)] -pub(crate) struct InstructionPredicate { - node: Option, -} - -impl Into for InstructionPredicateNode { - fn into(self) -> InstructionPredicate { - InstructionPredicate { node: Some(self) } - } -} - -impl InstructionPredicate { - pub fn new() -> Self { - Self { node: None } - } - - pub fn new_typevar_check( - inst: &Instruction, - type_var: &TypeVar, - value_type: &ValueType, - ) -> InstructionPredicateNode { - let index = inst - .value_opnums - .iter() - .enumerate() - .find(|(_, &op_num)| inst.operands_in[op_num].type_var().unwrap() == type_var) - .unwrap() - .0; - InstructionPredicateNode::TypePredicate(TypePredicateNode::TypeVarCheck( - index, - value_type.rust_name(), - )) - } - - pub fn new_ctrl_typevar_check(value_type: &ValueType) -> InstructionPredicateNode { - InstructionPredicateNode::TypePredicate(TypePredicateNode::CtrlTypeVarCheck( - value_type.rust_name(), - )) - } - - /// Used only for the AST module, which directly passes in the format field. - pub fn new_is_field_equal_ast( - format: &InstructionFormat, - field: &FormatField, - imm_value: String, - ) -> InstructionPredicateNode { - InstructionPredicateNode::FormatPredicate(FormatPredicateNode::new_raw( - format, - field.member, - FormatPredicateKind::IsEqual(imm_value), - )) - } - - pub fn and(mut self, new_node: InstructionPredicateNode) -> Self { - let node = self.node; - let mut and_nodes = match node { - Some(node) => match node { - InstructionPredicateNode::And(nodes) => nodes, - _ => vec![node], - }, - _ => Vec::new(), - }; - and_nodes.push(new_node); - self.node = Some(InstructionPredicateNode::And(and_nodes)); - self - } - - pub fn rust_predicate(&self, func_str: &str) -> Option { - self.node.as_ref().map(|root| root.rust_predicate(func_str)) - } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub(crate) struct InstructionPredicateNumber(u32); -entity_impl!(InstructionPredicateNumber); - -pub(crate) type InstructionPredicateMap = - PrimaryMap; - -/// An instruction specification, containing an instruction that has bound types or not. 
-pub(crate) enum InstSpec { - Inst(Instruction), - Bound(BoundInstruction), -} - -impl InstSpec { - pub fn inst(&self) -> &Instruction { - match &self { - InstSpec::Inst(inst) => inst, - InstSpec::Bound(bound_inst) => &bound_inst.inst, - } - } -} - -impl Bindable for InstSpec { - fn bind(&self, parameter: impl Into) -> BoundInstruction { - match self { - InstSpec::Inst(inst) => inst.bind(parameter.into()), - InstSpec::Bound(inst) => inst.bind(parameter.into()), - } - } -} - -impl Into for &Instruction { - fn into(self) -> InstSpec { - InstSpec::Inst(self.clone()) - } -} - -impl Into for BoundInstruction { - fn into(self) -> InstSpec { - InstSpec::Bound(self) - } -} - #[cfg(test)] mod test { use super::*; diff --git a/cranelift/codegen/meta/src/cdsl/isa.rs b/cranelift/codegen/meta/src/cdsl/isa.rs index 28844c97fb..7eb7c30517 100644 --- a/cranelift/codegen/meta/src/cdsl/isa.rs +++ b/cranelift/codegen/meta/src/cdsl/isa.rs @@ -1,89 +1,18 @@ -use std::collections::HashSet; -use std::iter::FromIterator; - -use crate::cdsl::cpu_modes::CpuMode; -use crate::cdsl::instructions::InstructionPredicateMap; -use crate::cdsl::recipes::Recipes; use crate::cdsl::regs::IsaRegs; use crate::cdsl::settings::SettingGroup; -use crate::cdsl::xform::{TransformGroupIndex, TransformGroups}; pub(crate) struct TargetIsa { pub name: &'static str, pub settings: SettingGroup, pub regs: IsaRegs, - pub recipes: Recipes, - pub cpu_modes: Vec, - pub encodings_predicates: InstructionPredicateMap, - - /// TransformGroupIndex are global to all the ISAs, while we want to have indices into the - /// local array of transform groups that are directly used. We use this map to get this - /// information. - pub local_transform_groups: Vec, } impl TargetIsa { - pub fn new( - name: &'static str, - settings: SettingGroup, - regs: IsaRegs, - recipes: Recipes, - cpu_modes: Vec, - encodings_predicates: InstructionPredicateMap, - ) -> Self { - // Compute the local TransformGroup index. - let mut local_transform_groups = Vec::new(); - for cpu_mode in &cpu_modes { - let transform_groups = cpu_mode.direct_transform_groups(); - for group_index in transform_groups { - // find() is fine here: the number of transform group is < 5 as of June 2019. - if local_transform_groups - .iter() - .find(|&val| group_index == *val) - .is_none() - { - local_transform_groups.push(group_index); - } - } - } - + pub fn new(name: &'static str, settings: SettingGroup, regs: IsaRegs) -> Self { Self { name, settings, regs, - recipes, - cpu_modes, - encodings_predicates, - local_transform_groups, } } - - /// Returns a deterministically ordered, deduplicated list of TransformGroupIndex for the - /// transitive set of TransformGroup this TargetIsa uses. - pub fn transitive_transform_groups( - &self, - all_groups: &TransformGroups, - ) -> Vec { - let mut set = HashSet::new(); - - for &root in self.local_transform_groups.iter() { - set.insert(root); - let mut base = root; - // Follow the chain of chain_with. - while let Some(chain_with) = &all_groups.get(base).chain_with { - set.insert(*chain_with); - base = *chain_with; - } - } - - let mut vec = Vec::from_iter(set); - vec.sort(); - vec - } - - /// Returns a deterministically ordered, deduplicated list of TransformGroupIndex for the directly - /// reachable set of TransformGroup this TargetIsa uses. 
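// Editorial aside, not part of the deleted file: the transform-group queries above and
// below rely on the same "collect into a set, then sort" idiom to produce a deterministic,
// deduplicated ordering. A standalone sketch over plain u32 indices (hypothetical helper):
use std::collections::HashSet;

fn deterministic_dedup(indices: impl IntoIterator<Item = u32>) -> Vec<u32> {
    // The set removes duplicates; the sort restores a stable, deterministic order.
    let set: HashSet<u32> = indices.into_iter().collect();
    let mut out: Vec<u32> = set.into_iter().collect();
    out.sort();
    out
}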
- pub fn direct_transform_groups(&self) -> &Vec { - &self.local_transform_groups - } } diff --git a/cranelift/codegen/meta/src/cdsl/mod.rs b/cranelift/codegen/meta/src/cdsl/mod.rs index bd08ebfe41..a1fb8d4ea3 100644 --- a/cranelift/codegen/meta/src/cdsl/mod.rs +++ b/cranelift/codegen/meta/src/cdsl/mod.rs @@ -3,20 +3,15 @@ //! This module defines the classes that are used to define Cranelift //! instructions and other entities. -#[macro_use] -pub mod ast; -pub mod cpu_modes; pub mod formats; pub mod instructions; pub mod isa; pub mod operands; -pub mod recipes; pub mod regs; pub mod settings; pub mod type_inference; pub mod types; pub mod typevar; -pub mod xform; /// A macro that converts boolean settings into predicates to look more natural. #[macro_export] diff --git a/cranelift/codegen/meta/src/cdsl/recipes.rs b/cranelift/codegen/meta/src/cdsl/recipes.rs deleted file mode 100644 index ea9ce5f20b..0000000000 --- a/cranelift/codegen/meta/src/cdsl/recipes.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::rc::Rc; - -use cranelift_entity::{entity_impl, PrimaryMap}; - -use crate::cdsl::formats::InstructionFormat; -use crate::cdsl::instructions::InstructionPredicate; -use crate::cdsl::regs::RegClassIndex; -use crate::cdsl::settings::SettingPredicateNumber; - -/// A specific register in a register class. -/// -/// A register is identified by the top-level register class it belongs to and -/// its first register unit. -/// -/// Specific registers are used to describe constraints on instructions where -/// some operands must use a fixed register. -/// -/// Register instances can be created with the constructor, or accessed as -/// attributes on the register class: `GPR.rcx`. -#[derive(Copy, Clone, Hash, PartialEq, Eq)] -pub(crate) struct Register { - pub regclass: RegClassIndex, - pub unit: u8, -} - -/// An operand that must be in a stack slot. -/// -/// A `Stack` object can be used to indicate an operand constraint for a value -/// operand that must live in a stack slot. -#[derive(Copy, Clone, Hash, PartialEq)] -pub(crate) struct Stack { - pub regclass: RegClassIndex, -} - -#[derive(Clone, Hash, PartialEq)] -pub(crate) struct BranchRange { - pub inst_size: u64, - pub range: u64, -} - -#[derive(Copy, Clone, Hash, PartialEq)] -pub(crate) enum OperandConstraint { - RegClass(RegClassIndex), - FixedReg(Register), - TiedInput(usize), - Stack(Stack), -} - -impl Into for RegClassIndex { - fn into(self) -> OperandConstraint { - OperandConstraint::RegClass(self) - } -} - -impl Into for Register { - fn into(self) -> OperandConstraint { - OperandConstraint::FixedReg(self) - } -} - -impl Into for usize { - fn into(self) -> OperandConstraint { - OperandConstraint::TiedInput(self) - } -} - -impl Into for Stack { - fn into(self) -> OperandConstraint { - OperandConstraint::Stack(self) - } -} - -/// A recipe for encoding instructions with a given format. -/// -/// Many different instructions can be encoded by the same recipe, but they -/// must all have the same instruction format. -/// -/// The `operands_in` and `operands_out` arguments are tuples specifying the register -/// allocation constraints for the value operands and results respectively. The -/// possible constraints for an operand are: -/// -/// - A `RegClass` specifying the set of allowed registers. -/// - A `Register` specifying a fixed-register operand. -/// - An integer indicating that this result is tied to a value operand, so -/// they must use the same register. -/// - A `Stack` specifying a value in a stack slot. 
-/// -/// The `branch_range` argument must be provided for recipes that can encode -/// branch instructions. It is an `(origin, bits)` tuple describing the exact -/// range that can be encoded in a branch instruction. -#[derive(Clone)] -pub(crate) struct EncodingRecipe { - /// Short mnemonic name for this recipe. - pub name: String, - - /// Associated instruction format. - pub format: Rc, - - /// Base number of bytes in the binary encoded instruction. - pub base_size: u64, - - /// Tuple of register constraints for value operands. - pub operands_in: Vec, - - /// Tuple of register constraints for results. - pub operands_out: Vec, - - /// Function name to use when computing actual size. - pub compute_size: &'static str, - - /// `(origin, bits)` range for branches. - pub branch_range: Option, - - /// This instruction clobbers `iflags` and `fflags`; true by default. - pub clobbers_flags: bool, - - /// Instruction predicate. - pub inst_predicate: Option, - - /// ISA predicate. - pub isa_predicate: Option, - - /// Rust code for binary emission. - pub emit: Option, -} - -// Implement PartialEq ourselves: take all the fields into account but the name. -impl PartialEq for EncodingRecipe { - fn eq(&self, other: &Self) -> bool { - Rc::ptr_eq(&self.format, &other.format) - && self.base_size == other.base_size - && self.operands_in == other.operands_in - && self.operands_out == other.operands_out - && self.compute_size == other.compute_size - && self.branch_range == other.branch_range - && self.clobbers_flags == other.clobbers_flags - && self.inst_predicate == other.inst_predicate - && self.isa_predicate == other.isa_predicate - && self.emit == other.emit - } -} - -// To allow using it in a hashmap. -impl Eq for EncodingRecipe {} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub(crate) struct EncodingRecipeNumber(u32); -entity_impl!(EncodingRecipeNumber); - -pub(crate) type Recipes = PrimaryMap; - -#[derive(Clone)] -pub(crate) struct EncodingRecipeBuilder { - pub name: String, - format: Rc, - pub base_size: u64, - pub operands_in: Option>, - pub operands_out: Option>, - pub compute_size: Option<&'static str>, - pub branch_range: Option, - pub emit: Option, - clobbers_flags: Option, - inst_predicate: Option, - isa_predicate: Option, -} diff --git a/cranelift/codegen/meta/src/cdsl/type_inference.rs b/cranelift/codegen/meta/src/cdsl/type_inference.rs index e17c305f9c..76fc1284f2 100644 --- a/cranelift/codegen/meta/src/cdsl/type_inference.rs +++ b/cranelift/codegen/meta/src/cdsl/type_inference.rs @@ -1,8 +1,4 @@ -use crate::cdsl::ast::{Def, DefIndex, DefPool, Var, VarIndex, VarPool}; -use crate::cdsl::typevar::{DerivedFunc, TypeSet, TypeVar}; - -use std::collections::{HashMap, HashSet}; -use std::iter::FromIterator; +use crate::cdsl::typevar::TypeVar; #[derive(Debug, Hash, PartialEq, Eq)] pub(crate) enum Constraint { @@ -11,651 +7,4 @@ pub(crate) enum Constraint { /// 1) They have the same number of lanes /// 2) In a lane tv1 has at least as many bits as tv2. WiderOrEq(TypeVar, TypeVar), - - /// Constraint specifying that two derived type vars must have the same runtime type. - Eq(TypeVar, TypeVar), - - /// Constraint specifying that a type var must belong to some typeset. 
- InTypeset(TypeVar, TypeSet), -} - -impl Constraint { - fn translate_with TypeVar>(&self, func: F) -> Constraint { - match self { - Constraint::WiderOrEq(lhs, rhs) => { - let lhs = func(&lhs); - let rhs = func(&rhs); - Constraint::WiderOrEq(lhs, rhs) - } - Constraint::Eq(lhs, rhs) => { - let lhs = func(&lhs); - let rhs = func(&rhs); - Constraint::Eq(lhs, rhs) - } - Constraint::InTypeset(tv, ts) => { - let tv = func(&tv); - Constraint::InTypeset(tv, ts.clone()) - } - } - } - - /// Creates a new constraint by replacing type vars by their hashmap equivalent. - fn translate_with_map( - &self, - original_to_own_typevar: &HashMap<&TypeVar, TypeVar>, - ) -> Constraint { - self.translate_with(|tv| substitute(original_to_own_typevar, tv)) - } - - /// Creates a new constraint by replacing type vars by their canonical equivalent. - fn translate_with_env(&self, type_env: &TypeEnvironment) -> Constraint { - self.translate_with(|tv| type_env.get_equivalent(tv)) - } - - fn is_trivial(&self) -> bool { - match self { - Constraint::WiderOrEq(lhs, rhs) => { - // Trivially true. - if lhs == rhs { - return true; - } - - let ts1 = lhs.get_typeset(); - let ts2 = rhs.get_typeset(); - - // Trivially true. - if ts1.is_wider_or_equal(&ts2) { - return true; - } - - // Trivially false. - if ts1.is_narrower(&ts2) { - return true; - } - - // Trivially false. - if (&ts1.lanes & &ts2.lanes).is_empty() { - return true; - } - - self.is_concrete() - } - Constraint::Eq(lhs, rhs) => lhs == rhs || self.is_concrete(), - Constraint::InTypeset(_, _) => { - // The way InTypeset are made, they would always be trivial if we were applying the - // same logic as the Python code did, so ignore this. - self.is_concrete() - } - } - } - - /// Returns true iff all the referenced type vars are singletons. - fn is_concrete(&self) -> bool { - match self { - Constraint::WiderOrEq(lhs, rhs) => { - lhs.singleton_type().is_some() && rhs.singleton_type().is_some() - } - Constraint::Eq(lhs, rhs) => { - lhs.singleton_type().is_some() && rhs.singleton_type().is_some() - } - Constraint::InTypeset(tv, _) => tv.singleton_type().is_some(), - } - } - - fn typevar_args(&self) -> Vec<&TypeVar> { - match self { - Constraint::WiderOrEq(lhs, rhs) => vec![lhs, rhs], - Constraint::Eq(lhs, rhs) => vec![lhs, rhs], - Constraint::InTypeset(tv, _) => vec![tv], - } - } -} - -#[derive(Clone, Copy)] -enum TypeEnvRank { - Singleton = 5, - Input = 4, - Intermediate = 3, - Output = 2, - Temp = 1, - Internal = 0, -} - -/// Class encapsulating the necessary bookkeeping for type inference. -pub(crate) struct TypeEnvironment { - vars: HashSet, - ranks: HashMap, - equivalency_map: HashMap, - pub constraints: Vec, -} - -impl TypeEnvironment { - fn new() -> Self { - TypeEnvironment { - vars: HashSet::new(), - ranks: HashMap::new(), - equivalency_map: HashMap::new(), - constraints: Vec::new(), - } - } - - fn register(&mut self, var_index: VarIndex, var: &mut Var) { - self.vars.insert(var_index); - let rank = if var.is_input() { - TypeEnvRank::Input - } else if var.is_intermediate() { - TypeEnvRank::Intermediate - } else if var.is_output() { - TypeEnvRank::Output - } else { - assert!(var.is_temp()); - TypeEnvRank::Temp - }; - self.ranks.insert(var.get_or_create_typevar(), rank); - } - - fn add_constraint(&mut self, constraint: Constraint) { - if self.constraints.iter().any(|item| *item == constraint) { - return; - } - - // Check extra conditions for InTypeset constraints. 
- if let Constraint::InTypeset(tv, _) = &constraint { - assert!( - tv.base.is_none(), - "type variable is {:?}, while expecting none", - tv - ); - assert!( - tv.name.starts_with("typeof_"), - "Name \"{}\" should start with \"typeof_\"", - tv.name - ); - } - - self.constraints.push(constraint); - } - - /// Returns the canonical representative of the equivalency class of the given argument, or - /// duplicates it if it's not there yet. - pub fn get_equivalent(&self, tv: &TypeVar) -> TypeVar { - let mut tv = tv; - while let Some(found) = self.equivalency_map.get(tv) { - tv = found; - } - match &tv.base { - Some(parent) => self - .get_equivalent(&parent.type_var) - .derived(parent.derived_func), - None => tv.clone(), - } - } - - /// Get the rank of tv in the partial order: - /// - TVs directly associated with a Var get their rank from the Var (see register()). - /// - Internally generated non-derived TVs implicitly get the lowest rank (0). - /// - Derived variables get their rank from their free typevar. - /// - Singletons have the highest rank. - /// - TVs associated with vars in a source pattern have a higher rank than TVs associated with - /// temporary vars. - fn rank(&self, tv: &TypeVar) -> u8 { - let actual_tv = match tv.base { - Some(_) => tv.free_typevar(), - None => Some(tv.clone()), - }; - - let rank = match actual_tv { - Some(actual_tv) => match self.ranks.get(&actual_tv) { - Some(rank) => Some(*rank), - None => { - assert!( - !actual_tv.name.starts_with("typeof_"), - "variable {} should be explicitly ranked", - actual_tv.name - ); - None - } - }, - None => None, - }; - - let rank = match rank { - Some(rank) => rank, - None => { - if tv.singleton_type().is_some() { - TypeEnvRank::Singleton - } else { - TypeEnvRank::Internal - } - } - }; - - rank as u8 - } - - /// Record the fact that the free tv1 is part of the same equivalence class as tv2. The - /// canonical representative of the merged class is tv2's canonical representative. - fn record_equivalent(&mut self, tv1: TypeVar, tv2: TypeVar) { - assert!(tv1.base.is_none()); - assert!(self.get_equivalent(&tv1) == tv1); - if let Some(tv2_base) = &tv2.base { - // Ensure there are no cycles. - assert!(self.get_equivalent(&tv2_base.type_var) != tv1); - } - self.equivalency_map.insert(tv1, tv2); - } - - /// Get the free typevars in the current type environment. - pub fn free_typevars(&self, var_pool: &mut VarPool) -> Vec { - let mut typevars = Vec::new(); - typevars.extend(self.equivalency_map.keys().cloned()); - typevars.extend( - self.vars - .iter() - .map(|&var_index| var_pool.get_mut(var_index).get_or_create_typevar()), - ); - - let set: HashSet = HashSet::from_iter( - typevars - .iter() - .map(|tv| self.get_equivalent(tv).free_typevar()) - .filter(|opt_tv| { - // Filter out singleton types. - opt_tv.is_some() - }) - .map(|tv| tv.unwrap()), - ); - Vec::from_iter(set) - } - - /// Normalize by collapsing any roots that don't correspond to a concrete type var AND have a - /// single type var derived from them or equivalent to them. - /// - /// e.g. if we have a root of the tree that looks like: - /// - /// typeof_a typeof_b - /// \\ / - /// typeof_x - /// | - /// half_width(1) - /// | - /// 1 - /// - /// we want to collapse the linear path between 1 and typeof_x. 
The resulting graph is: - /// - /// typeof_a typeof_b - /// \\ / - /// typeof_x - fn normalize(&mut self, var_pool: &mut VarPool) { - let source_tvs: HashSet = HashSet::from_iter( - self.vars - .iter() - .map(|&var_index| var_pool.get_mut(var_index).get_or_create_typevar()), - ); - - let mut children: HashMap> = HashMap::new(); - - // Insert all the parents found by the derivation relationship. - for type_var in self.equivalency_map.values() { - if type_var.base.is_none() { - continue; - } - - let parent_tv = type_var.free_typevar(); - if parent_tv.is_none() { - // Ignore this type variable, it's a singleton. - continue; - } - let parent_tv = parent_tv.unwrap(); - - children - .entry(parent_tv) - .or_insert_with(HashSet::new) - .insert(type_var.clone()); - } - - // Insert all the explicit equivalency links. - for (equivalent_tv, canon_tv) in self.equivalency_map.iter() { - children - .entry(canon_tv.clone()) - .or_insert_with(HashSet::new) - .insert(equivalent_tv.clone()); - } - - // Remove links that are straight paths up to typevar of variables. - for free_root in self.free_typevars(var_pool) { - let mut root = &free_root; - while !source_tvs.contains(&root) - && children.contains_key(&root) - && children.get(&root).unwrap().len() == 1 - { - let child = children.get(&root).unwrap().iter().next().unwrap(); - assert_eq!(self.equivalency_map[child], root.clone()); - self.equivalency_map.remove(child); - root = child; - } - } - } - - /// Extract a clean type environment from self, that only mentions type vars associated with - /// real variables. - fn extract(self, var_pool: &mut VarPool) -> TypeEnvironment { - let vars_tv: HashSet = HashSet::from_iter( - self.vars - .iter() - .map(|&var_index| var_pool.get_mut(var_index).get_or_create_typevar()), - ); - - let mut new_equivalency_map: HashMap = HashMap::new(); - for tv in &vars_tv { - let canon_tv = self.get_equivalent(tv); - if *tv != canon_tv { - new_equivalency_map.insert(tv.clone(), canon_tv.clone()); - } - - // Sanity check: the translated type map should only refer to real variables. - assert!(vars_tv.contains(tv)); - let canon_free_tv = canon_tv.free_typevar(); - assert!(canon_free_tv.is_none() || vars_tv.contains(&canon_free_tv.unwrap())); - } - - let mut new_constraints: HashSet = HashSet::new(); - for constraint in &self.constraints { - let constraint = constraint.translate_with_env(&self); - if constraint.is_trivial() || new_constraints.contains(&constraint) { - continue; - } - - // Sanity check: translated constraints should refer only to real variables. - for arg in constraint.typevar_args() { - let arg_free_tv = arg.free_typevar(); - assert!(arg_free_tv.is_none() || vars_tv.contains(&arg_free_tv.unwrap())); - } - - new_constraints.insert(constraint); - } - - TypeEnvironment { - vars: self.vars, - ranks: self.ranks, - equivalency_map: new_equivalency_map, - constraints: Vec::from_iter(new_constraints), - } - } -} - -/// Replaces an external type variable according to the following rules: -/// - if a local copy is present in the map, return it. -/// - or if it's derived, create a local derived one that recursively substitutes the parent. -/// - or return itself. 
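// Editorial aside, not part of the deleted file: `substitute` below follows a
// lookup / recurse-on-parent / identity-fallback shape. A toy version on a hypothetical
// `Ty` type, to make the recursion explicit:
use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq, Hash)]
enum Ty {
    Base(String),
    Derived(Box<Ty>, &'static str),
}

fn subst(map: &HashMap<Ty, Ty>, ty: &Ty) -> Ty {
    if let Some(local) = map.get(ty) {
        // A local copy is present in the map: return it.
        return local.clone();
    }
    match ty {
        // Derived: substitute the parent recursively and keep the derivation.
        Ty::Derived(parent, func) => Ty::Derived(Box::new(subst(map, parent)), *func),
        // Otherwise return the variable itself.
        Ty::Base(_) => ty.clone(),
    }
}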
-fn substitute(map: &HashMap<&TypeVar, TypeVar>, external_type_var: &TypeVar) -> TypeVar { - match map.get(&external_type_var) { - Some(own_type_var) => own_type_var.clone(), - None => match &external_type_var.base { - Some(parent) => { - let parent_substitute = substitute(map, &parent.type_var); - TypeVar::derived(&parent_substitute, parent.derived_func) - } - None => external_type_var.clone(), - }, - } -} - -/// Normalize a (potentially derived) typevar using the following rules: -/// -/// - vector and width derived functions commute -/// {HALF,DOUBLE}VECTOR({HALF,DOUBLE}WIDTH(base)) -> -/// {HALF,DOUBLE}WIDTH({HALF,DOUBLE}VECTOR(base)) -/// -/// - half/double pairs collapse -/// {HALF,DOUBLE}WIDTH({DOUBLE,HALF}WIDTH(base)) -> base -/// {HALF,DOUBLE}VECTOR({DOUBLE,HALF}VECTOR(base)) -> base -fn canonicalize_derivations(tv: TypeVar) -> TypeVar { - let base = match &tv.base { - Some(base) => base, - None => return tv, - }; - - let derived_func = base.derived_func; - - if let Some(base_base) = &base.type_var.base { - let base_base_tv = &base_base.type_var; - match (derived_func, base_base.derived_func) { - (DerivedFunc::HalfWidth, DerivedFunc::DoubleWidth) - | (DerivedFunc::DoubleWidth, DerivedFunc::HalfWidth) - | (DerivedFunc::HalfVector, DerivedFunc::DoubleVector) - | (DerivedFunc::DoubleVector, DerivedFunc::HalfVector) => { - // Cancelling bijective transformations. This doesn't hide any overflow issues - // since derived type sets are checked upon derivaion, and base typesets are only - // allowed to shrink. - return canonicalize_derivations(base_base_tv.clone()); - } - (DerivedFunc::HalfWidth, DerivedFunc::HalfVector) - | (DerivedFunc::HalfWidth, DerivedFunc::DoubleVector) - | (DerivedFunc::DoubleWidth, DerivedFunc::DoubleVector) - | (DerivedFunc::DoubleWidth, DerivedFunc::HalfVector) => { - // Arbitrarily put WIDTH derivations before VECTOR derivations, since they commute. - return canonicalize_derivations( - base_base_tv - .derived(derived_func) - .derived(base_base.derived_func), - ); - } - _ => {} - }; - } - - canonicalize_derivations(base.type_var.clone()).derived(derived_func) -} - -/// Given typevars tv1 and tv2 (which could be derived from one another), constrain their typesets -/// to be the same. When one is derived from the other, repeat the constrain process until -/// a fixed point is reached. -fn constrain_fixpoint(tv1: &TypeVar, tv2: &TypeVar) { - loop { - let old_tv1_ts = tv1.get_typeset().clone(); - tv2.constrain_types(tv1.clone()); - if tv1.get_typeset() == old_tv1_ts { - break; - } - } - - let old_tv2_ts = tv2.get_typeset(); - tv1.constrain_types(tv2.clone()); - // The above loop should ensure that all reference cycles have been handled. - assert!(old_tv2_ts == tv2.get_typeset()); -} - -/// Unify tv1 and tv2 in the given type environment. tv1 must have a rank greater or equal to tv2's -/// one, modulo commutations. -fn unify(tv1: &TypeVar, tv2: &TypeVar, type_env: &mut TypeEnvironment) -> Result<(), String> { - let tv1 = canonicalize_derivations(type_env.get_equivalent(tv1)); - let tv2 = canonicalize_derivations(type_env.get_equivalent(tv2)); - - if tv1 == tv2 { - // Already unified. - return Ok(()); - } - - if type_env.rank(&tv2) < type_env.rank(&tv1) { - // Make sure tv1 always has the smallest rank, since real variables have the higher rank - // and we want them to be the canonical representatives of their equivalency classes. 
- return unify(&tv2, &tv1, type_env); - } - - constrain_fixpoint(&tv1, &tv2); - - if tv1.get_typeset().size() == 0 || tv2.get_typeset().size() == 0 { - return Err(format!( - "Error: empty type created when unifying {} and {}", - tv1.name, tv2.name - )); - } - - let base = match &tv1.base { - Some(base) => base, - None => { - type_env.record_equivalent(tv1, tv2); - return Ok(()); - } - }; - - if let Some(inverse) = base.derived_func.inverse() { - return unify(&base.type_var, &tv2.derived(inverse), type_env); - } - - type_env.add_constraint(Constraint::Eq(tv1, tv2)); - Ok(()) -} - -/// Perform type inference on one Def in the current type environment and return an updated type -/// environment or error. -/// -/// At a high level this works by creating fresh copies of each formal type var in the Def's -/// instruction's signature, and unifying the formal typevar with the corresponding actual typevar. -fn infer_definition( - def: &Def, - var_pool: &mut VarPool, - type_env: TypeEnvironment, - last_type_index: &mut usize, -) -> Result { - let apply = &def.apply; - let inst = &apply.inst; - - let mut type_env = type_env; - let free_formal_tvs = inst.all_typevars(); - - let mut original_to_own_typevar: HashMap<&TypeVar, TypeVar> = HashMap::new(); - for &tv in &free_formal_tvs { - assert!(original_to_own_typevar - .insert( - tv, - TypeVar::copy_from(tv, format!("own_{}", last_type_index)) - ) - .is_none()); - *last_type_index += 1; - } - - // Update the mapping with any explicity bound type vars: - for (i, value_type) in apply.value_types.iter().enumerate() { - let singleton = TypeVar::new_singleton(value_type.clone()); - assert!(original_to_own_typevar - .insert(free_formal_tvs[i], singleton) - .is_some()); - } - - // Get fresh copies for each typevar in the signature (both free and derived). - let mut formal_tvs = Vec::new(); - formal_tvs.extend(inst.value_results.iter().map(|&i| { - substitute( - &original_to_own_typevar, - inst.operands_out[i].type_var().unwrap(), - ) - })); - formal_tvs.extend(inst.value_opnums.iter().map(|&i| { - substitute( - &original_to_own_typevar, - inst.operands_in[i].type_var().unwrap(), - ) - })); - - // Get the list of actual vars. - let mut actual_vars = Vec::new(); - actual_vars.extend(inst.value_results.iter().map(|&i| def.defined_vars[i])); - actual_vars.extend( - inst.value_opnums - .iter() - .map(|&i| apply.args[i].unwrap_var()), - ); - - // Get the list of the actual TypeVars. - let mut actual_tvs = Vec::new(); - for var_index in actual_vars { - let var = var_pool.get_mut(var_index); - type_env.register(var_index, var); - actual_tvs.push(var.get_or_create_typevar()); - } - - // Make sure we start unifying with the control type variable first, by putting it at the - // front of both vectors. - if let Some(poly) = &inst.polymorphic_info { - let own_ctrl_tv = &original_to_own_typevar[&poly.ctrl_typevar]; - let ctrl_index = formal_tvs.iter().position(|tv| tv == own_ctrl_tv).unwrap(); - if ctrl_index != 0 { - formal_tvs.swap(0, ctrl_index); - actual_tvs.swap(0, ctrl_index); - } - } - - // Unify each actual type variable with the corresponding formal type variable. - for (actual_tv, formal_tv) in actual_tvs.iter().zip(&formal_tvs) { - if let Err(msg) = unify(actual_tv, formal_tv, &mut type_env) { - return Err(format!( - "fail ti on {} <: {}: {}", - actual_tv.name, formal_tv.name, msg - )); - } - } - - // Add any instruction specific constraints. 
- for constraint in &inst.constraints { - type_env.add_constraint(constraint.translate_with_map(&original_to_own_typevar)); - } - - Ok(type_env) -} - -/// Perform type inference on an transformation. Return an updated type environment or error. -pub(crate) fn infer_transform( - src: DefIndex, - dst: &[DefIndex], - def_pool: &DefPool, - var_pool: &mut VarPool, -) -> Result { - let mut type_env = TypeEnvironment::new(); - let mut last_type_index = 0; - - // Execute type inference on the source pattern. - type_env = infer_definition(def_pool.get(src), var_pool, type_env, &mut last_type_index) - .map_err(|err| format!("In src pattern: {}", err))?; - - // Collect the type sets once after applying the source patterm; we'll compare the typesets - // after we've also considered the destination pattern, and will emit supplementary InTypeset - // checks if they don't match. - let src_typesets = type_env - .vars - .iter() - .map(|&var_index| { - let var = var_pool.get_mut(var_index); - let tv = type_env.get_equivalent(&var.get_or_create_typevar()); - (var_index, tv.get_typeset()) - }) - .collect::>(); - - // Execute type inference on the destination pattern. - for (i, &def_index) in dst.iter().enumerate() { - let def = def_pool.get(def_index); - type_env = infer_definition(def, var_pool, type_env, &mut last_type_index) - .map_err(|err| format!("line {}: {}", i, err))?; - } - - for (var_index, src_typeset) in src_typesets { - let var = var_pool.get(var_index); - if !var.has_free_typevar() { - continue; - } - let tv = type_env.get_equivalent(&var.get_typevar().unwrap()); - let new_typeset = tv.get_typeset(); - assert!( - new_typeset.is_subset(&src_typeset), - "type sets can only get narrower" - ); - if new_typeset != src_typeset { - type_env.add_constraint(Constraint::InTypeset(tv.clone(), new_typeset.clone())); - } - } - - type_env.normalize(var_pool); - - Ok(type_env.extract(var_pool)) } diff --git a/cranelift/codegen/meta/src/cdsl/typevar.rs b/cranelift/codegen/meta/src/cdsl/typevar.rs index c1027bf847..af1ba966a4 100644 --- a/cranelift/codegen/meta/src/cdsl/typevar.rs +++ b/cranelift/codegen/meta/src/cdsl/typevar.rs @@ -1,5 +1,5 @@ use std::cell::RefCell; -use std::collections::{BTreeSet, HashSet}; +use std::collections::BTreeSet; use std::fmt; use std::hash; use std::iter::FromIterator; @@ -269,52 +269,6 @@ impl TypeVar { pub fn merge_lanes(&self) -> TypeVar { self.derived(DerivedFunc::MergeLanes) } - - /// Constrain the range of types this variable can assume to a subset of those in the typeset - /// ts. - /// May mutate itself if it's not derived, or its parent if it is. - pub fn constrain_types_by_ts(&self, type_set: TypeSet) { - match &self.base { - Some(base) => { - base.type_var - .constrain_types_by_ts(type_set.preimage(base.derived_func)); - } - None => { - self.content - .borrow_mut() - .type_set - .inplace_intersect_with(&type_set); - } - } - } - - /// Constrain the range of types this variable can assume to a subset of those `other` can - /// assume. - /// May mutate itself if it's not derived, or its parent if it is. - pub fn constrain_types(&self, other: TypeVar) { - if self == &other { - return; - } - self.constrain_types_by_ts(other.get_typeset()); - } - - /// Get a Rust expression that computes the type of this type variable. 
- pub fn to_rust_code(&self) -> String { - match &self.base { - Some(base) => format!( - "{}.{}().unwrap()", - base.type_var.to_rust_code(), - base.derived_func.name() - ), - None => { - if let Some(singleton) = self.singleton_type() { - singleton.rust_name() - } else { - self.name.clone() - } - } - } - } } impl Into for &TypeVar { @@ -392,19 +346,6 @@ impl DerivedFunc { DerivedFunc::MergeLanes => "merge_lanes", } } - - /// Returns the inverse function of this one, if it is a bijection. - pub fn inverse(self) -> Option { - match self { - DerivedFunc::HalfWidth => Some(DerivedFunc::DoubleWidth), - DerivedFunc::DoubleWidth => Some(DerivedFunc::HalfWidth), - DerivedFunc::HalfVector => Some(DerivedFunc::DoubleVector), - DerivedFunc::DoubleVector => Some(DerivedFunc::HalfVector), - DerivedFunc::MergeLanes => Some(DerivedFunc::SplitLanes), - DerivedFunc::SplitLanes => Some(DerivedFunc::MergeLanes), - _ => None, - } - } } #[derive(Debug, Hash)] @@ -594,94 +535,6 @@ impl TypeSet { assert_eq!(types.len(), 1); types.remove(0) } - - /// Return the inverse image of self across the derived function func. - fn preimage(&self, func: DerivedFunc) -> TypeSet { - if self.size() == 0 { - // The inverse of the empty set is itself. - return self.clone(); - } - - match func { - DerivedFunc::LaneOf => { - let mut copy = self.clone(); - copy.lanes = - NumSet::from_iter((0..=MAX_LANES.trailing_zeros()).map(|i| u16::pow(2, i))); - copy - } - DerivedFunc::AsBool => { - let mut copy = self.clone(); - if self.bools.contains(&1) { - copy.ints = NumSet::from_iter(vec![8, 16, 32, 64, 128]); - copy.floats = NumSet::from_iter(vec![32, 64]); - } else { - copy.ints = &self.bools - &NumSet::from_iter(vec![1]); - copy.floats = &self.bools & &NumSet::from_iter(vec![32, 64]); - // If b1 is not in our typeset, than lanes=1 cannot be in the pre-image, as - // as_bool() of scalars is always b1. 
- copy.lanes = &self.lanes - &NumSet::from_iter(vec![1]); - } - copy - } - DerivedFunc::HalfWidth => self.double_width(), - DerivedFunc::DoubleWidth => self.half_width(), - DerivedFunc::HalfVector => self.double_vector(), - DerivedFunc::DoubleVector => self.half_vector(), - DerivedFunc::SplitLanes => self.double_width().half_vector(), - DerivedFunc::MergeLanes => self.half_width().double_vector(), - } - } - - pub fn inplace_intersect_with(&mut self, other: &TypeSet) { - self.lanes = &self.lanes & &other.lanes; - self.ints = &self.ints & &other.ints; - self.floats = &self.floats & &other.floats; - self.bools = &self.bools & &other.bools; - self.refs = &self.refs & &other.refs; - - let mut new_specials = Vec::new(); - for spec in &self.specials { - if let Some(spec) = other.specials.iter().find(|&other_spec| other_spec == spec) { - new_specials.push(*spec); - } - } - self.specials = new_specials; - } - - pub fn is_subset(&self, other: &TypeSet) -> bool { - self.lanes.is_subset(&other.lanes) - && self.ints.is_subset(&other.ints) - && self.floats.is_subset(&other.floats) - && self.bools.is_subset(&other.bools) - && self.refs.is_subset(&other.refs) - && { - let specials: HashSet = HashSet::from_iter(self.specials.clone()); - let other_specials = HashSet::from_iter(other.specials.clone()); - specials.is_subset(&other_specials) - } - } - - pub fn is_wider_or_equal(&self, other: &TypeSet) -> bool { - set_wider_or_equal(&self.ints, &other.ints) - && set_wider_or_equal(&self.floats, &other.floats) - && set_wider_or_equal(&self.bools, &other.bools) - && set_wider_or_equal(&self.refs, &other.refs) - } - - pub fn is_narrower(&self, other: &TypeSet) -> bool { - set_narrower(&self.ints, &other.ints) - && set_narrower(&self.floats, &other.floats) - && set_narrower(&self.bools, &other.bools) - && set_narrower(&self.refs, &other.refs) - } -} - -fn set_wider_or_equal(s1: &NumSet, s2: &NumSet) -> bool { - !s1.is_empty() && !s2.is_empty() && s1.iter().min() >= s2.iter().max() -} - -fn set_narrower(s1: &NumSet, s2: &NumSet) -> bool { - !s1.is_empty() && !s2.is_empty() && s1.iter().min() < s2.iter().max() } impl fmt::Debug for TypeSet { @@ -806,18 +659,6 @@ impl TypeSetBuilder { self.specials, ) } - - pub fn all() -> TypeSet { - TypeSetBuilder::new() - .ints(Interval::All) - .floats(Interval::All) - .bools(Interval::All) - .refs(Interval::All) - .simd_lanes(Interval::All) - .specials(ValueType::all_special_types().collect()) - .includes_scalars(true) - .build() - } } #[derive(PartialEq)] @@ -1054,135 +895,6 @@ fn test_forward_images() { ); } -#[test] -fn test_backward_images() { - let empty_set = TypeSetBuilder::new().build(); - - // LaneOf. - assert_eq!( - TypeSetBuilder::new() - .simd_lanes(1..1) - .ints(8..8) - .floats(32..32) - .build() - .preimage(DerivedFunc::LaneOf), - TypeSetBuilder::new() - .simd_lanes(Interval::All) - .ints(8..8) - .floats(32..32) - .build() - ); - assert_eq!(empty_set.preimage(DerivedFunc::LaneOf), empty_set); - - // AsBool. - assert_eq!( - TypeSetBuilder::new() - .simd_lanes(1..4) - .bools(1..128) - .build() - .preimage(DerivedFunc::AsBool), - TypeSetBuilder::new() - .simd_lanes(1..4) - .ints(Interval::All) - .bools(Interval::All) - .floats(Interval::All) - .build() - ); - - // Double vector. 
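For intuition (an illustrative sketch, not taken from the patch): for the width-changing derived functions, `preimage` is simply the forward image of the inverse function, as the `HalfWidth`/`DoubleWidth` cases above show. The sketch below works over plain sets of bit widths and assumes the 8..128 integer width range used by `TypeSet`.

use std::collections::BTreeSet;

// Halving drops 8-bit types (they have no half); doubling drops 128-bit types.
fn half_width(widths: &BTreeSet<u16>) -> BTreeSet<u16> {
    widths.iter().filter(|&&w| w > 8).map(|&w| w / 2).collect()
}

fn double_width(widths: &BTreeSet<u16>) -> BTreeSet<u16> {
    widths.iter().filter(|&&w| w < 128).map(|&w| w * 2).collect()
}

fn main() {
    let ints: BTreeSet<u16> = BTreeSet::from([8, 16]);
    // preimage(HalfWidth) is the image of the inverse function, DoubleWidth:
    // exactly the widths whose halves land back in `ints`.
    assert_eq!(double_width(&ints), BTreeSet::from([16, 32]));
    // Going through HalfWidth first loses 8, since 8-bit types have no half.
    assert_eq!(double_width(&half_width(&ints)), BTreeSet::from([16]));
}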
- assert_eq!( - TypeSetBuilder::new() - .simd_lanes(1..1) - .ints(8..8) - .build() - .preimage(DerivedFunc::DoubleVector) - .size(), - 0 - ); - assert_eq!( - TypeSetBuilder::new() - .simd_lanes(1..16) - .ints(8..16) - .floats(32..32) - .build() - .preimage(DerivedFunc::DoubleVector), - TypeSetBuilder::new() - .simd_lanes(1..8) - .ints(8..16) - .floats(32..32) - .build(), - ); - - // Half vector. - assert_eq!( - TypeSetBuilder::new() - .simd_lanes(256..256) - .ints(8..8) - .build() - .preimage(DerivedFunc::HalfVector) - .size(), - 0 - ); - assert_eq!( - TypeSetBuilder::new() - .simd_lanes(64..128) - .bools(1..32) - .build() - .preimage(DerivedFunc::HalfVector), - TypeSetBuilder::new() - .simd_lanes(128..256) - .bools(1..32) - .build(), - ); - - // Half width. - assert_eq!( - TypeSetBuilder::new() - .ints(128..128) - .floats(64..64) - .bools(128..128) - .build() - .preimage(DerivedFunc::HalfWidth) - .size(), - 0 - ); - assert_eq!( - TypeSetBuilder::new() - .simd_lanes(64..256) - .bools(1..64) - .build() - .preimage(DerivedFunc::HalfWidth), - TypeSetBuilder::new() - .simd_lanes(64..256) - .bools(16..128) - .build(), - ); - - // Double width. - assert_eq!( - TypeSetBuilder::new() - .ints(8..8) - .floats(32..32) - .bools(1..8) - .build() - .preimage(DerivedFunc::DoubleWidth) - .size(), - 0 - ); - assert_eq!( - TypeSetBuilder::new() - .simd_lanes(1..16) - .ints(8..16) - .floats(32..64) - .build() - .preimage(DerivedFunc::DoubleWidth), - TypeSetBuilder::new() - .simd_lanes(1..16) - .ints(8..8) - .floats(32..32) - .build() - ); -} #[test] #[should_panic] diff --git a/cranelift/codegen/meta/src/cdsl/xform.rs b/cranelift/codegen/meta/src/cdsl/xform.rs deleted file mode 100644 index da0cc5ba71..0000000000 --- a/cranelift/codegen/meta/src/cdsl/xform.rs +++ /dev/null @@ -1,478 +0,0 @@ -use crate::cdsl::ast::{ - Apply, BlockPool, ConstPool, DefIndex, DefPool, DummyDef, DummyExpr, Expr, PatternPosition, - VarIndex, VarPool, -}; -use crate::cdsl::instructions::Instruction; -use crate::cdsl::type_inference::{infer_transform, TypeEnvironment}; -use crate::cdsl::typevar::TypeVar; - -use cranelift_entity::{entity_impl, PrimaryMap}; - -use std::collections::{HashMap, HashSet}; -use std::iter::FromIterator; - -/// An instruction transformation consists of a source and destination pattern. -/// -/// Patterns are expressed in *register transfer language* as tuples of Def or Expr nodes. A -/// pattern may optionally have a sequence of TypeConstraints, that additionally limit the set of -/// cases when it applies. -/// -/// The source pattern can contain only a single instruction. -pub(crate) struct Transform { - pub src: DefIndex, - pub dst: Vec, - pub var_pool: VarPool, - pub def_pool: DefPool, - pub block_pool: BlockPool, - pub const_pool: ConstPool, - pub type_env: TypeEnvironment, -} - -type SymbolTable = HashMap; - -impl Transform { - fn new(src: DummyDef, dst: Vec) -> Self { - let mut var_pool = VarPool::new(); - let mut def_pool = DefPool::new(); - let mut block_pool = BlockPool::new(); - let mut const_pool = ConstPool::new(); - - let mut input_vars: Vec = Vec::new(); - let mut defined_vars: Vec = Vec::new(); - - // Maps variable names to our own Var copies. - let mut symbol_table: SymbolTable = SymbolTable::new(); - - // Rewrite variables in src and dst using our own copies. 
- let src = rewrite_def_list( - PatternPosition::Source, - vec![src], - &mut symbol_table, - &mut input_vars, - &mut defined_vars, - &mut var_pool, - &mut def_pool, - &mut block_pool, - &mut const_pool, - )[0]; - - let num_src_inputs = input_vars.len(); - - let dst = rewrite_def_list( - PatternPosition::Destination, - dst, - &mut symbol_table, - &mut input_vars, - &mut defined_vars, - &mut var_pool, - &mut def_pool, - &mut block_pool, - &mut const_pool, - ); - - // Sanity checks. - for &var_index in &input_vars { - assert!( - var_pool.get(var_index).is_input(), - "'{:?}' used as both input and def", - var_pool.get(var_index) - ); - } - assert!( - input_vars.len() == num_src_inputs, - "extra input vars in dst pattern: {:?}", - input_vars - .iter() - .map(|&i| var_pool.get(i)) - .skip(num_src_inputs) - .collect::>() - ); - - // Perform type inference and cleanup. - let type_env = infer_transform(src, &dst, &def_pool, &mut var_pool).unwrap(); - - // Sanity check: the set of inferred free type variables should be a subset of the type - // variables corresponding to Vars appearing in the source pattern. - { - let free_typevars: HashSet = - HashSet::from_iter(type_env.free_typevars(&mut var_pool)); - let src_tvs = HashSet::from_iter( - input_vars - .clone() - .iter() - .chain( - defined_vars - .iter() - .filter(|&&var_index| !var_pool.get(var_index).is_temp()), - ) - .map(|&var_index| var_pool.get(var_index).get_typevar()) - .filter(|maybe_var| maybe_var.is_some()) - .map(|var| var.unwrap()), - ); - if !free_typevars.is_subset(&src_tvs) { - let missing_tvs = (&free_typevars - &src_tvs) - .iter() - .map(|tv| tv.name.clone()) - .collect::>() - .join(", "); - panic!("Some free vars don't appear in src: {}", missing_tvs); - } - } - - for &var_index in input_vars.iter().chain(defined_vars.iter()) { - let var = var_pool.get_mut(var_index); - let canon_tv = type_env.get_equivalent(&var.get_or_create_typevar()); - var.set_typevar(canon_tv); - } - - Self { - src, - dst, - var_pool, - def_pool, - block_pool, - const_pool, - type_env, - } - } - - fn verify_legalize(&self) { - let def = self.def_pool.get(self.src); - for &var_index in def.defined_vars.iter() { - let defined_var = self.var_pool.get(var_index); - assert!( - defined_var.is_output(), - "{:?} not defined in the destination pattern", - defined_var - ); - } - } -} - -/// Inserts, if not present, a name in the `symbol_table`. Then returns its index in the variable -/// pool `var_pool`. If the variable was not present in the symbol table, then add it to the list of -/// `defined_vars`. -fn var_index( - name: &str, - symbol_table: &mut SymbolTable, - defined_vars: &mut Vec, - var_pool: &mut VarPool, -) -> VarIndex { - let name = name.to_string(); - match symbol_table.get(&name) { - Some(&existing_var) => existing_var, - None => { - // Materialize the variable. - let new_var = var_pool.create(name.clone()); - symbol_table.insert(name, new_var); - defined_vars.push(new_var); - new_var - } - } -} - -/// Given a list of symbols defined in a Def, rewrite them to local symbols. Yield the new locals. 
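An illustrative restatement of that interning step (hypothetical plain `Vec`/`HashMap` pools standing in for `VarPool` and `SymbolTable`): a name always resolves to the same slot, and only its first occurrence is recorded as newly defined.

use std::collections::HashMap;

fn intern(
    name: &str,
    symbols: &mut HashMap<String, usize>,
    defined: &mut Vec<usize>,
    pool: &mut Vec<String>,
) -> usize {
    *symbols.entry(name.to_string()).or_insert_with(|| {
        // First time we see this name: materialize it and mark it as defined.
        let idx = pool.len();
        pool.push(name.to_string());
        defined.push(idx);
        idx
    })
}

fn main() {
    let (mut symbols, mut defined, mut pool) = (HashMap::new(), Vec::new(), Vec::new());
    let a = intern("x", &mut symbols, &mut defined, &mut pool);
    let b = intern("x", &mut symbols, &mut defined, &mut pool);
    assert_eq!(a, b);             // same name, same slot
    assert_eq!(defined, vec![0]); // recorded as defined only once
}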
-fn rewrite_defined_vars( - position: PatternPosition, - dummy_def: &DummyDef, - def_index: DefIndex, - symbol_table: &mut SymbolTable, - defined_vars: &mut Vec, - var_pool: &mut VarPool, -) -> Vec { - let mut new_defined_vars = Vec::new(); - for var in &dummy_def.defined_vars { - let own_var = var_index(&var.name, symbol_table, defined_vars, var_pool); - var_pool.get_mut(own_var).set_def(position, def_index); - new_defined_vars.push(own_var); - } - new_defined_vars -} - -/// Find all uses of variables in `expr` and replace them with our own local symbols. -fn rewrite_expr( - position: PatternPosition, - dummy_expr: DummyExpr, - symbol_table: &mut SymbolTable, - input_vars: &mut Vec, - var_pool: &mut VarPool, - const_pool: &mut ConstPool, -) -> Apply { - let (apply_target, dummy_args) = if let DummyExpr::Apply(apply_target, dummy_args) = dummy_expr - { - (apply_target, dummy_args) - } else { - panic!("we only rewrite apply expressions"); - }; - - assert_eq!( - apply_target.inst().operands_in.len(), - dummy_args.len(), - "number of arguments in instruction {} is incorrect\nexpected: {:?}", - apply_target.inst().name, - apply_target - .inst() - .operands_in - .iter() - .map(|operand| format!("{}: {}", operand.name, operand.kind.rust_type)) - .collect::>(), - ); - - let mut args = Vec::new(); - for (i, arg) in dummy_args.into_iter().enumerate() { - match arg { - DummyExpr::Var(var) => { - let own_var = var_index(&var.name, symbol_table, input_vars, var_pool); - let var = var_pool.get(own_var); - assert!( - var.is_input() || var.get_def(position).is_some(), - "{:?} used as both input and def", - var - ); - args.push(Expr::Var(own_var)); - } - DummyExpr::Literal(literal) => { - assert!(!apply_target.inst().operands_in[i].is_value()); - args.push(Expr::Literal(literal)); - } - DummyExpr::Constant(constant) => { - let const_name = const_pool.insert(constant.0); - // Here we abuse var_index by passing an empty, immediately-dropped vector to - // `defined_vars`; the reason for this is that unlike the `Var` case above, - // constants will create a variable that is not an input variable (it is tracked - // instead by ConstPool). - let const_var = var_index(&const_name, symbol_table, &mut vec![], var_pool); - args.push(Expr::Var(const_var)); - } - DummyExpr::Apply(..) => { - panic!("Recursive apply is not allowed."); - } - DummyExpr::Block(_block) => { - panic!("Blocks are not valid arguments."); - } - } - } - - Apply::new(apply_target, args) -} - -#[allow(clippy::too_many_arguments)] -fn rewrite_def_list( - position: PatternPosition, - dummy_defs: Vec, - symbol_table: &mut SymbolTable, - input_vars: &mut Vec, - defined_vars: &mut Vec, - var_pool: &mut VarPool, - def_pool: &mut DefPool, - block_pool: &mut BlockPool, - const_pool: &mut ConstPool, -) -> Vec { - let mut new_defs = Vec::new(); - // Register variable names of new blocks first as a block name can be used to jump forward. Thus - // the name has to be registered first to avoid misinterpreting it as an input-var. - for dummy_def in dummy_defs.iter() { - if let DummyExpr::Block(ref var) = dummy_def.expr { - var_index(&var.name, symbol_table, defined_vars, var_pool); - } - } - - // Iterate over the definitions and blocks, to map variables names to inputs or outputs. 
- for dummy_def in dummy_defs { - let def_index = def_pool.next_index(); - - let new_defined_vars = rewrite_defined_vars( - position, - &dummy_def, - def_index, - symbol_table, - defined_vars, - var_pool, - ); - if let DummyExpr::Block(var) = dummy_def.expr { - let var_index = *symbol_table - .get(&var.name) - .or_else(|| { - panic!( - "Block {} was not registered during the first visit", - var.name - ) - }) - .unwrap(); - var_pool.get_mut(var_index).set_def(position, def_index); - block_pool.create_block(var_index, def_index); - } else { - let new_apply = rewrite_expr( - position, - dummy_def.expr, - symbol_table, - input_vars, - var_pool, - const_pool, - ); - - assert!( - def_pool.next_index() == def_index, - "shouldn't have created new defs in the meanwhile" - ); - assert_eq!( - new_apply.inst.value_results.len(), - new_defined_vars.len(), - "number of Var results in instruction is incorrect" - ); - - new_defs.push(def_pool.create_inst(new_apply, new_defined_vars)); - } - } - new_defs -} - -/// A group of related transformations. -pub(crate) struct TransformGroup { - pub name: &'static str, - pub doc: &'static str, - pub chain_with: Option, - pub isa_name: Option<&'static str>, - pub id: TransformGroupIndex, - - /// Maps Instruction camel_case names to custom legalization functions names. - pub custom_legalizes: HashMap, - pub transforms: Vec, -} - -impl TransformGroup { - pub fn rust_name(&self) -> String { - match self.isa_name { - Some(_) => { - // This is a function in the same module as the LEGALIZE_ACTIONS table referring to - // it. - self.name.to_string() - } - None => format!("crate::legalizer::{}", self.name), - } - } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub(crate) struct TransformGroupIndex(u32); -entity_impl!(TransformGroupIndex); - -pub(crate) struct TransformGroupBuilder { - name: &'static str, - doc: &'static str, - chain_with: Option, - isa_name: Option<&'static str>, - pub custom_legalizes: HashMap, - pub transforms: Vec, -} - -impl TransformGroupBuilder { - pub fn new(name: &'static str, doc: &'static str) -> Self { - Self { - name, - doc, - chain_with: None, - isa_name: None, - custom_legalizes: HashMap::new(), - transforms: Vec::new(), - } - } - - pub fn chain_with(mut self, next_id: TransformGroupIndex) -> Self { - assert!(self.chain_with.is_none()); - self.chain_with = Some(next_id); - self - } - - /// Add a custom legalization action for `inst`. - /// - /// The `func_name` parameter is the fully qualified name of a Rust function which takes the - /// same arguments as the `isa::Legalize` actions. - /// - /// The custom function will be called to legalize `inst` and any return value is ignored. - pub fn custom_legalize(&mut self, inst: &Instruction, func_name: &'static str) { - assert!( - self.custom_legalizes - .insert(inst.camel_name.clone(), func_name) - .is_none(), - "custom legalization action for {} inserted twice", - inst.name - ); - } - - /// Add a legalization pattern to this group. 
- pub fn legalize(&mut self, src: DummyDef, dst: Vec) { - let transform = Transform::new(src, dst); - transform.verify_legalize(); - self.transforms.push(transform); - } - - pub fn build_and_add_to(self, owner: &mut TransformGroups) -> TransformGroupIndex { - let next_id = owner.next_key(); - owner.add(TransformGroup { - name: self.name, - doc: self.doc, - isa_name: self.isa_name, - id: next_id, - chain_with: self.chain_with, - custom_legalizes: self.custom_legalizes, - transforms: self.transforms, - }) - } -} - -pub(crate) struct TransformGroups { - groups: PrimaryMap, -} - -impl TransformGroups { - pub fn new() -> Self { - Self { - groups: PrimaryMap::new(), - } - } - pub fn add(&mut self, new_group: TransformGroup) -> TransformGroupIndex { - for group in self.groups.values() { - assert!( - group.name != new_group.name, - "trying to insert {} for the second time", - new_group.name - ); - } - self.groups.push(new_group) - } - pub fn get(&self, id: TransformGroupIndex) -> &TransformGroup { - &self.groups[id] - } - fn next_key(&self) -> TransformGroupIndex { - self.groups.next_key() - } - pub fn by_name(&self, name: &'static str) -> &TransformGroup { - for group in self.groups.values() { - if group.name == name { - return group; - } - } - panic!("transform group with name {} not found", name); - } -} - -#[test] -#[should_panic] -fn test_double_custom_legalization() { - use crate::cdsl::formats::InstructionFormatBuilder; - use crate::cdsl::instructions::{AllInstructions, InstructionBuilder, InstructionGroupBuilder}; - - let nullary = InstructionFormatBuilder::new("nullary").build(); - - let mut dummy_all = AllInstructions::new(); - let mut inst_group = InstructionGroupBuilder::new(&mut dummy_all); - inst_group.push(InstructionBuilder::new("dummy", "doc", &nullary)); - - let inst_group = inst_group.build(); - let dummy_inst = inst_group.by_name("dummy"); - - let mut transform_group = TransformGroupBuilder::new("test", "doc"); - transform_group.custom_legalize(&dummy_inst, "custom 1"); - transform_group.custom_legalize(&dummy_inst, "custom 2"); -} diff --git a/cranelift/codegen/meta/src/gen_legalizer.rs b/cranelift/codegen/meta/src/gen_legalizer.rs deleted file mode 100644 index 7b56b8db48..0000000000 --- a/cranelift/codegen/meta/src/gen_legalizer.rs +++ /dev/null @@ -1,734 +0,0 @@ -//! Generate transformations to legalize instructions without encodings. -use crate::cdsl::ast::{Def, DefPool, Expr, VarPool}; -use crate::cdsl::isa::TargetIsa; -use crate::cdsl::operands::Operand; -use crate::cdsl::type_inference::Constraint; -use crate::cdsl::typevar::{TypeSet, TypeVar}; -use crate::cdsl::xform::{Transform, TransformGroup, TransformGroups}; - -use crate::error; -use crate::gen_inst::gen_typesets_table; -use crate::srcgen::Formatter; -use crate::unique_table::UniqueTable; - -use std::collections::{HashMap, HashSet}; -use std::iter::FromIterator; - -/// Given a `Def` node, emit code that extracts all the instruction fields from -/// `pos.func.dfg[iref]`. -/// -/// Create local variables named after the `Var` instances in `node`. -/// -/// Also create a local variable named `predicate` with the value of the evaluated instruction -/// predicate, or `true` if the node has no predicate. 
-fn unwrap_inst(transform: &Transform, fmt: &mut Formatter) -> bool { - let var_pool = &transform.var_pool; - let def_pool = &transform.def_pool; - - let def = def_pool.get(transform.src); - let apply = &def.apply; - let inst = &apply.inst; - let iform = &inst.format; - - fmt.comment(format!( - "Unwrap fields from instruction format {}", - def.to_comment_string(&transform.var_pool) - )); - - // Extract the Var arguments. - let arg_names = apply - .args - .iter() - .enumerate() - .filter(|(arg_num, _)| { - // Variable args are specially handled after extracting args. - !inst.operands_in[*arg_num].is_varargs() - }) - .map(|(arg_num, arg)| match &arg { - Expr::Var(var_index) => var_pool.get(*var_index).name.as_ref(), - Expr::Literal(_) => { - let n = inst.imm_opnums.iter().position(|&i| i == arg_num).unwrap(); - iform.imm_fields[n].member - } - }) - .collect::>() - .join(", "); - - // May we need "args" in the values consumed by predicates? - let emit_args = iform.num_value_operands >= 1 || iform.has_value_list; - - // We need a tuple: - // - if there's at least one value operand, then we emit a variable for the value, and the - // value list as args. - // - otherwise, if there's the count of immediate operands added to the presence of a value list exceeds one. - let need_tuple = if iform.num_value_operands >= 1 { - true - } else { - let mut imm_and_varargs = inst - .operands_in - .iter() - .filter(|op| op.is_immediate_or_entityref()) - .count(); - if iform.has_value_list { - imm_and_varargs += 1; - } - imm_and_varargs > 1 - }; - - let maybe_args = if emit_args { ", args" } else { "" }; - let defined_values = format!("{}{}", arg_names, maybe_args); - - let tuple_or_value = if need_tuple { - format!("({})", defined_values) - } else { - defined_values - }; - - fmtln!( - fmt, - "let {} = if let ir::InstructionData::{} {{", - tuple_or_value, - iform.name - ); - - fmt.indent(|fmt| { - // Fields are encoded directly. - for field in &iform.imm_fields { - fmtln!(fmt, "{},", field.member); - } - - if iform.has_value_list || iform.num_value_operands > 1 { - fmt.line("ref args,"); - } else if iform.num_value_operands == 1 { - fmt.line("arg,"); - } - - fmt.line(".."); - fmt.outdented_line("} = pos.func.dfg[inst] {"); - - if iform.has_value_list { - fmt.line("let args = args.as_slice(&pos.func.dfg.value_lists);"); - } else if iform.num_value_operands == 1 { - fmt.line("let args = [arg];") - } - - // Generate the values for the tuple. - let emit_one_value = - |fmt: &mut Formatter, needs_comma: bool, op_num: usize, op: &Operand| { - let comma = if needs_comma { "," } else { "" }; - if op.is_immediate_or_entityref() { - let n = inst.imm_opnums.iter().position(|&i| i == op_num).unwrap(); - fmtln!(fmt, "{}{}", iform.imm_fields[n].member, comma); - } else if op.is_value() { - let n = inst.value_opnums.iter().position(|&i| i == op_num).unwrap(); - fmtln!(fmt, "pos.func.dfg.resolve_aliases(args[{}]),", n); - } else { - // This is a value list argument or a varargs. - assert!(iform.has_value_list || op.is_varargs()); - } - }; - - if need_tuple { - fmt.line("("); - fmt.indent(|fmt| { - for (op_num, op) in inst.operands_in.iter().enumerate() { - let needs_comma = emit_args || op_num + 1 < inst.operands_in.len(); - emit_one_value(fmt, needs_comma, op_num, op); - } - if emit_args { - fmt.line("args"); - } - }); - fmt.line(")"); - } else { - // Only one of these can be true at the same time, otherwise we'd need a tuple. 
- emit_one_value(fmt, false, 0, &inst.operands_in[0]); - if emit_args { - fmt.line("args"); - } - } - - fmt.outdented_line("} else {"); - fmt.line(r#"unreachable!("bad instruction format")"#); - }); - fmtln!(fmt, "};"); - fmt.empty_line(); - - assert_eq!(inst.operands_in.len(), apply.args.len()); - for (i, op) in inst.operands_in.iter().enumerate() { - if op.is_varargs() { - let name = &var_pool - .get(apply.args[i].maybe_var().expect("vararg without name")) - .name; - let n = inst - .imm_opnums - .iter() - .chain(inst.value_opnums.iter()) - .max() - .copied() - .unwrap_or(0); - fmtln!(fmt, "let {} = &Vec::from(&args[{}..]);", name, n); - } - } - - for &op_num in &inst.value_opnums { - let arg = &apply.args[op_num]; - if let Some(var_index) = arg.maybe_var() { - let var = var_pool.get(var_index); - if var.has_free_typevar() { - fmtln!( - fmt, - "let typeof_{} = pos.func.dfg.value_type({});", - var.name, - var.name - ); - } - } - } - - // If the definition creates results, detach the values and place them in locals. - let mut replace_inst = false; - if !def.defined_vars.is_empty() { - if def.defined_vars - == def_pool - .get(var_pool.get(def.defined_vars[0]).dst_def.unwrap()) - .defined_vars - { - // Special case: The instruction replacing node defines the exact same values. - fmt.comment(format!( - "Results handled by {}.", - def_pool - .get(var_pool.get(def.defined_vars[0]).dst_def.unwrap()) - .to_comment_string(var_pool) - )); - - fmt.line("let r = pos.func.dfg.inst_results(inst);"); - for (i, &var_index) in def.defined_vars.iter().enumerate() { - let var = var_pool.get(var_index); - fmtln!(fmt, "let {} = &r[{}];", var.name, i); - fmtln!( - fmt, - "let typeof_{} = pos.func.dfg.value_type(*{});", - var.name, - var.name - ); - } - - replace_inst = true; - } else { - // Boring case: Detach the result values, capture them in locals. - for &var_index in &def.defined_vars { - fmtln!(fmt, "let {};", var_pool.get(var_index).name); - } - - fmt.line("{"); - fmt.indent(|fmt| { - fmt.line("let r = pos.func.dfg.inst_results(inst);"); - for i in 0..def.defined_vars.len() { - let var = var_pool.get(def.defined_vars[i]); - fmtln!(fmt, "{} = r[{}];", var.name, i); - } - }); - fmt.line("}"); - - for &var_index in &def.defined_vars { - let var = var_pool.get(var_index); - if var.has_free_typevar() { - fmtln!( - fmt, - "let typeof_{} = pos.func.dfg.value_type({});", - var.name, - var.name - ); - } - } - } - } - replace_inst -} - -fn build_derived_expr(tv: &TypeVar) -> String { - let base = match &tv.base { - Some(base) => base, - None => { - assert!(tv.name.starts_with("typeof_")); - return format!("Some({})", tv.name); - } - }; - let base_expr = build_derived_expr(&base.type_var); - format!( - "{}.map(|t: crate::ir::Type| t.{}())", - base_expr, - base.derived_func.name() - ) -} - -/// Emit rust code for the given check. -/// -/// The emitted code is a statement redefining the `predicate` variable like this: -/// let predicate = predicate && ... 
-fn emit_runtime_typecheck<'a>( - constraint: &'a Constraint, - type_sets: &mut UniqueTable<'a, TypeSet>, - fmt: &mut Formatter, -) { - match constraint { - Constraint::InTypeset(tv, ts) => { - let ts_index = type_sets.add(&ts); - fmt.comment(format!( - "{} must belong to {:?}", - tv.name, - type_sets.get(ts_index) - )); - fmtln!( - fmt, - "let predicate = predicate && TYPE_SETS[{}].contains({});", - ts_index, - tv.name - ); - } - Constraint::Eq(tv1, tv2) => { - fmtln!( - fmt, - "let predicate = predicate && match ({}, {}) {{", - build_derived_expr(tv1), - build_derived_expr(tv2) - ); - fmt.indent(|fmt| { - fmt.line("(Some(a), Some(b)) => a == b,"); - fmt.comment("On overflow, constraint doesn\'t apply"); - fmt.line("_ => false,"); - }); - fmtln!(fmt, "};"); - } - Constraint::WiderOrEq(tv1, tv2) => { - fmtln!( - fmt, - "let predicate = predicate && match ({}, {}) {{", - build_derived_expr(tv1), - build_derived_expr(tv2) - ); - fmt.indent(|fmt| { - fmt.line("(Some(a), Some(b)) => a.wider_or_equal(b),"); - fmt.comment("On overflow, constraint doesn\'t apply"); - fmt.line("_ => false,"); - }); - fmtln!(fmt, "};"); - } - } -} - -/// Determine if `node` represents one of the value splitting instructions: `isplit` or `vsplit. -/// These instructions are lowered specially by the `legalize::split` module. -fn is_value_split(def: &Def) -> bool { - let name = &def.apply.inst.name; - name == "isplit" || name == "vsplit" -} - -fn emit_dst_inst(def: &Def, def_pool: &DefPool, var_pool: &VarPool, fmt: &mut Formatter) { - let defined_vars = { - let vars = def - .defined_vars - .iter() - .map(|&var_index| var_pool.get(var_index).name.as_ref()) - .collect::>(); - if vars.len() == 1 { - vars[0].to_string() - } else { - format!("({})", vars.join(", ")) - } - }; - - if is_value_split(def) { - // Split instructions are not emitted with the builder, but by calling special functions in - // the `legalizer::split` module. These functions will eliminate concat-split patterns. - fmt.line("let curpos = pos.position();"); - fmt.line("let srcloc = pos.srcloc();"); - fmtln!( - fmt, - "let {} = split::{}(pos.func, cfg, curpos, srcloc, {});", - defined_vars, - def.apply.inst.snake_name(), - def.apply.args[0].to_rust_code(var_pool) - ); - return; - } - - if def.defined_vars.is_empty() { - // This node doesn't define any values, so just insert the new instruction. - fmtln!( - fmt, - "pos.ins().{};", - def.apply.rust_builder(&def.defined_vars, var_pool) - ); - return; - } - - if let Some(src_def0) = var_pool.get(def.defined_vars[0]).src_def { - if def.defined_vars == def_pool.get(src_def0).defined_vars { - // The replacement instruction defines the exact same values as the source pattern. - // Unwrapping would have left the results intact. Replace the whole instruction. - fmtln!( - fmt, - "let {} = pos.func.dfg.replace(inst).{};", - defined_vars, - def.apply.rust_builder(&def.defined_vars, var_pool) - ); - - // We need to bump the cursor so following instructions are inserted *after* the - // replaced instruction. - fmt.line("if pos.current_inst() == Some(inst) {"); - fmt.indent(|fmt| { - fmt.line("pos.next_inst();"); - }); - fmt.line("}"); - return; - } - } - - // Insert a new instruction. - let mut builder = format!("let {} = pos.ins()", defined_vars); - - if def.defined_vars.len() == 1 && var_pool.get(def.defined_vars[0]).is_output() { - // Reuse the single source result value. 
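Roughly what a generated `Constraint::Eq` check looks like once expanded, shown here with hypothetical `Option<u32>` bit widths standing in for the `typeof_*.map(...)` expressions that `build_derived_expr` produces; this is a sketch of the emitted shape, not actual generator output.

fn main() {
    let typeof_x: Option<u32> = Some(64); // bit width of x's type
    let typeof_y: Option<u32> = Some(32); // bit width of y's type

    // Constraint::Eq(half_width(x), y), rebinding `predicate` as above:
    let predicate = true;
    let predicate = predicate
        && match (
            typeof_x.and_then(|w| if w > 8 { Some(w / 2) } else { None }),
            typeof_y,
        ) {
            (Some(a), Some(b)) => a == b,
            _ => false, // if the derived type doesn't exist, the constraint doesn't apply
        };
    assert!(predicate); // half_width(i64) == i32
}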
- builder = format!( - "{}.with_result({})", - builder, - var_pool.get(def.defined_vars[0]).to_rust_code() - ); - } else if def - .defined_vars - .iter() - .any(|&var_index| var_pool.get(var_index).is_output()) - { - // There are more than one output values that can be reused. - let array = def - .defined_vars - .iter() - .map(|&var_index| { - let var = var_pool.get(var_index); - if var.is_output() { - format!("Some({})", var.name) - } else { - "None".into() - } - }) - .collect::>() - .join(", "); - builder = format!("{}.with_results([{}])", builder, array); - } - - fmtln!( - fmt, - "{}.{};", - builder, - def.apply.rust_builder(&def.defined_vars, var_pool) - ); -} - -/// Emit code for `transform`, assuming that the opcode of transform's root instruction -/// has already been matched. -/// -/// `inst: Inst` is the variable to be replaced. It is pointed to by `pos: Cursor`. -/// `dfg: DataFlowGraph` is available and mutable. -fn gen_transform<'a>( - replace_inst: bool, - transform: &'a Transform, - type_sets: &mut UniqueTable<'a, TypeSet>, - fmt: &mut Formatter, -) { - // Evaluate the instruction predicate if any. - let apply = &transform.def_pool.get(transform.src).apply; - - let inst_predicate = apply - .inst_predicate_with_ctrl_typevar(&transform.var_pool) - .rust_predicate("pos.func"); - - let has_extra_constraints = !transform.type_env.constraints.is_empty(); - if has_extra_constraints { - // Extra constraints rely on the predicate being a variable that we can rebind as we add - // more constraint predicates. - if let Some(pred) = &inst_predicate { - fmt.multi_line(&format!("let predicate = {};", pred)); - } else { - fmt.line("let predicate = true;"); - } - } - - // Emit any runtime checks; these will rebind `predicate` emitted right above. - for constraint in &transform.type_env.constraints { - emit_runtime_typecheck(constraint, type_sets, fmt); - } - - let do_expand = |fmt: &mut Formatter| { - // Emit any constants that must be created before use. - for (name, value) in transform.const_pool.iter() { - fmtln!( - fmt, - "let {} = pos.func.dfg.constants.insert(vec!{:?}.into());", - name, - value - ); - } - - // If we are adding some blocks, we need to recall the original block, such that we can - // recompute it. - if !transform.block_pool.is_empty() { - fmt.line("let orig_block = pos.current_block().unwrap();"); - } - - // If we're going to delete `inst`, we need to detach its results first so they can be - // reattached during pattern expansion. - if !replace_inst { - fmt.line("pos.func.dfg.clear_results(inst);"); - } - - // Emit new block creation. - for block in &transform.block_pool { - let var = transform.var_pool.get(block.name); - fmtln!(fmt, "let {} = pos.func.dfg.make_block();", var.name); - } - - // Emit the destination pattern. - for &def_index in &transform.dst { - if let Some(block) = transform.block_pool.get(def_index) { - let var = transform.var_pool.get(block.name); - fmtln!(fmt, "pos.insert_block({});", var.name); - } - emit_dst_inst( - transform.def_pool.get(def_index), - &transform.def_pool, - &transform.var_pool, - fmt, - ); - } - - // Insert a new block after the last instruction, if needed. - let def_next_index = transform.def_pool.next_index(); - if let Some(block) = transform.block_pool.get(def_next_index) { - let var = transform.var_pool.get(block.name); - fmtln!(fmt, "pos.insert_block({});", var.name); - } - - // Delete the original instruction if we didn't have an opportunity to replace it. 
- if !replace_inst { - fmt.line("let removed = pos.remove_inst();"); - fmt.line("debug_assert_eq!(removed, inst);"); - } - - if transform.block_pool.is_empty() { - if transform.def_pool.get(transform.src).apply.inst.is_branch { - // A branch might have been legalized into multiple branches, so we need to recompute - // the cfg. - fmt.line("cfg.recompute_block(pos.func, pos.current_block().unwrap());"); - } - } else { - // Update CFG for the new blocks. - fmt.line("cfg.recompute_block(pos.func, orig_block);"); - for block in &transform.block_pool { - let var = transform.var_pool.get(block.name); - fmtln!(fmt, "cfg.recompute_block(pos.func, {});", var.name); - } - } - - fmt.line("return true;"); - }; - - // Guard the actual expansion by `predicate`. - if has_extra_constraints { - fmt.line("if predicate {"); - fmt.indent(|fmt| { - do_expand(fmt); - }); - fmt.line("}"); - } else if let Some(pred) = &inst_predicate { - fmt.multi_line(&format!("if {} {{", pred)); - fmt.indent(|fmt| { - do_expand(fmt); - }); - fmt.line("}"); - } else { - // Unconditional transform (there was no predicate), just emit it. - do_expand(fmt); - } -} - -fn gen_transform_group<'a>( - group: &'a TransformGroup, - transform_groups: &TransformGroups, - type_sets: &mut UniqueTable<'a, TypeSet>, - fmt: &mut Formatter, -) { - fmt.doc_comment(group.doc); - fmt.line("#[allow(unused_variables,unused_assignments,unused_imports,non_snake_case)]"); - - // Function arguments. - fmtln!(fmt, "pub fn {}(", group.name); - fmt.indent(|fmt| { - fmt.line("inst: crate::ir::Inst,"); - fmt.line("func: &mut crate::ir::Function,"); - fmt.line("cfg: &mut crate::flowgraph::ControlFlowGraph,"); - fmt.line("isa: &dyn crate::isa::TargetIsa,"); - }); - fmtln!(fmt, ") -> bool {"); - - // Function body. - fmt.indent(|fmt| { - fmt.line("use crate::ir::InstBuilder;"); - fmt.line("use crate::cursor::{Cursor, FuncCursor};"); - fmt.line("let mut pos = FuncCursor::new(func).at_inst(inst);"); - fmt.line("pos.use_srcloc(inst);"); - - // Group the transforms by opcode so we can generate a big switch. - // Preserve ordering. - let mut inst_to_transforms = HashMap::new(); - for transform in &group.transforms { - let def_index = transform.src; - let inst = &transform.def_pool.get(def_index).apply.inst; - inst_to_transforms - .entry(inst.camel_name.clone()) - .or_insert_with(Vec::new) - .push(transform); - } - - let mut sorted_inst_names = Vec::from_iter(inst_to_transforms.keys()); - sorted_inst_names.sort(); - - fmt.line("{"); - fmt.indent(|fmt| { - fmt.line("match pos.func.dfg[inst].opcode() {"); - fmt.indent(|fmt| { - for camel_name in sorted_inst_names { - fmtln!(fmt, "ir::Opcode::{} => {{", camel_name); - fmt.indent(|fmt| { - let transforms = inst_to_transforms.get(camel_name).unwrap(); - - // Unwrap the source instruction, create local variables for the input variables. - let replace_inst = unwrap_inst(&transforms[0], fmt); - fmt.empty_line(); - - for (i, transform) in transforms.iter().enumerate() { - if i > 0 { - fmt.empty_line(); - } - gen_transform(replace_inst, transform, type_sets, fmt); - } - }); - fmtln!(fmt, "}"); - fmt.empty_line(); - } - - // Emit the custom transforms. The Rust compiler will complain about any overlap with - // the normal transforms. 
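The group-by-opcode-then-sort step above is a common trick for deterministic code generation: collect into a `HashMap`, but iterate its keys in sorted order so the emitted file is byte-for-byte stable across runs. A small self-contained sketch (opcode and function names are made up):

use std::collections::HashMap;

fn main() {
    let transforms = [("Iadd", "narrow_iadd"), ("Brz", "narrow_brz"), ("Iadd", "widen_iadd")];
    let mut by_opcode: HashMap<&str, Vec<&str>> = HashMap::new();
    for (opcode, xform) in transforms {
        by_opcode.entry(opcode).or_insert_with(Vec::new).push(xform);
    }
    // HashMap iteration order is unspecified, so sort the keys before emitting.
    let mut opcodes: Vec<&str> = by_opcode.keys().copied().collect();
    opcodes.sort();
    for opcode in opcodes {
        println!("ir::Opcode::{} => {{ /* {} */ }}", opcode, by_opcode[opcode].join(", "));
    }
}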
- let mut sorted_custom_legalizes = Vec::from_iter(&group.custom_legalizes); - sorted_custom_legalizes.sort(); - for (inst_camel_name, func_name) in sorted_custom_legalizes { - fmtln!(fmt, "ir::Opcode::{} => {{", inst_camel_name); - fmt.indent(|fmt| { - fmtln!(fmt, "{}(inst, func, cfg, isa);", func_name); - fmt.line("return true;"); - }); - fmtln!(fmt, "}"); - fmt.empty_line(); - } - - // We'll assume there are uncovered opcodes. - fmt.line("_ => {},"); - }); - fmt.line("}"); - }); - fmt.line("}"); - - // If we fall through, nothing was expanded; call the chain if any. - match &group.chain_with { - Some(group_id) => fmtln!( - fmt, - "{}(inst, func, cfg, isa)", - transform_groups.get(*group_id).rust_name() - ), - None => fmt.line("false"), - }; - }); - fmtln!(fmt, "}"); - fmt.empty_line(); -} - -/// Generate legalization functions for `isa` and add any shared `TransformGroup`s -/// encountered to `shared_groups`. -/// -/// Generate `TYPE_SETS` and `LEGALIZE_ACTIONS` tables. -fn gen_isa( - isa: &TargetIsa, - transform_groups: &TransformGroups, - shared_group_names: &mut HashSet<&'static str>, - fmt: &mut Formatter, -) { - let mut type_sets = UniqueTable::new(); - for group_index in isa.transitive_transform_groups(transform_groups) { - let group = transform_groups.get(group_index); - match group.isa_name { - Some(isa_name) => { - assert!( - isa_name == isa.name, - "ISA-specific legalizations must be used by the same ISA" - ); - gen_transform_group(group, transform_groups, &mut type_sets, fmt); - } - None => { - shared_group_names.insert(group.name); - } - } - } - - gen_typesets_table(&type_sets, fmt); - - let direct_groups = isa.direct_transform_groups(); - fmtln!( - fmt, - "pub static LEGALIZE_ACTIONS: [isa::Legalize; {}] = [", - direct_groups.len() - ); - fmt.indent(|fmt| { - for &group_index in direct_groups { - fmtln!(fmt, "{},", transform_groups.get(group_index).rust_name()); - } - }); - fmtln!(fmt, "];"); -} - -/// Generate the legalizer files. -pub(crate) fn generate( - isas: &[TargetIsa], - transform_groups: &TransformGroups, - extra_legalization_groups: &[&'static str], - filename_prefix: &str, - out_dir: &str, -) -> Result<(), error::Error> { - let mut shared_group_names = HashSet::new(); - - for isa in isas { - let mut fmt = Formatter::new(); - gen_isa(isa, transform_groups, &mut shared_group_names, &mut fmt); - fmt.update_file(format!("{}-{}.rs", filename_prefix, isa.name), out_dir)?; - } - - // Add extra legalization groups that were explicitly requested. - for group in extra_legalization_groups { - shared_group_names.insert(group); - } - - // Generate shared legalize groups. - let mut fmt = Formatter::new(); - // Generate shared legalize groups. 
- let mut type_sets = UniqueTable::new(); - let mut sorted_shared_group_names = Vec::from_iter(shared_group_names); - sorted_shared_group_names.sort(); - for group_name in &sorted_shared_group_names { - let group = transform_groups.by_name(group_name); - gen_transform_group(group, transform_groups, &mut type_sets, &mut fmt); - } - gen_typesets_table(&type_sets, &mut fmt); - fmt.update_file(format!("{}r.rs", filename_prefix), out_dir)?; - - Ok(()) -} diff --git a/cranelift/codegen/meta/src/isa/arm32/mod.rs b/cranelift/codegen/meta/src/isa/arm32/mod.rs index 2dc58e4053..f524a87ea7 100644 --- a/cranelift/codegen/meta/src/isa/arm32/mod.rs +++ b/cranelift/codegen/meta/src/isa/arm32/mod.rs @@ -1,6 +1,4 @@ -use crate::cdsl::instructions::InstructionPredicateMap; use crate::cdsl::isa::TargetIsa; -use crate::cdsl::recipes::Recipes; use crate::cdsl::regs::{IsaRegs, IsaRegsBuilder, RegBankBuilder, RegClassBuilder}; use crate::cdsl::settings::{SettingGroup, SettingGroupBuilder}; @@ -52,20 +50,5 @@ pub(crate) fn define(shared_defs: &mut SharedDefinitions) -> TargetIsa { let settings = define_settings(&shared_defs.settings); let regs = define_regs(); - let cpu_modes = vec![]; - - // TODO implement arm32 recipes. - let recipes = Recipes::new(); - - // TODO implement arm32 encodings and predicates. - let encodings_predicates = InstructionPredicateMap::new(); - - TargetIsa::new( - "arm32", - settings, - regs, - recipes, - cpu_modes, - encodings_predicates, - ) + TargetIsa::new("arm32", settings, regs) } diff --git a/cranelift/codegen/meta/src/isa/arm64/mod.rs b/cranelift/codegen/meta/src/isa/arm64/mod.rs index 3ae57fbb62..a8920f703d 100644 --- a/cranelift/codegen/meta/src/isa/arm64/mod.rs +++ b/cranelift/codegen/meta/src/isa/arm64/mod.rs @@ -1,6 +1,4 @@ -use crate::cdsl::instructions::InstructionPredicateMap; use crate::cdsl::isa::TargetIsa; -use crate::cdsl::recipes::Recipes; use crate::cdsl::regs::{IsaRegs, IsaRegsBuilder, RegBankBuilder, RegClassBuilder}; use crate::cdsl::settings::{SettingGroup, SettingGroupBuilder}; @@ -51,20 +49,5 @@ pub(crate) fn define(shared_defs: &mut SharedDefinitions) -> TargetIsa { let settings = define_settings(&shared_defs.settings); let regs = define_registers(); - let cpu_modes = vec![]; - - // TODO implement arm64 recipes. - let recipes = Recipes::new(); - - // TODO implement arm64 encodings and predicates. 
- let encodings_predicates = InstructionPredicateMap::new(); - - TargetIsa::new( - "arm64", - settings, - regs, - recipes, - cpu_modes, - encodings_predicates, - ) + TargetIsa::new("arm64", settings, regs) } diff --git a/cranelift/codegen/meta/src/isa/s390x/mod.rs b/cranelift/codegen/meta/src/isa/s390x/mod.rs index 97a5947080..a4fb05a9f5 100644 --- a/cranelift/codegen/meta/src/isa/s390x/mod.rs +++ b/cranelift/codegen/meta/src/isa/s390x/mod.rs @@ -1,6 +1,4 @@ -use crate::cdsl::instructions::InstructionPredicateMap; use crate::cdsl::isa::TargetIsa; -use crate::cdsl::recipes::Recipes; use crate::cdsl::regs::IsaRegsBuilder; use crate::cdsl::settings::{SettingGroup, SettingGroupBuilder}; @@ -46,17 +44,6 @@ fn define_settings(_shared: &SettingGroup) -> SettingGroup { pub(crate) fn define(shared_defs: &mut SharedDefinitions) -> TargetIsa { let settings = define_settings(&shared_defs.settings); let regs = IsaRegsBuilder::new().build(); - let recipes = Recipes::new(); - let encodings_predicates = InstructionPredicateMap::new(); - let cpu_modes = vec![]; - - TargetIsa::new( - "s390x", - settings, - regs, - recipes, - cpu_modes, - encodings_predicates, - ) + TargetIsa::new("s390x", settings, regs) } diff --git a/cranelift/codegen/meta/src/isa/x86/mod.rs b/cranelift/codegen/meta/src/isa/x86/mod.rs index 7c3e4c6877..e21bfe1485 100644 --- a/cranelift/codegen/meta/src/isa/x86/mod.rs +++ b/cranelift/codegen/meta/src/isa/x86/mod.rs @@ -1,6 +1,4 @@ -use crate::cdsl::instructions::{InstructionGroupBuilder, InstructionPredicateMap}; use crate::cdsl::isa::TargetIsa; -use crate::cdsl::recipes::Recipes; use crate::cdsl::regs::IsaRegsBuilder; use crate::shared::Definitions as SharedDefinitions; @@ -10,16 +8,5 @@ pub(crate) mod settings; pub(crate) fn define(shared_defs: &mut SharedDefinitions) -> TargetIsa { let settings = settings::define(&shared_defs.settings); - let inst_group = InstructionGroupBuilder::new(&mut shared_defs.all_instructions).build(); - - let cpu_modes = vec![]; - - TargetIsa::new( - "x86", - settings, - IsaRegsBuilder::new().build(), - Recipes::new(), - cpu_modes, - InstructionPredicateMap::new(), - ) + TargetIsa::new("x86", settings, IsaRegsBuilder::new().build()) } diff --git a/cranelift/codegen/meta/src/lib.rs b/cranelift/codegen/meta/src/lib.rs index bfa1cd0056..d8972702a3 100644 --- a/cranelift/codegen/meta/src/lib.rs +++ b/cranelift/codegen/meta/src/lib.rs @@ -8,7 +8,6 @@ pub mod error; pub mod isa; mod gen_inst; -mod gen_legalizer; mod gen_registers; mod gen_settings; mod gen_types; @@ -55,21 +54,6 @@ pub fn generate( &out_dir, )?; - let extra_legalization_groups: &[&'static str] = if !new_backend_isas.is_empty() { - // The new backend only requires the "expand" legalization group. 
- &["expand"] - } else { - &[] - }; - - gen_legalizer::generate( - &target_isas, - &shared_defs.transform_groups, - extra_legalization_groups, - "legalize", - &out_dir, - )?; - for isa in target_isas { gen_registers::generate(&isa, &format!("registers-{}.rs", isa.name), &out_dir)?; diff --git a/cranelift/codegen/meta/src/shared/instructions.rs b/cranelift/codegen/meta/src/shared/instructions.rs index 0565645d4d..d869f8629c 100644 --- a/cranelift/codegen/meta/src/shared/instructions.rs +++ b/cranelift/codegen/meta/src/shared/instructions.rs @@ -1,7 +1,7 @@ #![allow(non_snake_case)] use crate::cdsl::instructions::{ - AllInstructions, InstructionBuilder as Inst, InstructionGroup, InstructionGroupBuilder, + AllInstructions, InstructionBuilder as Inst, InstructionGroupBuilder, }; use crate::cdsl::operands::Operand; use crate::cdsl::type_inference::Constraint::WiderOrEq; @@ -767,7 +767,7 @@ pub(crate) fn define( formats: &Formats, imm: &Immediates, entities: &EntityRefs, -) -> InstructionGroup { +) { let mut ig = InstructionGroupBuilder::new(all_instructions); define_control_flow(&mut ig, formats, imm, entities); @@ -4647,6 +4647,4 @@ pub(crate) fn define( ) .other_side_effects(true), ); - - ig.build() } diff --git a/cranelift/codegen/meta/src/shared/legalize.rs b/cranelift/codegen/meta/src/shared/legalize.rs deleted file mode 100644 index 9a0d6cffde..0000000000 --- a/cranelift/codegen/meta/src/shared/legalize.rs +++ /dev/null @@ -1,1087 +0,0 @@ -use crate::cdsl::ast::{var, ExprBuilder, Literal}; -use crate::cdsl::instructions::{Bindable, Instruction, InstructionGroup}; -use crate::cdsl::xform::{TransformGroupBuilder, TransformGroups}; - -use crate::shared::immediates::Immediates; -use crate::shared::types::Float::{F32, F64}; -use crate::shared::types::Int::{I128, I16, I32, I64, I8}; -use cranelift_codegen_shared::condcodes::{CondCode, IntCC}; - -#[allow(clippy::many_single_char_names, clippy::cognitive_complexity)] -pub(crate) fn define(insts: &InstructionGroup, imm: &Immediates) -> TransformGroups { - let mut narrow = TransformGroupBuilder::new( - "narrow", - r#" - Legalize instructions by narrowing. - - The transformations in the 'narrow' group work by expressing - instructions in terms of smaller types. Operations on vector types are - expressed in terms of vector types with fewer lanes, and integer - operations are expressed in terms of smaller integer types. - "#, - ); - - let mut widen = TransformGroupBuilder::new( - "widen", - r#" - Legalize instructions by widening. - - The transformations in the 'widen' group work by expressing - instructions in terms of larger types. - "#, - ); - - let mut expand = TransformGroupBuilder::new( - "expand", - r#" - Legalize instructions by expansion. - - Rewrite instructions in terms of other instructions, generally - operating on the same types as the original instructions. - "#, - ); - - // List of instructions. 
- let band = insts.by_name("band"); - let band_imm = insts.by_name("band_imm"); - let band_not = insts.by_name("band_not"); - let bint = insts.by_name("bint"); - let bitrev = insts.by_name("bitrev"); - let bnot = insts.by_name("bnot"); - let bor = insts.by_name("bor"); - let bor_imm = insts.by_name("bor_imm"); - let bor_not = insts.by_name("bor_not"); - let brnz = insts.by_name("brnz"); - let brz = insts.by_name("brz"); - let br_icmp = insts.by_name("br_icmp"); - let br_table = insts.by_name("br_table"); - let bxor = insts.by_name("bxor"); - let bxor_imm = insts.by_name("bxor_imm"); - let bxor_not = insts.by_name("bxor_not"); - let cls = insts.by_name("cls"); - let clz = insts.by_name("clz"); - let ctz = insts.by_name("ctz"); - let copy = insts.by_name("copy"); - let fabs = insts.by_name("fabs"); - let f32const = insts.by_name("f32const"); - let f64const = insts.by_name("f64const"); - let fcopysign = insts.by_name("fcopysign"); - let fcvt_from_sint = insts.by_name("fcvt_from_sint"); - let fneg = insts.by_name("fneg"); - let iadd = insts.by_name("iadd"); - let iadd_cin = insts.by_name("iadd_cin"); - let iadd_cout = insts.by_name("iadd_cout"); - let iadd_carry = insts.by_name("iadd_carry"); - let iadd_ifcin = insts.by_name("iadd_ifcin"); - let iadd_ifcout = insts.by_name("iadd_ifcout"); - let iadd_imm = insts.by_name("iadd_imm"); - let icmp = insts.by_name("icmp"); - let icmp_imm = insts.by_name("icmp_imm"); - let iconcat = insts.by_name("iconcat"); - let iconst = insts.by_name("iconst"); - let ifcmp = insts.by_name("ifcmp"); - let ifcmp_imm = insts.by_name("ifcmp_imm"); - let imul = insts.by_name("imul"); - let imul_imm = insts.by_name("imul_imm"); - let ireduce = insts.by_name("ireduce"); - let irsub_imm = insts.by_name("irsub_imm"); - let ishl = insts.by_name("ishl"); - let ishl_imm = insts.by_name("ishl_imm"); - let isplit = insts.by_name("isplit"); - let istore8 = insts.by_name("istore8"); - let istore16 = insts.by_name("istore16"); - let isub = insts.by_name("isub"); - let isub_bin = insts.by_name("isub_bin"); - let isub_bout = insts.by_name("isub_bout"); - let isub_borrow = insts.by_name("isub_borrow"); - let isub_ifbin = insts.by_name("isub_ifbin"); - let isub_ifbout = insts.by_name("isub_ifbout"); - let jump = insts.by_name("jump"); - let load = insts.by_name("load"); - let popcnt = insts.by_name("popcnt"); - let resumable_trapnz = insts.by_name("resumable_trapnz"); - let rotl = insts.by_name("rotl"); - let rotl_imm = insts.by_name("rotl_imm"); - let rotr = insts.by_name("rotr"); - let rotr_imm = insts.by_name("rotr_imm"); - let sdiv = insts.by_name("sdiv"); - let sdiv_imm = insts.by_name("sdiv_imm"); - let select = insts.by_name("select"); - let sextend = insts.by_name("sextend"); - let sshr = insts.by_name("sshr"); - let sshr_imm = insts.by_name("sshr_imm"); - let srem = insts.by_name("srem"); - let srem_imm = insts.by_name("srem_imm"); - let store = insts.by_name("store"); - let udiv = insts.by_name("udiv"); - let udiv_imm = insts.by_name("udiv_imm"); - let uextend = insts.by_name("uextend"); - let uload8 = insts.by_name("uload8"); - let uload16 = insts.by_name("uload16"); - let umulhi = insts.by_name("umulhi"); - let ushr = insts.by_name("ushr"); - let ushr_imm = insts.by_name("ushr_imm"); - let urem = insts.by_name("urem"); - let urem_imm = insts.by_name("urem_imm"); - let trapif = insts.by_name("trapif"); - let trapnz = insts.by_name("trapnz"); - let trapz = insts.by_name("trapz"); - - // Custom expansions for memory objects. 
- expand.custom_legalize(insts.by_name("global_value"), "expand_global_value"); - expand.custom_legalize(insts.by_name("heap_addr"), "expand_heap_addr"); - expand.custom_legalize(insts.by_name("table_addr"), "expand_table_addr"); - - // Custom expansions for calls. - expand.custom_legalize(insts.by_name("call"), "expand_call"); - - // Custom expansions that need to change the CFG. - // TODO: Add sufficient XForm syntax that we don't need to hand-code these. - expand.custom_legalize(trapz, "expand_cond_trap"); - expand.custom_legalize(trapnz, "expand_cond_trap"); - expand.custom_legalize(resumable_trapnz, "expand_cond_trap"); - expand.custom_legalize(br_table, "expand_br_table"); - expand.custom_legalize(select, "expand_select"); - widen.custom_legalize(select, "expand_select"); // small ints - - // Custom expansions for floating point constants. - // These expansions require bit-casting or creating constant pool entries. - expand.custom_legalize(f32const, "expand_fconst"); - expand.custom_legalize(f64const, "expand_fconst"); - - // Custom expansions for stack memory accesses. - expand.custom_legalize(insts.by_name("stack_load"), "expand_stack_load"); - expand.custom_legalize(insts.by_name("stack_store"), "expand_stack_store"); - - // Custom expansions for small stack memory acccess. - widen.custom_legalize(insts.by_name("stack_load"), "expand_stack_load"); - widen.custom_legalize(insts.by_name("stack_store"), "expand_stack_store"); - - // List of variables to reuse in patterns. - let x = var("x"); - let y = var("y"); - let z = var("z"); - let a = var("a"); - let a1 = var("a1"); - let a2 = var("a2"); - let a3 = var("a3"); - let a4 = var("a4"); - let b = var("b"); - let b1 = var("b1"); - let b2 = var("b2"); - let b3 = var("b3"); - let b4 = var("b4"); - let b_in = var("b_in"); - let b_int = var("b_int"); - let c = var("c"); - let c1 = var("c1"); - let c2 = var("c2"); - let c3 = var("c3"); - let c4 = var("c4"); - let c_in = var("c_in"); - let c_int = var("c_int"); - let d = var("d"); - let d1 = var("d1"); - let d2 = var("d2"); - let d3 = var("d3"); - let d4 = var("d4"); - let e = var("e"); - let e1 = var("e1"); - let e2 = var("e2"); - let e3 = var("e3"); - let e4 = var("e4"); - let f = var("f"); - let f1 = var("f1"); - let f2 = var("f2"); - let xl = var("xl"); - let xh = var("xh"); - let yl = var("yl"); - let yh = var("yh"); - let al = var("al"); - let ah = var("ah"); - let cc = var("cc"); - let block = var("block"); - let ptr = var("ptr"); - let flags = var("flags"); - let offset = var("off"); - let vararg = var("vararg"); - - narrow.custom_legalize(load, "narrow_load"); - narrow.custom_legalize(store, "narrow_store"); - - // iconst.i64 can't be legalized in the meta langage (because integer literals can't be - // embedded as part of arguments), so use a custom legalization for now. 
- narrow.custom_legalize(iconst, "narrow_iconst"); - - for &(ty, ty_half) in &[(I128, I64), (I64, I32)] { - let inst = uextend.bind(ty).bind(ty_half); - narrow.legalize( - def!(a = inst(x)), - vec![ - def!(ah = iconst(Literal::constant(&imm.imm64, 0))), - def!(a = iconcat(x, ah)), - ], - ); - } - - for &(ty, ty_half, shift) in &[(I128, I64, 63), (I64, I32, 31)] { - let inst = sextend.bind(ty).bind(ty_half); - narrow.legalize( - def!(a = inst(x)), - vec![ - def!(ah = sshr_imm(x, Literal::constant(&imm.imm64, shift))), // splat sign bit to whole number - def!(a = iconcat(x, ah)), - ], - ); - } - - for &bin_op in &[band, bor, bxor, band_not, bor_not, bxor_not] { - narrow.legalize( - def!(a = bin_op(x, y)), - vec![ - def!((xl, xh) = isplit(x)), - def!((yl, yh) = isplit(y)), - def!(al = bin_op(xl, yl)), - def!(ah = bin_op(xh, yh)), - def!(a = iconcat(al, ah)), - ], - ); - } - - narrow.legalize( - def!(a = bnot(x)), - vec![ - def!((xl, xh) = isplit(x)), - def!(al = bnot(xl)), - def!(ah = bnot(xh)), - def!(a = iconcat(al, ah)), - ], - ); - - narrow.legalize( - def!(a = select(c, x, y)), - vec![ - def!((xl, xh) = isplit(x)), - def!((yl, yh) = isplit(y)), - def!(al = select(c, xl, yl)), - def!(ah = select(c, xh, yh)), - def!(a = iconcat(al, ah)), - ], - ); - - for &ty in &[I128, I64] { - let block = var("block"); - let block1 = var("block1"); - let block2 = var("block2"); - - narrow.legalize( - def!(brz.ty(x, block, vararg)), - vec![ - def!((xl, xh) = isplit(x)), - def!( - a = icmp_imm( - Literal::enumerator_for(&imm.intcc, "eq"), - xl, - Literal::constant(&imm.imm64, 0) - ) - ), - def!( - b = icmp_imm( - Literal::enumerator_for(&imm.intcc, "eq"), - xh, - Literal::constant(&imm.imm64, 0) - ) - ), - def!(c = band(a, b)), - def!(brnz(c, block, vararg)), - ], - ); - - narrow.legalize( - def!(brnz.ty(x, block1, vararg)), - vec![ - def!((xl, xh) = isplit(x)), - def!(brnz(xl, block1, vararg)), - def!(jump(block2, Literal::empty_vararg())), - block!(block2), - def!(brnz(xh, block1, vararg)), - ], - ); - } - - narrow.legalize( - def!(a = popcnt.I128(x)), - vec![ - def!((xl, xh) = isplit(x)), - def!(e1 = popcnt(xl)), - def!(e2 = popcnt(xh)), - def!(e3 = iadd(e1, e2)), - def!(a = uextend(e3)), - ], - ); - - // TODO(ryzokuken): benchmark this and decide if branching is a faster - // approach than evaluating boolean expressions. 
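The `sextend` narrowing above can be checked directly in plain Rust; this sketch mirrors the (I128, I64, 63) case, with ordinary integer operations standing in for `sshr_imm` and `iconcat`.

fn sextend_i64_to_i128(x: i64) -> i128 {
    let high = x >> 63;                         // sshr_imm x, 63: all ones if negative, else zero
    ((high as i128) << 64) | (x as u64 as i128) // iconcat(low, high)
}

fn main() {
    assert_eq!(sextend_i64_to_i128(-5), -5);
    assert_eq!(sextend_i64_to_i128(7), 7);
}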
- - narrow.custom_legalize(icmp_imm, "narrow_icmp_imm"); - - let intcc_eq = Literal::enumerator_for(&imm.intcc, "eq"); - let intcc_ne = Literal::enumerator_for(&imm.intcc, "ne"); - for &(int_ty, int_ty_half) in &[(I64, I32), (I128, I64)] { - narrow.legalize( - def!(b = icmp.int_ty(intcc_eq, x, y)), - vec![ - def!((xl, xh) = isplit(x)), - def!((yl, yh) = isplit(y)), - def!(b1 = icmp.int_ty_half(intcc_eq, xl, yl)), - def!(b2 = icmp.int_ty_half(intcc_eq, xh, yh)), - def!(b = band(b1, b2)), - ], - ); - - narrow.legalize( - def!(b = icmp.int_ty(intcc_ne, x, y)), - vec![ - def!((xl, xh) = isplit(x)), - def!((yl, yh) = isplit(y)), - def!(b1 = icmp.int_ty_half(intcc_ne, xl, yl)), - def!(b2 = icmp.int_ty_half(intcc_ne, xh, yh)), - def!(b = bor(b1, b2)), - ], - ); - - use IntCC::*; - for cc in &[ - SignedGreaterThan, - SignedGreaterThanOrEqual, - SignedLessThan, - SignedLessThanOrEqual, - UnsignedGreaterThan, - UnsignedGreaterThanOrEqual, - UnsignedLessThan, - UnsignedLessThanOrEqual, - ] { - let intcc_cc = Literal::enumerator_for(&imm.intcc, cc.to_static_str()); - let cc1 = Literal::enumerator_for(&imm.intcc, cc.without_equal().to_static_str()); - let cc2 = - Literal::enumerator_for(&imm.intcc, cc.inverse().without_equal().to_static_str()); - let cc3 = Literal::enumerator_for(&imm.intcc, cc.unsigned().to_static_str()); - narrow.legalize( - def!(b = icmp.int_ty(intcc_cc, x, y)), - vec![ - def!((xl, xh) = isplit(x)), - def!((yl, yh) = isplit(y)), - // X = cc1 || (!cc2 && cc3) - def!(b1 = icmp.int_ty_half(cc1, xh, yh)), - def!(b2 = icmp.int_ty_half(cc2, xh, yh)), - def!(b3 = icmp.int_ty_half(cc3, xl, yl)), - def!(c1 = bnot(b2)), - def!(c2 = band(c1, b3)), - def!(b = bor(b1, c2)), - ], - ); - } - } - - // TODO(ryzokuken): explore the perf diff w/ x86_umulx and consider having a - // separate legalization for x86. - for &ty in &[I64, I128] { - narrow.legalize( - def!(a = imul.ty(x, y)), - vec![ - def!((xl, xh) = isplit(x)), - def!((yl, yh) = isplit(y)), - def!(a1 = imul(xh, yl)), - def!(a2 = imul(xl, yh)), - def!(a3 = iadd(a1, a2)), - def!(a4 = umulhi(xl, yl)), - def!(ah = iadd(a3, a4)), - def!(al = imul(xl, yl)), - def!(a = iconcat(al, ah)), - ], - ); - } - - let zero = Literal::constant(&imm.imm64, 0); - narrow.legalize( - def!(a = iadd_imm.I128(x, c)), - vec![ - def!(yh = iconst.I64(zero)), - def!(yl = iconst.I64(c)), - def!(y = iconcat.I64(yh, yl)), - def!(a = iadd(x, y)), - ], - ); - - // Widen instructions with one input operand. - for &op in &[bnot, popcnt] { - for &int_ty in &[I8, I16] { - widen.legalize( - def!(a = op.int_ty(b)), - vec![ - def!(x = uextend.I32(b)), - def!(z = op.I32(x)), - def!(a = ireduce.int_ty(z)), - ], - ); - } - } - - // Widen instructions with two input operands. - let mut widen_two_arg = |signed: bool, op: &Instruction| { - for &int_ty in &[I8, I16] { - let sign_ext_op = if signed { sextend } else { uextend }; - widen.legalize( - def!(a = op.int_ty(b, c)), - vec![ - def!(x = sign_ext_op.I32(b)), - def!(y = sign_ext_op.I32(c)), - def!(z = op.I32(x, y)), - def!(a = ireduce.int_ty(z)), - ], - ); - } - }; - - for bin_op in &[ - iadd, isub, imul, udiv, urem, band, bor, bxor, band_not, bor_not, bxor_not, - ] { - widen_two_arg(false, bin_op); - } - for bin_op in &[sdiv, srem] { - widen_two_arg(true, bin_op); - } - - // Widen instructions using immediate operands.
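The imul narrowing above is the usual schoolbook decomposition of a double-width product into partial products; a standalone sketch of the same arithmetic on plain u64/u128 values (names are illustrative, not taken from the codebase):

fn narrow_imul_i128(x: u128, y: u128) -> u128 {
    let (xl, xh) = (x as u64, (x >> 64) as u64);            // isplit x
    let (yl, yh) = (y as u64, (y >> 64) as u64);            // isplit y
    let a1 = xh.wrapping_mul(yl);                           // imul xh, yl
    let a2 = xl.wrapping_mul(yh);                           // imul xl, yh
    let a3 = a1.wrapping_add(a2);                           // iadd a1, a2
    let a4 = ((xl as u128 * yl as u128) >> 64) as u64;      // umulhi xl, yl
    let ah = a3.wrapping_add(a4);                           // iadd a3, a4: high half
    let al = xl.wrapping_mul(yl);                           // imul xl, yl: low half
    ((ah as u128) << 64) | al as u128                       // iconcat al, ah
}

For any x and y this agrees with x.wrapping_mul(y); the xh * yh term is dropped because it only contributes above bit 127.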
- let mut widen_imm = |signed: bool, op: &Instruction| { - for &int_ty in &[I8, I16] { - let sign_ext_op = if signed { sextend } else { uextend }; - widen.legalize( - def!(a = op.int_ty(b, c)), - vec![ - def!(x = sign_ext_op.I32(b)), - def!(z = op.I32(x, c)), - def!(a = ireduce.int_ty(z)), - ], - ); - } - }; - - for bin_op in &[ - iadd_imm, imul_imm, udiv_imm, urem_imm, band_imm, bor_imm, bxor_imm, irsub_imm, - ] { - widen_imm(false, bin_op); - } - for bin_op in &[sdiv_imm, srem_imm] { - widen_imm(true, bin_op); - } - - for &(int_ty, num) in &[(I8, 24), (I16, 16)] { - let imm = Literal::constant(&imm.imm64, -num); - - widen.legalize( - def!(a = clz.int_ty(b)), - vec![ - def!(c = uextend.I32(b)), - def!(d = clz.I32(c)), - def!(e = iadd_imm(d, imm)), - def!(a = ireduce.int_ty(e)), - ], - ); - - widen.legalize( - def!(a = cls.int_ty(b)), - vec![ - def!(c = sextend.I32(b)), - def!(d = cls.I32(c)), - def!(e = iadd_imm(d, imm)), - def!(a = ireduce.int_ty(e)), - ], - ); - } - - for &(int_ty, num) in &[(I8, 1 << 8), (I16, 1 << 16)] { - let num = Literal::constant(&imm.imm64, num); - widen.legalize( - def!(a = ctz.int_ty(b)), - vec![ - def!(c = uextend.I32(b)), - // When `b` is zero, returns the size of x in bits. - def!(d = bor_imm(c, num)), - def!(e = ctz.I32(d)), - def!(a = ireduce.int_ty(e)), - ], - ); - } - - // iconst - for &int_ty in &[I8, I16] { - widen.legalize( - def!(a = iconst.int_ty(b)), - vec![def!(c = iconst.I32(b)), def!(a = ireduce.int_ty(c))], - ); - } - - for &extend_op in &[uextend, sextend] { - // The sign extension operators have two typevars: the result has one and controls the - // instruction, then the input has one. - let bound = extend_op.bind(I16).bind(I8); - widen.legalize( - def!(a = bound(b)), - vec![def!(c = extend_op.I32(b)), def!(a = ireduce(c))], - ); - } - - widen.legalize( - def!(store.I8(flags, a, ptr, offset)), - vec![ - def!(b = uextend.I32(a)), - def!(istore8(flags, b, ptr, offset)), - ], - ); - - widen.legalize( - def!(store.I16(flags, a, ptr, offset)), - vec![ - def!(b = uextend.I32(a)), - def!(istore16(flags, b, ptr, offset)), - ], - ); - - widen.legalize( - def!(a = load.I8(flags, ptr, offset)), - vec![ - def!(b = uload8.I32(flags, ptr, offset)), - def!(a = ireduce(b)), - ], - ); - - widen.legalize( - def!(a = load.I16(flags, ptr, offset)), - vec![ - def!(b = uload16.I32(flags, ptr, offset)), - def!(a = ireduce(b)), - ], - ); - - for &int_ty in &[I8, I16] { - widen.legalize( - def!(br_table.int_ty(x, y, z)), - vec![def!(b = uextend.I32(x)), def!(br_table(b, y, z))], - ); - } - - for &int_ty in &[I8, I16] { - widen.legalize( - def!(a = bint.int_ty(b)), - vec![def!(x = bint.I32(b)), def!(a = ireduce.int_ty(x))], - ); - } - - for &int_ty in &[I8, I16] { - for &op in &[ishl, ishl_imm, ushr, ushr_imm] { - widen.legalize( - def!(a = op.int_ty(b, c)), - vec![ - def!(x = uextend.I32(b)), - def!(z = op.I32(x, c)), - def!(a = ireduce.int_ty(z)), - ], - ); - } - - for &op in &[sshr, sshr_imm] { - widen.legalize( - def!(a = op.int_ty(b, c)), - vec![ - def!(x = sextend.I32(b)), - def!(z = op.I32(x, c)), - def!(a = ireduce.int_ty(z)), - ], - ); - } - - for cc in &["eq", "ne", "ugt", "ult", "uge", "ule"] { - let w_cc = Literal::enumerator_for(&imm.intcc, cc); - widen.legalize( - def!(a = icmp_imm.int_ty(w_cc, b, c)), - vec![def!(x = uextend.I32(b)), def!(a = icmp_imm(w_cc, x, c))], - ); - widen.legalize( - def!(a = icmp.int_ty(w_cc, b, c)), - vec![ - def!(x = uextend.I32(b)), - def!(y = uextend.I32(c)), - def!(a = icmp.I32(w_cc, x, y)), - ], - ); - } - - for cc in 
&["sgt", "slt", "sge", "sle"] { - let w_cc = Literal::enumerator_for(&imm.intcc, cc); - widen.legalize( - def!(a = icmp_imm.int_ty(w_cc, b, c)), - vec![def!(x = sextend.I32(b)), def!(a = icmp_imm(w_cc, x, c))], - ); - - widen.legalize( - def!(a = icmp.int_ty(w_cc, b, c)), - vec![ - def!(x = sextend.I32(b)), - def!(y = sextend.I32(c)), - def!(a = icmp(w_cc, x, y)), - ], - ); - } - } - - for &ty in &[I8, I16] { - widen.legalize( - def!(brz.ty(x, block, vararg)), - vec![def!(a = uextend.I32(x)), def!(brz(a, block, vararg))], - ); - - widen.legalize( - def!(brnz.ty(x, block, vararg)), - vec![def!(a = uextend.I32(x)), def!(brnz(a, block, vararg))], - ); - } - - for &(ty_half, ty) in &[(I64, I128), (I32, I64)] { - let inst = ireduce.bind(ty_half).bind(ty); - expand.legalize( - def!(a = inst(x)), - vec![def!((b, c) = isplit(x)), def!(a = copy(b))], - ); - } - - // Expand integer operations with carry for RISC architectures that don't have - // the flags. - let intcc_ult = Literal::enumerator_for(&imm.intcc, "ult"); - expand.legalize( - def!((a, c) = iadd_cout(x, y)), - vec![def!(a = iadd(x, y)), def!(c = icmp(intcc_ult, a, x))], - ); - - let intcc_ugt = Literal::enumerator_for(&imm.intcc, "ugt"); - expand.legalize( - def!((a, b) = isub_bout(x, y)), - vec![def!(a = isub(x, y)), def!(b = icmp(intcc_ugt, a, x))], - ); - - expand.legalize( - def!(a = iadd_cin(x, y, c)), - vec![ - def!(a1 = iadd(x, y)), - def!(c_int = bint(c)), - def!(a = iadd(a1, c_int)), - ], - ); - - expand.legalize( - def!(a = isub_bin(x, y, b)), - vec![ - def!(a1 = isub(x, y)), - def!(b_int = bint(b)), - def!(a = isub(a1, b_int)), - ], - ); - - expand.legalize( - def!((a, c) = iadd_carry(x, y, c_in)), - vec![ - def!((a1, c1) = iadd_cout(x, y)), - def!(c_int = bint(c_in)), - def!((a, c2) = iadd_cout(a1, c_int)), - def!(c = bor(c1, c2)), - ], - ); - - expand.legalize( - def!((a, b) = isub_borrow(x, y, b_in)), - vec![ - def!((a1, b1) = isub_bout(x, y)), - def!(b_int = bint(b_in)), - def!((a, b2) = isub_bout(a1, b_int)), - def!(b = bor(b1, b2)), - ], - ); - - // Expansion for fcvt_from_sint for smaller integer types. - // This uses expand and not widen because the controlling type variable for - // this instruction is f32/f64, which is legalized as part of the expand - // group. - for &dest_ty in &[F32, F64] { - for &src_ty in &[I8, I16] { - let bound_inst = fcvt_from_sint.bind(dest_ty).bind(src_ty); - expand.legalize( - def!(a = bound_inst(b)), - vec![ - def!(x = sextend.I32(b)), - def!(a = fcvt_from_sint.dest_ty(x)), - ], - ); - } - } - - // Expansions for immediate operands that are out of range. - for &(inst_imm, inst) in &[ - (iadd_imm, iadd), - (imul_imm, imul), - (sdiv_imm, sdiv), - (udiv_imm, udiv), - (srem_imm, srem), - (urem_imm, urem), - (band_imm, band), - (bor_imm, bor), - (bxor_imm, bxor), - (ifcmp_imm, ifcmp), - ] { - expand.legalize( - def!(a = inst_imm(x, y)), - vec![def!(a1 = iconst(y)), def!(a = inst(x, a1))], - ); - } - - expand.legalize( - def!(a = irsub_imm(y, x)), - vec![def!(a1 = iconst(x)), def!(a = isub(a1, y))], - ); - - // Rotates and shifts. - for &(inst_imm, inst) in &[ - (rotl_imm, rotl), - (rotr_imm, rotr), - (ishl_imm, ishl), - (sshr_imm, sshr), - (ushr_imm, ushr), - ] { - expand.legalize( - def!(a = inst_imm(x, y)), - vec![def!(a1 = iconst.I32(y)), def!(a = inst(x, a1))], - ); - } - - expand.legalize( - def!(a = icmp_imm(cc, x, y)), - vec![def!(a1 = iconst(y)), def!(a = icmp(cc, x, a1))], - ); - - //# Expansions for *_not variants of bitwise ops. 
- for &(inst_not, inst) in &[(band_not, band), (bor_not, bor), (bxor_not, bxor)] { - expand.legalize( - def!(a = inst_not(x, y)), - vec![def!(a1 = bnot(y)), def!(a = inst(x, a1))], - ); - } - - //# Expand bnot using xor. - let minus_one = Literal::constant(&imm.imm64, -1); - expand.legalize( - def!(a = bnot(x)), - vec![def!(y = iconst(minus_one)), def!(a = bxor(x, y))], - ); - - //# Expand bitrev - //# Adapted from Stack Overflow. - //# https://stackoverflow.com/questions/746171/most-efficient-algorithm-for-bit-reversal-from-msb-lsb-to-lsb-msb-in-c - let imm64_1 = Literal::constant(&imm.imm64, 1); - let imm64_2 = Literal::constant(&imm.imm64, 2); - let imm64_4 = Literal::constant(&imm.imm64, 4); - - widen.legalize( - def!(a = bitrev.I8(x)), - vec![ - def!(a1 = band_imm(x, Literal::constant(&imm.imm64, 0xaa))), - def!(a2 = ushr_imm(a1, imm64_1)), - def!(a3 = band_imm(x, Literal::constant(&imm.imm64, 0x55))), - def!(a4 = ishl_imm(a3, imm64_1)), - def!(b = bor(a2, a4)), - def!(b1 = band_imm(b, Literal::constant(&imm.imm64, 0xcc))), - def!(b2 = ushr_imm(b1, imm64_2)), - def!(b3 = band_imm(b, Literal::constant(&imm.imm64, 0x33))), - def!(b4 = ishl_imm(b3, imm64_2)), - def!(c = bor(b2, b4)), - def!(c1 = band_imm(c, Literal::constant(&imm.imm64, 0xf0))), - def!(c2 = ushr_imm(c1, imm64_4)), - def!(c3 = band_imm(c, Literal::constant(&imm.imm64, 0x0f))), - def!(c4 = ishl_imm(c3, imm64_4)), - def!(a = bor(c2, c4)), - ], - ); - - let imm64_8 = Literal::constant(&imm.imm64, 8); - - widen.legalize( - def!(a = bitrev.I16(x)), - vec![ - def!(a1 = band_imm(x, Literal::constant(&imm.imm64, 0xaaaa))), - def!(a2 = ushr_imm(a1, imm64_1)), - def!(a3 = band_imm(x, Literal::constant(&imm.imm64, 0x5555))), - def!(a4 = ishl_imm(a3, imm64_1)), - def!(b = bor(a2, a4)), - def!(b1 = band_imm(b, Literal::constant(&imm.imm64, 0xcccc))), - def!(b2 = ushr_imm(b1, imm64_2)), - def!(b3 = band_imm(b, Literal::constant(&imm.imm64, 0x3333))), - def!(b4 = ishl_imm(b3, imm64_2)), - def!(c = bor(b2, b4)), - def!(c1 = band_imm(c, Literal::constant(&imm.imm64, 0xf0f0))), - def!(c2 = ushr_imm(c1, imm64_4)), - def!(c3 = band_imm(c, Literal::constant(&imm.imm64, 0x0f0f))), - def!(c4 = ishl_imm(c3, imm64_4)), - def!(d = bor(c2, c4)), - def!(d1 = band_imm(d, Literal::constant(&imm.imm64, 0xff00))), - def!(d2 = ushr_imm(d1, imm64_8)), - def!(d3 = band_imm(d, Literal::constant(&imm.imm64, 0x00ff))), - def!(d4 = ishl_imm(d3, imm64_8)), - def!(a = bor(d2, d4)), - ], - ); - - let imm64_16 = Literal::constant(&imm.imm64, 16); - - expand.legalize( - def!(a = bitrev.I32(x)), - vec![ - def!(a1 = band_imm(x, Literal::constant(&imm.imm64, 0xaaaa_aaaa))), - def!(a2 = ushr_imm(a1, imm64_1)), - def!(a3 = band_imm(x, Literal::constant(&imm.imm64, 0x5555_5555))), - def!(a4 = ishl_imm(a3, imm64_1)), - def!(b = bor(a2, a4)), - def!(b1 = band_imm(b, Literal::constant(&imm.imm64, 0xcccc_cccc))), - def!(b2 = ushr_imm(b1, imm64_2)), - def!(b3 = band_imm(b, Literal::constant(&imm.imm64, 0x3333_3333))), - def!(b4 = ishl_imm(b3, imm64_2)), - def!(c = bor(b2, b4)), - def!(c1 = band_imm(c, Literal::constant(&imm.imm64, 0xf0f0_f0f0))), - def!(c2 = ushr_imm(c1, imm64_4)), - def!(c3 = band_imm(c, Literal::constant(&imm.imm64, 0x0f0f_0f0f))), - def!(c4 = ishl_imm(c3, imm64_4)), - def!(d = bor(c2, c4)), - def!(d1 = band_imm(d, Literal::constant(&imm.imm64, 0xff00_ff00))), - def!(d2 = ushr_imm(d1, imm64_8)), - def!(d3 = band_imm(d, Literal::constant(&imm.imm64, 0x00ff_00ff))), - def!(d4 = ishl_imm(d3, imm64_8)), - def!(e = bor(d2, d4)), - def!(e1 = ushr_imm(e, 
imm64_16)), - def!(e2 = ishl_imm(e, imm64_16)), - def!(a = bor(e1, e2)), - ], - ); - - #[allow(overflowing_literals)] - let imm64_0xaaaaaaaaaaaaaaaa = Literal::constant(&imm.imm64, 0xaaaa_aaaa_aaaa_aaaa); - let imm64_0x5555555555555555 = Literal::constant(&imm.imm64, 0x5555_5555_5555_5555); - #[allow(overflowing_literals)] - let imm64_0xcccccccccccccccc = Literal::constant(&imm.imm64, 0xcccc_cccc_cccc_cccc); - let imm64_0x3333333333333333 = Literal::constant(&imm.imm64, 0x3333_3333_3333_3333); - #[allow(overflowing_literals)] - let imm64_0xf0f0f0f0f0f0f0f0 = Literal::constant(&imm.imm64, 0xf0f0_f0f0_f0f0_f0f0); - let imm64_0x0f0f0f0f0f0f0f0f = Literal::constant(&imm.imm64, 0x0f0f_0f0f_0f0f_0f0f); - #[allow(overflowing_literals)] - let imm64_0xff00ff00ff00ff00 = Literal::constant(&imm.imm64, 0xff00_ff00_ff00_ff00); - let imm64_0x00ff00ff00ff00ff = Literal::constant(&imm.imm64, 0x00ff_00ff_00ff_00ff); - #[allow(overflowing_literals)] - let imm64_0xffff0000ffff0000 = Literal::constant(&imm.imm64, 0xffff_0000_ffff_0000); - let imm64_0x0000ffff0000ffff = Literal::constant(&imm.imm64, 0x0000_ffff_0000_ffff); - let imm64_32 = Literal::constant(&imm.imm64, 32); - - expand.legalize( - def!(a = bitrev.I64(x)), - vec![ - def!(a1 = band_imm(x, imm64_0xaaaaaaaaaaaaaaaa)), - def!(a2 = ushr_imm(a1, imm64_1)), - def!(a3 = band_imm(x, imm64_0x5555555555555555)), - def!(a4 = ishl_imm(a3, imm64_1)), - def!(b = bor(a2, a4)), - def!(b1 = band_imm(b, imm64_0xcccccccccccccccc)), - def!(b2 = ushr_imm(b1, imm64_2)), - def!(b3 = band_imm(b, imm64_0x3333333333333333)), - def!(b4 = ishl_imm(b3, imm64_2)), - def!(c = bor(b2, b4)), - def!(c1 = band_imm(c, imm64_0xf0f0f0f0f0f0f0f0)), - def!(c2 = ushr_imm(c1, imm64_4)), - def!(c3 = band_imm(c, imm64_0x0f0f0f0f0f0f0f0f)), - def!(c4 = ishl_imm(c3, imm64_4)), - def!(d = bor(c2, c4)), - def!(d1 = band_imm(d, imm64_0xff00ff00ff00ff00)), - def!(d2 = ushr_imm(d1, imm64_8)), - def!(d3 = band_imm(d, imm64_0x00ff00ff00ff00ff)), - def!(d4 = ishl_imm(d3, imm64_8)), - def!(e = bor(d2, d4)), - def!(e1 = band_imm(e, imm64_0xffff0000ffff0000)), - def!(e2 = ushr_imm(e1, imm64_16)), - def!(e3 = band_imm(e, imm64_0x0000ffff0000ffff)), - def!(e4 = ishl_imm(e3, imm64_16)), - def!(f = bor(e2, e4)), - def!(f1 = ushr_imm(f, imm64_32)), - def!(f2 = ishl_imm(f, imm64_32)), - def!(a = bor(f1, f2)), - ], - ); - - narrow.legalize( - def!(a = bitrev.I128(x)), - vec![ - def!((xl, xh) = isplit(x)), - def!(yh = bitrev(xl)), - def!(yl = bitrev(xh)), - def!(a = iconcat(yl, yh)), - ], - ); - - // Floating-point sign manipulations. - for &(ty, const_inst, minus_zero) in &[ - (F32, f32const, &Literal::bits(&imm.ieee32, 0x8000_0000)), - ( - F64, - f64const, - &Literal::bits(&imm.ieee64, 0x8000_0000_0000_0000), - ), - ] { - expand.legalize( - def!(a = fabs.ty(x)), - vec![def!(b = const_inst(minus_zero)), def!(a = band_not(x, b))], - ); - - expand.legalize( - def!(a = fneg.ty(x)), - vec![def!(b = const_inst(minus_zero)), def!(a = bxor(x, b))], - ); - - expand.legalize( - def!(a = fcopysign.ty(x, y)), - vec![ - def!(b = const_inst(minus_zero)), - def!(a1 = band_not(x, b)), - def!(a2 = band(y, b)), - def!(a = bor(a1, a2)), - ], - ); - } - - expand.custom_legalize(br_icmp, "expand_br_icmp"); - - let mut groups = TransformGroups::new(); - - let narrow_id = narrow.build_and_add_to(&mut groups); - let expand_id = expand.build_and_add_to(&mut groups); - - // Expansions using CPU flags. - let mut expand_flags = TransformGroupBuilder::new( - "expand_flags", - r#" - Instruction expansions for architectures with flags. 
- - Expand some instructions using CPU flags, then fall back to the normal - expansions. Not all architectures support CPU flags, so these patterns - are kept separate. - "#, - ) - .chain_with(expand_id); - - let imm64_0 = Literal::constant(&imm.imm64, 0); - let intcc_ne = Literal::enumerator_for(&imm.intcc, "ne"); - let intcc_eq = Literal::enumerator_for(&imm.intcc, "eq"); - - expand_flags.legalize( - def!(trapnz(x, c)), - vec![ - def!(a = ifcmp_imm(x, imm64_0)), - def!(trapif(intcc_ne, a, c)), - ], - ); - - expand_flags.legalize( - def!(trapz(x, c)), - vec![ - def!(a = ifcmp_imm(x, imm64_0)), - def!(trapif(intcc_eq, a, c)), - ], - ); - - expand_flags.build_and_add_to(&mut groups); - - // Narrow legalizations using CPU flags. - let mut narrow_flags = TransformGroupBuilder::new( - "narrow_flags", - r#" - Narrow instructions for architectures with flags. - - Narrow some instructions using CPU flags, then fall back to the normal - legalizations. Not all architectures support CPU flags, so these - patterns are kept separate. - "#, - ) - .chain_with(narrow_id); - - narrow_flags.legalize( - def!(a = iadd(x, y)), - vec![ - def!((xl, xh) = isplit(x)), - def!((yl, yh) = isplit(y)), - def!((al, c) = iadd_ifcout(xl, yl)), - def!(ah = iadd_ifcin(xh, yh, c)), - def!(a = iconcat(al, ah)), - ], - ); - - narrow_flags.legalize( - def!(a = isub(x, y)), - vec![ - def!((xl, xh) = isplit(x)), - def!((yl, yh) = isplit(y)), - def!((al, b) = isub_ifbout(xl, yl)), - def!(ah = isub_ifbin(xh, yh, b)), - def!(a = iconcat(al, ah)), - ], - ); - - narrow_flags.build_and_add_to(&mut groups); - - // TODO(ryzokuken): figure out a way to legalize iadd_c* to iadd_ifc* (and - // similarly isub_b* to isub_ifb*) on expand_flags so that this isn't required. - // Narrow legalizations for ISAs that don't have CPU flags. - let mut narrow_no_flags = TransformGroupBuilder::new( - "narrow_no_flags", - r#" - Narrow instructions for architectures without flags. - - Narrow some instructions avoiding the use of CPU flags, then fall back - to the normal legalizations. Not all architectures support CPU flags, - so these patterns are kept separate. - "#, - ) - .chain_with(narrow_id); - - narrow_no_flags.legalize( - def!(a = iadd(x, y)), - vec![ - def!((xl, xh) = isplit(x)), - def!((yl, yh) = isplit(y)), - def!((al, c) = iadd_cout(xl, yl)), - def!(ah = iadd_cin(xh, yh, c)), - def!(a = iconcat(al, ah)), - ], - ); - - narrow_no_flags.legalize( - def!(a = isub(x, y)), - vec![ - def!((xl, xh) = isplit(x)), - def!((yl, yh) = isplit(y)), - def!((al, b) = isub_bout(xl, yl)), - def!(ah = isub_bin(xh, yh, b)), - def!(a = iconcat(al, ah)), - ], - ); - - narrow_no_flags.build_and_add_to(&mut groups); - - // TODO The order of declarations unfortunately matters to be compatible with the Python code. - // When it's all migrated, we can put this next to the narrow/expand build_and_add_to calls - // above. 
- widen.build_and_add_to(&mut groups); - - groups -} diff --git a/cranelift/codegen/meta/src/shared/mod.rs b/cranelift/codegen/meta/src/shared/mod.rs index b185262ccd..53ad796c8c 100644 --- a/cranelift/codegen/meta/src/shared/mod.rs +++ b/cranelift/codegen/meta/src/shared/mod.rs @@ -4,14 +4,12 @@ pub mod entities; pub mod formats; pub mod immediates; pub mod instructions; -pub mod legalize; pub mod settings; pub mod types; use crate::cdsl::formats::{FormatStructure, InstructionFormat}; -use crate::cdsl::instructions::{AllInstructions, InstructionGroup}; +use crate::cdsl::instructions::{AllInstructions}; use crate::cdsl::settings::SettingGroup; -use crate::cdsl::xform::TransformGroups; use crate::shared::entities::EntityRefs; use crate::shared::formats::Formats; @@ -24,10 +22,8 @@ use std::rc::Rc; pub(crate) struct Definitions { pub settings: SettingGroup, pub all_instructions: AllInstructions, - pub instructions: InstructionGroup, pub imm: Immediates, pub formats: Formats, - pub transform_groups: TransformGroups, pub entities: EntityRefs, } @@ -37,17 +33,13 @@ pub(crate) fn define() -> Definitions { let immediates = Immediates::new(); let entities = EntityRefs::new(); let formats = Formats::new(&immediates, &entities); - let instructions = - instructions::define(&mut all_instructions, &formats, &immediates, &entities); - let transform_groups = legalize::define(&instructions, &immediates); + instructions::define(&mut all_instructions, &formats, &immediates, &entities); Definitions { settings: settings::define(), all_instructions, - instructions, imm: immediates, formats, - transform_groups, entities, } } diff --git a/cranelift/codegen/meta/src/srcgen.rs b/cranelift/codegen/meta/src/srcgen.rs index 0e8d4eccf0..21e3d5e904 100644 --- a/cranelift/codegen/meta/src/srcgen.rs +++ b/cranelift/codegen/meta/src/srcgen.rs @@ -77,15 +77,6 @@ impl Formatter { } } - /// Get a string containing whitespace outdented one level. Used for - /// lines of code that are inside a single indented block. - fn get_outdent(&mut self) -> String { - self.indent_pop(); - let s = self.get_indent(); - self.indent_push(); - s - } - /// Add an indented line. pub fn line(&mut self, contents: impl AsRef) { let indented_line = format!("{}{}\n", self.get_indent(), contents.as_ref()); @@ -97,12 +88,6 @@ impl Formatter { self.lines.push("\n".to_string()); } - /// Emit a line outdented one level. - pub fn outdented_line(&mut self, s: &str) { - let new_line = format!("{}{}\n", self.get_outdent(), s); - self.lines.push(new_line); - } - /// Write `self.lines` to a file. pub fn update_file( &self, diff --git a/cranelift/codegen/meta/src/unique_table.rs b/cranelift/codegen/meta/src/unique_table.rs index 65ef7e8b4a..50c664e4df 100644 --- a/cranelift/codegen/meta/src/unique_table.rs +++ b/cranelift/codegen/meta/src/unique_table.rs @@ -32,9 +32,6 @@ impl<'entries, T: Eq + Hash> UniqueTable<'entries, T> { pub fn len(&self) -> usize { self.table.len() } - pub fn get(&self, index: usize) -> &T { - self.table[index] - } pub fn iter(&self) -> slice::Iter<&'entries T> { self.table.iter() }
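For context on the last hunk: UniqueTable is a small interning table that hands out a stable index per distinct entry, and the removed get was its only index-to-entry accessor. A generic sketch of the interning pattern it implements (an owning variant, not the crate's actual borrowing code):

use std::collections::HashMap;
use std::hash::Hash;

/// Deduplicating table: `add` returns the index of an equal existing entry, or appends a new one.
struct Interner<T: Eq + Hash + Clone> {
    entries: Vec<T>,
    index: HashMap<T, usize>,
}

impl<T: Eq + Hash + Clone> Interner<T> {
    fn new() -> Self {
        Self { entries: Vec::new(), index: HashMap::new() }
    }

    fn add(&mut self, value: T) -> usize {
        if let Some(&i) = self.index.get(&value) {
            return i; // already interned: reuse its index
        }
        let i = self.entries.len();
        self.entries.push(value.clone());
        self.index.insert(value, i);
        i
    }

    fn len(&self) -> usize {
        self.entries.len()
    }
}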