Allow binding immediates to instructions (#1012)

This change should make the code clearer (and shorter) when adding encodings for instructions with specific immediates; e.g., a constant with a 0 immediate could be encoded as an XOR with something like `const.bind(...)`, without explicitly creating the necessary predicates (see the sketch below). It has several parts:
* Introduce Bindable trait to instructions
* Convert all instruction bindings to use Bindable::bind()
* Add ability to bind immediates to BoundInstruction
This is an attempt to reduce some of the issues in #955.
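
A rough sketch of the resulting call sites (illustrative only: `iadd`, `copy_to_ssa`, `x86_pshufb`, `load`, and `iconst` stand in for instruction definitions used in the diffs below, and the snippet will not compile outside cranelift-codegen-meta):

```rust
use crate::cdsl::instructions::{vector, Bindable, BindParameter, Immediate};

fn sketch() {
    // One `bind` method replaces the four old helpers:
    let _ = iadd.bind(I32);                          // was inst.bind(...)
    let _ = copy_to_ssa.bind(R32);                   // was inst.bind_ref(...)
    let _ = x86_pshufb.bind(vector(I8, 128));        // was inst.bind_vector_from_lane(...)
    let _ = load.bind(I32).bind(BindParameter::Any); // was inst.bind_any()

    // New in this change: binding an immediate value; EncodingBuilder turns it
    // into a field-equality instruction predicate automatically.
    let _ = iconst.bind(I32).bind(Immediate::UInt8(0));
}
```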
Authored by Andrew Brown on 2019-10-10 08:54:46 -07:00; committed by GitHub.
parent f1c25c2c5a · commit 6d690e5275
6 changed files with 477 additions and 341 deletions

cdsl/encodings.rs

@@ -1,5 +1,4 @@
-use std::rc::Rc;
-
+use crate::cdsl::formats::FormatRegistry;
 use crate::cdsl::instructions::{
     InstSpec, Instruction, InstructionPredicate, InstructionPredicateNode,
     InstructionPredicateNumber, InstructionPredicateRegistry, ValueTypeOrAny,
@@ -7,6 +6,8 @@ use crate::cdsl::instructions::{
 use crate::cdsl::recipes::{EncodingRecipeNumber, Recipes};
 use crate::cdsl::settings::SettingPredicateNumber;
 use crate::cdsl::types::ValueType;
+use std::rc::Rc;
+use std::string::ToString;
 
 /// Encoding for a concrete instruction.
 ///
@@ -61,19 +62,25 @@ pub(crate) struct EncodingBuilder {
 }
 
 impl EncodingBuilder {
-    pub fn new(inst: InstSpec, recipe: EncodingRecipeNumber, encbits: u16) -> Self {
+    pub fn new(
+        inst: InstSpec,
+        recipe: EncodingRecipeNumber,
+        encbits: u16,
+        formats: &FormatRegistry,
+    ) -> Self {
         let (inst_predicate, bound_type) = match &inst {
             InstSpec::Bound(inst) => {
                 let other_typevars = &inst.inst.polymorphic_info.as_ref().unwrap().other_typevars;
 
-                assert!(
-                    inst.value_types.len() == other_typevars.len() + 1,
+                assert_eq!(
+                    inst.value_types.len(),
+                    other_typevars.len() + 1,
                     "partially bound polymorphic instruction"
                 );
 
                 // Add secondary type variables to the instruction predicate.
                 let value_types = &inst.value_types;
-                let mut inst_predicate = None;
+                let mut inst_predicate: Option<InstructionPredicate> = None;
                 for (typevar, value_type) in other_typevars.iter().zip(value_types.iter().skip(1)) {
                     let value_type = match value_type {
                         ValueTypeOrAny::Any => continue,
@@ -84,6 +91,24 @@ impl EncodingBuilder {
                     inst_predicate = Some(type_predicate.into());
                 }
 
+                // Add immediate value predicates
+                for (immediate_value, immediate_operand) in inst
+                    .immediate_values
+                    .iter()
+                    .zip(inst.inst.operands_in.iter().filter(|o| o.is_immediate()))
+                {
+                    let immediate_predicate = InstructionPredicate::new_is_field_equal(
+                        formats.get(inst.inst.format),
+                        immediate_operand.name,
+                        immediate_value.to_string(),
+                    );
+                    inst_predicate = if let Some(type_predicate) = inst_predicate {
+                        Some(type_predicate.and(immediate_predicate))
+                    } else {
+                        Some(immediate_predicate.into())
+                    }
+                }
+
                 let ctrl_type = value_types[0]
                     .clone()
                     .expect("Controlling type shouldn't be Any");
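
The loop added above folds one field-equality predicate per bound immediate into an `Option`-wrapped predicate chain. A minimal, self-contained model of that fold (simplified stand-in types, not the actual cranelift-codegen-meta API):

```rust
// Stand-in predicate type; the real code uses InstructionPredicate with
// new_is_field_equal(format, field_name, field_value).
#[derive(Debug)]
enum Pred {
    FieldEq(&'static str, String),
    And(Box<Pred>, Box<Pred>),
}

impl Pred {
    fn and(self, other: Pred) -> Pred {
        Pred::And(Box::new(self), Box::new(other))
    }
}

// Mirrors the `inst_predicate = if let Some(...)` fold in the diff: the first
// immediate seeds the predicate, later ones are ANDed on.
fn combine(immediates: &[(&'static str, u8)]) -> Option<Pred> {
    let mut pred: Option<Pred> = None;
    for (field, value) in immediates {
        let eq = Pred::FieldEq(field, value.to_string());
        pred = Some(match pred {
            Some(p) => p.and(eq),
            None => eq,
        });
    }
    pred
}

fn main() {
    // Two bound immediates yield And(FieldEq("imm", "0"), FieldEq("shamt", "3")).
    println!("{:?}", combine(&[("imm", 0), ("shamt", 3)]));
}
```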

cdsl/instructions.rs

@@ -2,6 +2,7 @@ use cranelift_entity::{entity_impl, PrimaryMap};
 use std::collections::HashMap;
 use std::fmt;
+use std::fmt::{Display, Error, Formatter};
 use std::ops;
 use std::rc::Rc;
@@ -13,6 +14,7 @@ use crate::cdsl::operands::Operand;
 use crate::cdsl::type_inference::Constraint;
 use crate::cdsl::types::{LaneType, ReferenceType, ValueType, VectorType};
 use crate::cdsl::typevar::TypeVar;
+use crate::shared::types::{Bool, Float, Int, Reference};
 use cranelift_codegen_shared::condcodes::IntCC;
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
@@ -80,6 +82,14 @@ impl InstructionGroup {
     }
 }
 
+/// Instructions can have parameters bound to them to specialize them for more specific encodings
+/// (e.g. the encoding for adding two float types may be different than that of adding two
+/// integer types)
+pub trait Bindable {
+    /// Bind a parameter to an instruction
+    fn bind(&self, parameter: impl Into<BindParameter>) -> BoundInstruction;
+}
+
 #[derive(Debug)]
 pub struct PolymorphicInfo {
     pub use_typevar_operand: bool,
@@ -173,30 +183,11 @@ impl Instruction {
             None => Vec::new(),
         }
     }
+}
 
-    pub fn bind(&self, lane_type: impl Into<LaneType>) -> BoundInstruction {
-        bind(self.clone(), Some(lane_type.into()), Vec::new())
-    }
-
-    pub fn bind_ref(&self, reference_type: impl Into<ReferenceType>) -> BoundInstruction {
-        bind_ref(self.clone(), Some(reference_type.into()), Vec::new())
-    }
-
-    pub fn bind_vector_from_lane(
-        &self,
-        lane_type: impl Into<LaneType>,
-        vector_size_in_bits: u64,
-    ) -> BoundInstruction {
-        bind_vector(
-            self.clone(),
-            lane_type.into(),
-            vector_size_in_bits,
-            Vec::new(),
-        )
-    }
-
-    pub fn bind_any(&self) -> BoundInstruction {
-        bind(self.clone(), None, Vec::new())
+impl Bindable for Instruction {
+    fn bind(&self, parameter: impl Into<BindParameter>) -> BoundInstruction {
+        BoundInstruction::new(self).bind(parameter)
     }
 }
@@ -407,36 +398,163 @@ impl ValueTypeOrAny {
     }
 }
 
+/// The number of bits in the vector
+type VectorBitWidth = u64;
+
+/// A parameter used for binding instructions to specific types or values
+pub enum BindParameter {
+    Any,
+    Lane(LaneType),
+    Vector(LaneType, VectorBitWidth),
+    Reference(ReferenceType),
+    Immediate(Immediate),
+}
+
+/// Constructor for more easily building vector parameters from any lane type
+pub fn vector(parameter: impl Into<LaneType>, vector_size: VectorBitWidth) -> BindParameter {
+    BindParameter::Vector(parameter.into(), vector_size)
+}
+
+impl From<Int> for BindParameter {
+    fn from(ty: Int) -> Self {
+        BindParameter::Lane(ty.into())
+    }
+}
+
+impl From<Bool> for BindParameter {
+    fn from(ty: Bool) -> Self {
+        BindParameter::Lane(ty.into())
+    }
+}
+
+impl From<Float> for BindParameter {
+    fn from(ty: Float) -> Self {
+        BindParameter::Lane(ty.into())
+    }
+}
+
+impl From<LaneType> for BindParameter {
+    fn from(ty: LaneType) -> Self {
+        BindParameter::Lane(ty)
+    }
+}
+
+impl From<Reference> for BindParameter {
+    fn from(ty: Reference) -> Self {
+        BindParameter::Reference(ty.into())
+    }
+}
+
+impl From<Immediate> for BindParameter {
+    fn from(imm: Immediate) -> Self {
+        BindParameter::Immediate(imm)
+    }
+}
+
+#[derive(Clone)]
+pub enum Immediate {
+    UInt8(u8),
+    UInt128(u128),
+}
+
+impl Display for Immediate {
+    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
+        match self {
+            Immediate::UInt8(x) => write!(f, "{}", x),
+            Immediate::UInt128(x) => write!(f, "{}", x),
+        }
+    }
+}
+
 #[derive(Clone)]
 pub struct BoundInstruction {
     pub inst: Instruction,
     pub value_types: Vec<ValueTypeOrAny>,
+    pub immediate_values: Vec<Immediate>,
 }
 
 impl BoundInstruction {
-    pub fn bind(self, lane_type: impl Into<LaneType>) -> BoundInstruction {
-        bind(self.inst, Some(lane_type.into()), self.value_types)
-    }
-
-    pub fn bind_ref(self, reference_type: impl Into<ReferenceType>) -> BoundInstruction {
-        bind_ref(self.inst, Some(reference_type.into()), self.value_types)
-    }
-
-    pub fn bind_vector_from_lane(
-        self,
-        lane_type: impl Into<LaneType>,
-        vector_size_in_bits: u64,
-    ) -> BoundInstruction {
-        bind_vector(
-            self.inst,
-            lane_type.into(),
-            vector_size_in_bits,
-            self.value_types,
-        )
-    }
-
-    pub fn bind_any(self) -> BoundInstruction {
-        bind(self.inst, None, self.value_types)
+    /// Construct a new bound instruction (with nothing bound yet) from an instruction
+    fn new(inst: &Instruction) -> Self {
+        BoundInstruction {
+            inst: inst.clone(),
+            value_types: vec![],
+            immediate_values: vec![],
+        }
+    }
+
+    /// Verify that the bindings for a BoundInstruction are correct.
+    fn verify_bindings(&self) -> Result<(), String> {
+        // Verify that binding types to the instruction does not violate the polymorphic rules.
+        if !self.value_types.is_empty() {
+            match &self.inst.polymorphic_info {
+                Some(poly) => {
+                    if self.value_types.len() > 1 + poly.other_typevars.len() {
+                        return Err(format!(
+                            "trying to bind too many types for {}",
+                            self.inst.name
+                        ));
+                    }
+                }
+                None => {
+                    return Err(format!(
+                        "trying to bind a type for {} which is not a polymorphic instruction",
+                        self.inst.name
+                    ));
+                }
+            }
+        }
+
+        // Verify that only the right number of immediates are bound.
+        let immediate_count = self
+            .inst
+            .operands_in
+            .iter()
+            .filter(|o| o.is_immediate())
+            .count();
+        if self.immediate_values.len() > immediate_count {
+            return Err(format!(
+                "trying to bind too many immediates ({}) to instruction {} which only expects {} \
+                 immediates",
+                self.immediate_values.len(),
+                self.inst.name,
+                immediate_count
+            ));
+        }
+
+        Ok(())
+    }
+}
+
+impl Bindable for BoundInstruction {
+    fn bind(&self, parameter: impl Into<BindParameter>) -> BoundInstruction {
+        let mut modified = self.clone();
+        match parameter.into() {
+            BindParameter::Any => modified.value_types.push(ValueTypeOrAny::Any),
+            BindParameter::Lane(lane_type) => modified
+                .value_types
+                .push(ValueTypeOrAny::ValueType(lane_type.into())),
+            BindParameter::Vector(lane_type, vector_size_in_bits) => {
+                let num_lanes = vector_size_in_bits / lane_type.lane_bits();
+                assert!(
+                    num_lanes >= 2,
+                    "Minimum lane number for bind_vector is 2, found {}.",
+                    num_lanes,
+                );
+                let vector_type = ValueType::Vector(VectorType::new(lane_type, num_lanes));
+                modified
+                    .value_types
+                    .push(ValueTypeOrAny::ValueType(vector_type));
+            }
+            BindParameter::Reference(reference_type) => {
+                modified
+                    .value_types
+                    .push(ValueTypeOrAny::ValueType(reference_type.into()));
+            }
+            BindParameter::Immediate(immediate) => modified.immediate_values.push(immediate),
+        }
+        modified.verify_bindings().unwrap();
+        modified
     }
 }
@@ -1124,17 +1242,13 @@ impl InstSpec {
             InstSpec::Bound(bound_inst) => &bound_inst.inst,
         }
     }
+}
 
-    pub fn bind(&self, lane_type: impl Into<LaneType>) -> BoundInstruction {
-        match self {
-            InstSpec::Inst(inst) => inst.bind(lane_type),
-            InstSpec::Bound(inst) => inst.clone().bind(lane_type),
-        }
-    }
-
-    pub fn bind_ref(&self, reference_type: impl Into<ReferenceType>) -> BoundInstruction {
+impl Bindable for InstSpec {
+    fn bind(&self, parameter: impl Into<BindParameter>) -> BoundInstruction {
         match self {
-            InstSpec::Inst(inst) => inst.bind_ref(reference_type),
-            InstSpec::Bound(inst) => inst.clone().bind_ref(reference_type),
+            InstSpec::Inst(inst) => inst.bind(parameter.into()),
+            InstSpec::Bound(inst) => inst.bind(parameter.into()),
         }
     }
 }
@@ -1151,79 +1265,94 @@ impl Into<InstSpec> for BoundInstruction {
     }
 }
 
-/// Helper bind reused by {Bound,}Instruction::bind.
-fn bind(
-    inst: Instruction,
-    lane_type: Option<LaneType>,
-    mut value_types: Vec<ValueTypeOrAny>,
-) -> BoundInstruction {
-    match lane_type {
-        Some(lane_type) => {
-            value_types.push(ValueTypeOrAny::ValueType(lane_type.into()));
-        }
-        None => {
-            value_types.push(ValueTypeOrAny::Any);
-        }
-    }
-
-    verify_polymorphic_binding(&inst, &value_types);
-
-    BoundInstruction { inst, value_types }
-}
-
-/// Helper bind for reference types reused by {Bound,}Instruction::bind_ref.
-fn bind_ref(
-    inst: Instruction,
-    reference_type: Option<ReferenceType>,
-    mut value_types: Vec<ValueTypeOrAny>,
-) -> BoundInstruction {
-    match reference_type {
-        Some(reference_type) => {
-            value_types.push(ValueTypeOrAny::ValueType(reference_type.into()));
-        }
-        None => {
-            value_types.push(ValueTypeOrAny::Any);
-        }
-    }
-
-    verify_polymorphic_binding(&inst, &value_types);
-
-    BoundInstruction { inst, value_types }
-}
-
-/// Helper bind for vector types reused by {Bound,}Instruction::bind.
-fn bind_vector(
-    inst: Instruction,
-    lane_type: LaneType,
-    vector_size_in_bits: u64,
-    mut value_types: Vec<ValueTypeOrAny>,
-) -> BoundInstruction {
-    let num_lanes = vector_size_in_bits / lane_type.lane_bits();
-    assert!(
-        num_lanes >= 2,
-        "Minimum lane number for bind_vector is 2, found {}.",
-        num_lanes,
-    );
-    let vector_type = ValueType::Vector(VectorType::new(lane_type, num_lanes));
-    value_types.push(ValueTypeOrAny::ValueType(vector_type));
-    verify_polymorphic_binding(&inst, &value_types);
-    BoundInstruction { inst, value_types }
-}
-
-/// Helper to verify that binding types to the instruction does not violate polymorphic rules
-fn verify_polymorphic_binding(inst: &Instruction, value_types: &Vec<ValueTypeOrAny>) {
-    match &inst.polymorphic_info {
-        Some(poly) => {
-            assert!(
-                value_types.len() <= 1 + poly.other_typevars.len(),
-                format!("trying to bind too many types for {}", inst.name)
-            );
-        }
-        None => {
-            panic!(format!(
-                "trying to bind a type for {} which is not a polymorphic instruction",
-                inst.name
-            ));
-        }
-    }
-}
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::cdsl::formats::InstructionFormatBuilder;
+    use crate::cdsl::operands::{OperandBuilder, OperandKindBuilder, OperandKindFields};
+    use crate::cdsl::typevar::TypeSetBuilder;
+    use crate::shared::types::Int::{I32, I64};
+
+    fn field_to_operand(index: usize, field: OperandKindFields) -> Operand {
+        // pretend the index string is &'static
+        let name = Box::leak(index.to_string().into_boxed_str());
+        let kind = OperandKindBuilder::new(name, field).build();
+        let operand = OperandBuilder::new(name, kind).build();
+        operand
+    }
+
+    fn field_to_operands(types: Vec<OperandKindFields>) -> Vec<Operand> {
+        types
+            .iter()
+            .enumerate()
+            .map(|(i, f)| field_to_operand(i, f.clone()))
+            .collect()
+    }
+
+    fn build_fake_instruction(
+        inputs: Vec<OperandKindFields>,
+        outputs: Vec<OperandKindFields>,
+    ) -> Instruction {
+        // setup a format from the input operands
+        let mut formats = FormatRegistry::new();
+        let mut format = InstructionFormatBuilder::new("fake");
+        for (i, f) in inputs.iter().enumerate() {
+            match f {
+                OperandKindFields::TypeVar(_) => format = format.value(),
+                OperandKindFields::ImmValue => {
+                    format = format.imm(&field_to_operand(i, f.clone()).kind)
+                }
+                _ => {}
+            };
+        }
+        formats.insert(format);
+
+        // create the fake instruction
+        InstructionBuilder::new("fake", "A fake instruction for testing.")
+            .operands_in(field_to_operands(inputs).iter().collect())
+            .operands_out(field_to_operands(outputs).iter().collect())
+            .build(&formats, OpcodeNumber(42))
+    }
+
+    #[test]
+    fn ensure_bound_instructions_can_bind_lane_types() {
+        let type1 = TypeSetBuilder::new().ints(8..64).build();
+        let in1 = OperandKindFields::TypeVar(TypeVar::new("a", "...", type1));
+        let inst = build_fake_instruction(vec![in1], vec![]);
+        inst.bind(LaneType::IntType(I32));
+    }
+
+    #[test]
+    fn ensure_bound_instructions_can_bind_immediates() {
+        let inst = build_fake_instruction(vec![OperandKindFields::ImmValue], vec![]);
+        let bound_inst = inst.bind(Immediate::UInt8(42));
+        assert!(bound_inst.verify_bindings().is_ok());
+    }
+
+    #[test]
+    #[should_panic]
+    fn ensure_instructions_fail_to_bind() {
+        let inst = build_fake_instruction(vec![], vec![]);
+        inst.bind(BindParameter::Lane(LaneType::IntType(I32)));
+        // trying to bind to an instruction with no inputs should fail
+    }
+
+    #[test]
+    #[should_panic]
+    fn ensure_bound_instructions_fail_to_bind_too_many_types() {
+        let type1 = TypeSetBuilder::new().ints(8..64).build();
+        let in1 = OperandKindFields::TypeVar(TypeVar::new("a", "...", type1));
+        let inst = build_fake_instruction(vec![in1], vec![]);
+        inst.bind(LaneType::IntType(I32))
+            .bind(LaneType::IntType(I64));
+    }
+
+    #[test]
+    #[should_panic]
+    fn ensure_instructions_fail_to_bind_too_many_immediates() {
+        let inst = build_fake_instruction(vec![OperandKindFields::ImmValue], vec![]);
+        inst.bind(BindParameter::Immediate(Immediate::UInt8(0)))
+            .bind(BindParameter::Immediate(Immediate::UInt8(1)));
+        // trying to bind too many immediates to an instruction should fail
+    }
+}
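
The ergonomics of the new API come from `bind` taking `impl Into<BindParameter>` combined with the `From` impls above, so call sites can pass a lane type, reference type, or immediate directly. A self-contained toy version of that pattern (stand-in types only, runnable as-is):

```rust
// Simplified stand-ins for BindParameter and BoundInstruction.
#[derive(Debug)]
enum Param {
    Lane(&'static str),
    Immediate(u8),
}

// From impls give callers the `bind(I32)` / `bind(42u8)` sugar.
impl From<&'static str> for Param {
    fn from(ty: &'static str) -> Self {
        Param::Lane(ty)
    }
}

impl From<u8> for Param {
    fn from(imm: u8) -> Self {
        Param::Immediate(imm)
    }
}

#[derive(Debug, Default)]
struct Bound {
    params: Vec<Param>,
}

impl Bound {
    // Chainable bind, like Bindable::bind returning a new BoundInstruction.
    fn bind(mut self, p: impl Into<Param>) -> Self {
        self.params.push(p.into());
        self
    }
}

fn main() {
    let b = Bound::default().bind("I32").bind(42u8);
    println!("{:?}", b.params); // [Lane("I32"), Immediate(42)]
}
```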

isa/riscv/encodings.rs

@@ -1,7 +1,7 @@
 use crate::cdsl::ast::{Apply, Expr, Literal, VarPool};
 use crate::cdsl::encodings::{Encoding, EncodingBuilder};
 use crate::cdsl::instructions::{
-    BoundInstruction, InstSpec, InstructionPredicateNode, InstructionPredicateRegistry,
+    Bindable, BoundInstruction, InstSpec, InstructionPredicateNode, InstructionPredicateRegistry,
 };
 use crate::cdsl::recipes::{EncodingRecipeNumber, Recipes};
 use crate::cdsl::settings::SettingGroup;
@@ -13,27 +13,34 @@ use crate::shared::types::Reference::{R32, R64};
 use crate::shared::Definitions as SharedDefinitions;
 
 use super::recipes::RecipeGroup;
+use crate::cdsl::formats::FormatRegistry;
 
-fn enc(inst: impl Into<InstSpec>, recipe: EncodingRecipeNumber, bits: u16) -> EncodingBuilder {
-    EncodingBuilder::new(inst.into(), recipe, bits)
-}
-
 pub(crate) struct PerCpuModeEncodings<'defs> {
     pub inst_pred_reg: InstructionPredicateRegistry,
     pub enc32: Vec<Encoding>,
     pub enc64: Vec<Encoding>,
     recipes: &'defs Recipes,
+    formats: &'defs FormatRegistry,
 }
 
 impl<'defs> PerCpuModeEncodings<'defs> {
-    fn new(recipes: &'defs Recipes) -> Self {
+    fn new(recipes: &'defs Recipes, formats: &'defs FormatRegistry) -> Self {
         Self {
            inst_pred_reg: InstructionPredicateRegistry::new(),
             enc32: Vec::new(),
             enc64: Vec::new(),
             recipes,
+            formats,
         }
     }
 
+    fn enc(
+        &self,
+        inst: impl Into<InstSpec>,
+        recipe: EncodingRecipeNumber,
+        bits: u16,
+    ) -> EncodingBuilder {
+        EncodingBuilder::new(inst.into(), recipe, bits, self.formats)
+    }
+
     fn add32(&mut self, encoding: EncodingBuilder) {
         self.enc32
             .push(encoding.build(self.recipes, &mut self.inst_pred_reg));
@@ -169,7 +176,7 @@ pub(crate) fn define<'defs>(
     let use_m = isa_settings.predicate_by_name("use_m");
 
     // Definitions.
-    let mut e = PerCpuModeEncodings::new(&recipes.recipes);
+    let mut e = PerCpuModeEncodings::new(&recipes.recipes, &shared_defs.format_registry);
 
     // Basic arithmetic binary instructions are encoded in an R-type instruction.
     for &(inst, inst_imm, f3, f7) in &[
@@ -179,26 +186,26 @@ pub(crate) fn define<'defs>(
         (bor, Some(bor_imm), 0b110, 0b0000000),
         (band, Some(band_imm), 0b111, 0b0000000),
     ] {
-        e.add32(enc(inst.bind(I32), r_r, op_bits(f3, f7)));
-        e.add64(enc(inst.bind(I64), r_r, op_bits(f3, f7)));
+        e.add32(e.enc(inst.bind(I32), r_r, op_bits(f3, f7)));
+        e.add64(e.enc(inst.bind(I64), r_r, op_bits(f3, f7)));
 
         // Immediate versions for add/xor/or/and.
         if let Some(inst_imm) = inst_imm {
-            e.add32(enc(inst_imm.bind(I32), r_ii, opimm_bits(f3, 0)));
-            e.add64(enc(inst_imm.bind(I64), r_ii, opimm_bits(f3, 0)));
+            e.add32(e.enc(inst_imm.bind(I32), r_ii, opimm_bits(f3, 0)));
+            e.add64(e.enc(inst_imm.bind(I64), r_ii, opimm_bits(f3, 0)));
         }
     }
 
     // 32-bit ops in RV64.
-    e.add64(enc(iadd.bind(I32), r_r, op32_bits(0b000, 0b0000000)));
-    e.add64(enc(isub.bind(I32), r_r, op32_bits(0b000, 0b0100000)));
+    e.add64(e.enc(iadd.bind(I32), r_r, op32_bits(0b000, 0b0000000)));
+    e.add64(e.enc(isub.bind(I32), r_r, op32_bits(0b000, 0b0100000)));
 
     // There are no andiw/oriw/xoriw variations.
-    e.add64(enc(iadd_imm.bind(I32), r_ii, opimm32_bits(0b000, 0)));
+    e.add64(e.enc(iadd_imm.bind(I32), r_ii, opimm32_bits(0b000, 0)));
 
     // Use iadd_imm with %x0 to materialize constants.
-    e.add32(enc(iconst.bind(I32), r_iz, opimm_bits(0b0, 0)));
-    e.add64(enc(iconst.bind(I32), r_iz, opimm_bits(0b0, 0)));
-    e.add64(enc(iconst.bind(I64), r_iz, opimm_bits(0b0, 0)));
+    e.add32(e.enc(iconst.bind(I32), r_iz, opimm_bits(0b0, 0)));
+    e.add64(e.enc(iconst.bind(I32), r_iz, opimm_bits(0b0, 0)));
+    e.add64(e.enc(iconst.bind(I64), r_iz, opimm_bits(0b0, 0)));
 
     // Dynamic shifts have the same masking semantics as the clif base instructions.
     for &(inst, inst_imm, f3, f7) in &[
@@ -206,17 +213,17 @@ pub(crate) fn define<'defs>(
         (ushr, ushr_imm, 0b101, 0b0),
         (sshr, sshr_imm, 0b101, 0b100000),
     ] {
-        e.add32(enc(inst.bind(I32).bind(I32), r_r, op_bits(f3, f7)));
-        e.add64(enc(inst.bind(I64).bind(I64), r_r, op_bits(f3, f7)));
-        e.add64(enc(inst.bind(I32).bind(I32), r_r, op32_bits(f3, f7)));
+        e.add32(e.enc(inst.bind(I32).bind(I32), r_r, op_bits(f3, f7)));
+        e.add64(e.enc(inst.bind(I64).bind(I64), r_r, op_bits(f3, f7)));
+        e.add64(e.enc(inst.bind(I32).bind(I32), r_r, op32_bits(f3, f7)));
 
         // Allow i32 shift amounts in 64-bit shifts.
-        e.add64(enc(inst.bind(I64).bind(I32), r_r, op_bits(f3, f7)));
-        e.add64(enc(inst.bind(I32).bind(I64), r_r, op32_bits(f3, f7)));
+        e.add64(e.enc(inst.bind(I64).bind(I32), r_r, op_bits(f3, f7)));
+        e.add64(e.enc(inst.bind(I32).bind(I64), r_r, op32_bits(f3, f7)));
 
         // Immediate shifts.
-        e.add32(enc(inst_imm.bind(I32), r_rshamt, opimm_bits(f3, f7)));
-        e.add64(enc(inst_imm.bind(I64), r_rshamt, opimm_bits(f3, f7)));
-        e.add64(enc(inst_imm.bind(I32), r_rshamt, opimm32_bits(f3, f7)));
+        e.add32(e.enc(inst_imm.bind(I32), r_rshamt, opimm_bits(f3, f7)));
+        e.add64(e.enc(inst_imm.bind(I64), r_rshamt, opimm_bits(f3, f7)));
+        e.add64(e.enc(inst_imm.bind(I32), r_rshamt, opimm32_bits(f3, f7)));
     }
 
     // Signed and unsigned integer 'less than'. There are no 'w' variants for comparing 32-bit
@@ -242,20 +249,20 @@ pub(crate) fn define<'defs>(
         let icmp_i32 = icmp.bind(I32);
         let icmp_i64 = icmp.bind(I64);
         e.add32(
-            enc(icmp_i32.clone(), r_ricmp, op_bits(0b010, 0b0000000))
+            e.enc(icmp_i32.clone(), r_ricmp, op_bits(0b010, 0b0000000))
                 .inst_predicate(icmp_instp(&icmp_i32, "slt")),
         );
         e.add64(
-            enc(icmp_i64.clone(), r_ricmp, op_bits(0b010, 0b0000000))
+            e.enc(icmp_i64.clone(), r_ricmp, op_bits(0b010, 0b0000000))
                 .inst_predicate(icmp_instp(&icmp_i64, "slt")),
         );
 
         e.add32(
-            enc(icmp_i32.clone(), r_ricmp, op_bits(0b011, 0b0000000))
+            e.enc(icmp_i32.clone(), r_ricmp, op_bits(0b011, 0b0000000))
                 .inst_predicate(icmp_instp(&icmp_i32, "ult")),
         );
         e.add64(
-            enc(icmp_i64.clone(), r_ricmp, op_bits(0b011, 0b0000000))
+            e.enc(icmp_i64.clone(), r_ricmp, op_bits(0b011, 0b0000000))
                 .inst_predicate(icmp_instp(&icmp_i64, "ult")),
         );
@@ -263,42 +270,51 @@
         let icmp_i32 = icmp_imm.bind(I32);
         let icmp_i64 = icmp_imm.bind(I64);
         e.add32(
-            enc(icmp_i32.clone(), r_iicmp, opimm_bits(0b010, 0))
+            e.enc(icmp_i32.clone(), r_iicmp, opimm_bits(0b010, 0))
                 .inst_predicate(icmp_instp(&icmp_i32, "slt")),
         );
         e.add64(
-            enc(icmp_i64.clone(), r_iicmp, opimm_bits(0b010, 0))
+            e.enc(icmp_i64.clone(), r_iicmp, opimm_bits(0b010, 0))
                 .inst_predicate(icmp_instp(&icmp_i64, "slt")),
         );
 
         e.add32(
-            enc(icmp_i32.clone(), r_iicmp, opimm_bits(0b011, 0))
+            e.enc(icmp_i32.clone(), r_iicmp, opimm_bits(0b011, 0))
                 .inst_predicate(icmp_instp(&icmp_i32, "ult")),
        );
         e.add64(
-            enc(icmp_i64.clone(), r_iicmp, opimm_bits(0b011, 0))
+            e.enc(icmp_i64.clone(), r_iicmp, opimm_bits(0b011, 0))
                 .inst_predicate(icmp_instp(&icmp_i64, "ult")),
         );
     }
     // Integer constants with the low 12 bits clear are materialized by lui.
-    e.add32(enc(iconst.bind(I32), r_u, lui_bits()));
-    e.add64(enc(iconst.bind(I32), r_u, lui_bits()));
-    e.add64(enc(iconst.bind(I64), r_u, lui_bits()));
+    e.add32(e.enc(iconst.bind(I32), r_u, lui_bits()));
+    e.add64(e.enc(iconst.bind(I32), r_u, lui_bits()));
+    e.add64(e.enc(iconst.bind(I64), r_u, lui_bits()));
 
     // "M" Standard Extension for Integer Multiplication and Division.
     // Gated by the `use_m` flag.
-    e.add32(enc(imul.bind(I32), r_r, op_bits(0b000, 0b00000001)).isa_predicate(use_m));
-    e.add64(enc(imul.bind(I64), r_r, op_bits(0b000, 0b00000001)).isa_predicate(use_m));
-    e.add64(enc(imul.bind(I32), r_r, op32_bits(0b000, 0b00000001)).isa_predicate(use_m));
+    e.add32(
+        e.enc(imul.bind(I32), r_r, op_bits(0b000, 0b00000001))
+            .isa_predicate(use_m),
+    );
+    e.add64(
+        e.enc(imul.bind(I64), r_r, op_bits(0b000, 0b00000001))
+            .isa_predicate(use_m),
+    );
+    e.add64(
+        e.enc(imul.bind(I32), r_r, op32_bits(0b000, 0b00000001))
+            .isa_predicate(use_m),
+    );
 
     // Control flow.
 
     // Unconditional branches.
-    e.add32(enc(jump, r_uj, jal_bits()));
-    e.add64(enc(jump, r_uj, jal_bits()));
-    e.add32(enc(call, r_uj_call, jal_bits()));
-    e.add64(enc(call, r_uj_call, jal_bits()));
+    e.add32(e.enc(jump, r_uj, jal_bits()));
+    e.add64(e.enc(jump, r_uj, jal_bits()));
+    e.add32(e.enc(call, r_uj_call, jal_bits()));
+    e.add64(e.enc(call, r_uj_call, jal_bits()));
 
     // Conditional branches.
     {
@@ -338,101 +354,81 @@ pub(crate) fn define<'defs>(
             ("uge", 0b111),
         ] {
             e.add32(
-                enc(br_icmp_i32.clone(), r_sb, branch_bits(f3))
+                e.enc(br_icmp_i32.clone(), r_sb, branch_bits(f3))
                     .inst_predicate(br_icmp_instp(&br_icmp_i32, cond)),
             );
             e.add64(
-                enc(br_icmp_i64.clone(), r_sb, branch_bits(f3))
+                e.enc(br_icmp_i64.clone(), r_sb, branch_bits(f3))
                     .inst_predicate(br_icmp_instp(&br_icmp_i64, cond)),
             );
         }
     }
 
     for &(inst, f3) in &[(brz, 0b000), (brnz, 0b001)] {
-        e.add32(enc(inst.bind(I32), r_sb_zero, branch_bits(f3)));
-        e.add64(enc(inst.bind(I64), r_sb_zero, branch_bits(f3)));
-        e.add32(enc(inst.bind(B1), r_sb_zero, branch_bits(f3)));
-        e.add64(enc(inst.bind(B1), r_sb_zero, branch_bits(f3)));
+        e.add32(e.enc(inst.bind(I32), r_sb_zero, branch_bits(f3)));
+        e.add64(e.enc(inst.bind(I64), r_sb_zero, branch_bits(f3)));
+        e.add32(e.enc(inst.bind(B1), r_sb_zero, branch_bits(f3)));
+        e.add64(e.enc(inst.bind(B1), r_sb_zero, branch_bits(f3)));
     }
 
     // Returns are a special case of jalr_bits using %x1 to hold the return address.
     // The return address is provided by a special-purpose `link` return value that
     // is added by legalize_signature().
-    e.add32(enc(return_, r_iret, jalr_bits()));
-    e.add64(enc(return_, r_iret, jalr_bits()));
-    e.add32(enc(call_indirect.bind(I32), r_icall, jalr_bits()));
-    e.add64(enc(call_indirect.bind(I64), r_icall, jalr_bits()));
+    e.add32(e.enc(return_, r_iret, jalr_bits()));
+    e.add64(e.enc(return_, r_iret, jalr_bits()));
+    e.add32(e.enc(call_indirect.bind(I32), r_icall, jalr_bits()));
+    e.add64(e.enc(call_indirect.bind(I64), r_icall, jalr_bits()));
 
     // Spill and fill.
-    e.add32(enc(spill.bind(I32), r_gp_sp, store_bits(0b010)));
-    e.add64(enc(spill.bind(I32), r_gp_sp, store_bits(0b010)));
-    e.add64(enc(spill.bind(I64), r_gp_sp, store_bits(0b011)));
-    e.add32(enc(fill.bind(I32), r_gp_fi, load_bits(0b010)));
-    e.add64(enc(fill.bind(I32), r_gp_fi, load_bits(0b010)));
-    e.add64(enc(fill.bind(I64), r_gp_fi, load_bits(0b011)));
+    e.add32(e.enc(spill.bind(I32), r_gp_sp, store_bits(0b010)));
+    e.add64(e.enc(spill.bind(I32), r_gp_sp, store_bits(0b010)));
+    e.add64(e.enc(spill.bind(I64), r_gp_sp, store_bits(0b011)));
+    e.add32(e.enc(fill.bind(I32), r_gp_fi, load_bits(0b010)));
+    e.add64(e.enc(fill.bind(I32), r_gp_fi, load_bits(0b010)));
+    e.add64(e.enc(fill.bind(I64), r_gp_fi, load_bits(0b011)));
 
     // No-op fills, created by late-stage redundant-fill removal.
     for &ty in &[I64, I32] {
-        e.add64(enc(fill_nop.bind(ty), r_fillnull, 0));
-        e.add32(enc(fill_nop.bind(ty), r_fillnull, 0));
+        e.add64(e.enc(fill_nop.bind(ty), r_fillnull, 0));
+        e.add32(e.enc(fill_nop.bind(ty), r_fillnull, 0));
     }
-    e.add64(enc(fill_nop.bind(B1), r_fillnull, 0));
-    e.add32(enc(fill_nop.bind(B1), r_fillnull, 0));
+    e.add64(e.enc(fill_nop.bind(B1), r_fillnull, 0));
+    e.add32(e.enc(fill_nop.bind(B1), r_fillnull, 0));
 
     // Register copies.
-    e.add32(enc(copy.bind(I32), r_icopy, opimm_bits(0b000, 0)));
-    e.add64(enc(copy.bind(I64), r_icopy, opimm_bits(0b000, 0)));
-    e.add64(enc(copy.bind(I32), r_icopy, opimm32_bits(0b000, 0)));
+    e.add32(e.enc(copy.bind(I32), r_icopy, opimm_bits(0b000, 0)));
+    e.add64(e.enc(copy.bind(I64), r_icopy, opimm_bits(0b000, 0)));
+    e.add64(e.enc(copy.bind(I32), r_icopy, opimm32_bits(0b000, 0)));
 
-    e.add32(enc(regmove.bind(I32), r_irmov, opimm_bits(0b000, 0)));
-    e.add64(enc(regmove.bind(I64), r_irmov, opimm_bits(0b000, 0)));
-    e.add64(enc(regmove.bind(I32), r_irmov, opimm32_bits(0b000, 0)));
+    e.add32(e.enc(regmove.bind(I32), r_irmov, opimm_bits(0b000, 0)));
+    e.add64(e.enc(regmove.bind(I64), r_irmov, opimm_bits(0b000, 0)));
+    e.add64(e.enc(regmove.bind(I32), r_irmov, opimm32_bits(0b000, 0)));
 
-    e.add32(enc(copy.bind(B1), r_icopy, opimm_bits(0b000, 0)));
-    e.add64(enc(copy.bind(B1), r_icopy, opimm_bits(0b000, 0)));
-    e.add32(enc(regmove.bind(B1), r_irmov, opimm_bits(0b000, 0)));
-    e.add64(enc(regmove.bind(B1), r_irmov, opimm_bits(0b000, 0)));
+    e.add32(e.enc(copy.bind(B1), r_icopy, opimm_bits(0b000, 0)));
+    e.add64(e.enc(copy.bind(B1), r_icopy, opimm_bits(0b000, 0)));
+    e.add32(e.enc(regmove.bind(B1), r_irmov, opimm_bits(0b000, 0)));
+    e.add64(e.enc(regmove.bind(B1), r_irmov, opimm_bits(0b000, 0)));
 
     // Stack-slot-to-the-same-stack-slot copy, which is guaranteed to turn
     // into a no-op.
     // The same encoding is generated for both the 64- and 32-bit architectures.
     for &ty in &[I64, I32, I16, I8] {
-        e.add32(enc(copy_nop.bind(ty), r_stacknull, 0));
-        e.add64(enc(copy_nop.bind(ty), r_stacknull, 0));
+        e.add32(e.enc(copy_nop.bind(ty), r_stacknull, 0));
+        e.add64(e.enc(copy_nop.bind(ty), r_stacknull, 0));
     }
     for &ty in &[F64, F32] {
-        e.add32(enc(copy_nop.bind(ty), r_stacknull, 0));
-        e.add64(enc(copy_nop.bind(ty), r_stacknull, 0));
+        e.add32(e.enc(copy_nop.bind(ty), r_stacknull, 0));
+        e.add64(e.enc(copy_nop.bind(ty), r_stacknull, 0));
     }
 
     // Copy-to-SSA
-    e.add32(enc(
-        copy_to_ssa.bind(I32),
-        r_copytossa,
-        opimm_bits(0b000, 0),
-    ));
-    e.add64(enc(
-        copy_to_ssa.bind(I64),
-        r_copytossa,
-        opimm_bits(0b000, 0),
-    ));
-    e.add64(enc(
-        copy_to_ssa.bind(I32),
-        r_copytossa,
-        opimm32_bits(0b000, 0),
-    ));
-    e.add32(enc(copy_to_ssa.bind(B1), r_copytossa, opimm_bits(0b000, 0)));
-    e.add64(enc(copy_to_ssa.bind(B1), r_copytossa, opimm_bits(0b000, 0)));
-    e.add32(enc(
-        copy_to_ssa.bind_ref(R32),
-        r_copytossa,
-        opimm_bits(0b000, 0),
-    ));
-    e.add64(enc(
-        copy_to_ssa.bind_ref(R64),
-        r_copytossa,
-        opimm_bits(0b000, 0),
-    ));
+    e.add32(e.enc(copy_to_ssa.bind(I32), r_copytossa, opimm_bits(0b000, 0)));
+    e.add64(e.enc(copy_to_ssa.bind(I64), r_copytossa, opimm_bits(0b000, 0)));
+    e.add64(e.enc(copy_to_ssa.bind(I32), r_copytossa, opimm32_bits(0b000, 0)));
+    e.add32(e.enc(copy_to_ssa.bind(B1), r_copytossa, opimm_bits(0b000, 0)));
+    e.add64(e.enc(copy_to_ssa.bind(B1), r_copytossa, opimm_bits(0b000, 0)));
+    e.add32(e.enc(copy_to_ssa.bind(R32), r_copytossa, opimm_bits(0b000, 0)));
+    e.add64(e.enc(copy_to_ssa.bind(R64), r_copytossa, opimm_bits(0b000, 0)));
 
     e
 }
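
One consequence visible in this file: `EncodingBuilder::new` now needs a `&FormatRegistry` (to resolve the instruction format for immediate predicates), so the free `enc(...)` helper became the `PerCpuModeEncodings::enc` method that supplies `self.formats`. A toy sketch of that refactor shape (stand-in types only, runnable as-is):

```rust
// Before: fn enc(inst, recipe, bits) -> EncodingBuilder  (no registry access).
// After: a method on the encodings struct that threads a borrowed registry.
struct FormatRegistry(Vec<&'static str>);

struct Encodings<'a> {
    formats: &'a FormatRegistry,
}

impl<'a> Encodings<'a> {
    // The real method forwards self.formats to EncodingBuilder::new.
    fn enc(&self, inst: &str, bits: u16) -> String {
        format!("{}/{:#x} ({} formats known)", inst, bits, self.formats.0.len())
    }
}

fn main() {
    let formats = FormatRegistry(vec!["Binary", "BinaryImm"]);
    let e = Encodings { formats: &formats };
    println!("{}", e.enc("iadd.i32", 0x33));
}
```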

isa/x86/encodings.rs

@@ -5,8 +5,8 @@ use std::collections::HashMap;
 use crate::cdsl::encodings::{Encoding, EncodingBuilder};
 use crate::cdsl::instructions::{
-    InstSpec, Instruction, InstructionGroup, InstructionPredicate, InstructionPredicateNode,
-    InstructionPredicateRegistry,
+    vector, Bindable, InstSpec, Instruction, InstructionGroup, InstructionPredicate,
+    InstructionPredicateNode, InstructionPredicateRegistry,
 };
 use crate::cdsl::recipes::{EncodingRecipe, EncodingRecipeNumber, Recipes};
 use crate::cdsl::settings::{SettingGroup, SettingPredicateNumber};
@@ -20,23 +20,27 @@ use crate::shared::Definitions as SharedDefinitions;
 use crate::isa::x86::opcodes::*;
 
 use super::recipes::{RecipeGroup, Template};
+use crate::cdsl::formats::FormatRegistry;
+use crate::cdsl::instructions::BindParameter::Any;
 
-pub(crate) struct PerCpuModeEncodings {
+pub(crate) struct PerCpuModeEncodings<'defs> {
     pub enc32: Vec<Encoding>,
     pub enc64: Vec<Encoding>,
     pub recipes: Recipes,
     recipes_by_name: HashMap<String, EncodingRecipeNumber>,
     pub inst_pred_reg: InstructionPredicateRegistry,
+    formats: &'defs FormatRegistry,
 }
 
-impl PerCpuModeEncodings {
-    fn new() -> Self {
+impl<'defs> PerCpuModeEncodings<'defs> {
+    fn new(formats: &'defs FormatRegistry) -> Self {
         Self {
             enc32: Vec::new(),
             enc64: Vec::new(),
             recipes: Recipes::new(),
             recipes_by_name: HashMap::new(),
             inst_pred_reg: InstructionPredicateRegistry::new(),
+            formats,
         }
     }
@@ -69,7 +73,7 @@ impl PerCpuModeEncodings {
     {
         let (recipe, bits) = template.build();
         let recipe_number = self.add_recipe(recipe);
-        let builder = EncodingBuilder::new(inst.into(), recipe_number, bits);
+        let builder = EncodingBuilder::new(inst.into(), recipe_number, bits, self.formats);
         builder_closure(builder).build(&self.recipes, &mut self.inst_pred_reg)
     }
@@ -101,7 +105,7 @@ impl PerCpuModeEncodings {
     }
 
     fn enc32_rec(&mut self, inst: impl Into<InstSpec>, recipe: &EncodingRecipe, bits: u16) {
         let recipe_number = self.add_recipe(recipe.clone());
-        let builder = EncodingBuilder::new(inst.into(), recipe_number, bits);
+        let builder = EncodingBuilder::new(inst.into(), recipe_number, bits, self.formats);
         let encoding = builder.build(&self.recipes, &mut self.inst_pred_reg);
         self.enc32.push(encoding);
     }
@@ -134,7 +138,7 @@ impl PerCpuModeEncodings {
     }
 
     fn enc64_rec(&mut self, inst: impl Into<InstSpec>, recipe: &EncodingRecipe, bits: u16) {
         let recipe_number = self.add_recipe(recipe.clone());
-        let builder = EncodingBuilder::new(inst.into(), recipe_number, bits);
+        let builder = EncodingBuilder::new(inst.into(), recipe_number, bits, self.formats);
         let encoding = builder.build(&self.recipes, &mut self.inst_pred_reg);
         self.enc64.push(encoding);
     }
@@ -207,8 +211,8 @@ impl PerCpuModeEncodings {
     /// Add encodings for `inst.r64` to X86_64 with a REX.W prefix.
     fn enc_r32_r64_rex_only(&mut self, inst: impl Into<InstSpec>, template: Template) {
         let inst: InstSpec = inst.into();
-        self.enc32(inst.bind_ref(R32), template.nonrex());
-        self.enc64(inst.bind_ref(R64), template.rex().w());
+        self.enc32(inst.bind(R32), template.nonrex());
+        self.enc64(inst.bind(R64), template.rex().w());
     }
 
     /// Add encodings for `inst` to X86_64 with and without a REX prefix.
@@ -281,18 +285,18 @@ impl PerCpuModeEncodings {
     /// Add encodings for `inst.i64` to X86_64 with a REX prefix, using the `w_bit`
     /// argument to determine whether or not to set the REX.W bit.
     fn enc_i32_i64_ld_st(&mut self, inst: &Instruction, w_bit: bool, template: Template) {
-        self.enc32(inst.clone().bind(I32).bind_any(), template.clone());
+        self.enc32(inst.clone().bind(I32).bind(Any), template.clone());
 
         // REX-less encoding must come after REX encoding so we don't use it by
         // default. Otherwise reg-alloc would never use r8 and up.
-        self.enc64(inst.clone().bind(I32).bind_any(), template.clone().rex());
-        self.enc64(inst.clone().bind(I32).bind_any(), template.clone());
+        self.enc64(inst.clone().bind(I32).bind(Any), template.clone().rex());
+        self.enc64(inst.clone().bind(I32).bind(Any), template.clone());
 
         if w_bit {
-            self.enc64(inst.clone().bind(I64).bind_any(), template.rex().w());
+            self.enc64(inst.clone().bind(I64).bind(Any), template.rex().w());
         } else {
-            self.enc64(inst.clone().bind(I64).bind_any(), template.clone().rex());
-            self.enc64(inst.clone().bind(I64).bind_any(), template);
+            self.enc64(inst.clone().bind(I64).bind(Any), template.clone().rex());
+            self.enc64(inst.clone().bind(I64).bind(Any), template);
         }
     }
@@ -366,12 +370,12 @@ impl PerCpuModeEncodings {
 
 // Definitions.
-pub(crate) fn define(
-    shared_defs: &SharedDefinitions,
+pub(crate) fn define<'defs>(
+    shared_defs: &'defs SharedDefinitions,
     settings: &SettingGroup,
     x86: &InstructionGroup,
     r: &RecipeGroup,
-) -> PerCpuModeEncodings {
+) -> PerCpuModeEncodings<'defs> {
     let shared = &shared_defs.instructions;
     let formats = &shared_defs.format_registry;
@@ -681,7 +685,7 @@ pub(crate) fn define(
     let use_sse41_simd = settings.predicate_by_name("use_sse41_simd");
 
     // Definitions.
-    let mut e = PerCpuModeEncodings::new();
+    let mut e = PerCpuModeEncodings::new(formats);
 
     // The pinned reg is fixed to a certain value entirely user-controlled, so it generates nothing!
     e.enc64_rec(get_pinned_reg.bind(I64), rec_get_pinned_reg, 0);
@@ -742,15 +746,11 @@ pub(crate) fn define(
         e.enc64(regmove.bind(ty), rec_rmov.opcodes(&MOV_STORE).rex());
     }
     e.enc64(regmove.bind(I64), rec_rmov.opcodes(&MOV_STORE).rex().w());
-    e.enc64(regmove.bind(B64), rec_rmov.opcodes(&MOV_STORE).rex().w());
     e.enc_both(regmove.bind(B1), rec_rmov.opcodes(&MOV_STORE));
     e.enc_both(regmove.bind(I8), rec_rmov.opcodes(&MOV_STORE));
-    e.enc32(regmove.bind_ref(R32), rec_rmov.opcodes(&MOV_STORE));
-    e.enc64(regmove.bind_ref(R32), rec_rmov.opcodes(&MOV_STORE).rex());
-    e.enc64(
-        regmove.bind_ref(R64),
-        rec_rmov.opcodes(&MOV_STORE).rex().w(),
-    );
+    e.enc32(regmove.bind(R32), rec_rmov.opcodes(&MOV_STORE));
+    e.enc64(regmove.bind(R32), rec_rmov.opcodes(&MOV_STORE).rex());
+    e.enc64(regmove.bind(R64), rec_rmov.opcodes(&MOV_STORE).rex().w());
 
     e.enc_i32_i64(iadd_imm, rec_r_ib.opcodes(&ADD_IMM8_SIGN_EXTEND).rrr(0));
     e.enc_i32_i64(iadd_imm, rec_r_id.opcodes(&ADD_IMM).rrr(0));
@@ -834,19 +834,19 @@ pub(crate) fn define(
         // Cannot use enc_i32_i64 for this pattern because instructions require
         // to bind any.
         e.enc32(
-            inst.bind(I32).bind_any(),
+            inst.bind(I32).bind(Any),
             rec_rc.opcodes(&ROTATE_CL).rrr(rrr),
         );
         e.enc64(
-            inst.bind(I64).bind_any(),
+            inst.bind(I64).bind(Any),
             rec_rc.opcodes(&ROTATE_CL).rrr(rrr).rex().w(),
         );
         e.enc64(
-            inst.bind(I32).bind_any(),
+            inst.bind(I32).bind(Any),
             rec_rc.opcodes(&ROTATE_CL).rrr(rrr).rex(),
         );
         e.enc64(
-            inst.bind(I32).bind_any(),
+            inst.bind(I32).bind(Any),
             rec_rc.opcodes(&ROTATE_CL).rrr(rrr),
         );
     }
@@ -970,7 +970,7 @@ pub(crate) fn define(
     for recipe in &[rec_st, rec_stDisp8, rec_stDisp32] {
         e.enc_i32_i64_ld_st(store, true, recipe.opcodes(&MOV_STORE));
-        e.enc_x86_64(istore32.bind(I64).bind_any(), recipe.opcodes(&MOV_STORE));
+        e.enc_x86_64(istore32.bind(I64).bind(Any), recipe.opcodes(&MOV_STORE));
         e.enc_i32_i64_ld_st(istore16, false, recipe.opcodes(&MOV_STORE_16));
     }
@@ -979,14 +979,8 @@ pub(crate) fn define(
     // the corresponding st* recipes when a REX prefix is applied.
     for recipe in &[rec_st_abcd, rec_stDisp8_abcd, rec_stDisp32_abcd] {
-        e.enc_both(
-            istore8.bind(I32).bind_any(),
-            recipe.opcodes(&MOV_BYTE_STORE),
-        );
-        e.enc_x86_64(
-            istore8.bind(I64).bind_any(),
-            recipe.opcodes(&MOV_BYTE_STORE),
-        );
+        e.enc_both(istore8.bind(I32).bind(Any), recipe.opcodes(&MOV_BYTE_STORE));
+        e.enc_x86_64(istore8.bind(I64).bind(Any), recipe.opcodes(&MOV_BYTE_STORE));
     }
 
     e.enc_i32_i64(spill, rec_spillSib32.opcodes(&MOV_STORE));
@@ -1121,12 +1115,9 @@ pub(crate) fn define(
     );
 
     // Float loads and stores.
-    e.enc_both(load.bind(F32).bind_any(), rec_fld.opcodes(&MOVSS_LOAD));
-    e.enc_both(load.bind(F32).bind_any(), rec_fldDisp8.opcodes(&MOVSS_LOAD));
-    e.enc_both(
-        load.bind(F32).bind_any(),
-        rec_fldDisp32.opcodes(&MOVSS_LOAD),
-    );
+    e.enc_both(load.bind(F32).bind(Any), rec_fld.opcodes(&MOVSS_LOAD));
+    e.enc_both(load.bind(F32).bind(Any), rec_fldDisp8.opcodes(&MOVSS_LOAD));
+    e.enc_both(load.bind(F32).bind(Any), rec_fldDisp32.opcodes(&MOVSS_LOAD));
 
     e.enc_both(
         load_complex.bind(F32),
@@ -1141,12 +1132,9 @@ pub(crate) fn define(
         rec_fldWithIndexDisp32.opcodes(&MOVSS_LOAD),
     );
 
-    e.enc_both(load.bind(F64).bind_any(), rec_fld.opcodes(&MOVSD_LOAD));
-    e.enc_both(load.bind(F64).bind_any(), rec_fldDisp8.opcodes(&MOVSD_LOAD));
-    e.enc_both(
-        load.bind(F64).bind_any(),
-        rec_fldDisp32.opcodes(&MOVSD_LOAD),
-    );
+    e.enc_both(load.bind(F64).bind(Any), rec_fld.opcodes(&MOVSD_LOAD));
+    e.enc_both(load.bind(F64).bind(Any), rec_fldDisp8.opcodes(&MOVSD_LOAD));
+    e.enc_both(load.bind(F64).bind(Any), rec_fldDisp32.opcodes(&MOVSD_LOAD));
 
     e.enc_both(
         load_complex.bind(F64),
@@ -1161,13 +1149,13 @@ pub(crate) fn define(
         rec_fldWithIndexDisp32.opcodes(&MOVSD_LOAD),
     );
 
-    e.enc_both(store.bind(F32).bind_any(), rec_fst.opcodes(&MOVSS_STORE));
+    e.enc_both(store.bind(F32).bind(Any), rec_fst.opcodes(&MOVSS_STORE));
     e.enc_both(
-        store.bind(F32).bind_any(),
+        store.bind(F32).bind(Any),
         rec_fstDisp8.opcodes(&MOVSS_STORE),
     );
     e.enc_both(
-        store.bind(F32).bind_any(),
+        store.bind(F32).bind(Any),
         rec_fstDisp32.opcodes(&MOVSS_STORE),
     );
@@ -1184,13 +1172,13 @@ pub(crate) fn define(
         rec_fstWithIndexDisp32.opcodes(&MOVSS_STORE),
     );
 
-    e.enc_both(store.bind(F64).bind_any(), rec_fst.opcodes(&MOVSD_STORE));
+    e.enc_both(store.bind(F64).bind(Any), rec_fst.opcodes(&MOVSD_STORE));
     e.enc_both(
-        store.bind(F64).bind_any(),
+        store.bind(F64).bind(Any),
         rec_fstDisp8.opcodes(&MOVSD_STORE),
     );
     e.enc_both(
-        store.bind(F64).bind_any(),
+        store.bind(F64).bind(Any),
         rec_fstDisp32.opcodes(&MOVSD_STORE),
     );
@@ -1727,7 +1715,7 @@ pub(crate) fn define(
 
     // PSHUFB, 8-bit shuffle using two XMM registers.
     for ty in ValueType::all_lane_types().filter(allowed_simd_type) {
-        let instruction = x86_pshufb.bind_vector_from_lane(ty, sse_vector_size);
+        let instruction = x86_pshufb.bind(vector(ty, sse_vector_size));
         let template = rec_fa.nonrex().opcodes(&PSHUFB);
         e.enc32_isap(instruction.clone(), template.clone(), use_ssse3_simd);
         e.enc64_isap(instruction, template, use_ssse3_simd);
@@ -1735,7 +1723,7 @@ pub(crate) fn define(
 
     // PSHUFD, 32-bit shuffle using one XMM register and a u8 immediate.
     for ty in ValueType::all_lane_types().filter(|t| t.lane_bits() == 32) {
-        let instruction = x86_pshufd.bind_vector_from_lane(ty, sse_vector_size);
+        let instruction = x86_pshufd.bind(vector(ty, sse_vector_size));
         let template = rec_r_ib_unsigned_fpr.nonrex().opcodes(&PSHUFD);
         e.enc32(instruction.clone(), template.clone());
         e.enc64(instruction, template);
@@ -1745,7 +1733,7 @@ pub(crate) fn define(
     // to the Intel manual: "When the destination operand is an XMM register, the source operand is
     // written to the low doubleword of the register and the register is zero-extended to 128 bits."
     for ty in ValueType::all_lane_types().filter(allowed_simd_type) {
-        let instruction = scalar_to_vector.bind_vector_from_lane(ty, sse_vector_size);
+        let instruction = scalar_to_vector.bind(vector(ty, sse_vector_size));
         if ty.is_float() {
             e.enc_32_64_rec(instruction, rec_null_fpr, 0);
         } else {
@@ -1767,7 +1755,7 @@ pub(crate) fn define(
             _ => panic!("invalid size for SIMD insertlane"),
         };
 
-        let instruction = x86_pinsr.bind_vector_from_lane(ty, sse_vector_size);
+        let instruction = x86_pinsr.bind(vector(ty, sse_vector_size));
         let template = rec_r_ib_unsigned_r.opcodes(opcode);
         if ty.lane_bits() < 64 {
             e.enc_32_64_maybe_isap(instruction, template.nonrex(), isap);
@@ -1780,21 +1768,21 @@ pub(crate) fn define(
 
     // For legalizing insertlane with floats, INSERTPS from SSE4.1.
     {
-        let instruction = x86_insertps.bind_vector_from_lane(F32, sse_vector_size);
+        let instruction = x86_insertps.bind(vector(F32, sse_vector_size));
         let template = rec_fa_ib.nonrex().opcodes(&INSERTPS);
         e.enc_32_64_maybe_isap(instruction, template, Some(use_sse41_simd));
     }
 
     // For legalizing insertlane with floats, MOVSD from SSE2.
     {
-        let instruction = x86_movsd.bind_vector_from_lane(F64, sse_vector_size);
+        let instruction = x86_movsd.bind(vector(F64, sse_vector_size));
         let template = rec_fa.nonrex().opcodes(&MOVSD_LOAD);
         e.enc_32_64_maybe_isap(instruction, template, None); // from SSE2
     }
 
     // For legalizing insertlane with floats, MOVLHPS from SSE.
     {
-        let instruction = x86_movlhps.bind_vector_from_lane(F64, sse_vector_size);
+        let instruction = x86_movlhps.bind(vector(F64, sse_vector_size));
         let template = rec_fa.nonrex().opcodes(&MOVLHPS);
         e.enc_32_64_maybe_isap(instruction, template, None); // from SSE
     }
@@ -1808,7 +1796,7 @@ pub(crate) fn define(
             _ => panic!("invalid size for SIMD extractlane"),
         };
 
-        let instruction = x86_pextr.bind_vector_from_lane(ty, sse_vector_size);
+        let instruction = x86_pextr.bind(vector(ty, sse_vector_size));
         let template = rec_r_ib_unsigned_gpr.opcodes(opcode);
         if ty.lane_bits() < 64 {
             e.enc_32_64_maybe_isap(instruction, template.nonrex(), Some(use_sse41_simd));
@@ -1825,8 +1813,8 @@ pub(crate) fn define(
             ValueType::all_lane_types().filter(|t| allowed_simd_type(t) && *t != from_type)
         {
             let instruction = raw_bitcast
-                .bind_vector_from_lane(to_type, sse_vector_size)
-                .bind_vector_from_lane(from_type, sse_vector_size);
+                .bind(vector(to_type, sse_vector_size))
+                .bind(vector(from_type, sse_vector_size));
             e.enc_32_64_rec(instruction, rec_null_fpr, 0);
         }
     }
@@ -1837,7 +1825,7 @@ pub(crate) fn define(
     for lane_type in ValueType::all_lane_types().filter(allowed_simd_type) {
         e.enc_32_64_rec(
             raw_bitcast
-                .bind_vector_from_lane(lane_type, sse_vector_size)
+                .bind(vector(lane_type, sse_vector_size))
                 .bind(*float_type),
             rec_null_fpr,
             0,
@@ -1845,7 +1833,7 @@ pub(crate) fn define(
         e.enc_32_64_rec(
             raw_bitcast
                 .bind(*float_type)
-                .bind_vector_from_lane(lane_type, sse_vector_size),
+                .bind(vector(lane_type, sse_vector_size)),
             rec_null_fpr,
             0,
         );
@@ -1857,7 +1845,7 @@ pub(crate) fn define(
     // encoding first
     for ty in ValueType::all_lane_types().filter(allowed_simd_type) {
         let f_unary_const = formats.get(formats.by_name("UnaryConst"));
-        let instruction = vconst.bind_vector_from_lane(ty, sse_vector_size);
+        let instruction = vconst.bind(vector(ty, sse_vector_size));
 
         let is_zero_128bit =
             InstructionPredicate::new_is_all_zeroes_128bit(f_unary_const, "constant_handle");
@@ -1881,14 +1869,14 @@ pub(crate) fn define(
     // MOVQ + MOVHPD + MOVQ + MOVLPD (this allows the constants to be immediates instead of stored
     // in memory) but some performance measurements are needed.
     for ty in ValueType::all_lane_types().filter(allowed_simd_type) {
-        let instruction = vconst.bind_vector_from_lane(ty, sse_vector_size);
+        let instruction = vconst.bind(vector(ty, sse_vector_size));
         let template = rec_vconst.nonrex().opcodes(&MOVUPS_LOAD);
         e.enc_32_64_maybe_isap(instruction, template, None); // from SSE
     }
 
     // SIMD bor using ORPS
     for ty in ValueType::all_lane_types().filter(allowed_simd_type) {
-        let instruction = bor.bind_vector_from_lane(ty, sse_vector_size);
+        let instruction = bor.bind(vector(ty, sse_vector_size));
         let template = rec_fa.nonrex().opcodes(&ORPS);
         e.enc_32_64_maybe_isap(instruction, template, None); // from SSE
     }
@@ -1898,87 +1886,87 @@ pub(crate) fn define(
     // alignment or type-specific encodings, see https://github.com/CraneStation/cranelift/issues/1039).
     for ty in ValueType::all_lane_types().filter(allowed_simd_type) {
         // Store
-        let bound_store = store.bind_vector_from_lane(ty, sse_vector_size).bind_any();
+        let bound_store = store.bind(vector(ty, sse_vector_size)).bind(Any);
         e.enc_32_64(bound_store.clone(), rec_fst.opcodes(&MOVUPS_STORE));
         e.enc_32_64(bound_store.clone(), rec_fstDisp8.opcodes(&MOVUPS_STORE));
         e.enc_32_64(bound_store, rec_fstDisp32.opcodes(&MOVUPS_STORE));

         // Load
-        let bound_load = load.bind_vector_from_lane(ty, sse_vector_size).bind_any();
+        let bound_load = load.bind(vector(ty, sse_vector_size)).bind(Any);
         e.enc_32_64(bound_load.clone(), rec_fld.opcodes(&MOVUPS_LOAD));
         e.enc_32_64(bound_load.clone(), rec_fldDisp8.opcodes(&MOVUPS_LOAD));
         e.enc_32_64(bound_load, rec_fldDisp32.opcodes(&MOVUPS_LOAD));

         // Spill
-        let bound_spill = spill.bind_vector_from_lane(ty, sse_vector_size);
+        let bound_spill = spill.bind(vector(ty, sse_vector_size));
         e.enc_32_64(bound_spill, rec_fspillSib32.opcodes(&MOVUPS_STORE));
-        let bound_regspill = regspill.bind_vector_from_lane(ty, sse_vector_size);
+        let bound_regspill = regspill.bind(vector(ty, sse_vector_size));
         e.enc_32_64(bound_regspill, rec_fregspill32.opcodes(&MOVUPS_STORE));

         // Fill
-        let bound_fill = fill.bind_vector_from_lane(ty, sse_vector_size);
+        let bound_fill = fill.bind(vector(ty, sse_vector_size));
         e.enc_32_64(bound_fill, rec_ffillSib32.opcodes(&MOVUPS_LOAD));
-        let bound_regfill = regfill.bind_vector_from_lane(ty, sse_vector_size);
+        let bound_regfill = regfill.bind(vector(ty, sse_vector_size));
         e.enc_32_64(bound_regfill, rec_fregfill32.opcodes(&MOVUPS_LOAD));
-        let bound_fill_nop = fill_nop.bind_vector_from_lane(ty, sse_vector_size);
+        let bound_fill_nop = fill_nop.bind(vector(ty, sse_vector_size));
         e.enc_32_64_rec(bound_fill_nop, rec_ffillnull, 0);

         // Regmove
-        let bound_regmove = regmove.bind_vector_from_lane(ty, sse_vector_size);
+        let bound_regmove = regmove.bind(vector(ty, sse_vector_size));
         e.enc_32_64(bound_regmove, rec_frmov.opcodes(&MOVAPS_LOAD));

         // Copy
-        let bound_copy = copy.bind_vector_from_lane(ty, sse_vector_size);
+        let bound_copy = copy.bind(vector(ty, sse_vector_size));
         e.enc_32_64(bound_copy, rec_furm.opcodes(&MOVAPS_LOAD));
-        let bound_copy_nop = copy_nop.bind_vector_from_lane(ty, sse_vector_size);
+        let bound_copy_nop = copy_nop.bind(vector(ty, sse_vector_size));
         e.enc_32_64_rec(bound_copy_nop, rec_stacknull, 0);
     }
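(Why three recipes per memory op: they correspond to the standard x86 ModRM displacement forms, so the encoder can pick the smallest encoding that fits a given access's offset. A rough sketch of the selection logic; the offset ranges are standard x86 encoding facts, but the function itself is illustrative, not code from this crate:)

// Illustrative recipe choice by offset width (ModRM mod=00 / disp8 / disp32):
fn pick_store_recipe(offset: i32) -> &'static str {
    match offset {
        0 => "fst",               // no displacement byte
        -128..=127 => "fstDisp8", // one-byte signed displacement
        _ => "fstDisp32",         // four-byte displacement
    }
}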
     // SIMD integer addition
     for (ty, opcodes) in &[(I8, &PADDB), (I16, &PADDW), (I32, &PADDD), (I64, &PADDQ)] {
-        let iadd = iadd.bind_vector_from_lane(ty.clone(), sse_vector_size);
+        let iadd = iadd.bind(vector(ty.clone(), sse_vector_size));
         e.enc_32_64(iadd, rec_fa.opcodes(*opcodes));
     }

     // SIMD integer saturating addition
     e.enc_32_64(
-        sadd_sat.bind_vector_from_lane(I8, sse_vector_size),
+        sadd_sat.bind(vector(I8, sse_vector_size)),
         rec_fa.opcodes(&PADDSB),
     );
     e.enc_32_64(
-        sadd_sat.bind_vector_from_lane(I16, sse_vector_size),
+        sadd_sat.bind(vector(I16, sse_vector_size)),
         rec_fa.opcodes(&PADDSW),
     );
     e.enc_32_64(
-        uadd_sat.bind_vector_from_lane(I8, sse_vector_size),
+        uadd_sat.bind(vector(I8, sse_vector_size)),
         rec_fa.opcodes(&PADDUSB),
     );
     e.enc_32_64(
-        uadd_sat.bind_vector_from_lane(I16, sse_vector_size),
+        uadd_sat.bind(vector(I16, sse_vector_size)),
         rec_fa.opcodes(&PADDUSW),
     );

     // SIMD integer subtraction
     for (ty, opcodes) in &[(I8, &PSUBB), (I16, &PSUBW), (I32, &PSUBD), (I64, &PSUBQ)] {
-        let isub = isub.bind_vector_from_lane(ty.clone(), sse_vector_size);
+        let isub = isub.bind(vector(ty.clone(), sse_vector_size));
         e.enc_32_64(isub, rec_fa.opcodes(*opcodes));
     }

     // SIMD integer saturating subtraction
     e.enc_32_64(
-        ssub_sat.bind_vector_from_lane(I8, sse_vector_size),
+        ssub_sat.bind(vector(I8, sse_vector_size)),
         rec_fa.opcodes(&PSUBSB),
     );
     e.enc_32_64(
-        ssub_sat.bind_vector_from_lane(I16, sse_vector_size),
+        ssub_sat.bind(vector(I16, sse_vector_size)),
         rec_fa.opcodes(&PSUBSW),
     );
     e.enc_32_64(
-        usub_sat.bind_vector_from_lane(I8, sse_vector_size),
+        usub_sat.bind(vector(I8, sse_vector_size)),
         rec_fa.opcodes(&PSUBUSB),
     );
     e.enc_32_64(
-        usub_sat.bind_vector_from_lane(I16, sse_vector_size),
+        usub_sat.bind(vector(I16, sse_vector_size)),
         rec_fa.opcodes(&PSUBUSW),
     );
@@ -1988,7 +1976,7 @@ pub(crate) fn define(
         (I16, &PMULLW[..], None),
         (I32, &PMULLD[..], Some(use_sse41_simd)),
     ] {
-        let imul = imul.bind_vector_from_lane(ty.clone(), sse_vector_size);
+        let imul = imul.bind(vector(ty.clone(), sse_vector_size));
         e.enc_32_64_maybe_isap(imul, rec_fa.opcodes(opcodes), *isap);
     }
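(PMULLD is an SSE4.1 instruction, which is why its table entry carries `Some(use_sse41_simd)` while PMULLW needs nothing beyond baseline SSE2. A hedged guess at what the `maybe_isap` helper does with that option; the `enc_32_64_isap` name and the match below are assumptions, not the actual implementation:)

// Illustrative only: with no ISA predicate the encoding is registered
// unconditionally; with one, it is only valid when the setting is enabled.
match *isap {
    None => e.enc_32_64(imul.clone(), rec_fa.opcodes(opcodes)),
    Some(p) => e.enc_32_64_isap(imul.clone(), rec_fa.opcodes(opcodes), p),
}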
@@ -2002,7 +1990,7 @@ pub(crate) fn define(
            _ => panic!("invalid size for SIMD icmp"),
        };
-        let instruction = icmp.bind_vector_from_lane(ty, sse_vector_size);
+        let instruction = icmp.bind(vector(ty, sse_vector_size));
        let f_int_compare = formats.get(formats.by_name("IntCompare"));
        let has_eq_condition_code =
            InstructionPredicate::new_has_condition_code(f_int_compare, IntCC::Equal, "cond");
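(The `cond` field of the IntCompare format is effectively an immediate, so this predicate plays by hand the same role the new immediate bindings automate: it restricts the PCMPEQ-style encoding to `IntCC::Equal` and lets other condition codes fall through to legalization. Sketch of attaching it; the `enc_32_64_instp` helper name is the same illustrative assumption as above:)

// Illustrative: the encoding only fires when the icmp's condition code is
// Equal (PCMPEQB/PCMPEQW/PCMPEQD compare for equality only); `opcodes` stands
// in for the per-size opcode table.
e.enc_32_64_instp(instruction, rec_fa.opcodes(opcodes), has_eq_condition_code);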
@@ -2020,10 +2008,10 @@ pub(crate) fn define(
     // Reference type instructions

     // Null references implemented as iconst 0.
-    e.enc32(null.bind_ref(R32), rec_pu_id_ref.opcodes(&MOV_IMM));
-    e.enc64(null.bind_ref(R64), rec_pu_id_ref.rex().opcodes(&MOV_IMM));
-    e.enc64(null.bind_ref(R64), rec_pu_id_ref.opcodes(&MOV_IMM));
+    e.enc32(null.bind(R32), rec_pu_id_ref.opcodes(&MOV_IMM));
+    e.enc64(null.bind(R64), rec_pu_id_ref.rex().opcodes(&MOV_IMM));
+    e.enc64(null.bind(R64), rec_pu_id_ref.opcodes(&MOV_IMM));

     // is_null, implemented by testing whether the value is 0.
     e.enc_r32_r64_rex_only(is_null, rec_is_zero.opcodes(&TEST_REG));
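(The `bind_ref` → `bind` rewrite is the visible payoff of the new `Bindable` trait: one entry point now accepts lane types, reference types like `R32`/`R64`, `Any`, and immediates, each converting into a common bind parameter. A simplified sketch of the shape, not the exact definitions from `cdsl/instructions.rs`:)

// Simplified sketch: anything convertible into a bind parameter can be bound,
// which is what retires bind_ref/bind_any/bind_vector_from_lane.
pub trait Bindable {
    fn bind(&self, parameter: impl Into<BindParameter>) -> BoundInstruction;
}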
@@ -1,5 +1,5 @@
 use crate::cdsl::ast::{var, ExprBuilder, Literal};
-use crate::cdsl::instructions::InstructionGroup;
+use crate::cdsl::instructions::{vector, Bindable, InstructionGroup};
 use crate::cdsl::types::ValueType;
 use crate::cdsl::xform::TransformGroupBuilder;
 use crate::shared::types::Float::F64;
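(The import change is load-bearing: `bind` is a trait method, so `Bindable` must be in scope at every call site, and `vector` is the helper that builds a vector type from a lane type plus the target vector width. Without the import, rustc rejects the calls with roughly the following; the exact struct name in the message may differ:)

// error[E0599]: no method named `bind` found for struct `Instruction` in the current scope
//   = help: items from traits can only be used if the trait is in scope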
@@ -322,10 +322,8 @@ pub(crate) fn define(shared: &mut SharedDefinitions, x86_instructions: &Instruct
     // SIMD splat: 8-bits
     for ty in ValueType::all_lane_types().filter(|t| t.lane_bits() == 8) {
-        let splat_any8x16 = splat.bind_vector_from_lane(ty, sse_vector_size);
-        let bitcast_f64_to_any8x16 = raw_bitcast
-            .bind_vector_from_lane(ty, sse_vector_size)
-            .bind(F64);
+        let splat_any8x16 = splat.bind(vector(ty, sse_vector_size));
+        let bitcast_f64_to_any8x16 = raw_bitcast.bind(vector(ty, sse_vector_size)).bind(F64);
         narrow.legalize(
             def!(y = splat_any8x16(x)),
             vec![
@@ -340,13 +338,13 @@ pub(crate) fn define(shared: &mut SharedDefinitions, x86_instructions: &Instruct
     // SIMD splat: 16-bits
     for ty in ValueType::all_lane_types().filter(|t| t.lane_bits() == 16) {
-        let splat_x16x8 = splat.bind_vector_from_lane(ty, sse_vector_size);
+        let splat_x16x8 = splat.bind(vector(ty, sse_vector_size));
         let raw_bitcast_any16x8_to_i32x4 = raw_bitcast
-            .bind_vector_from_lane(I32, sse_vector_size)
-            .bind_vector_from_lane(ty, sse_vector_size);
+            .bind(vector(I32, sse_vector_size))
+            .bind(vector(ty, sse_vector_size));
         let raw_bitcast_i32x4_to_any16x8 = raw_bitcast
-            .bind_vector_from_lane(ty, sse_vector_size)
-            .bind_vector_from_lane(I32, sse_vector_size);
+            .bind(vector(ty, sse_vector_size))
+            .bind(vector(I32, sse_vector_size));
         narrow.legalize(
             def!(y = splat_x16x8(x)),
             vec![
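(In the chained `raw_bitcast` binds above, order matters: judging by the variable names, the first `bind` pins the instruction's controlling type variable, its result type, and the second pins the input type, so swapping the two arguments expresses the opposite direction of the cast. A hedged restatement, inferred from the names rather than verified against the typevar declarations:)

// .bind(vector(I32, ...)).bind(vector(ty, ...))  => any16x8 -> i32x4
// .bind(vector(ty, ...)).bind(vector(I32, ...))  => i32x4 -> any16x8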
@@ -361,7 +359,7 @@ pub(crate) fn define(shared: &mut SharedDefinitions, x86_instructions: &Instruct
     // SIMD splat: 32-bits
     for ty in ValueType::all_lane_types().filter(|t| t.lane_bits() == 32) {
-        let splat_any32x4 = splat.bind_vector_from_lane(ty, sse_vector_size);
+        let splat_any32x4 = splat.bind(vector(ty, sse_vector_size));
         narrow.legalize(
             def!(y = splat_any32x4(x)),
             vec![
@@ -373,7 +371,7 @@ pub(crate) fn define(shared: &mut SharedDefinitions, x86_instructions: &Instruct
     // SIMD splat: 64-bits
     for ty in ValueType::all_lane_types().filter(|t| t.lane_bits() == 64) {
-        let splat_any64x2 = splat.bind_vector_from_lane(ty, sse_vector_size);
+        let splat_any64x2 = splat.bind(vector(ty, sse_vector_size));
         narrow.legalize(
             def!(y = splat_any64x2(x)),
             vec![
@@ -1,5 +1,5 @@
 use crate::cdsl::ast::{var, ExprBuilder, Literal};
-use crate::cdsl::instructions::{Instruction, InstructionGroup};
+use crate::cdsl::instructions::{Bindable, Instruction, InstructionGroup};
 use crate::cdsl::xform::{TransformGroupBuilder, TransformGroups};
 use crate::shared::immediates::Immediates;