Rename the 'cretonne' crate to 'cretonne-codegen'.

This fixes the next part of #287.
Author: Dan Gohman
Date: 2018-04-17 08:48:02 -07:00
Parent: 7767186dd0
Commit: 24fa169e1f
254 changed files with 265 additions and 264 deletions

@@ -0,0 +1,266 @@
//! Cretonne instruction builder.
//!
//! A `Builder` provides a convenient interface for inserting instructions into a Cretonne
//! function. Many of its methods are generated from the meta language instruction definitions.
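//!
//! # Examples
//!
//! A minimal sketch of typical builder usage, mirroring the tests at the bottom of this file
//! (the `cursor` module path is assumed to be public here):
//!
//! ```rust
//! use cretonne_codegen::cursor::{Cursor, FuncCursor};
//! use cretonne_codegen::ir::types::I32;
//! use cretonne_codegen::ir::{Function, InstBuilder};
//!
//! let mut func = Function::new();
//! let ebb0 = func.dfg.make_ebb();
//! let mut pos = FuncCursor::new(&mut func);
//! pos.insert_ebb(ebb0);
//!
//! // `pos.ins()` returns a builder with all the `InstBuilder` methods.
//! let v0 = pos.ins().iconst(I32, 3);
//! let v1 = pos.ins().iadd_imm(v0, 1);
//! assert_eq!(pos.func.dfg.value_type(v1), I32);
//! ```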
use ir;
use ir::types;
use ir::{DataFlowGraph, InstructionData};
use ir::{Inst, Opcode, Type, Value};
use isa;
/// Base trait for instruction builders.
///
/// The `InstBuilderBase` trait provides the basic functionality required by the methods of the
/// generated `InstBuilder` trait. These methods should not normally be used directly. Use the
/// methods in the `InstBuilder` trait instead.
///
/// Any data type that implements `InstBuilderBase` also gets all the methods of the `InstBuilder`
/// trait.
pub trait InstBuilderBase<'f>: Sized {
/// Get an immutable reference to the data flow graph that will hold the constructed
/// instructions.
fn data_flow_graph(&self) -> &DataFlowGraph;
/// Get a mutable reference to the data flow graph that will hold the constructed
/// instructions.
fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph;
/// Insert an instruction and return a reference to it, consuming the builder.
///
/// The result types may depend on a controlling type variable. For non-polymorphic
/// instructions with multiple results, pass `VOID` for the `ctrl_typevar` argument.
fn build(self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph);
}
// Include trait code generated by `lib/codegen/meta/gen_instr.py`.
//
// This file defines the `InstBuilder` trait as an extension of `InstBuilderBase` with methods per
// instruction format and per opcode.
include!(concat!(env!("OUT_DIR"), "/inst_builder.rs"));
/// Any type implementing `InstBuilderBase` gets all the `InstBuilder` methods for free.
impl<'f, T: InstBuilderBase<'f>> InstBuilder<'f> for T {}
/// Base trait for instruction inserters.
///
/// This is an alternative base trait for an instruction builder to implement.
///
/// An instruction inserter can be adapted into an instruction builder by wrapping it in an
/// `InsertBuilder`. This provides some common functionality for instruction builders that insert
/// new instructions, as opposed to the `ReplaceBuilder` which overwrites existing instructions.
pub trait InstInserterBase<'f>: Sized {
/// Get an immutable reference to the data flow graph.
fn data_flow_graph(&self) -> &DataFlowGraph;
/// Get a mutable reference to the data flow graph.
fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph;
/// Insert a new instruction which belongs to the DFG.
fn insert_built_inst(self, inst: Inst, ctrl_typevar: Type) -> &'f mut DataFlowGraph;
}
use std::marker::PhantomData;
/// Builder that inserts an instruction at the current position.
///
/// An `InsertBuilder` is a wrapper for an `InstInserterBase` that turns it into an instruction
/// builder with some additional facilities for creating instructions that reuse existing values as
/// their results.
pub struct InsertBuilder<'f, IIB: InstInserterBase<'f>> {
inserter: IIB,
unused: PhantomData<&'f u32>,
}
impl<'f, IIB: InstInserterBase<'f>> InsertBuilder<'f, IIB> {
/// Create a new builder which inserts instructions at `pos`.
/// The `dfg` and `pos.layout` references should be from the same `Function`.
pub fn new(inserter: IIB) -> InsertBuilder<'f, IIB> {
InsertBuilder {
inserter,
unused: PhantomData,
}
}
/// Reuse result values in `reuse`.
///
/// Convert this builder into one that will reuse the provided result values instead of
/// allocating new ones. The provided values for reuse must not be attached to anything. Any
/// missing result values will be allocated as normal.
///
/// The `reuse` argument is expected to be an array of `Option<Value>`.
pub fn with_results<Array>(self, reuse: Array) -> InsertReuseBuilder<'f, IIB, Array>
where
Array: AsRef<[Option<Value>]>,
{
InsertReuseBuilder {
inserter: self.inserter,
reuse,
unused: PhantomData,
}
}
/// Reuse a single result value.
///
/// Convert this into a builder that will reuse `v` as the single result value. The reused
/// result value `v` must not be attached to anything.
///
/// This method should only be used when building an instruction with exactly one result. Use
/// `with_results()` for the more general case.
pub fn with_result(self, v: Value) -> InsertReuseBuilder<'f, IIB, [Option<Value>; 1]> {
// TODO: Specialize this to return a different builder that just attaches `v` instead of
// calling `make_inst_results_reusing()`.
self.with_results([Some(v)])
}
}
impl<'f, IIB: InstInserterBase<'f>> InstBuilderBase<'f> for InsertBuilder<'f, IIB> {
fn data_flow_graph(&self) -> &DataFlowGraph {
self.inserter.data_flow_graph()
}
fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph {
self.inserter.data_flow_graph_mut()
}
fn build(mut self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph) {
let inst;
{
let dfg = self.inserter.data_flow_graph_mut();
inst = dfg.make_inst(data);
dfg.make_inst_results(inst, ctrl_typevar);
}
(inst, self.inserter.insert_built_inst(inst, ctrl_typevar))
}
}
/// Builder that inserts a new instruction like `InsertBuilder`, but reusing result values.
pub struct InsertReuseBuilder<'f, IIB, Array>
where
IIB: InstInserterBase<'f>,
Array: AsRef<[Option<Value>]>,
{
inserter: IIB,
reuse: Array,
unused: PhantomData<&'f u32>,
}
impl<'f, IIB, Array> InstBuilderBase<'f> for InsertReuseBuilder<'f, IIB, Array>
where
IIB: InstInserterBase<'f>,
Array: AsRef<[Option<Value>]>,
{
fn data_flow_graph(&self) -> &DataFlowGraph {
self.inserter.data_flow_graph()
}
fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph {
self.inserter.data_flow_graph_mut()
}
fn build(mut self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph) {
let inst;
{
let dfg = self.inserter.data_flow_graph_mut();
inst = dfg.make_inst(data);
// Make an `Iterator<Item = Option<Value>>`.
let ru = self.reuse.as_ref().iter().cloned();
dfg.make_inst_results_reusing(inst, ctrl_typevar, ru);
}
(inst, self.inserter.insert_built_inst(inst, ctrl_typevar))
}
}
/// Instruction builder that replaces an existing instruction.
///
/// The inserted instruction will have the same `Inst` number as the old one.
///
/// If the old instruction still has result values attached, it is assumed that the new instruction
/// produces the same number and types of results. The old result values are preserved. If the
/// replacement instruction format does not support multiple results, the builder panics. It is a
/// bug to leave result values dangling.
pub struct ReplaceBuilder<'f> {
dfg: &'f mut DataFlowGraph,
inst: Inst,
}
impl<'f> ReplaceBuilder<'f> {
/// Create a `ReplaceBuilder` that will overwrite `inst`.
pub fn new(dfg: &'f mut DataFlowGraph, inst: Inst) -> ReplaceBuilder {
ReplaceBuilder { dfg, inst }
}
}
impl<'f> InstBuilderBase<'f> for ReplaceBuilder<'f> {
fn data_flow_graph(&self) -> &DataFlowGraph {
self.dfg
}
fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph {
self.dfg
}
fn build(self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph) {
// Splat the new instruction on top of the old one.
self.dfg[self.inst] = data;
if !self.dfg.has_results(self.inst) {
// The old result values were either detached or non-existent.
// Construct new ones.
self.dfg.make_inst_results(self.inst, ctrl_typevar);
}
(self.inst, self.dfg)
}
}
#[cfg(test)]
mod tests {
use cursor::{Cursor, FuncCursor};
use ir::condcodes::*;
use ir::types::*;
use ir::{Function, InstBuilder, ValueDef};
#[test]
fn types() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let arg0 = func.dfg.append_ebb_param(ebb0, I32);
let mut pos = FuncCursor::new(&mut func);
pos.insert_ebb(ebb0);
// Explicit types.
let v0 = pos.ins().iconst(I32, 3);
assert_eq!(pos.func.dfg.value_type(v0), I32);
// Inferred from inputs.
let v1 = pos.ins().iadd(arg0, v0);
assert_eq!(pos.func.dfg.value_type(v1), I32);
// Formula.
let cmp = pos.ins().icmp(IntCC::Equal, arg0, v0);
assert_eq!(pos.func.dfg.value_type(cmp), B1);
}
#[test]
fn reuse_results() {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
let arg0 = func.dfg.append_ebb_param(ebb0, I32);
let mut pos = FuncCursor::new(&mut func);
pos.insert_ebb(ebb0);
let v0 = pos.ins().iadd_imm(arg0, 17);
assert_eq!(pos.func.dfg.value_type(v0), I32);
let iadd = pos.prev_inst().unwrap();
assert_eq!(pos.func.dfg.value_def(v0), ValueDef::Result(iadd, 0));
// Detach v0 and reuse it for a different instruction.
pos.func.dfg.clear_results(iadd);
let v0b = pos.ins().with_result(v0).iconst(I32, 3);
assert_eq!(v0, v0b);
assert_eq!(pos.current_inst(), Some(iadd));
let iconst = pos.prev_inst().unwrap();
assert!(iadd != iconst);
assert_eq!(pos.func.dfg.value_def(v0), ValueDef::Result(iconst, 0));
}
}

@@ -0,0 +1,358 @@
//! Condition codes for the Cretonne code generator.
//!
//! A condition code here is an enumerated type that determines how to compare two numbers. There
//! are different rules for comparing integers and floating point numbers, so they use different
//! condition codes.
use std::fmt::{self, Display, Formatter};
use std::str::FromStr;
/// Common traits of condition codes.
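///
/// # Examples
///
/// A small sketch of the `inverse`/`reverse` relationship, based on the `IntCC` implementation
/// below (the `ir::condcodes` path is assumed to be public):
///
/// ```rust
/// use cretonne_codegen::ir::condcodes::{CondCode, IntCC};
///
/// // Inverting flips the truth value; reversing swaps the compared operands.
/// assert_eq!(IntCC::SignedLessThan.inverse(), IntCC::SignedGreaterThanOrEqual);
/// assert_eq!(IntCC::SignedLessThan.reverse(), IntCC::SignedGreaterThan);
/// ```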
pub trait CondCode: Copy {
/// Get the inverse condition code of `self`.
///
/// The inverse condition code produces the opposite result for all comparisons.
/// That is, `cmp CC, x, y` is true if and only if `cmp CC.inverse(), x, y` is false.
#[must_use]
fn inverse(self) -> Self;
/// Get the reversed condition code for `self`.
///
/// The reversed condition code produces the same result as swapping `x` and `y` in the
/// comparison. That is, `cmp CC, x, y` is the same as `cmp CC.reverse(), y, x`.
#[must_use]
fn reverse(self) -> Self;
}
/// Condition code for comparing integers.
///
/// This condition code is used by the `icmp` instruction to compare integer values. There are
/// separate codes for comparing the integers as signed or unsigned numbers where it makes a
/// difference.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum IntCC {
/// `==`.
Equal,
/// `!=`.
NotEqual,
/// Signed `<`.
SignedLessThan,
/// Signed `>=`.
SignedGreaterThanOrEqual,
/// Signed `>`.
SignedGreaterThan,
/// Signed `<=`.
SignedLessThanOrEqual,
/// Unsigned `<`.
UnsignedLessThan,
/// Unsigned `>=`.
UnsignedGreaterThanOrEqual,
/// Unsigned `>`.
UnsignedGreaterThan,
/// Unsigned `<=`.
UnsignedLessThanOrEqual,
}
impl CondCode for IntCC {
fn inverse(self) -> Self {
use self::IntCC::*;
match self {
Equal => NotEqual,
NotEqual => Equal,
SignedLessThan => SignedGreaterThanOrEqual,
SignedGreaterThanOrEqual => SignedLessThan,
SignedGreaterThan => SignedLessThanOrEqual,
SignedLessThanOrEqual => SignedGreaterThan,
UnsignedLessThan => UnsignedGreaterThanOrEqual,
UnsignedGreaterThanOrEqual => UnsignedLessThan,
UnsignedGreaterThan => UnsignedLessThanOrEqual,
UnsignedLessThanOrEqual => UnsignedGreaterThan,
}
}
fn reverse(self) -> Self {
use self::IntCC::*;
match self {
Equal => Equal,
NotEqual => NotEqual,
SignedGreaterThan => SignedLessThan,
SignedGreaterThanOrEqual => SignedLessThanOrEqual,
SignedLessThan => SignedGreaterThan,
SignedLessThanOrEqual => SignedGreaterThanOrEqual,
UnsignedGreaterThan => UnsignedLessThan,
UnsignedGreaterThanOrEqual => UnsignedLessThanOrEqual,
UnsignedLessThan => UnsignedGreaterThan,
UnsignedLessThanOrEqual => UnsignedGreaterThanOrEqual,
}
}
}
impl Display for IntCC {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
use self::IntCC::*;
f.write_str(match *self {
Equal => "eq",
NotEqual => "ne",
SignedGreaterThan => "sgt",
SignedGreaterThanOrEqual => "sge",
SignedLessThan => "slt",
SignedLessThanOrEqual => "sle",
UnsignedGreaterThan => "ugt",
UnsignedGreaterThanOrEqual => "uge",
UnsignedLessThan => "ult",
UnsignedLessThanOrEqual => "ule",
})
}
}
impl FromStr for IntCC {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
use self::IntCC::*;
match s {
"eq" => Ok(Equal),
"ne" => Ok(NotEqual),
"sge" => Ok(SignedGreaterThanOrEqual),
"sgt" => Ok(SignedGreaterThan),
"sle" => Ok(SignedLessThanOrEqual),
"slt" => Ok(SignedLessThan),
"uge" => Ok(UnsignedGreaterThanOrEqual),
"ugt" => Ok(UnsignedGreaterThan),
"ule" => Ok(UnsignedLessThanOrEqual),
"ult" => Ok(UnsignedLessThan),
_ => Err(()),
}
}
}
/// Condition code for comparing floating point numbers.
///
/// This condition code is used by the `fcmp` instruction to compare floating point values. Two
/// IEEE floating point values relate in exactly one of four ways:
///
/// 1. `UN` - unordered when either value is NaN.
/// 2. `EQ` - equal numerical value.
/// 3. `LT` - `x` is less than `y`.
/// 4. `GT` - `x` is greater than `y`.
///
/// Note that `0.0` and `-0.0` relate as `EQ` because they both represent the number 0.
///
/// The condition codes described here are used to produce a single boolean value from the
/// comparison. The 14 condition codes here cover every possible combination of the relation above
/// except the impossible `!UN & !EQ & !LT & !GT` and the always true `UN | EQ | LT | GT`.
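///
/// # Examples
///
/// A small sketch of how unordered comparisons behave under `inverse` and `reverse`, based on
/// the `CondCode` implementation below (the `ir::condcodes` path is assumed to be public):
///
/// ```rust
/// use cretonne_codegen::ir::condcodes::{CondCode, FloatCC};
///
/// // `lt` is false when either operand is NaN, so its inverse must be true for NaN inputs.
/// assert_eq!(FloatCC::LessThan.inverse(), FloatCC::UnorderedOrGreaterThanOrEqual);
/// // Reversing only swaps the operands; NaN behavior is unchanged.
/// assert_eq!(FloatCC::LessThan.reverse(), FloatCC::GreaterThan);
/// ```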
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum FloatCC {
/// EQ | LT | GT
Ordered,
/// UN
Unordered,
/// EQ
Equal,
/// The C '!=' operator is the inverse of '==': `NotEqual`.
/// UN | LT | GT
NotEqual,
/// LT | GT
OrderedNotEqual,
/// UN | EQ
UnorderedOrEqual,
/// LT
LessThan,
/// LT | EQ
LessThanOrEqual,
/// GT
GreaterThan,
/// GT | EQ
GreaterThanOrEqual,
/// UN | LT
UnorderedOrLessThan,
/// UN | LT | EQ
UnorderedOrLessThanOrEqual,
/// UN | GT
UnorderedOrGreaterThan,
/// UN | GT | EQ
UnorderedOrGreaterThanOrEqual,
}
impl CondCode for FloatCC {
fn inverse(self) -> Self {
use self::FloatCC::*;
match self {
Ordered => Unordered,
Unordered => Ordered,
Equal => NotEqual,
NotEqual => Equal,
OrderedNotEqual => UnorderedOrEqual,
UnorderedOrEqual => OrderedNotEqual,
LessThan => UnorderedOrGreaterThanOrEqual,
LessThanOrEqual => UnorderedOrGreaterThan,
GreaterThan => UnorderedOrLessThanOrEqual,
GreaterThanOrEqual => UnorderedOrLessThan,
UnorderedOrLessThan => GreaterThanOrEqual,
UnorderedOrLessThanOrEqual => GreaterThan,
UnorderedOrGreaterThan => LessThanOrEqual,
UnorderedOrGreaterThanOrEqual => LessThan,
}
}
fn reverse(self) -> Self {
use self::FloatCC::*;
match self {
Ordered => Ordered,
Unordered => Unordered,
Equal => Equal,
NotEqual => NotEqual,
OrderedNotEqual => OrderedNotEqual,
UnorderedOrEqual => UnorderedOrEqual,
LessThan => GreaterThan,
LessThanOrEqual => GreaterThanOrEqual,
GreaterThan => LessThan,
GreaterThanOrEqual => LessThanOrEqual,
UnorderedOrLessThan => UnorderedOrGreaterThan,
UnorderedOrLessThanOrEqual => UnorderedOrGreaterThanOrEqual,
UnorderedOrGreaterThan => UnorderedOrLessThan,
UnorderedOrGreaterThanOrEqual => UnorderedOrLessThanOrEqual,
}
}
}
impl Display for FloatCC {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
use self::FloatCC::*;
f.write_str(match *self {
Ordered => "ord",
Unordered => "uno",
Equal => "eq",
NotEqual => "ne",
OrderedNotEqual => "one",
UnorderedOrEqual => "ueq",
LessThan => "lt",
LessThanOrEqual => "le",
GreaterThan => "gt",
GreaterThanOrEqual => "ge",
UnorderedOrLessThan => "ult",
UnorderedOrLessThanOrEqual => "ule",
UnorderedOrGreaterThan => "ugt",
UnorderedOrGreaterThanOrEqual => "uge",
})
}
}
impl FromStr for FloatCC {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
use self::FloatCC::*;
match s {
"ord" => Ok(Ordered),
"uno" => Ok(Unordered),
"eq" => Ok(Equal),
"ne" => Ok(NotEqual),
"one" => Ok(OrderedNotEqual),
"ueq" => Ok(UnorderedOrEqual),
"lt" => Ok(LessThan),
"le" => Ok(LessThanOrEqual),
"gt" => Ok(GreaterThan),
"ge" => Ok(GreaterThanOrEqual),
"ult" => Ok(UnorderedOrLessThan),
"ule" => Ok(UnorderedOrLessThanOrEqual),
"ugt" => Ok(UnorderedOrGreaterThan),
"uge" => Ok(UnorderedOrGreaterThanOrEqual),
_ => Err(()),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::string::ToString;
static INT_ALL: [IntCC; 10] = [
IntCC::Equal,
IntCC::NotEqual,
IntCC::SignedLessThan,
IntCC::SignedGreaterThanOrEqual,
IntCC::SignedGreaterThan,
IntCC::SignedLessThanOrEqual,
IntCC::UnsignedLessThan,
IntCC::UnsignedGreaterThanOrEqual,
IntCC::UnsignedGreaterThan,
IntCC::UnsignedLessThanOrEqual,
];
#[test]
fn int_inverse() {
for r in &INT_ALL {
let cc = *r;
let inv = cc.inverse();
assert!(cc != inv);
assert_eq!(inv.inverse(), cc);
}
}
#[test]
fn int_reverse() {
for r in &INT_ALL {
let cc = *r;
let rev = cc.reverse();
assert_eq!(rev.reverse(), cc);
}
}
#[test]
fn int_display() {
for r in &INT_ALL {
let cc = *r;
assert_eq!(cc.to_string().parse(), Ok(cc));
}
assert_eq!("bogus".parse::<IntCC>(), Err(()));
}
static FLOAT_ALL: [FloatCC; 14] = [
FloatCC::Ordered,
FloatCC::Unordered,
FloatCC::Equal,
FloatCC::NotEqual,
FloatCC::OrderedNotEqual,
FloatCC::UnorderedOrEqual,
FloatCC::LessThan,
FloatCC::LessThanOrEqual,
FloatCC::GreaterThan,
FloatCC::GreaterThanOrEqual,
FloatCC::UnorderedOrLessThan,
FloatCC::UnorderedOrLessThanOrEqual,
FloatCC::UnorderedOrGreaterThan,
FloatCC::UnorderedOrGreaterThanOrEqual,
];
#[test]
fn float_inverse() {
for r in &FLOAT_ALL {
let cc = *r;
let inv = cc.inverse();
assert!(cc != inv);
assert_eq!(inv.inverse(), cc);
}
}
#[test]
fn float_reverse() {
for r in &FLOAT_ALL {
let cc = *r;
let rev = cc.reverse();
assert_eq!(rev.reverse(), cc);
}
}
#[test]
fn float_display() {
for r in &FLOAT_ALL {
let cc = *r;
assert_eq!(cc.to_string().parse(), Ok(cc));
}
assert_eq!("bogus".parse::<FloatCC>(), Err(()));
}
}

lib/codegen/src/ir/dfg.rs (new file, 1191 lines)

File diff suppressed because it is too large

@@ -0,0 +1,286 @@
//! Cretonne IR entity references.
//!
//! Instructions in Cretonne IR need to reference other entities in the function. This can be other
//! parts of the function like extended basic blocks or stack slots, or it can be external entities
//! that are declared in the function preamble in the text format.
//!
//! These entity references in instruction operands are not implemented as Rust references both
//! because Rust's ownership and mutability rules make it difficult, and because 64-bit pointers
//! take up a lot of space, and we want a compact in-memory representation. Instead, entity
//! references are structs wrapping a `u32` index into a table in the `Function` main data
//! structure. There is a separate index type for each entity type, so we don't lose type safety.
//!
//! The `entities` module defines public types for the entity references along with constants
//! representing an invalid reference. We prefer to use `Option<EntityRef>` whenever possible, but
//! unfortunately that type is twice as large as the 32-bit index type on its own. Thus, compact
//! data structures use the `PackedOption<EntityRef>` representation, while function arguments and
//! return values prefer the more Rust-like `Option<EntityRef>` variant.
//!
//! The entity references all implement the `Display` trait in a way that matches the textual IR
//! format.
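//!
//! # Examples
//!
//! A small sketch of the textual form of entity references, mirroring the tests at the bottom
//! of this file:
//!
//! ```rust
//! use cretonne_codegen::ir::Value;
//!
//! // Entity references display with the same prefixes used by the textual IR.
//! let v3 = Value::with_number(3).unwrap();
//! assert_eq!(v3.to_string(), "v3");
//! ```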
use std::fmt;
use std::u32;
/// An opaque reference to an extended basic block in a function.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Ebb(u32);
entity_impl!(Ebb, "ebb");
impl Ebb {
/// Create a new EBB reference from its number. This corresponds to the `ebbNN` representation.
///
/// This method is for use by the parser.
pub fn with_number(n: u32) -> Option<Ebb> {
if n < u32::MAX { Some(Ebb(n)) } else { None }
}
}
/// An opaque reference to an SSA value.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Value(u32);
entity_impl!(Value, "v");
impl Value {
/// Create a value from its number representation.
/// This is the number in the `vNN` notation.
///
/// This method is for use by the parser.
pub fn with_number(n: u32) -> Option<Value> {
if n < u32::MAX / 2 {
Some(Value(n))
} else {
None
}
}
}
/// An opaque reference to an instruction in a function.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Inst(u32);
entity_impl!(Inst, "inst");
/// An opaque reference to a stack slot.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct StackSlot(u32);
entity_impl!(StackSlot, "ss");
impl StackSlot {
/// Create a new stack slot reference from its number.
///
/// This method is for use by the parser.
pub fn with_number(n: u32) -> Option<StackSlot> {
if n < u32::MAX {
Some(StackSlot(n))
} else {
None
}
}
}
/// An opaque reference to a global variable.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct GlobalVar(u32);
entity_impl!(GlobalVar, "gv");
impl GlobalVar {
/// Create a new global variable reference from its number.
///
/// This method is for use by the parser.
pub fn with_number(n: u32) -> Option<GlobalVar> {
if n < u32::MAX {
Some(GlobalVar(n))
} else {
None
}
}
}
/// An opaque reference to a jump table.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct JumpTable(u32);
entity_impl!(JumpTable, "jt");
impl JumpTable {
/// Create a new jump table reference from its number.
///
/// This method is for use by the parser.
pub fn with_number(n: u32) -> Option<JumpTable> {
if n < u32::MAX {
Some(JumpTable(n))
} else {
None
}
}
}
/// A reference to an external function.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct FuncRef(u32);
entity_impl!(FuncRef, "fn");
impl FuncRef {
/// Create a new external function reference from its number.
///
/// This method is for use by the parser.
pub fn with_number(n: u32) -> Option<FuncRef> {
if n < u32::MAX { Some(FuncRef(n)) } else { None }
}
}
/// A reference to a function signature.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct SigRef(u32);
entity_impl!(SigRef, "sig");
impl SigRef {
/// Create a new function signature reference from its number.
///
/// This method is for use by the parser.
pub fn with_number(n: u32) -> Option<SigRef> {
if n < u32::MAX { Some(SigRef(n)) } else { None }
}
}
/// A reference to a heap.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct Heap(u32);
entity_impl!(Heap, "heap");
impl Heap {
/// Create a new heap reference from its number.
///
/// This method is for use by the parser.
pub fn with_number(n: u32) -> Option<Heap> {
if n < u32::MAX { Some(Heap(n)) } else { None }
}
}
/// A reference to any of the entities defined in this module.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub enum AnyEntity {
/// The whole function.
Function,
/// An extended basic block.
Ebb(Ebb),
/// An instruction.
Inst(Inst),
/// An SSA value.
Value(Value),
/// A stack slot.
StackSlot(StackSlot),
/// A Global variable.
GlobalVar(GlobalVar),
/// A jump table.
JumpTable(JumpTable),
/// An external function.
FuncRef(FuncRef),
/// A function call signature.
SigRef(SigRef),
/// A heap.
Heap(Heap),
}
impl fmt::Display for AnyEntity {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
AnyEntity::Function => write!(f, "function"),
AnyEntity::Ebb(r) => r.fmt(f),
AnyEntity::Inst(r) => r.fmt(f),
AnyEntity::Value(r) => r.fmt(f),
AnyEntity::StackSlot(r) => r.fmt(f),
AnyEntity::GlobalVar(r) => r.fmt(f),
AnyEntity::JumpTable(r) => r.fmt(f),
AnyEntity::FuncRef(r) => r.fmt(f),
AnyEntity::SigRef(r) => r.fmt(f),
AnyEntity::Heap(r) => r.fmt(f),
}
}
}
impl fmt::Debug for AnyEntity {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(self as &fmt::Display).fmt(f)
}
}
impl From<Ebb> for AnyEntity {
fn from(r: Ebb) -> AnyEntity {
AnyEntity::Ebb(r)
}
}
impl From<Inst> for AnyEntity {
fn from(r: Inst) -> AnyEntity {
AnyEntity::Inst(r)
}
}
impl From<Value> for AnyEntity {
fn from(r: Value) -> AnyEntity {
AnyEntity::Value(r)
}
}
impl From<StackSlot> for AnyEntity {
fn from(r: StackSlot) -> AnyEntity {
AnyEntity::StackSlot(r)
}
}
impl From<GlobalVar> for AnyEntity {
fn from(r: GlobalVar) -> AnyEntity {
AnyEntity::GlobalVar(r)
}
}
impl From<JumpTable> for AnyEntity {
fn from(r: JumpTable) -> AnyEntity {
AnyEntity::JumpTable(r)
}
}
impl From<FuncRef> for AnyEntity {
fn from(r: FuncRef) -> AnyEntity {
AnyEntity::FuncRef(r)
}
}
impl From<SigRef> for AnyEntity {
fn from(r: SigRef) -> AnyEntity {
AnyEntity::SigRef(r)
}
}
impl From<Heap> for AnyEntity {
fn from(r: Heap) -> AnyEntity {
AnyEntity::Heap(r)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::string::ToString;
use std::u32;
#[test]
fn value_with_number() {
assert_eq!(Value::with_number(0).unwrap().to_string(), "v0");
assert_eq!(Value::with_number(1).unwrap().to_string(), "v1");
assert_eq!(Value::with_number(u32::MAX / 2), None);
assert!(Value::with_number(u32::MAX / 2 - 1).is_some());
}
#[test]
fn memory() {
use packed_option::PackedOption;
use std::mem;
// This is the whole point of `PackedOption`.
assert_eq!(
mem::size_of::<Value>(),
mem::size_of::<PackedOption<Value>>()
);
}
}

@@ -0,0 +1,456 @@
//! External function calls.
//!
//! To a Cretonne function, all functions are "external". Directly called functions must be
//! declared in the preamble, and all function calls must have a signature.
//!
//! This module declares the data types used to represent external functions and call signatures.
use ir::{ArgumentLoc, ExternalName, SigRef, Type};
use isa::{RegInfo, RegUnit};
use std::cmp;
use std::fmt;
use std::str::FromStr;
use std::vec::Vec;
/// Function signature.
///
/// The function signature describes the types of formal parameters and return values along with
/// other details that are needed to call a function correctly.
///
/// A signature can optionally include ISA-specific ABI information which specifies exactly how
/// arguments and return values are passed.
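///
/// # Examples
///
/// A small sketch of building and displaying a signature, mirroring the tests at the bottom of
/// this file (assuming these types are re-exported from `ir` as they are used elsewhere in
/// this crate):
///
/// ```rust
/// # use cretonne_codegen::ir::types::{F32, I32};
/// # use cretonne_codegen::ir::{AbiParam, CallConv, Signature};
/// let mut sig = Signature::new(CallConv::SystemV);
/// sig.params.push(AbiParam::new(I32));
/// sig.returns.push(AbiParam::new(F32));
/// assert_eq!(sig.to_string(), "(i32) -> f32 system_v");
/// ```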
#[derive(Clone, Debug)]
pub struct Signature {
/// The arguments passed to the function.
pub params: Vec<AbiParam>,
/// Values returned from the function.
pub returns: Vec<AbiParam>,
/// Calling convention.
pub call_conv: CallConv,
/// When the signature has been legalized to a specific ISA, this holds the size of the
/// argument array on the stack. Before legalization, this is `None`.
///
/// This can be computed from the legalized `params` array as the maximum (offset plus
/// byte size) of the `ArgumentLoc::Stack(offset)` argument.
pub argument_bytes: Option<u32>,
}
impl Signature {
/// Create a new blank signature.
pub fn new(call_conv: CallConv) -> Self {
Self {
params: Vec::new(),
returns: Vec::new(),
call_conv,
argument_bytes: None,
}
}
/// Clear the signature so it is identical to a fresh one returned by `new()`.
pub fn clear(&mut self, call_conv: CallConv) {
self.params.clear();
self.returns.clear();
self.call_conv = call_conv;
self.argument_bytes = None;
}
/// Compute the size of the stack arguments and mark signature as legalized.
///
/// Even if there are no stack arguments, this will set `argument_bytes` to `Some(0)` instead
/// of `None`. This indicates that the signature has been legalized.
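///
/// # Examples
///
/// A small sketch based on the signature tests at the bottom of this file; the sizes follow
/// the `ArgumentLoc::Stack` handling below:
///
/// ```rust
/// # use cretonne_codegen::ir::types::I32;
/// # use cretonne_codegen::ir::{AbiParam, ArgumentLoc, CallConv, Signature};
/// let mut sig = Signature::new(CallConv::SystemV);
/// sig.params.push(AbiParam::new(I32));
/// sig.params[0].location = ArgumentLoc::Stack(8);
/// sig.compute_argument_bytes();
/// // An `i32` at offset 8 occupies bytes 8..12, so 12 bytes of argument space are needed.
/// assert_eq!(sig.argument_bytes, Some(12));
/// ```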
pub fn compute_argument_bytes(&mut self) {
let bytes = self.params
.iter()
.filter_map(|arg| match arg.location {
ArgumentLoc::Stack(offset) if offset >= 0 => {
Some(offset as u32 + arg.value_type.bytes())
}
_ => None,
})
.fold(0, cmp::max);
self.argument_bytes = Some(bytes);
}
/// Return an object that can display `self` with correct register names.
pub fn display<'a, R: Into<Option<&'a RegInfo>>>(&'a self, regs: R) -> DisplaySignature<'a> {
DisplaySignature(self, regs.into())
}
/// Find the index of a presumed unique special-purpose parameter.
pub fn special_param_index(&self, purpose: ArgumentPurpose) -> Option<usize> {
self.params.iter().rposition(|arg| arg.purpose == purpose)
}
}
/// Wrapper type capable of displaying a `Signature` with correct register names.
pub struct DisplaySignature<'a>(&'a Signature, Option<&'a RegInfo>);
fn write_list(f: &mut fmt::Formatter, args: &[AbiParam], regs: Option<&RegInfo>) -> fmt::Result {
match args.split_first() {
None => {}
Some((first, rest)) => {
write!(f, "{}", first.display(regs))?;
for arg in rest {
write!(f, ", {}", arg.display(regs))?;
}
}
}
Ok(())
}
impl<'a> fmt::Display for DisplaySignature<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "(")?;
write_list(f, &self.0.params, self.1)?;
write!(f, ")")?;
if !self.0.returns.is_empty() {
write!(f, " -> ")?;
write_list(f, &self.0.returns, self.1)?;
}
write!(f, " {}", self.0.call_conv)
}
}
impl fmt::Display for Signature {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.display(None).fmt(f)
}
}
/// Function parameter or return value descriptor.
///
/// This describes the value type being passed to or from a function along with flags that affect
/// how the argument is passed.
#[derive(Copy, Clone, Debug)]
pub struct AbiParam {
/// Type of the argument value.
pub value_type: Type,
/// Special purpose of argument, or `Normal`.
pub purpose: ArgumentPurpose,
/// Method for extending argument to a full register.
pub extension: ArgumentExtension,
/// ABI-specific location of this argument, or `Unassigned` for arguments that have not yet
/// been legalized.
pub location: ArgumentLoc,
}
impl AbiParam {
/// Create a parameter with default flags.
pub fn new(vt: Type) -> Self {
Self {
value_type: vt,
extension: ArgumentExtension::None,
purpose: ArgumentPurpose::Normal,
location: Default::default(),
}
}
/// Create a special-purpose parameter that is not (yet) bound to a specific register.
pub fn special(vt: Type, purpose: ArgumentPurpose) -> Self {
Self {
value_type: vt,
extension: ArgumentExtension::None,
purpose,
location: Default::default(),
}
}
/// Create a parameter for a special-purpose register.
pub fn special_reg(vt: Type, purpose: ArgumentPurpose, regunit: RegUnit) -> Self {
Self {
value_type: vt,
extension: ArgumentExtension::None,
purpose,
location: ArgumentLoc::Reg(regunit),
}
}
/// Convert `self` to a parameter with the `uext` flag set.
pub fn uext(self) -> Self {
debug_assert!(self.value_type.is_int(), "uext on {} arg", self.value_type);
Self {
extension: ArgumentExtension::Uext,
..self
}
}
/// Convert `self` to a parameter type with the `sext` flag set.
pub fn sext(self) -> Self {
debug_assert!(self.value_type.is_int(), "sext on {} arg", self.value_type);
Self {
extension: ArgumentExtension::Sext,
..self
}
}
/// Return an object that can display `self` with correct register names.
pub fn display<'a, R: Into<Option<&'a RegInfo>>>(&'a self, regs: R) -> DisplayAbiParam<'a> {
DisplayAbiParam(self, regs.into())
}
}
/// Wrapper type capable of displaying a `AbiParam` with correct register names.
pub struct DisplayAbiParam<'a>(&'a AbiParam, Option<&'a RegInfo>);
impl<'a> fmt::Display for DisplayAbiParam<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0.value_type)?;
match self.0.extension {
ArgumentExtension::None => {}
ArgumentExtension::Uext => write!(f, " uext")?,
ArgumentExtension::Sext => write!(f, " sext")?,
}
if self.0.purpose != ArgumentPurpose::Normal {
write!(f, " {}", self.0.purpose)?;
}
if self.0.location.is_assigned() {
write!(f, " [{}]", self.0.location.display(self.1))?;
}
Ok(())
}
}
impl fmt::Display for AbiParam {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.display(None).fmt(f)
}
}
/// Function argument extension options.
///
/// On some architectures, small integer function arguments are extended to the width of a
/// general-purpose register.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum ArgumentExtension {
/// No extension, high bits are indeterminate.
None,
/// Unsigned extension: high bits in register are 0.
Uext,
/// Signed extension: high bits in register replicate sign bit.
Sext,
}
/// The special purpose of a function argument.
///
/// Function arguments and return values are used to pass user program values between functions,
/// but they are also used to represent special registers with significance to the ABI such as
/// frame pointers and callee-saved registers.
///
/// The argument purpose is used to indicate any special meaning of an argument or return value.
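///
/// # Examples
///
/// A small sketch of the text format names, based on the `Display` and `FromStr`
/// implementations below:
///
/// ```rust
/// # use cretonne_codegen::ir::ArgumentPurpose;
/// assert_eq!(ArgumentPurpose::VMContext.to_string(), "vmctx");
/// assert_eq!("vmctx".parse(), Ok(ArgumentPurpose::VMContext));
/// ```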
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum ArgumentPurpose {
/// A normal user program value passed to or from a function.
Normal,
/// Struct return pointer.
///
/// When a function needs to return more data than will fit in registers, the caller passes a
/// pointer to a memory location where the return value can be written. In some ABIs, this
/// struct return pointer is passed in a specific register.
///
/// This argument kind can also appear as a return value for ABIs that require a function with
/// a `StructReturn` pointer argument to also return that pointer in a register.
StructReturn,
/// The link register.
///
/// Most RISC architectures implement calls by saving the return address in a designated
/// register rather than pushing it on the stack. This is represented with a `Link` argument.
///
/// Similarly, some return instructions expect the return address in a register represented as
/// a `Link` return value.
Link,
/// The frame pointer.
///
/// This indicates the frame pointer register which has a special meaning in some ABIs.
///
/// The frame pointer appears as an argument and as a return value since it is a callee-saved
/// register.
FramePointer,
/// A callee-saved register.
///
/// Some calling conventions have registers that must be saved by the callee. These registers
/// are represented as `CalleeSaved` arguments and return values.
CalleeSaved,
/// A VM context pointer.
///
/// This is a pointer to a context struct containing details about the current sandbox. It is
/// used as a base pointer for `vmctx` global variables.
VMContext,
/// A signature identifier.
///
/// This is a special-purpose argument used to identify the calling convention expected by the
/// caller in an indirect call. The callee can verify that the expected signature ID matches.
SignatureId,
}
/// Text format names of the `ArgumentPurpose` variants.
static PURPOSE_NAMES: [&str; 7] = ["normal", "sret", "link", "fp", "csr", "vmctx", "sigid"];
impl fmt::Display for ArgumentPurpose {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(PURPOSE_NAMES[*self as usize])
}
}
impl FromStr for ArgumentPurpose {
type Err = ();
fn from_str(s: &str) -> Result<ArgumentPurpose, ()> {
match s {
"normal" => Ok(ArgumentPurpose::Normal),
"sret" => Ok(ArgumentPurpose::StructReturn),
"link" => Ok(ArgumentPurpose::Link),
"fp" => Ok(ArgumentPurpose::FramePointer),
"csr" => Ok(ArgumentPurpose::CalleeSaved),
"vmctx" => Ok(ArgumentPurpose::VMContext),
"sigid" => Ok(ArgumentPurpose::SignatureId),
_ => Err(()),
}
}
}
/// An external function.
///
/// Information about a function that can be called directly with a direct `call` instruction.
#[derive(Clone, Debug)]
pub struct ExtFuncData {
/// Name of the external function.
pub name: ExternalName,
/// Call signature of function.
pub signature: SigRef,
/// Will this function be defined nearby, such that it will always be a certain distance away,
/// after linking? If so, references to it can avoid going through a GOT or PLT. Note that
/// symbols meant to be preemptible cannot be considered colocated.
pub colocated: bool,
}
impl fmt::Display for ExtFuncData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.colocated {
write!(f, "colocated ")?;
}
write!(f, "{} {}", self.name, self.signature)
}
}
/// A Calling convention.
///
/// A function's calling convention determines exactly how arguments and return values are passed,
/// and how stack frames are managed. Since all of these details depend on both the instruction set
/// architecture and possibly the operating system, a function's calling convention is only fully
/// determined by a `(TargetIsa, CallConv)` tuple.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CallConv {
/// The System V-style calling convention.
///
/// This is the System V-style calling convention that a C compiler would
/// use on many platforms.
SystemV,
/// A JIT-compiled WebAssembly function in the SpiderMonkey VM.
SpiderWASM,
}
impl fmt::Display for CallConv {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::CallConv::*;
f.write_str(match *self {
SystemV => "system_v",
SpiderWASM => "spiderwasm",
})
}
}
impl FromStr for CallConv {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
use self::CallConv::*;
match s {
"system_v" => Ok(SystemV),
"spiderwasm" => Ok(SpiderWASM),
_ => Err(()),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ir::types::{B8, F32, I32};
use std::string::ToString;
#[test]
fn argument_type() {
let t = AbiParam::new(I32);
assert_eq!(t.to_string(), "i32");
let mut t = t.uext();
assert_eq!(t.to_string(), "i32 uext");
assert_eq!(t.sext().to_string(), "i32 sext");
t.purpose = ArgumentPurpose::StructReturn;
assert_eq!(t.to_string(), "i32 uext sret");
}
#[test]
fn argument_purpose() {
let all_purpose = [
ArgumentPurpose::Normal,
ArgumentPurpose::StructReturn,
ArgumentPurpose::Link,
ArgumentPurpose::FramePointer,
ArgumentPurpose::CalleeSaved,
ArgumentPurpose::VMContext,
];
for (&e, &n) in all_purpose.iter().zip(PURPOSE_NAMES.iter()) {
assert_eq!(e.to_string(), n);
assert_eq!(Ok(e), n.parse());
}
}
#[test]
fn call_conv() {
for &cc in &[CallConv::SystemV, CallConv::SpiderWASM] {
assert_eq!(Ok(cc), cc.to_string().parse())
}
}
#[test]
fn signatures() {
let mut sig = Signature::new(CallConv::SpiderWASM);
assert_eq!(sig.to_string(), "() spiderwasm");
sig.params.push(AbiParam::new(I32));
assert_eq!(sig.to_string(), "(i32) spiderwasm");
sig.returns.push(AbiParam::new(F32));
assert_eq!(sig.to_string(), "(i32) -> f32 spiderwasm");
sig.params.push(AbiParam::new(I32.by(4).unwrap()));
assert_eq!(sig.to_string(), "(i32, i32x4) -> f32 spiderwasm");
sig.returns.push(AbiParam::new(B8));
assert_eq!(sig.to_string(), "(i32, i32x4) -> f32, b8 spiderwasm");
// Test the offset computation algorithm.
assert_eq!(sig.argument_bytes, None);
sig.params[1].location = ArgumentLoc::Stack(8);
sig.compute_argument_bytes();
// An `i32x4` at offset 8 requires a 24-byte argument array.
assert_eq!(sig.argument_bytes, Some(24));
// Order does not matter.
sig.params[0].location = ArgumentLoc::Stack(24);
sig.compute_argument_bytes();
assert_eq!(sig.argument_bytes, Some(28));
// Writing ABI-annotated signatures.
assert_eq!(
sig.to_string(),
"(i32 [24], i32x4 [8]) -> f32, b8 spiderwasm"
);
}
}

@@ -0,0 +1,165 @@
//! External names.
//!
//! These are identifiers for declaring entities defined outside the current
//! function. The name of an external declaration doesn't have any meaning to
//! Cretonne, which compiles functions independently.
use ir::LibCall;
use std::cmp;
use std::fmt::{self, Write};
use std::str::FromStr;
const TESTCASE_NAME_LENGTH: usize = 16;
/// The name of an external is either a reference to a user-defined symbol
/// table, or a short sequence of ascii bytes so that test cases do not have
/// to keep track of a symbol table.
///
/// External names are primarily used as keys by code using Cretonne to map
/// from a `cretonne_codegen::ir::FuncRef` or similar to additional associated
/// data.
///
/// External names can also serve as a primitive testing and debugging tool.
/// In particular, many `.cton` test files use function names to identify
/// functions.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ExternalName {
/// A name in a user-defined symbol table. Cretonne does not interpret
/// these numbers in any way.
User {
/// Arbitrary.
namespace: u32,
/// Arbitrary.
index: u32,
},
/// A test case function name of up to 16 ascii characters. This is
/// not intended to be used outside test cases.
TestCase {
/// How many of the bytes in `ascii` are valid?
length: u8,
/// Ascii bytes of the name.
ascii: [u8; TESTCASE_NAME_LENGTH],
},
/// A well-known runtime library function.
LibCall(LibCall),
}
impl ExternalName {
/// Creates a new external name from a sequence of bytes. Caller is expected
/// to guarantee bytes are only ascii alphanumeric or `_`.
///
/// # Examples
///
/// ```rust
/// # use cretonne_codegen::ir::ExternalName;
/// // Create `ExternalName` from a string.
/// let name = ExternalName::testcase("hello");
/// assert_eq!(name.to_string(), "%hello");
/// ```
pub fn testcase<T: AsRef<[u8]>>(v: T) -> ExternalName {
let vec = v.as_ref();
let len = cmp::min(vec.len(), TESTCASE_NAME_LENGTH);
let mut bytes = [0u8; TESTCASE_NAME_LENGTH];
bytes[0..len].copy_from_slice(&vec[0..len]);
ExternalName::TestCase {
length: len as u8,
ascii: bytes,
}
}
/// Create a new external name from user-provided integer indices.
///
/// # Examples
/// ```rust
/// # use cretonne_codegen::ir::ExternalName;
/// // Create `ExternalName` from integer indices
/// let name = ExternalName::user(123, 456);
/// assert_eq!(name.to_string(), "u123:456");
/// ```
pub fn user(namespace: u32, index: u32) -> ExternalName {
ExternalName::User { namespace, index }
}
}
impl Default for ExternalName {
fn default() -> ExternalName {
ExternalName::user(0, 0)
}
}
impl fmt::Display for ExternalName {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ExternalName::User { namespace, index } => write!(f, "u{}:{}", namespace, index),
ExternalName::TestCase { length, ascii } => {
f.write_char('%')?;
for byte in ascii.iter().take(length as usize) {
f.write_char(*byte as char)?;
}
Ok(())
}
ExternalName::LibCall(lc) => write!(f, "%{}", lc),
}
}
}
impl FromStr for ExternalName {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
// Try to parse as a libcall name, otherwise it's a test case.
match s.parse() {
Ok(lc) => Ok(ExternalName::LibCall(lc)),
Err(_) => Ok(ExternalName::testcase(s.as_bytes())),
}
}
}
#[cfg(test)]
mod tests {
use super::ExternalName;
use ir::LibCall;
use std::string::ToString;
#[test]
fn display_testcase() {
assert_eq!(ExternalName::testcase("").to_string(), "%");
assert_eq!(ExternalName::testcase("x").to_string(), "%x");
assert_eq!(ExternalName::testcase("x_1").to_string(), "%x_1");
assert_eq!(
ExternalName::testcase("longname12345678").to_string(),
"%longname12345678"
);
// Constructor will silently drop bytes beyond the 16th
assert_eq!(
ExternalName::testcase("longname123456789").to_string(),
"%longname12345678"
);
}
#[test]
fn display_user() {
assert_eq!(ExternalName::user(0, 0).to_string(), "u0:0");
assert_eq!(ExternalName::user(1, 1).to_string(), "u1:1");
assert_eq!(
ExternalName::user(::std::u32::MAX, ::std::u32::MAX).to_string(),
"u4294967295:4294967295"
);
}
#[test]
fn parsing() {
assert_eq!(
"FloorF32".parse(),
Ok(ExternalName::LibCall(LibCall::FloorF32))
);
assert_eq!(
ExternalName::LibCall(LibCall::FloorF32).to_string(),
"%FloorF32"
);
}
}

@@ -0,0 +1,232 @@
//! Intermediate representation of a function.
//!
//! The `Function` struct defined in this module owns all of its extended basic blocks and
//! instructions.
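//!
//! # Examples
//!
//! A minimal sketch of creating a named function with a signature, using only calls defined in
//! this crate (assuming `AbiParam` is re-exported from `ir` like the other signature types):
//!
//! ```rust
//! use cretonne_codegen::ir::types::I32;
//! use cretonne_codegen::ir::{AbiParam, CallConv, ExternalName, Function, Signature};
//!
//! let mut sig = Signature::new(CallConv::SystemV);
//! sig.params.push(AbiParam::new(I32));
//! sig.returns.push(AbiParam::new(I32));
//! let func = Function::with_name_signature(ExternalName::testcase("sample"), sig);
//! assert_eq!(func.name.to_string(), "%sample");
//! ```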
use binemit::CodeOffset;
use entity::{EntityMap, PrimaryMap};
use ir;
use ir::{CallConv, DataFlowGraph, ExternalName, Layout, Signature};
use ir::{Ebb, ExtFuncData, FuncRef, GlobalVar, GlobalVarData, Heap, HeapData, JumpTable,
JumpTableData, SigRef, StackSlot, StackSlotData};
use ir::{EbbOffsets, InstEncodings, JumpTables, SourceLocs, StackSlots, ValueLocations};
use isa::{EncInfo, Legalize, TargetIsa, Encoding};
use std::fmt;
use write::write_function;
/// A function.
///
/// Functions can be cloned, but it is not a very fast operation.
/// The clone will have all the same entity numbers as the original.
#[derive(Clone)]
pub struct Function {
/// Name of this function. Mostly used by `.cton` files.
pub name: ExternalName,
/// Signature of this function.
pub signature: Signature,
/// Stack slots allocated in this function.
pub stack_slots: StackSlots,
/// Global variables referenced.
pub global_vars: PrimaryMap<ir::GlobalVar, ir::GlobalVarData>,
/// Heaps referenced.
pub heaps: PrimaryMap<ir::Heap, ir::HeapData>,
/// Jump tables used in this function.
pub jump_tables: JumpTables,
/// Data flow graph containing the primary definition of all instructions, EBBs and values.
pub dfg: DataFlowGraph,
/// Layout of EBBs and instructions in the function body.
pub layout: Layout,
/// Encoding recipe and bits for the legal instructions.
/// Illegal instructions have the `Encoding::default()` value.
pub encodings: InstEncodings,
/// Location assigned to every value.
pub locations: ValueLocations,
/// Code offsets of the EBB headers.
///
/// This information is only transiently available after the `binemit::relax_branches` function
/// computes it, and it can easily be recomputed by calling that function. It is not included
/// in the textual IR format.
pub offsets: EbbOffsets,
/// Source locations.
///
/// Track the original source location for each instruction. The source locations are not
/// interpreted by Cretonne, only preserved.
pub srclocs: SourceLocs,
}
impl Function {
/// Create a function with the given name and signature.
pub fn with_name_signature(name: ExternalName, sig: Signature) -> Self {
Self {
name,
signature: sig,
stack_slots: StackSlots::new(),
global_vars: PrimaryMap::new(),
heaps: PrimaryMap::new(),
jump_tables: PrimaryMap::new(),
dfg: DataFlowGraph::new(),
layout: Layout::new(),
encodings: EntityMap::new(),
locations: EntityMap::new(),
offsets: EntityMap::new(),
srclocs: EntityMap::new(),
}
}
/// Clear all data structures in this function.
pub fn clear(&mut self) {
self.signature.clear(ir::CallConv::SystemV);
self.stack_slots.clear();
self.global_vars.clear();
self.heaps.clear();
self.jump_tables.clear();
self.dfg.clear();
self.layout.clear();
self.encodings.clear();
self.locations.clear();
self.offsets.clear();
self.srclocs.clear();
}
/// Create a new empty, anonymous function with a SystemV calling convention.
pub fn new() -> Self {
Self::with_name_signature(ExternalName::default(), Signature::new(CallConv::SystemV))
}
/// Creates a jump table in the function, to be used by `br_table` instructions.
pub fn create_jump_table(&mut self, data: JumpTableData) -> JumpTable {
self.jump_tables.push(data)
}
/// Inserts an entry in a previously declared jump table.
pub fn insert_jump_table_entry(&mut self, jt: JumpTable, index: usize, ebb: Ebb) {
self.jump_tables[jt].set_entry(index, ebb);
}
/// Creates a stack slot in the function, to be used by `stack_load`, `stack_store` and
/// `stack_addr` instructions.
pub fn create_stack_slot(&mut self, data: StackSlotData) -> StackSlot {
self.stack_slots.push(data)
}
/// Adds a signature which can later be used to declare an external function import.
pub fn import_signature(&mut self, signature: Signature) -> SigRef {
self.dfg.signatures.push(signature)
}
/// Declare an external function import.
pub fn import_function(&mut self, data: ExtFuncData) -> FuncRef {
self.dfg.ext_funcs.push(data)
}
/// Declares a global variable accessible to the function.
pub fn create_global_var(&mut self, data: GlobalVarData) -> GlobalVar {
self.global_vars.push(data)
}
/// Declares a heap accessible to the function.
pub fn create_heap(&mut self, data: HeapData) -> Heap {
self.heaps.push(data)
}
/// Return an object that can display this function with correct ISA-specific annotations.
pub fn display<'a, I: Into<Option<&'a TargetIsa>>>(&'a self, isa: I) -> DisplayFunction<'a> {
DisplayFunction(self, isa.into())
}
/// Find a presumed unique special-purpose function parameter value.
///
/// Returns the value of the last `purpose` parameter, or `None` if no such parameter exists.
pub fn special_param(&self, purpose: ir::ArgumentPurpose) -> Option<ir::Value> {
let entry = self.layout.entry_block().expect("Function is empty");
self.signature.special_param_index(purpose).map(|i| {
self.dfg.ebb_params(entry)[i]
})
}
/// Get an iterator over the instructions in `ebb`, including offsets and encoded instruction
/// sizes.
///
/// The iterator returns `(offset, inst, size)` tuples, where `offset` is the offset in bytes
/// from the beginning of the function to the instruction, and `size` is the size of the
/// instruction in bytes, or 0 for unencoded instructions.
///
/// This function can only be used after the code layout has been computed by the
/// `binemit::relax_branches()` function.
pub fn inst_offsets<'a>(&'a self, ebb: Ebb, encinfo: &EncInfo) -> InstOffsetIter<'a> {
assert!(
!self.offsets.is_empty(),
"Code layout must be computed first"
);
InstOffsetIter {
encinfo: encinfo.clone(),
encodings: &self.encodings,
offset: self.offsets[ebb],
iter: self.layout.ebb_insts(ebb),
}
}
/// Wrapper around `encode` which assigns `inst` the resulting encoding.
pub fn update_encoding(&mut self, inst: ir::Inst, isa: &TargetIsa) -> Result<(), Legalize> {
self.encode(inst, isa).map(|e| self.encodings[inst] = e)
}
/// Wrapper around `TargetIsa::encode` for encoding an existing instruction
/// in the `Function`.
pub fn encode(&self, inst: ir::Inst, isa: &TargetIsa) -> Result<Encoding, Legalize> {
isa.encode(&self, &self.dfg[inst], self.dfg.ctrl_typevar(inst))
}
}
/// Wrapper type capable of displaying a `Function` with correct ISA annotations.
pub struct DisplayFunction<'a>(&'a Function, Option<&'a TargetIsa>);
impl<'a> fmt::Display for DisplayFunction<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write_function(fmt, self.0, self.1)
}
}
impl fmt::Display for Function {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write_function(fmt, self, None)
}
}
impl fmt::Debug for Function {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write_function(fmt, self, None)
}
}
/// Iterator returning instruction offsets and sizes: `(offset, inst, size)`.
pub struct InstOffsetIter<'a> {
encinfo: EncInfo,
encodings: &'a InstEncodings,
offset: CodeOffset,
iter: ir::layout::Insts<'a>,
}
impl<'a> Iterator for InstOffsetIter<'a> {
type Item = (CodeOffset, ir::Inst, CodeOffset);
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|inst| {
let size = self.encinfo.bytes(self.encodings[inst]);
let offset = self.offset;
self.offset += size;
(offset, inst, size)
})
}
}

@@ -0,0 +1,70 @@
//! Global variables.
use ir::immediates::Offset32;
use ir::{ExternalName, GlobalVar};
use std::fmt;
/// Information about a global variable declaration.
#[derive(Clone)]
pub enum GlobalVarData {
/// Variable is part of the VM context struct; its address is a constant offset from the VM
/// context pointer.
VmCtx {
/// Offset from the `vmctx` pointer to this global.
offset: Offset32,
},
/// Variable is part of a struct pointed to by another global variable.
///
/// The `base` global variable is assumed to contain a pointer to a struct. This global
/// variable lives at an offset into the struct. The memory must be accessible, and
/// naturally aligned to hold a pointer value.
Deref {
/// The base pointer global variable.
base: GlobalVar,
/// Byte offset to be added to the pointer loaded from `base`.
offset: Offset32,
},
/// Variable is at an address identified by a symbolic name. Cretonne itself
/// does not interpret this name; it's used by embedders to link with other
/// data structures.
Sym {
/// The symbolic name.
name: ExternalName,
/// Will this variable be defined nearby, such that it will always be a certain distance
/// away, after linking? If so, references to it can avoid going through a GOT. Note that
/// symbols meant to be preemptible cannot be colocated.
colocated: bool,
},
}
impl GlobalVarData {
/// Assume that `self` is a `GlobalVarData::Sym` and return its name.
pub fn symbol_name(&self) -> &ExternalName {
match *self {
GlobalVarData::Sym { ref name, .. } => name,
_ => panic!("only symbols have names"),
}
}
}
impl fmt::Display for GlobalVarData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
GlobalVarData::VmCtx { offset } => write!(f, "vmctx{}", offset),
GlobalVarData::Deref { base, offset } => write!(f, "deref({}){}", base, offset),
GlobalVarData::Sym {
ref name,
colocated,
} => {
if colocated {
write!(f, "colocated ")?;
}
write!(f, "globalsym {}", name)
}
}
}
}

@@ -0,0 +1,74 @@
//! Heaps.
use ir::GlobalVar;
use ir::immediates::Imm64;
use std::fmt;
/// Information about a heap declaration.
#[derive(Clone)]
pub struct HeapData {
/// Method for determining the heap base address.
pub base: HeapBase,
/// Guaranteed minimum heap size in bytes. Heap accesses before `min_size` don't need bounds
/// checking.
pub min_size: Imm64,
/// Size in bytes of the guard pages following the heap.
pub guard_size: Imm64,
/// Heap style, with additional style-specific info.
pub style: HeapStyle,
}
/// Method for determining the base address of a heap.
#[derive(Clone)]
pub enum HeapBase {
/// The heap base lives in a reserved register.
///
/// This feature is not yet implemented.
ReservedReg,
/// The heap base is in a global variable. The variable must be accessible and naturally
/// aligned for a pointer.
GlobalVar(GlobalVar),
}
/// Style of heap including style-specific information.
#[derive(Clone)]
pub enum HeapStyle {
/// A dynamic heap can be relocated to a different base address when it is grown.
Dynamic {
/// Global variable holding the current bound of the heap in bytes. It is
/// required to be accessible and naturally aligned for a pointer-sized integer.
bound_gv: GlobalVar,
},
/// A static heap has a fixed base address and a number of not-yet-allocated pages before the
/// guard pages.
Static {
/// Heap bound in bytes. The guard pages are allocated after the bound.
bound: Imm64,
},
}
impl fmt::Display for HeapData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match self.style {
HeapStyle::Dynamic { .. } => "dynamic",
HeapStyle::Static { .. } => "static",
})?;
match self.base {
HeapBase::ReservedReg => write!(f, " reserved_reg")?,
HeapBase::GlobalVar(gv) => write!(f, " {}", gv)?,
}
write!(f, ", min {}", self.min_size)?;
match self.style {
HeapStyle::Dynamic { bound_gv } => write!(f, ", bound {}", bound_gv)?,
HeapStyle::Static { bound } => write!(f, ", bound {}", bound)?,
}
write!(f, ", guard {}", self.guard_size)
}
}

File diff suppressed because it is too large

@@ -0,0 +1,680 @@
//! Instruction formats and opcodes.
//!
//! The `instructions` module contains definitions for instruction formats, opcodes, and the
//! in-memory representation of IR instructions.
//!
//! A large part of this module is auto-generated from the instruction descriptions in the meta
//! directory.
use std::fmt::{self, Display, Formatter};
use std::ops::{Deref, DerefMut};
use std::str::FromStr;
use std::vec::Vec;
use ir;
use ir::types;
use ir::{Ebb, FuncRef, JumpTable, SigRef, Type, Value};
use isa;
use bitset::BitSet;
use entity;
use ref_slice::{ref_slice, ref_slice_mut};
/// Some instructions use an external list of argument values because there is not enough space in
/// the 16-byte `InstructionData` struct. These value lists are stored in a memory pool in
/// `dfg.value_lists`.
pub type ValueList = entity::EntityList<Value>;
/// Memory pool for holding value lists. See `ValueList`.
pub type ValueListPool = entity::ListPool<Value>;
// Include code generated by `lib/codegen/meta/gen_instr.py`. This file contains:
//
// - The `pub enum InstructionFormat` enum with all the instruction formats.
// - The `pub enum InstructionData` enum with all the instruction data fields.
// - The `pub enum Opcode` definition with all known opcodes,
// - The `const OPCODE_FORMAT: [InstructionFormat; N]` table.
// - The private `fn opcode_name(Opcode) -> &'static str` function, and
// - The hash table `const OPCODE_HASH_TABLE: [Opcode; N]`.
//
// For value type constraints:
//
// - The `const OPCODE_CONSTRAINTS : [OpcodeConstraints; N]` table.
// - The `const TYPE_SETS : [ValueTypeSet; N]` table.
// - The `const OPERAND_CONSTRAINTS : [OperandConstraint; N]` table.
//
include!(concat!(env!("OUT_DIR"), "/opcodes.rs"));
impl Display for Opcode {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}", opcode_name(*self))
}
}
impl Opcode {
/// Get the instruction format for this opcode.
pub fn format(self) -> InstructionFormat {
OPCODE_FORMAT[self as usize - 1]
}
/// Get the constraint descriptor for this opcode.
/// Panic if this is called on `NotAnOpcode`.
pub fn constraints(self) -> OpcodeConstraints {
OPCODE_CONSTRAINTS[self as usize - 1]
}
}
// This trait really belongs in lib/reader where it is used by the `.cton` file parser, but since
// it critically depends on the `opcode_name()` function which is needed here anyway, it lives in
// this module. This also saves us from running the build script twice to generate code for the two
// separate crates.
impl FromStr for Opcode {
type Err = &'static str;
/// Parse an Opcode name from a string.
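///
/// # Examples
///
/// A small sketch of round-tripping an opcode through its text name (assuming the generated
/// name for `iadd` is "iadd", as used in the textual IR):
///
/// ```rust
/// # use cretonne_codegen::ir::Opcode;
/// let op: Opcode = "iadd".parse().unwrap();
/// assert_eq!(op.to_string(), "iadd");
/// assert!("not_an_opcode".parse::<Opcode>().is_err());
/// ```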
fn from_str(s: &str) -> Result<Opcode, &'static str> {
use constant_hash::{probe, simple_hash, Table};
impl<'a> Table<&'a str> for [Option<Opcode>] {
fn len(&self) -> usize {
self.len()
}
fn key(&self, idx: usize) -> Option<&'a str> {
self[idx].map(opcode_name)
}
}
match probe::<&str, [Option<Opcode>]>(&OPCODE_HASH_TABLE, s, simple_hash(s)) {
Err(_) => Err("Unknown opcode"),
// We unwrap here because probe() should have ensured that the entry
// at this index is not None.
Ok(i) => Ok(OPCODE_HASH_TABLE[i].unwrap()),
}
}
}
/// A variable list of `Value` operands used for function call arguments and passing arguments to
/// basic blocks.
#[derive(Clone, Debug)]
pub struct VariableArgs(Vec<Value>);
impl VariableArgs {
/// Create an empty argument list.
pub fn new() -> Self {
VariableArgs(Vec::new())
}
/// Add an argument to the end.
pub fn push(&mut self, v: Value) {
self.0.push(v)
}
/// Check if the list is empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Convert this to a value list in `pool` with `fixed` prepended.
pub fn into_value_list(self, fixed: &[Value], pool: &mut ValueListPool) -> ValueList {
let mut vlist = ValueList::default();
vlist.extend(fixed.iter().cloned(), pool);
vlist.extend(self.0, pool);
vlist
}
}
// Coerce `VariableArgs` into a `&[Value]` slice.
impl Deref for VariableArgs {
type Target = [Value];
fn deref(&self) -> &[Value] {
&self.0
}
}
impl DerefMut for VariableArgs {
fn deref_mut(&mut self) -> &mut [Value] {
&mut self.0
}
}
impl Display for VariableArgs {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
for (i, val) in self.0.iter().enumerate() {
if i == 0 {
write!(fmt, "{}", val)?;
} else {
write!(fmt, ", {}", val)?;
}
}
Ok(())
}
}
impl Default for VariableArgs {
fn default() -> Self {
Self::new()
}
}
/// Analyzing an instruction.
///
/// Avoid large matches on instruction formats by using the methods defined here to examine
/// instructions.
impl InstructionData {
/// Return information about the destination of a branch or jump instruction.
///
/// Any instruction that can transfer control to another EBB reveals its possible destinations
/// here.
pub fn analyze_branch<'a>(&'a self, pool: &'a ValueListPool) -> BranchInfo<'a> {
match *self {
InstructionData::Jump {
destination,
ref args,
..
} => BranchInfo::SingleDest(destination, args.as_slice(pool)),
InstructionData::BranchInt {
destination,
ref args,
..
} |
InstructionData::BranchFloat {
destination,
ref args,
..
} |
InstructionData::Branch {
destination,
ref args,
..
} => BranchInfo::SingleDest(destination, &args.as_slice(pool)[1..]),
InstructionData::BranchIcmp {
destination,
ref args,
..
} => BranchInfo::SingleDest(destination, &args.as_slice(pool)[2..]),
InstructionData::BranchTable { table, .. } => BranchInfo::Table(table),
_ => {
debug_assert!(!self.opcode().is_branch());
BranchInfo::NotABranch
}
}
}
/// Get the single destination of this branch instruction, if it is a single destination
/// branch or jump.
///
/// Multi-destination branches like `br_table` return `None`.
pub fn branch_destination(&self) -> Option<Ebb> {
match *self {
InstructionData::Jump { destination, .. } |
InstructionData::Branch { destination, .. } |
InstructionData::BranchInt { destination, .. } |
InstructionData::BranchFloat { destination, .. } |
InstructionData::BranchIcmp { destination, .. } => Some(destination),
InstructionData::BranchTable { .. } => None,
_ => {
debug_assert!(!self.opcode().is_branch());
None
}
}
}
/// Get a mutable reference to the single destination of this branch instruction, if it is a
/// single destination branch or jump.
///
/// Multi-destination branches like `br_table` return `None`.
pub fn branch_destination_mut(&mut self) -> Option<&mut Ebb> {
match *self {
InstructionData::Jump { ref mut destination, .. } |
InstructionData::Branch { ref mut destination, .. } |
InstructionData::BranchInt { ref mut destination, .. } |
InstructionData::BranchFloat { ref mut destination, .. } |
InstructionData::BranchIcmp { ref mut destination, .. } => Some(destination),
InstructionData::BranchTable { .. } => None,
_ => {
debug_assert!(!self.opcode().is_branch());
None
}
}
}
/// Return information about a call instruction.
///
/// Any instruction that can call another function reveals its call signature here.
pub fn analyze_call<'a>(&'a self, pool: &'a ValueListPool) -> CallInfo<'a> {
match *self {
InstructionData::Call { func_ref, ref args, .. } => {
CallInfo::Direct(func_ref, args.as_slice(pool))
}
InstructionData::IndirectCall { sig_ref, ref args, .. } => {
CallInfo::Indirect(sig_ref, &args.as_slice(pool)[1..])
}
_ => {
debug_assert!(!self.opcode().is_call());
CallInfo::NotACall
}
}
}
}
/// Information about branch and jump instructions.
pub enum BranchInfo<'a> {
/// This is not a branch or jump instruction.
/// This instruction will not transfer control to another EBB in the function, but it may still
/// affect control flow by returning or trapping.
NotABranch,
/// This is a branch or jump to a single destination EBB, possibly taking value arguments.
SingleDest(Ebb, &'a [Value]),
/// This is a jump table branch which can have many destination EBBs.
Table(JumpTable),
}
/// Information about call instructions.
pub enum CallInfo<'a> {
/// This is not a call instruction.
NotACall,
/// This is a direct call to an external function declared in the preamble. See
/// `DataFlowGraph.ext_funcs`.
Direct(FuncRef, &'a [Value]),
/// This is an indirect call with the specified signature. See `DataFlowGraph.signatures`.
Indirect(SigRef, &'a [Value]),
}
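// Illustrative sketch (not part of the original commit): a pass that needs the
// callee and arguments of a call would typically write something like
//
//     match dfg[inst].analyze_call(&dfg.value_lists) {
//         CallInfo::Direct(func_ref, args) => { /* statically known callee */ }
//         CallInfo::Indirect(sig_ref, args) => { /* call through a pointer */ }
//         CallInfo::NotACall => { /* nothing to do */ }
//     }
//
// where `dfg` is a `DataFlowGraph`, `inst` is one of its instructions, and
// `dfg.value_lists` is the `ValueListPool` referred to as `pool` above.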
/// Value type constraints for a given opcode.
///
/// The `InstructionFormat` determines the constraints on most operands, but `Value` operands and
/// results are not determined by the format. Every `Opcode` has an associated
/// `OpcodeConstraints` object that provides the missing details.
#[derive(Clone, Copy)]
pub struct OpcodeConstraints {
/// Flags for this opcode encoded as a bit field:
///
/// Bits 0-2:
/// Number of fixed result values. This does not include `variable_args` results such as
/// those produced by call instructions.
///
/// Bit 3:
/// This opcode is polymorphic and the controlling type variable can be inferred from the
/// designated input operand. This is the `typevar_operand` index given to the
/// `InstructionFormat` meta language object. When this bit is not set, the controlling
/// type variable must be the first output value instead.
///
/// Bit 4:
/// This opcode is polymorphic and the controlling type variable does *not* appear as the
/// first result type.
///
/// Bits 5-7:
/// Number of fixed value arguments. The minimum required number of value operands.
flags: u8,
/// Permitted set of types for the controlling type variable as an index into `TYPE_SETS`.
typeset_offset: u8,
/// Offset into the `OPERAND_CONSTRAINTS` table of the descriptors for this opcode. The first
/// `fixed_results()` entries describe the result constraints, followed by the constraints for
/// the fixed `Value` input operands (`fixed_value_arguments()` of them).
constraint_offset: u16,
}
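// Illustrative sketch (not part of the original commit): a hypothetical flags
// byte of 0b0100_1010 decodes as 2 fixed results (bits 0-2 = 0b010), "infer the
// controlling type variable from the designated operand" (bit 3 set), and
// 2 fixed value arguments (bits 5-7 = 0b010).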
impl OpcodeConstraints {
/// Can the controlling type variable for this opcode be inferred from the designated value
/// input operand?
/// This also implies that this opcode is polymorphic.
pub fn use_typevar_operand(self) -> bool {
(self.flags & 0x8) != 0
}
/// Is it necessary to look at the designated value input operand in order to determine the
/// controlling type variable, or is it good enough to use the first return type?
///
/// Most polymorphic instructions produce a single result with the type of the controlling type
/// variable. A few polymorphic instructions either don't produce any results, or produce
/// results with a fixed type. These instructions return `true`.
pub fn requires_typevar_operand(self) -> bool {
(self.flags & 0x10) != 0
}
/// Get the number of *fixed* result values produced by this opcode.
/// This does not include `variable_args` produced by calls.
pub fn fixed_results(self) -> usize {
(self.flags & 0x7) as usize
}
/// Get the number of *fixed* input values required by this opcode.
///
/// This does not include `variable_args` arguments on call and branch instructions.
///
/// The number of fixed input values is usually implied by the instruction format, but
/// instruction formats that use a `ValueList` put both fixed and variable arguments in the
/// list. This method returns the *minimum* number of values required in the value list.
pub fn fixed_value_arguments(self) -> usize {
((self.flags >> 5) & 0x7) as usize
}
/// Get the offset into `TYPE_SETS` for the controlling type variable.
/// Returns `None` if the instruction is not polymorphic.
fn typeset_offset(self) -> Option<usize> {
let offset = usize::from(self.typeset_offset);
if offset < TYPE_SETS.len() {
Some(offset)
} else {
None
}
}
/// Get the offset into OPERAND_CONSTRAINTS where the descriptors for this opcode begin.
fn constraint_offset(self) -> usize {
self.constraint_offset as usize
}
/// Get the value type of result number `n`, having resolved the controlling type variable to
/// `ctrl_type`.
pub fn result_type(self, n: usize, ctrl_type: Type) -> Type {
debug_assert!(n < self.fixed_results(), "Invalid result index");
if let ResolvedConstraint::Bound(t) =
OPERAND_CONSTRAINTS[self.constraint_offset() + n].resolve(ctrl_type)
{
t
} else {
panic!("Result constraints can't be free");
}
}
/// Get the value type of input value number `n`, having resolved the controlling type variable
/// to `ctrl_type`.
///
/// Unlike results, it is possible for some input values to vary freely within a specific
/// `ValueTypeSet`. This is represented with the `ArgumentConstraint::Free` variant.
pub fn value_argument_constraint(self, n: usize, ctrl_type: Type) -> ResolvedConstraint {
debug_assert!(
n < self.fixed_value_arguments(),
"Invalid value argument index"
);
let offset = self.constraint_offset() + self.fixed_results();
OPERAND_CONSTRAINTS[offset + n].resolve(ctrl_type)
}
/// Get the typeset of allowed types for the controlling type variable in a polymorphic
/// instruction.
pub fn ctrl_typeset(self) -> Option<ValueTypeSet> {
self.typeset_offset().map(|offset| TYPE_SETS[offset])
}
/// Is this instruction polymorphic?
pub fn is_polymorphic(self) -> bool {
self.ctrl_typeset().is_some()
}
}
type BitSet8 = BitSet<u8>;
type BitSet16 = BitSet<u16>;
/// A value type set describes the permitted set of types for a type variable.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ValueTypeSet {
/// Allowed lane sizes
pub lanes: BitSet16,
/// Allowed int widths
pub ints: BitSet8,
/// Allowed float widths
pub floats: BitSet8,
/// Allowed bool widths
pub bools: BitSet8,
}
impl ValueTypeSet {
/// Is `scalar` part of the base type set?
///
/// Note that the base type set does not have to be included in the type set proper.
fn is_base_type(&self, scalar: Type) -> bool {
let l2b = scalar.log2_lane_bits();
if scalar.is_int() {
self.ints.contains(l2b)
} else if scalar.is_float() {
self.floats.contains(l2b)
} else if scalar.is_bool() {
self.bools.contains(l2b)
} else {
false
}
}
/// Does `typ` belong to this set?
pub fn contains(&self, typ: Type) -> bool {
let l2l = typ.log2_lane_count();
self.lanes.contains(l2l) && self.is_base_type(typ.lane_type())
}
/// Get an example member of this type set.
///
/// This is used for error messages to avoid suggesting invalid types.
pub fn example(&self) -> Type {
let t = if self.ints.max().unwrap_or(0) > 5 {
types::I32
} else if self.floats.max().unwrap_or(0) > 5 {
types::F32
} else if self.bools.max().unwrap_or(0) > 5 {
types::B32
} else {
types::B1
};
t.by(1 << self.lanes.min().unwrap()).unwrap()
}
}
/// Operand constraints. This describes the value type constraints on a single `Value` operand.
enum OperandConstraint {
/// This operand has a concrete value type.
Concrete(Type),
/// This operand can vary freely within the given type set.
/// The type set is identified by its index into the TYPE_SETS constant table.
Free(u8),
/// This operand is the same type as the controlling type variable.
Same,
/// This operand is `ctrlType.lane_type()`.
LaneOf,
/// This operand is `ctrlType.as_bool()`.
AsBool,
/// This operand is `ctrlType.half_width()`.
HalfWidth,
/// This operand is `ctrlType.double_width()`.
DoubleWidth,
/// This operand is `ctrlType.half_vector()`.
HalfVector,
/// This operand is `ctrlType.double_vector()`.
DoubleVector,
}
impl OperandConstraint {
/// Resolve this operand constraint into a concrete value type, given the value of the
/// controlling type variable.
pub fn resolve(&self, ctrl_type: Type) -> ResolvedConstraint {
use self::OperandConstraint::*;
use self::ResolvedConstraint::Bound;
match *self {
Concrete(t) => Bound(t),
Free(vts) => ResolvedConstraint::Free(TYPE_SETS[vts as usize]),
Same => Bound(ctrl_type),
LaneOf => Bound(ctrl_type.lane_type()),
AsBool => Bound(ctrl_type.as_bool()),
HalfWidth => Bound(ctrl_type.half_width().expect("invalid type for half_width")),
DoubleWidth => Bound(ctrl_type.double_width().expect(
"invalid type for double_width",
)),
HalfVector => Bound(ctrl_type.half_vector().expect(
"invalid type for half_vector",
)),
DoubleVector => Bound(ctrl_type.by(2).expect("invalid type for double_vector")),
}
}
}
/// The type constraint on a value argument once the controlling type variable is known.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ResolvedConstraint {
/// The operand is bound to a known type.
Bound(Type),
/// The operand type can vary freely within the given set.
Free(ValueTypeSet),
}
#[cfg(test)]
mod tests {
use super::*;
use std::string::ToString;
#[test]
fn opcodes() {
use std::mem;
let x = Opcode::Iadd;
let mut y = Opcode::Isub;
assert!(x != y);
y = Opcode::Iadd;
assert_eq!(x, y);
assert_eq!(x.format(), InstructionFormat::Binary);
assert_eq!(format!("{:?}", Opcode::IaddImm), "IaddImm");
assert_eq!(Opcode::IaddImm.to_string(), "iadd_imm");
// Check the matcher.
assert_eq!("iadd".parse::<Opcode>(), Ok(Opcode::Iadd));
assert_eq!("iadd_imm".parse::<Opcode>(), Ok(Opcode::IaddImm));
assert_eq!("iadd\0".parse::<Opcode>(), Err("Unknown opcode"));
assert_eq!("".parse::<Opcode>(), Err("Unknown opcode"));
assert_eq!("\0".parse::<Opcode>(), Err("Unknown opcode"));
// Opcode is a single byte, and because `Option<Opcode>` originally took two bytes, Opcode once
// included a `NotAnOpcode` variant to avoid the unnecessary bloat. Since then the Rust compiler
// gained the `NonZero` optimization, so an enum that never uses the value 0 can be wrapped in
// `Option` at no extra size cost. We want to ensure `Option<Opcode>` stays that small.
assert_eq!(mem::size_of::<Opcode>(), mem::size_of::<Option<Opcode>>());
}
#[test]
fn instruction_data() {
use std::mem;
// The size of the `InstructionData` enum is important for performance. It should not
// exceed 16 bytes. Use `Box<FooData>` out-of-line payloads for instruction formats that
// require more space than that. It would be fine with a data structure smaller than 16
// bytes, but what are the odds of that?
assert_eq!(mem::size_of::<InstructionData>(), 16);
}
#[test]
fn constraints() {
let a = Opcode::Iadd.constraints();
assert!(a.use_typevar_operand());
assert!(!a.requires_typevar_operand());
assert_eq!(a.fixed_results(), 1);
assert_eq!(a.fixed_value_arguments(), 2);
assert_eq!(a.result_type(0, types::I32), types::I32);
assert_eq!(a.result_type(0, types::I8), types::I8);
assert_eq!(
a.value_argument_constraint(0, types::I32),
ResolvedConstraint::Bound(types::I32)
);
assert_eq!(
a.value_argument_constraint(1, types::I32),
ResolvedConstraint::Bound(types::I32)
);
let b = Opcode::Bitcast.constraints();
assert!(!b.use_typevar_operand());
assert!(!b.requires_typevar_operand());
assert_eq!(b.fixed_results(), 1);
assert_eq!(b.fixed_value_arguments(), 1);
assert_eq!(b.result_type(0, types::I32), types::I32);
assert_eq!(b.result_type(0, types::I8), types::I8);
match b.value_argument_constraint(0, types::I32) {
ResolvedConstraint::Free(vts) => assert!(vts.contains(types::F32)),
_ => panic!("Unexpected constraint from value_argument_constraint"),
}
let c = Opcode::Call.constraints();
assert_eq!(c.fixed_results(), 0);
assert_eq!(c.fixed_value_arguments(), 0);
let i = Opcode::CallIndirect.constraints();
assert_eq!(i.fixed_results(), 0);
assert_eq!(i.fixed_value_arguments(), 1);
let cmp = Opcode::Icmp.constraints();
assert!(cmp.use_typevar_operand());
assert!(cmp.requires_typevar_operand());
assert_eq!(cmp.fixed_results(), 1);
assert_eq!(cmp.fixed_value_arguments(), 2);
}
#[test]
fn value_set() {
use ir::types::*;
let vts = ValueTypeSet {
lanes: BitSet16::from_range(0, 8),
ints: BitSet8::from_range(4, 7),
floats: BitSet8::from_range(0, 0),
bools: BitSet8::from_range(3, 7),
};
assert!(!vts.contains(I8));
assert!(vts.contains(I32));
assert!(vts.contains(I64));
assert!(vts.contains(I32X4));
assert!(!vts.contains(F32));
assert!(!vts.contains(B1));
assert!(vts.contains(B8));
assert!(vts.contains(B64));
assert_eq!(vts.example().to_string(), "i32");
let vts = ValueTypeSet {
lanes: BitSet16::from_range(0, 8),
ints: BitSet8::from_range(0, 0),
floats: BitSet8::from_range(5, 7),
bools: BitSet8::from_range(3, 7),
};
assert_eq!(vts.example().to_string(), "f32");
let vts = ValueTypeSet {
lanes: BitSet16::from_range(1, 8),
ints: BitSet8::from_range(0, 0),
floats: BitSet8::from_range(5, 7),
bools: BitSet8::from_range(3, 7),
};
assert_eq!(vts.example().to_string(), "f32x2");
let vts = ValueTypeSet {
lanes: BitSet16::from_range(2, 8),
ints: BitSet8::from_range(0, 0),
floats: BitSet8::from_range(0, 0),
bools: BitSet8::from_range(3, 7),
};
assert!(!vts.contains(B32X2));
assert!(vts.contains(B32X4));
assert_eq!(vts.example().to_string(), "b32x4");
let vts = ValueTypeSet {
// TypeSet(lanes=(1, 256), ints=(8, 64))
lanes: BitSet16::from_range(0, 9),
ints: BitSet8::from_range(3, 7),
floats: BitSet8::from_range(0, 0),
bools: BitSet8::from_range(0, 0),
};
assert!(vts.contains(I32));
assert!(vts.contains(I32X4));
}
}

View File

@@ -0,0 +1,180 @@
//! Jump table representation.
//!
//! Jump tables are declared in the preamble and assigned an `ir::entities::JumpTable` reference.
//! The actual table of destinations is stored in a `JumpTableData` struct defined in this module.
use ir::entities::Ebb;
use packed_option::PackedOption;
use std::fmt::{self, Display, Formatter};
use std::iter;
use std::slice;
use std::vec::Vec;
/// Contents of a jump table.
///
/// All jump tables use 0-based indexing and are expected to be densely populated. They don't need
/// to be completely populated, though. Individual entries can be missing.
#[derive(Clone)]
pub struct JumpTableData {
// Table entries, using `None` as a placeholder for missing entries.
table: Vec<PackedOption<Ebb>>,
// How many `None` holes in table?
holes: usize,
}
impl JumpTableData {
/// Create a new empty jump table.
pub fn new() -> Self {
Self {
table: Vec::new(),
holes: 0,
}
}
/// Create a new empty jump table with the specified capacity.
pub fn with_capacity(capacity: usize) -> Self {
Self {
table: Vec::with_capacity(capacity),
holes: 0,
}
}
/// Get the number of table entries.
pub fn len(&self) -> usize {
self.table.len()
}
/// Set a table entry.
///
/// The table will grow as needed to fit `idx`.
pub fn set_entry(&mut self, idx: usize, dest: Ebb) {
// Resize table to fit `idx`.
if idx >= self.table.len() {
self.holes += idx - self.table.len();
self.table.resize(idx + 1, None.into());
} else if self.table[idx].is_none() {
// We're filling in an existing hole.
self.holes -= 1;
}
self.table[idx] = dest.into();
}
/// Append a table entry.
pub fn push_entry(&mut self, dest: Ebb) {
self.table.push(dest.into())
}
/// Clear a table entry.
///
/// The `br_table` instruction will fall through if given an index corresponding to a cleared
/// table entry.
pub fn clear_entry(&mut self, idx: usize) {
if idx < self.table.len() && self.table[idx].is_some() {
self.holes += 1;
self.table[idx] = None.into();
}
}
/// Get the entry for `idx`, or `None`.
pub fn get_entry(&self, idx: usize) -> Option<Ebb> {
self.table.get(idx).and_then(|e| e.expand())
}
/// Enumerate over all `(idx, dest)` pairs in the table in order.
///
/// This returns an iterator that skips any empty slots in the table.
pub fn entries(&self) -> Entries {
Entries(self.table.iter().cloned().enumerate())
}
/// Checks if any of the entries branch to `ebb`.
pub fn branches_to(&self, ebb: Ebb) -> bool {
self.table.iter().any(|target_ebb| {
target_ebb.expand() == Some(ebb)
})
}
/// Access the whole table as a mutable slice.
pub fn as_mut_slice(&mut self) -> &mut [PackedOption<Ebb>] {
self.table.as_mut_slice()
}
}
/// Enumerate `(idx, dest)` pairs in order.
pub struct Entries<'a>(iter::Enumerate<iter::Cloned<slice::Iter<'a, PackedOption<Ebb>>>>);
impl<'a> Iterator for Entries<'a> {
type Item = (usize, Ebb);
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some((idx, dest)) = self.0.next() {
if let Some(ebb) = dest.expand() {
return Some((idx, ebb));
}
} else {
return None;
}
}
}
}
impl Display for JumpTableData {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
match self.table.first().and_then(|e| e.expand()) {
None => write!(fmt, "jump_table 0")?,
Some(first) => write!(fmt, "jump_table {}", first)?,
}
for dest in self.table.iter().skip(1).map(|e| e.expand()) {
match dest {
None => write!(fmt, ", 0")?,
Some(ebb) => write!(fmt, ", {}", ebb)?,
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::JumpTableData;
use entity::EntityRef;
use ir::Ebb;
use std::string::ToString;
use std::vec::Vec;
#[test]
fn empty() {
let jt = JumpTableData::new();
assert_eq!(jt.get_entry(0), None);
assert_eq!(jt.get_entry(10), None);
assert_eq!(jt.to_string(), "jump_table 0");
let v: Vec<(usize, Ebb)> = jt.entries().collect();
assert_eq!(v, []);
}
#[test]
fn insert() {
let e1 = Ebb::new(1);
let e2 = Ebb::new(2);
let mut jt = JumpTableData::new();
jt.set_entry(0, e1);
jt.set_entry(0, e2);
jt.set_entry(10, e1);
assert_eq!(
jt.to_string(),
"jump_table ebb2, 0, 0, 0, 0, 0, 0, 0, 0, 0, ebb1"
);
let v: Vec<(usize, Ebb)> = jt.entries().collect();
assert_eq!(v, [(0, e2), (10, e1)]);
}
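    // Illustrative sketch (not part of the original commit): `clear_entry`
    // re-creates a hole, and `entries()` skips holes when iterating.
    #[test]
    fn clear() {
        let e1 = Ebb::new(1);
        let mut jt = JumpTableData::new();
        jt.push_entry(e1);
        jt.set_entry(3, e1);
        jt.clear_entry(3);
        assert_eq!(jt.get_entry(3), None);
        let v: Vec<(usize, Ebb)> = jt.entries().collect();
        assert_eq!(v, [(0, e1)]);
    }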
}

1173
lib/codegen/src/ir/layout.rs Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,115 @@
//! Naming well-known routines in the runtime library.
use ir::{types, Opcode, Type};
use std::fmt;
use std::str::FromStr;
/// The name of a runtime library routine.
///
/// Runtime library calls are generated for Cretonne IR instructions that don't have an equivalent
/// ISA instruction or an easy macro expansion. A `LibCall` is used as a well-known name to refer to
/// the runtime library routine. This way, Cretonne doesn't have to know about the naming
/// convention in the embedding VM's runtime library.
///
/// This list is likely to grow over time.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum LibCall {
/// ceil.f32
CeilF32,
/// ceil.f64
CeilF64,
/// floor.f32
FloorF32,
/// floor.f64
FloorF64,
/// trunc.f32
TruncF32,
/// trunc.f64
TruncF64,
/// nearest.f32
NearestF32,
/// nearest.f64
NearestF64,
}
const NAME: [&str; 8] = [
"CeilF32",
"CeilF64",
"FloorF32",
"FloorF64",
"TruncF32",
"TruncF64",
"NearestF32",
"NearestF64",
];
impl fmt::Display for LibCall {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(NAME[*self as usize])
}
}
impl FromStr for LibCall {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"CeilF32" => Ok(LibCall::CeilF32),
"CeilF64" => Ok(LibCall::CeilF64),
"FloorF32" => Ok(LibCall::FloorF32),
"FloorF64" => Ok(LibCall::FloorF64),
"TruncF32" => Ok(LibCall::TruncF32),
"TruncF64" => Ok(LibCall::TruncF64),
"NearestF32" => Ok(LibCall::NearestF32),
"NearestF64" => Ok(LibCall::NearestF64),
_ => Err(()),
}
}
}
impl LibCall {
/// Get the well-known library call name to use as a replacement for an instruction with the
/// given opcode and controlling type variable.
///
/// Returns `None` if no well-known library routine name exists for that instruction.
pub fn for_inst(opcode: Opcode, ctrl_type: Type) -> Option<LibCall> {
Some(match ctrl_type {
types::F32 => {
match opcode {
Opcode::Ceil => LibCall::CeilF32,
Opcode::Floor => LibCall::FloorF32,
Opcode::Trunc => LibCall::TruncF32,
Opcode::Nearest => LibCall::NearestF32,
_ => return None,
}
}
types::F64 => {
match opcode {
Opcode::Ceil => LibCall::CeilF64,
Opcode::Floor => LibCall::FloorF64,
Opcode::Trunc => LibCall::TruncF64,
Opcode::Nearest => LibCall::NearestF64,
_ => return None,
}
}
_ => return None,
})
}
}
#[cfg(test)]
mod test {
use super::*;
use std::string::ToString;
#[test]
fn display() {
assert_eq!(LibCall::CeilF32.to_string(), "CeilF32");
assert_eq!(LibCall::NearestF64.to_string(), "NearestF64");
}
#[test]
fn parsing() {
assert_eq!("FloorF32".parse(), Ok(LibCall::FloorF32));
}
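    // Illustrative sketch (not part of the original commit): `for_inst` maps a
    // rounding opcode plus its controlling type to a well-known routine name.
    #[test]
    fn for_inst_example() {
        use ir::{types, Opcode};
        assert_eq!(
            LibCall::for_inst(Opcode::Ceil, types::F32),
            Some(LibCall::CeilF32)
        );
        // Integer control types have no libcall expansion.
        assert_eq!(LibCall::for_inst(Opcode::Ceil, types::I32), None);
    }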
}

View File

@@ -0,0 +1,93 @@
//! Memory operation flags.
use std::fmt;
enum FlagBit {
Notrap,
Aligned,
}
const NAMES: [&str; 2] = ["notrap", "aligned"];
/// Flags for memory operations like load/store.
///
/// Each of these flags introduces a limited form of undefined behavior. The flags each enable
/// certain optimizations that need to make additional assumptions. Generally, the semantics of a
/// program do not change when a flag is removed, but adding one can.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct MemFlags {
bits: u8,
}
impl MemFlags {
/// Create a new empty set of flags.
pub fn new() -> Self {
Self { bits: 0 }
}
/// Read a flag bit.
fn read(self, bit: FlagBit) -> bool {
self.bits & (1 << bit as usize) != 0
}
/// Set a flag bit.
fn set(&mut self, bit: FlagBit) {
self.bits |= 1 << bit as usize
}
/// Set a flag bit by name.
///
/// Returns true if the flag was found and set, false for an unknown flag name.
pub fn set_by_name(&mut self, name: &str) -> bool {
match NAMES.iter().position(|&s| s == name) {
Some(bit) => {
self.bits |= 1 << bit;
true
}
None => false,
}
}
/// Test if the `notrap` flag is set.
///
/// Normally, trapping is part of the semantics of a load/store operation. If the platform
/// would cause a trap when accessing the effective address, the Cretonne memory operation is
/// also required to trap.
///
/// The `notrap` flag tells Cretonne that the memory is *accessible*, which means that
/// accesses will not trap. This makes it possible to delete an unused load or a dead store
/// instruction.
pub fn notrap(self) -> bool {
self.read(FlagBit::Notrap)
}
/// Set the `notrap` flag.
pub fn set_notrap(&mut self) {
self.set(FlagBit::Notrap)
}
/// Test if the `aligned` flag is set.
///
/// By default, Cretonne memory instructions work with any unaligned effective address. If the
/// `aligned` flag is set, the instruction is permitted to trap or return a wrong result if the
/// effective address is misaligned.
pub fn aligned(self) -> bool {
self.read(FlagBit::Aligned)
}
/// Set the `aligned` flag.
pub fn set_aligned(&mut self) {
self.set(FlagBit::Aligned)
}
}
impl fmt::Display for MemFlags {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for (i, n) in NAMES.iter().enumerate() {
if self.bits & (1 << i) != 0 {
write!(f, " {}", n)?;
}
}
Ok(())
}
}
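// Illustrative sketch (not part of the original commit): a minimal exercise of
// the `MemFlags` API defined above, assuming only the items in this file.
#[cfg(test)]
mod tests {
    use super::*;
    use std::string::ToString;

    #[test]
    fn basic_flags() {
        let mut flags = MemFlags::new();
        assert!(!flags.notrap() && !flags.aligned());
        // Unknown names are rejected; known names set the corresponding bit.
        assert!(!flags.set_by_name("bogus"));
        assert!(flags.set_by_name("aligned"));
        flags.set_notrap();
        assert!(flags.notrap() && flags.aligned());
        // `Display` prints each set flag preceded by a space.
        assert_eq!(flags.to_string(), " notrap aligned");
    }
}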

63
lib/codegen/src/ir/mod.rs Normal file
View File

@@ -0,0 +1,63 @@
//! Representation of Cretonne IR functions.
mod builder;
pub mod condcodes;
pub mod dfg;
pub mod entities;
mod extfunc;
mod extname;
pub mod function;
mod globalvar;
mod heap;
pub mod immediates;
pub mod instructions;
pub mod jumptable;
pub mod layout;
mod libcall;
mod memflags;
mod progpoint;
mod sourceloc;
pub mod stackslot;
mod trapcode;
pub mod types;
mod valueloc;
pub use ir::builder::{InsertBuilder, InstBuilder, InstBuilderBase, InstInserterBase};
pub use ir::dfg::{DataFlowGraph, ValueDef};
pub use ir::entities::{Ebb, FuncRef, GlobalVar, Heap, Inst, JumpTable, SigRef, StackSlot, Value};
pub use ir::extfunc::{AbiParam, ArgumentExtension, ArgumentPurpose, CallConv, ExtFuncData,
Signature};
pub use ir::extname::ExternalName;
pub use ir::function::Function;
pub use ir::globalvar::GlobalVarData;
pub use ir::heap::{HeapBase, HeapData, HeapStyle};
pub use ir::instructions::{InstructionData, Opcode, ValueList, ValueListPool, VariableArgs};
pub use ir::jumptable::JumpTableData;
pub use ir::layout::Layout;
pub use ir::libcall::LibCall;
pub use ir::memflags::MemFlags;
pub use ir::progpoint::{ExpandedProgramPoint, ProgramOrder, ProgramPoint};
pub use ir::sourceloc::SourceLoc;
pub use ir::stackslot::{StackSlotData, StackSlotKind, StackSlots};
pub use ir::trapcode::TrapCode;
pub use ir::types::Type;
pub use ir::valueloc::{ArgumentLoc, ValueLoc};
use binemit;
use entity::{EntityMap, PrimaryMap};
use isa;
/// Map of value locations.
pub type ValueLocations = EntityMap<Value, ValueLoc>;
/// Map of jump tables.
pub type JumpTables = PrimaryMap<JumpTable, JumpTableData>;
/// Map of instruction encodings.
pub type InstEncodings = EntityMap<Inst, isa::Encoding>;
/// Code offsets for EBBs.
pub type EbbOffsets = EntityMap<Ebb, binemit::CodeOffset>;
/// Source locations for instructions.
pub type SourceLocs = EntityMap<Inst, SourceLoc>;

View File

@@ -0,0 +1,164 @@
//! Program points.
use entity::EntityRef;
use ir::{Ebb, Inst, ValueDef};
use std::cmp;
use std::fmt;
use std::u32;
/// A `ProgramPoint` represents a position in a function where the live range of an SSA value can
/// begin or end. It can be either:
///
/// 1. An instruction or
/// 2. An EBB header.
///
/// This corresponds more or less to the lines in the textual form of Cretonne IR.
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct ProgramPoint(u32);
impl From<Inst> for ProgramPoint {
fn from(inst: Inst) -> ProgramPoint {
let idx = inst.index();
debug_assert!(idx < (u32::MAX / 2) as usize);
ProgramPoint((idx * 2) as u32)
}
}
impl From<Ebb> for ProgramPoint {
fn from(ebb: Ebb) -> ProgramPoint {
let idx = ebb.index();
debug_assert!(idx < (u32::MAX / 2) as usize);
ProgramPoint((idx * 2 + 1) as u32)
}
}
impl From<ValueDef> for ProgramPoint {
fn from(def: ValueDef) -> ProgramPoint {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
}
}
}
/// An expanded program point directly exposes the variants, but takes twice the space to
/// represent.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum ExpandedProgramPoint {
/// An instruction in the function.
Inst(Inst),
/// An EBB header.
Ebb(Ebb),
}
impl ExpandedProgramPoint {
/// Get the instruction we know is inside.
pub fn unwrap_inst(self) -> Inst {
match self {
ExpandedProgramPoint::Inst(x) => x,
ExpandedProgramPoint::Ebb(x) => panic!("expected inst: {}", x),
}
}
}
impl From<Inst> for ExpandedProgramPoint {
fn from(inst: Inst) -> ExpandedProgramPoint {
ExpandedProgramPoint::Inst(inst)
}
}
impl From<Ebb> for ExpandedProgramPoint {
fn from(ebb: Ebb) -> ExpandedProgramPoint {
ExpandedProgramPoint::Ebb(ebb)
}
}
impl From<ValueDef> for ExpandedProgramPoint {
fn from(def: ValueDef) -> ExpandedProgramPoint {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
}
}
}
impl From<ProgramPoint> for ExpandedProgramPoint {
fn from(pp: ProgramPoint) -> ExpandedProgramPoint {
if pp.0 & 1 == 0 {
ExpandedProgramPoint::Inst(Inst::new((pp.0 / 2) as usize))
} else {
ExpandedProgramPoint::Ebb(Ebb::new((pp.0 / 2) as usize))
}
}
}
impl fmt::Display for ExpandedProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ExpandedProgramPoint::Inst(x) => write!(f, "{}", x),
ExpandedProgramPoint::Ebb(x) => write!(f, "{}", x),
}
}
}
impl fmt::Display for ProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let epp: ExpandedProgramPoint = (*self).into();
epp.fmt(f)
}
}
impl fmt::Debug for ExpandedProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ExpandedProgramPoint({})", self)
}
}
impl fmt::Debug for ProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ProgramPoint({})", self)
}
}
/// Context for ordering program points.
///
/// `ProgramPoint` objects don't carry enough information to be ordered independently, they need a
/// context providing the program order.
pub trait ProgramOrder {
/// Compare the program points `a` and `b` relative to this program order.
///
/// Return `Less` if `a` appears in the program before `b`.
///
/// This is declared as a generic such that it can be called with `Inst` and `Ebb` arguments
/// directly. Depending on the implementation, there is a good chance performance will be
/// improved for those cases where the type of either argument is known statically.
fn cmp<A, B>(&self, a: A, b: B) -> cmp::Ordering
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>;
/// Is the range from `inst` to `ebb` just the gap between consecutive EBBs?
///
/// This returns true if `inst` is the terminator in the EBB immediately before `ebb`.
fn is_ebb_gap(&self, inst: Inst, ebb: Ebb) -> bool;
}
#[cfg(test)]
mod tests {
use super::*;
use entity::EntityRef;
use ir::{Ebb, Inst};
use std::string::ToString;
#[test]
fn convert() {
let i5 = Inst::new(5);
let b3 = Ebb::new(3);
let pp1: ProgramPoint = i5.into();
let pp2: ProgramPoint = b3.into();
assert_eq!(pp1.to_string(), "inst5");
assert_eq!(pp2.to_string(), "ebb3");
}
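    // Illustrative sketch (not part of the original commit): instructions map
    // to even encodings and EBB headers to odd ones, so expansion is lossless.
    #[test]
    fn expand() {
        let i5 = Inst::new(5);
        let b3 = Ebb::new(3);
        let pp1: ProgramPoint = i5.into();
        let pp2: ProgramPoint = b3.into();
        assert_eq!(ExpandedProgramPoint::from(pp1), ExpandedProgramPoint::Inst(i5));
        assert_eq!(ExpandedProgramPoint::from(pp2), ExpandedProgramPoint::Ebb(b3));
    }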
}

View File

@@ -0,0 +1,63 @@
//! Source locations.
//!
//! Cretonne tracks the original source location of each instruction, and preserves the source
//! location when instructions are transformed.
use std::fmt;
/// A source location.
///
/// This is an opaque 32-bit number attached to each Cretonne IR instruction. Cretonne does not
/// interpret source locations in any way, they are simply preserved from the input to the output.
///
/// The default source location uses the all-ones bit pattern `!0`. It is used for instructions
/// that can't be given a real source location.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SourceLoc(u32);
impl SourceLoc {
/// Create a new source location with the given bits.
pub fn new(bits: u32) -> SourceLoc {
SourceLoc(bits)
}
/// Is this the default source location?
pub fn is_default(self) -> bool {
self == Default::default()
}
/// Read the bits of this source location.
pub fn bits(self) -> u32 {
self.0
}
}
impl Default for SourceLoc {
fn default() -> Self {
SourceLoc(!0)
}
}
impl fmt::Display for SourceLoc {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.is_default() {
write!(f, "@-")
} else {
write!(f, "@{:04x}", self.0)
}
}
}
#[cfg(test)]
mod tests {
use ir::SourceLoc;
use std::string::ToString;
#[test]
fn display() {
assert_eq!(SourceLoc::default().to_string(), "@-");
assert_eq!(SourceLoc::new(0).to_string(), "@0000");
assert_eq!(SourceLoc::new(16).to_string(), "@0010");
assert_eq!(SourceLoc::new(0xabcdef).to_string(), "@abcdef");
}
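    // Illustrative sketch (not part of the original commit): the all-ones bit
    // pattern is reserved for "no source location".
    #[test]
    fn default_is_unknown() {
        assert!(SourceLoc::default().is_default());
        assert!(!SourceLoc::new(0).is_default());
        assert_eq!(SourceLoc::default().bits(), !0);
    }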
}

View File

@@ -0,0 +1,425 @@
//! Stack slots.
//!
//! The `StackSlotData` struct keeps track of a single stack slot in a function.
//!
use entity::{Iter, IterMut, Keys, PrimaryMap};
use ir::{StackSlot, Type};
use packed_option::PackedOption;
use std::cmp;
use std::fmt;
use std::ops::{Index, IndexMut};
use std::slice;
use std::str::FromStr;
use std::vec::Vec;
/// The size of an object on the stack, or the size of a stack frame.
///
/// We don't use `usize` to represent object sizes on the target platform because Cretonne supports
/// cross-compilation, and `usize` is a type that depends on the host platform, not the target
/// platform.
pub type StackSize = u32;
/// A stack offset.
///
/// The location of a stack offset relative to a stack pointer or frame pointer.
pub type StackOffset = i32;
/// The minimum size of a spill slot in bytes.
///
/// ISA implementations are allowed to assume that small types like `b1` and `i8` get a full 4-byte
/// spill slot.
const MIN_SPILL_SLOT_SIZE: StackSize = 4;
/// Get the spill slot size to use for `ty`.
fn spill_size(ty: Type) -> StackSize {
cmp::max(MIN_SPILL_SLOT_SIZE, ty.bytes())
}
/// The kind of a stack slot.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum StackSlotKind {
/// A spill slot. This is a stack slot created by the register allocator.
SpillSlot,
/// An explicit stack slot. This is a chunk of stack memory for use by the `stack_load`
/// and `stack_store` instructions.
ExplicitSlot,
/// An incoming function argument.
///
/// If the current function has more arguments than fits in registers, the remaining arguments
/// are passed on the stack by the caller. These incoming arguments are represented as SSA
/// values assigned to incoming stack slots.
IncomingArg,
/// An outgoing function argument.
///
/// When preparing to call a function whose arguments don't fit in registers, outgoing argument
/// stack slots are used to represent individual arguments in the outgoing call frame. These
/// stack slots are only valid while setting up a call.
OutgoingArg,
/// An emergency spill slot.
///
/// Emergency slots are allocated late when the register allocator's constraint solver needs
/// extra space to shuffle registers around. They are only used briefly, and can be reused.
EmergencySlot,
}
impl FromStr for StackSlotKind {
type Err = ();
fn from_str(s: &str) -> Result<StackSlotKind, ()> {
use self::StackSlotKind::*;
match s {
"explicit_slot" => Ok(ExplicitSlot),
"spill_slot" => Ok(SpillSlot),
"incoming_arg" => Ok(IncomingArg),
"outgoing_arg" => Ok(OutgoingArg),
"emergency_slot" => Ok(EmergencySlot),
_ => Err(()),
}
}
}
impl fmt::Display for StackSlotKind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::StackSlotKind::*;
f.write_str(match *self {
ExplicitSlot => "explicit_slot",
SpillSlot => "spill_slot",
IncomingArg => "incoming_arg",
OutgoingArg => "outgoing_arg",
EmergencySlot => "emergency_slot",
})
}
}
/// Contents of a stack slot.
#[derive(Clone, Debug)]
pub struct StackSlotData {
/// The kind of stack slot.
pub kind: StackSlotKind,
/// Size of stack slot in bytes.
pub size: StackSize,
/// Offset of stack slot relative to the stack pointer in the caller.
///
/// On x86, the base address is the stack pointer *before* the return address was pushed. On
/// RISC ISAs, the base address is the value of the stack pointer on entry to the function.
///
/// For `OutgoingArg` stack slots, the offset is relative to the current function's stack
/// pointer immediately before the call.
pub offset: Option<StackOffset>,
}
impl StackSlotData {
/// Create a stack slot with the specified byte size.
pub fn new(kind: StackSlotKind, size: StackSize) -> StackSlotData {
StackSlotData {
kind,
size,
offset: None,
}
}
/// Get the alignment in bytes of this stack slot given the stack pointer alignment.
pub fn alignment(&self, max_align: StackSize) -> StackSize {
debug_assert!(max_align.is_power_of_two());
// We want to find the largest power of two that divides both `self.size` and `max_align`.
// That is the same as isolating the rightmost bit in `x`.
let x = self.size | max_align;
// Cf. Hacker's Delight.
x & x.wrapping_neg()
}
}
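// Illustrative sketch (not part of the original commit): for a 24-byte slot and
// a 16-byte stack alignment, `x = 24 | 16 = 0b11000`, and isolating the
// rightmost set bit yields `0b01000 = 8`, matching the `alignment` test below.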
impl fmt::Display for StackSlotData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {}", self.kind, self.size)?;
if let Some(offset) = self.offset {
write!(f, ", offset {}", offset)?;
}
Ok(())
}
}
/// Stack frame manager.
///
/// Keep track of all the stack slots used by a function.
#[derive(Clone, Debug)]
pub struct StackSlots {
/// All allocated stack slots.
slots: PrimaryMap<StackSlot, StackSlotData>,
/// All the outgoing stack slots, ordered by offset.
outgoing: Vec<StackSlot>,
/// All the emergency slots.
emergency: Vec<StackSlot>,
/// The total size of the stack frame.
///
/// This is the distance from the stack pointer in the current function to the stack pointer in
/// the calling function, so it includes a pushed return address as well as space for outgoing
/// call arguments.
///
/// This is computed by the `layout()` method.
pub frame_size: Option<StackSize>,
}
/// Stack slot manager functions that behave mostly like an entity map.
impl StackSlots {
/// Create an empty stack slot manager.
pub fn new() -> Self {
Self {
slots: PrimaryMap::new(),
outgoing: Vec::new(),
emergency: Vec::new(),
frame_size: None,
}
}
/// Clear out everything.
pub fn clear(&mut self) {
self.slots.clear();
self.outgoing.clear();
self.emergency.clear();
self.frame_size = None;
}
/// Allocate a new stack slot.
///
/// This function should be primarily used by the text format parser. There are more convenient
/// functions for creating specific kinds of stack slots below.
pub fn push(&mut self, data: StackSlotData) -> StackSlot {
self.slots.push(data)
}
/// Check if `ss` is a valid stack slot reference.
pub fn is_valid(&self, ss: StackSlot) -> bool {
self.slots.is_valid(ss)
}
/// Set the offset of a stack slot.
pub fn set_offset(&mut self, ss: StackSlot, offset: StackOffset) {
self.slots[ss].offset = Some(offset);
}
/// Get an iterator over all the stack slot keys.
pub fn iter(&self) -> Iter<StackSlot, StackSlotData> {
self.slots.iter()
}
/// Get an iterator over all the stack slot keys, mutable edition.
pub fn iter_mut(&mut self) -> IterMut<StackSlot, StackSlotData> {
self.slots.iter_mut()
}
/// Get an iterator over all the stack slot records.
pub fn values(&self) -> slice::Iter<StackSlotData> {
self.slots.values()
}
/// Get an iterator over all the stack slot records, mutable edition.
pub fn values_mut(&mut self) -> slice::IterMut<StackSlotData> {
self.slots.values_mut()
}
/// Get an iterator over all the stack slot keys.
pub fn keys(&self) -> Keys<StackSlot> {
self.slots.keys()
}
/// Get a reference to the next stack slot that would be created by `push()`.
///
/// This should just be used by the parser.
pub fn next_key(&self) -> StackSlot {
self.slots.next_key()
}
}
impl Index<StackSlot> for StackSlots {
type Output = StackSlotData;
fn index(&self, ss: StackSlot) -> &StackSlotData {
&self.slots[ss]
}
}
impl IndexMut<StackSlot> for StackSlots {
fn index_mut(&mut self, ss: StackSlot) -> &mut StackSlotData {
&mut self.slots[ss]
}
}
/// Higher-level stack frame manipulation functions.
impl StackSlots {
/// Create a new spill slot for spilling values of type `ty`.
pub fn make_spill_slot(&mut self, ty: Type) -> StackSlot {
self.push(StackSlotData::new(StackSlotKind::SpillSlot, spill_size(ty)))
}
/// Create a stack slot representing an incoming function argument.
pub fn make_incoming_arg(&mut self, ty: Type, offset: StackOffset) -> StackSlot {
let mut data = StackSlotData::new(StackSlotKind::IncomingArg, ty.bytes());
debug_assert!(offset <= StackOffset::max_value() - data.size as StackOffset);
data.offset = Some(offset);
self.push(data)
}
/// Get a stack slot representing an outgoing argument.
///
/// This may create a new stack slot, or reuse an existing outgoing stack slot with the
/// requested offset and size.
///
/// The requested offset is relative to this function's stack pointer immediately before making
/// the call.
pub fn get_outgoing_arg(&mut self, ty: Type, offset: StackOffset) -> StackSlot {
let size = ty.bytes();
// Look for an existing outgoing stack slot with the same offset and size.
let inspos = match self.outgoing.binary_search_by_key(&(offset, size), |&ss| {
(self[ss].offset.unwrap(), self[ss].size)
}) {
Ok(idx) => return self.outgoing[idx],
Err(idx) => idx,
};
// No existing slot found. Make one and insert it into `outgoing`.
let mut data = StackSlotData::new(StackSlotKind::OutgoingArg, size);
debug_assert!(offset <= StackOffset::max_value() - size as StackOffset);
data.offset = Some(offset);
let ss = self.slots.push(data);
self.outgoing.insert(inspos, ss);
ss
}
/// Get an emergency spill slot that can be used to store a `ty` value.
///
/// This may allocate a new slot, or it may reuse an existing emergency spill slot, excluding
/// any slots in the `in_use` list.
pub fn get_emergency_slot(
&mut self,
ty: Type,
in_use: &[PackedOption<StackSlot>],
) -> StackSlot {
let size = spill_size(ty);
// Find the smallest existing slot that can fit the type.
if let Some(&ss) = self.emergency
.iter()
.filter(|&&ss| self[ss].size >= size && !in_use.contains(&ss.into()))
.min_by_key(|&&ss| self[ss].size)
{
return ss;
}
// Alternatively, use the largest available slot and make it larger.
if let Some(&ss) = self.emergency
.iter()
.filter(|&&ss| !in_use.contains(&ss.into()))
.max_by_key(|&&ss| self[ss].size)
{
self.slots[ss].size = size;
return ss;
}
// No existing slot found. Make one and insert it into `emergency`.
let data = StackSlotData::new(StackSlotKind::EmergencySlot, size);
let ss = self.slots.push(data);
self.emergency.push(ss);
ss
}
}
#[cfg(test)]
mod tests {
use super::*;
use ir::Function;
use ir::types;
use std::string::ToString;
#[test]
fn stack_slot() {
let mut func = Function::new();
let ss0 = func.create_stack_slot(StackSlotData::new(StackSlotKind::IncomingArg, 4));
let ss1 = func.create_stack_slot(StackSlotData::new(StackSlotKind::SpillSlot, 8));
assert_eq!(ss0.to_string(), "ss0");
assert_eq!(ss1.to_string(), "ss1");
assert_eq!(func.stack_slots[ss0].size, 4);
assert_eq!(func.stack_slots[ss1].size, 8);
assert_eq!(func.stack_slots[ss0].to_string(), "incoming_arg 4");
assert_eq!(func.stack_slots[ss1].to_string(), "spill_slot 8");
}
#[test]
fn outgoing() {
let mut sss = StackSlots::new();
let ss0 = sss.get_outgoing_arg(types::I32, 8);
let ss1 = sss.get_outgoing_arg(types::I32, 4);
let ss2 = sss.get_outgoing_arg(types::I64, 8);
assert_eq!(sss[ss0].offset, Some(8));
assert_eq!(sss[ss0].size, 4);
assert_eq!(sss[ss1].offset, Some(4));
assert_eq!(sss[ss1].size, 4);
assert_eq!(sss[ss2].offset, Some(8));
assert_eq!(sss[ss2].size, 8);
assert_eq!(sss.get_outgoing_arg(types::I32, 8), ss0);
assert_eq!(sss.get_outgoing_arg(types::I32, 4), ss1);
assert_eq!(sss.get_outgoing_arg(types::I64, 8), ss2);
}
#[test]
fn alignment() {
let slot = StackSlotData::new(StackSlotKind::SpillSlot, 8);
assert_eq!(slot.alignment(4), 4);
assert_eq!(slot.alignment(8), 8);
assert_eq!(slot.alignment(16), 8);
let slot2 = StackSlotData::new(StackSlotKind::ExplicitSlot, 24);
assert_eq!(slot2.alignment(4), 4);
assert_eq!(slot2.alignment(8), 8);
assert_eq!(slot2.alignment(16), 8);
assert_eq!(slot2.alignment(32), 8);
}
#[test]
fn emergency() {
let mut sss = StackSlots::new();
let ss0 = sss.get_emergency_slot(types::I32, &[]);
assert_eq!(sss[ss0].size, 4);
// When a smaller size is requested, we should simply get the same slot back.
assert_eq!(sss.get_emergency_slot(types::I8, &[]), ss0);
assert_eq!(sss[ss0].size, 4);
assert_eq!(sss.get_emergency_slot(types::F32, &[]), ss0);
assert_eq!(sss[ss0].size, 4);
// Ask for a larger size and the slot should grow.
assert_eq!(sss.get_emergency_slot(types::F64, &[]), ss0);
assert_eq!(sss[ss0].size, 8);
// When one slot is in use, we should get a new one.
let ss1 = sss.get_emergency_slot(types::I32, &[None.into(), ss0.into()]);
assert_eq!(sss[ss0].size, 8);
assert_eq!(sss[ss1].size, 4);
// Now we should get the smallest fit of the two available slots.
assert_eq!(sss.get_emergency_slot(types::F32, &[]), ss1);
assert_eq!(sss.get_emergency_slot(types::F64, &[]), ss0);
}
}

View File

@@ -0,0 +1,120 @@
//! Trap codes describing the reason for a trap.
use std::fmt::{self, Display, Formatter};
use std::str::FromStr;
/// A trap code describing the reason for a trap.
///
/// All trap instructions have an explicit trap code.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum TrapCode {
/// The current stack space was exhausted.
///
/// On some platforms, a stack overflow may also be indicated by a segmentation fault from the
/// stack guard page.
StackOverflow,
/// A `heap_addr` instruction detected an out-of-bounds error.
///
/// Some out-of-bounds heap accesses are detected by a segmentation fault on the heap guard
/// pages.
HeapOutOfBounds,
/// Other bounds checking error.
OutOfBounds,
/// Indirect call to a null table entry.
IndirectCallToNull,
/// Signature mismatch on indirect call.
BadSignature,
/// An integer arithmetic operation caused an overflow.
IntegerOverflow,
/// An integer division by zero.
IntegerDivisionByZero,
/// Failed float-to-int conversion.
BadConversionToInteger,
/// Execution has potentially run too long and may be interrupted.
/// This trap is resumable.
Interrupt,
/// A user-defined trap code.
User(u16),
}
impl Display for TrapCode {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
use self::TrapCode::*;
let identifier = match *self {
StackOverflow => "stk_ovf",
HeapOutOfBounds => "heap_oob",
OutOfBounds => "oob",
IndirectCallToNull => "icall_null",
BadSignature => "bad_sig",
IntegerOverflow => "int_ovf",
IntegerDivisionByZero => "int_divz",
BadConversionToInteger => "bad_toint",
Interrupt => "interrupt",
User(x) => return write!(f, "user{}", x),
};
f.write_str(identifier)
}
}
impl FromStr for TrapCode {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
use self::TrapCode::*;
match s {
"stk_ovf" => Ok(StackOverflow),
"heap_oob" => Ok(HeapOutOfBounds),
"oob" => Ok(OutOfBounds),
"icall_null" => Ok(IndirectCallToNull),
"bad_sig" => Ok(BadSignature),
"int_ovf" => Ok(IntegerOverflow),
"int_divz" => Ok(IntegerDivisionByZero),
"bad_toint" => Ok(BadConversionToInteger),
"interrupt" => Ok(Interrupt),
_ if s.starts_with("user") => s[4..].parse().map(User).map_err(|_| ()),
_ => Err(()),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::string::ToString;
// Everything but user-defined codes.
const CODES: [TrapCode; 8] = [
TrapCode::StackOverflow,
TrapCode::HeapOutOfBounds,
TrapCode::OutOfBounds,
TrapCode::IndirectCallToNull,
TrapCode::BadSignature,
TrapCode::IntegerOverflow,
TrapCode::IntegerDivisionByZero,
TrapCode::BadConversionToInteger,
];
#[test]
fn display() {
for r in &CODES {
let tc = *r;
assert_eq!(tc.to_string().parse(), Ok(tc));
}
assert_eq!("bogus".parse::<TrapCode>(), Err(()));
assert_eq!(TrapCode::User(17).to_string(), "user17");
assert_eq!("user22".parse(), Ok(TrapCode::User(22)));
assert_eq!("user".parse::<TrapCode>(), Err(()));
assert_eq!("user-1".parse::<TrapCode>(), Err(()));
assert_eq!("users".parse::<TrapCode>(), Err(()));
}
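    // Illustrative sketch (not part of the original commit): user codes carry
    // an arbitrary 16-bit payload and round-trip through their text form.
    #[test]
    fn user_roundtrip() {
        for n in &[0u16, 1, 65535] {
            let tc = TrapCode::User(*n);
            assert_eq!(tc.to_string().parse(), Ok(tc));
        }
    }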
}

456
lib/codegen/src/ir/types.rs Normal file
View File

@@ -0,0 +1,456 @@
//! Common types for the Cretonne code generator.
use std::default::Default;
use std::fmt::{self, Debug, Display, Formatter};
/// The type of an SSA value.
///
/// The `VOID` type is only used for instructions that produce no value. It can't be part of a SIMD
/// vector.
///
/// Basic integer types: `I8`, `I16`, `I32`, and `I64`. These types are sign-agnostic.
///
/// Basic floating point types: `F32` and `F64`. IEEE single and double precision.
///
/// Boolean types: `B1`, `B8`, `B16`, `B32`, and `B64`. These all encode 'true' or 'false'. The
/// larger types use redundant bits.
///
/// SIMD vector types have power-of-two lanes, up to 256. Lanes can be any int/float/bool type.
///
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct Type(u8);
/// No type. Used for functions without a return value. Can't be loaded or stored. Can't be part of
/// a SIMD vector.
pub const VOID: Type = Type(0);
/// Start of the lane types. See also `meta/cdsl.types.py`.
const LANE_BASE: u8 = 0x70;
/// Start of the 2-lane vector types.
const VECTOR_BASE: u8 = LANE_BASE + 16;
// Include code generated by `lib/codegen/meta/gen_types.py`. This file contains constant
// definitions for all the scalar types as well as common vector types for 64, 128, 256, and
// 512-bit SIMD vectors.
include!(concat!(env!("OUT_DIR"), "/types.rs"));
impl Type {
/// Get the lane type of this SIMD vector type.
///
/// A lane type is the same as a SIMD vector type with one lane, so it returns itself.
pub fn lane_type(self) -> Type {
if self.0 < VECTOR_BASE {
self
} else {
Type(LANE_BASE | (self.0 & 0x0f))
}
}
/// Get log_2 of the number of bits in a lane.
pub fn log2_lane_bits(self) -> u8 {
match self.lane_type() {
B1 => 0,
B8 | I8 => 3,
B16 | I16 => 4,
B32 | I32 | F32 => 5,
B64 | I64 | F64 => 6,
_ => 0,
}
}
/// Get the number of bits in a lane.
pub fn lane_bits(self) -> u8 {
match self.lane_type() {
B1 => 1,
B8 | I8 => 8,
B16 | I16 => 16,
B32 | I32 | F32 => 32,
B64 | I64 | F64 => 64,
_ => 0,
}
}
/// Get an integer type with the requested number of bits.
pub fn int(bits: u16) -> Option<Type> {
match bits {
8 => Some(I8),
16 => Some(I16),
32 => Some(I32),
64 => Some(I64),
_ => None,
}
}
/// Get a type with the same number of lanes as `self`, but using `lane` as the lane type.
fn replace_lanes(self, lane: Type) -> Type {
debug_assert!(lane.is_lane() && !self.is_special());
Type((lane.0 & 0x0f) | (self.0 & 0xf0))
}
/// Get a type with the same number of lanes as this type, but with the lanes replaced by
/// booleans of the same size.
///
/// Scalar types are treated as vectors with one lane, so they are converted to the multi-bit
/// boolean types.
pub fn as_bool_pedantic(self) -> Type {
// Replace the low 4 bits with the boolean version, preserve the high 4 bits.
self.replace_lanes(match self.lane_type() {
B8 | I8 => B8,
B16 | I16 => B16,
B32 | I32 | F32 => B32,
B64 | I64 | F64 => B64,
_ => B1,
})
}
/// Get a type with the same number of lanes as this type, but with the lanes replaced by
/// booleans of the same size.
///
/// Scalar types are all converted to `b1` which is usually what you want.
pub fn as_bool(self) -> Type {
if !self.is_vector() {
B1
} else {
self.as_bool_pedantic()
}
}
/// Get a type with the same number of lanes as this type, but with lanes that are half the
/// number of bits.
pub fn half_width(self) -> Option<Type> {
Some(self.replace_lanes(match self.lane_type() {
I16 => I8,
I32 => I16,
I64 => I32,
F64 => F32,
B16 => B8,
B32 => B16,
B64 => B32,
_ => return None,
}))
}
/// Get a type with the same number of lanes as this type, but with lanes that are twice the
/// number of bits.
pub fn double_width(self) -> Option<Type> {
Some(self.replace_lanes(match self.lane_type() {
I8 => I16,
I16 => I32,
I32 => I64,
F32 => F64,
B8 => B16,
B16 => B32,
B32 => B64,
_ => return None,
}))
}
/// Is this the VOID type?
pub fn is_void(self) -> bool {
self == VOID
}
/// Is this a special type?
pub fn is_special(self) -> bool {
self.0 < LANE_BASE
}
/// Is this a lane type?
///
/// This is a scalar type that can also appear as the lane type of a SIMD vector.
pub fn is_lane(self) -> bool {
LANE_BASE <= self.0 && self.0 < VECTOR_BASE
}
/// Is this a SIMD vector type?
///
/// A vector type has 2 or more lanes.
pub fn is_vector(self) -> bool {
self.0 >= VECTOR_BASE
}
/// Is this a scalar boolean type?
pub fn is_bool(self) -> bool {
match self {
B1 | B8 | B16 | B32 | B64 => true,
_ => false,
}
}
/// Is this a scalar integer type?
pub fn is_int(self) -> bool {
match self {
I8 | I16 | I32 | I64 => true,
_ => false,
}
}
/// Is this a scalar floating point type?
pub fn is_float(self) -> bool {
match self {
F32 | F64 => true,
_ => false,
}
}
/// Is this a CPU flags type?
pub fn is_flags(self) -> bool {
match self {
IFLAGS | FFLAGS => true,
_ => false,
}
}
/// Get log_2 of the number of lanes in this SIMD vector type.
///
/// All SIMD types have a lane count that is a power of two and no larger than 256, so this
/// will be a number in the range 0-8.
///
/// A scalar type is the same as a SIMD vector type with one lane, so it returns 0.
pub fn log2_lane_count(self) -> u8 {
self.0.saturating_sub(LANE_BASE) >> 4
}
/// Get the number of lanes in this SIMD vector type.
///
/// A scalar type is the same as a SIMD vector type with one lane, so it returns 1.
pub fn lane_count(self) -> u16 {
1 << self.log2_lane_count()
}
/// Get the total number of bits used to represent this type.
pub fn bits(self) -> u16 {
u16::from(self.lane_bits()) * self.lane_count()
}
/// Get the number of bytes used to store this type in memory.
pub fn bytes(self) -> u32 {
(u32::from(self.bits()) + 7) / 8
}
/// Get a SIMD vector type with `n` times more lanes than this one.
///
/// If this is a scalar type, this produces a SIMD type with this as a lane type and `n` lanes.
///
/// If this is already a SIMD vector type, this produces a SIMD vector type with `n *
/// self.lane_count()` lanes.
pub fn by(self, n: u16) -> Option<Type> {
if self.lane_bits() == 0 || !n.is_power_of_two() {
return None;
}
let log2_lanes: u32 = n.trailing_zeros();
let new_type = u32::from(self.0) + (log2_lanes << 4);
if new_type < 0x100 {
Some(Type(new_type as u8))
} else {
None
}
}
/// Get a SIMD vector with half the number of lanes.
///
/// There is no `double_vector()` method. Use `t.by(2)` instead.
pub fn half_vector(self) -> Option<Type> {
if self.is_vector() {
Some(Type(self.0 - 0x10))
} else {
None
}
}
/// Index of this type, for use with hash tables etc.
pub fn index(self) -> usize {
usize::from(self.0)
}
/// True iff:
///
/// 1. `self.lane_count() == other.lane_count()` and
/// 2. `self.lane_bits() >= other.lane_bits()`
pub fn wider_or_equal(self, other: Type) -> bool {
self.lane_count() == other.lane_count() && self.lane_bits() >= other.lane_bits()
}
}
impl Display for Type {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
if self.is_bool() {
write!(f, "b{}", self.lane_bits())
} else if self.is_int() {
write!(f, "i{}", self.lane_bits())
} else if self.is_float() {
write!(f, "f{}", self.lane_bits())
} else if self.is_vector() {
write!(f, "{}x{}", self.lane_type(), self.lane_count())
} else {
f.write_str(match *self {
VOID => "void",
IFLAGS => "iflags",
FFLAGS => "fflags",
_ => panic!("Invalid Type(0x{:x})", self.0),
})
}
}
}
impl Debug for Type {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
if self.is_bool() {
write!(f, "types::B{}", self.lane_bits())
} else if self.is_int() {
write!(f, "types::I{}", self.lane_bits())
} else if self.is_float() {
write!(f, "types::F{}", self.lane_bits())
} else if self.is_vector() {
write!(f, "{:?}X{}", self.lane_type(), self.lane_count())
} else {
match *self {
VOID => write!(f, "types::VOID"),
IFLAGS => write!(f, "types::IFLAGS"),
FFLAGS => write!(f, "types::FFLAGS"),
_ => write!(f, "Type(0x{:x})", self.0),
}
}
}
}
impl Default for Type {
fn default() -> Self {
VOID
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::string::ToString;
#[test]
fn basic_scalars() {
assert_eq!(VOID, VOID.lane_type());
assert_eq!(0, VOID.bits());
assert_eq!(IFLAGS, IFLAGS.lane_type());
assert_eq!(0, IFLAGS.bits());
assert_eq!(FFLAGS, FFLAGS.lane_type());
assert_eq!(0, FFLAGS.bits());
assert_eq!(B1, B1.lane_type());
assert_eq!(B8, B8.lane_type());
assert_eq!(B16, B16.lane_type());
assert_eq!(B32, B32.lane_type());
assert_eq!(B64, B64.lane_type());
assert_eq!(I8, I8.lane_type());
assert_eq!(I16, I16.lane_type());
assert_eq!(I32, I32.lane_type());
assert_eq!(I64, I64.lane_type());
assert_eq!(F32, F32.lane_type());
assert_eq!(F64, F64.lane_type());
assert_eq!(VOID.lane_bits(), 0);
assert_eq!(IFLAGS.lane_bits(), 0);
assert_eq!(FFLAGS.lane_bits(), 0);
assert_eq!(B1.lane_bits(), 1);
assert_eq!(B8.lane_bits(), 8);
assert_eq!(B16.lane_bits(), 16);
assert_eq!(B32.lane_bits(), 32);
assert_eq!(B64.lane_bits(), 64);
assert_eq!(I8.lane_bits(), 8);
assert_eq!(I16.lane_bits(), 16);
assert_eq!(I32.lane_bits(), 32);
assert_eq!(I64.lane_bits(), 64);
assert_eq!(F32.lane_bits(), 32);
assert_eq!(F64.lane_bits(), 64);
}
#[test]
fn typevar_functions() {
assert_eq!(VOID.half_width(), None);
assert_eq!(IFLAGS.half_width(), None);
assert_eq!(FFLAGS.half_width(), None);
assert_eq!(B1.half_width(), None);
assert_eq!(B8.half_width(), None);
assert_eq!(B16.half_width(), Some(B8));
assert_eq!(B32.half_width(), Some(B16));
assert_eq!(B64.half_width(), Some(B32));
assert_eq!(I8.half_width(), None);
assert_eq!(I16.half_width(), Some(I8));
assert_eq!(I32.half_width(), Some(I16));
assert_eq!(I32X4.half_width(), Some(I16X4));
assert_eq!(I64.half_width(), Some(I32));
assert_eq!(F32.half_width(), None);
assert_eq!(F64.half_width(), Some(F32));
assert_eq!(VOID.double_width(), None);
assert_eq!(IFLAGS.double_width(), None);
assert_eq!(FFLAGS.double_width(), None);
assert_eq!(B1.double_width(), None);
assert_eq!(B8.double_width(), Some(B16));
assert_eq!(B16.double_width(), Some(B32));
assert_eq!(B32.double_width(), Some(B64));
assert_eq!(B64.double_width(), None);
assert_eq!(I8.double_width(), Some(I16));
assert_eq!(I16.double_width(), Some(I32));
assert_eq!(I32.double_width(), Some(I64));
assert_eq!(I32X4.double_width(), Some(I64X4));
assert_eq!(I64.double_width(), None);
assert_eq!(F32.double_width(), Some(F64));
assert_eq!(F64.double_width(), None);
}
#[test]
fn vectors() {
let big = F64.by(256).unwrap();
assert_eq!(big.lane_bits(), 64);
assert_eq!(big.lane_count(), 256);
assert_eq!(big.bits(), 64 * 256);
assert_eq!(big.half_vector().unwrap().to_string(), "f64x128");
assert_eq!(B1.by(2).unwrap().half_vector().unwrap().to_string(), "b1");
assert_eq!(I32.half_vector(), None);
assert_eq!(VOID.half_vector(), None);
// Check that the generated constants match the computed vector types.
assert_eq!(I32.by(4), Some(I32X4));
assert_eq!(F64.by(8), Some(F64X8));
}
#[test]
fn format_scalars() {
assert_eq!(VOID.to_string(), "void");
assert_eq!(IFLAGS.to_string(), "iflags");
assert_eq!(FFLAGS.to_string(), "fflags");
assert_eq!(B1.to_string(), "b1");
assert_eq!(B8.to_string(), "b8");
assert_eq!(B16.to_string(), "b16");
assert_eq!(B32.to_string(), "b32");
assert_eq!(B64.to_string(), "b64");
assert_eq!(I8.to_string(), "i8");
assert_eq!(I16.to_string(), "i16");
assert_eq!(I32.to_string(), "i32");
assert_eq!(I64.to_string(), "i64");
assert_eq!(F32.to_string(), "f32");
assert_eq!(F64.to_string(), "f64");
}
#[test]
fn format_vectors() {
assert_eq!(B1.by(8).unwrap().to_string(), "b1x8");
assert_eq!(B8.by(1).unwrap().to_string(), "b8");
assert_eq!(B16.by(256).unwrap().to_string(), "b16x256");
assert_eq!(B32.by(4).unwrap().by(2).unwrap().to_string(), "b32x8");
assert_eq!(B64.by(8).unwrap().to_string(), "b64x8");
assert_eq!(I8.by(64).unwrap().to_string(), "i8x64");
assert_eq!(F64.by(2).unwrap().to_string(), "f64x2");
assert_eq!(I8.by(3), None);
assert_eq!(I8.by(512), None);
assert_eq!(VOID.by(4), None);
}
#[test]
fn as_bool() {
assert_eq!(I32X4.as_bool(), B32X4);
assert_eq!(I32.as_bool(), B1);
assert_eq!(I32X4.as_bool_pedantic(), B32X4);
assert_eq!(I32.as_bool_pedantic(), B32);
}
}


@@ -0,0 +1,165 @@
//! Value locations.
//!
//! The register allocator assigns every SSA value to either a register or a stack slot. This
//! assignment is represented by a `ValueLoc` object.
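//!
//! A minimal sketch of inspecting a `ValueLoc` (illustrative doc example; it assumes the type is
//! re-exported as `cretonne_codegen::ir::ValueLoc`):
//!
//! ```
//! use cretonne_codegen::ir::ValueLoc;
//!
//! let loc = ValueLoc::default(); // starts out unassigned
//! assert!(!loc.is_assigned());
//! assert_eq!(ValueLoc::Reg(3).unwrap_reg(), 3);
//! ```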
use ir::StackSlot;
use isa::{RegInfo, RegUnit};
use std::fmt;
/// Value location.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ValueLoc {
/// This value has not been assigned to a location yet.
Unassigned,
/// Value is assigned to a register.
Reg(RegUnit),
/// Value is assigned to a stack slot.
Stack(StackSlot),
}
impl Default for ValueLoc {
fn default() -> Self {
ValueLoc::Unassigned
}
}
impl ValueLoc {
/// Is this an assigned location? (That is, not `Unassigned`).
pub fn is_assigned(&self) -> bool {
match *self {
ValueLoc::Unassigned => false,
_ => true,
}
}
/// Get the register unit of this location, or panic.
pub fn unwrap_reg(self) -> RegUnit {
match self {
ValueLoc::Reg(ru) => ru,
_ => panic!("Expected register: {:?}", self),
}
}
/// Get the stack slot of this location, or panic.
pub fn unwrap_stack(self) -> StackSlot {
match self {
ValueLoc::Stack(ss) => ss,
_ => panic!("Expected stack slot: {:?}", self),
}
}
/// Return an object that can display this value location, using the register info from the
/// target ISA.
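    ///
    /// A minimal sketch (illustrative doc example; it assumes the re-export
    /// `cretonne_codegen::ir::ValueLoc`):
    ///
    /// ```
    /// use cretonne_codegen::ir::ValueLoc;
    ///
    /// // With no `RegInfo`, register units fall back to plain numbers.
    /// assert_eq!(ValueLoc::Reg(12).display(None).to_string(), "%12");
    /// ```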
pub fn display<'a, R: Into<Option<&'a RegInfo>>>(self, regs: R) -> DisplayValueLoc<'a> {
DisplayValueLoc(self, regs.into())
}
}
/// Displaying a `ValueLoc` correctly requires the associated `RegInfo` from the target ISA.
/// Without the register info, register units are simply shown as numbers.
///
/// The `DisplayValueLoc` type can display the contained `ValueLoc`.
pub struct DisplayValueLoc<'a>(ValueLoc, Option<&'a RegInfo>);
impl<'a> fmt::Display for DisplayValueLoc<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
ValueLoc::Unassigned => write!(f, "-"),
ValueLoc::Reg(ru) => {
match self.1 {
Some(regs) => write!(f, "{}", regs.display_regunit(ru)),
None => write!(f, "%{}", ru),
}
}
ValueLoc::Stack(ss) => write!(f, "{}", ss),
}
}
}
/// Function argument location.
///
/// The ABI specifies how arguments are passed to a function, and where return values appear after
/// the call. Just like a `ValueLoc`, function arguments can be passed in registers or on the
/// stack.
///
/// Function arguments on the stack are accessed differently for the incoming arguments to the
/// current function and the outgoing arguments to a called external function. For this reason,
/// the location of stack arguments is described as an offset into the array of function arguments
/// on the stack.
///
/// An `ArgumentLoc` can be translated to a `ValueLoc` only when we know whether it refers to an
/// incoming argument or an outgoing argument.
///
/// - For stack arguments, different `StackSlot` entities are used to represent incoming and
/// outgoing arguments.
/// - For register arguments, there is usually no difference, but if we ever add support for a
/// register-window ISA like SPARC, register arguments would also need to be translated.
#[derive(Copy, Clone, Debug)]
pub enum ArgumentLoc {
/// This argument has not been assigned to a location yet.
Unassigned,
/// Argument is passed in a register.
Reg(RegUnit),
/// Argument is passed on the stack, at the given byte offset into the argument array.
Stack(i32),
}
impl Default for ArgumentLoc {
fn default() -> Self {
ArgumentLoc::Unassigned
}
}
impl ArgumentLoc {
/// Is this an assigned location? (That is, not `Unassigned`).
pub fn is_assigned(&self) -> bool {
match *self {
ArgumentLoc::Unassigned => false,
_ => true,
}
}
/// Is this a register location?
pub fn is_reg(&self) -> bool {
match *self {
ArgumentLoc::Reg(_) => true,
_ => false,
}
}
/// Is this a stack location?
pub fn is_stack(&self) -> bool {
match *self {
ArgumentLoc::Stack(_) => true,
_ => false,
}
}
/// Return an object that can display this argument location, using the register info from the
/// target ISA.
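    ///
    /// A minimal sketch (illustrative doc example; it assumes the re-export
    /// `cretonne_codegen::ir::ArgumentLoc`):
    ///
    /// ```
    /// use cretonne_codegen::ir::ArgumentLoc;
    ///
    /// // With no `RegInfo`, register units fall back to plain numbers,
    /// // and stack arguments display their byte offset.
    /// assert_eq!(ArgumentLoc::Reg(5).display(None).to_string(), "%5");
    /// assert_eq!(ArgumentLoc::Stack(8).display(None).to_string(), "8");
    /// ```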
pub fn display<'a, R: Into<Option<&'a RegInfo>>>(self, regs: R) -> DisplayArgumentLoc<'a> {
DisplayArgumentLoc(self, regs.into())
}
}
/// Displaying an `ArgumentLoc` correctly requires the associated `RegInfo` from the target ISA.
/// Without the register info, register units are simply shown as numbers.
///
/// The `DisplayArgumentLoc` type can display the contained `ArgumentLoc`.
pub struct DisplayArgumentLoc<'a>(ArgumentLoc, Option<&'a RegInfo>);
impl<'a> fmt::Display for DisplayArgumentLoc<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
ArgumentLoc::Unassigned => write!(f, "-"),
ArgumentLoc::Reg(ru) => {
match self.1 {
Some(regs) => write!(f, "{}", regs.display_regunit(ru)),
None => write!(f, "%{}", ru),
}
}
ArgumentLoc::Stack(offset) => write!(f, "{}", offset),
}
}
}