Move library crates under 'lib/'.
Give these crates each a more standard directory layout with sources in a 'src' sub-directory and Cargo.toml in the top lib/foo directory. Add license and description fields to each. The build script for the cretonne crate now lives in 'lib/cretonne/build.rs', separating it from the normal library sources under 'lib/cretonne/src'.
This commit is contained in:
@@ -1,19 +0,0 @@
|
||||
[package]
authors = ["The Cretonne Project Developers"]
name = "cretonne"
version = "0.0.0"
description = "Low-level code generator library"
documentation = "https://cretonne.readthedocs.io/"
repository = "https://github.com/stoklund/cretonne"
publish = false
build = "build.rs"

[lib]
name = "cretonne"
path = "lib.rs"

[dependencies]
# It is a goal of the cretonne crate to have minimal external dependencies.
# Please don't add any unless they are essential to the task of creating binary
# machine code. Integration tests that need external dependencies can be
# accommodated in src/tools/tests.
|
||||
@@ -1,46 +0,0 @@
|
||||
|
||||
// Build script.
|
||||
//
|
||||
// This program is run by Cargo when building libcretonne. It is used to generate Rust code from
|
||||
// the language definitions in the meta directory.
|
||||
//
|
||||
// Environment:
|
||||
//
|
||||
// OUT_DIR
|
||||
// Directory where generated files should be placed.
|
||||
//
|
||||
// The build script expects to be run from the directory where this build.rs file lives. The
|
||||
// current directory is used to find the sources.
|
||||
|
||||
|
||||
use std::env;
|
||||
use std::process;
|
||||
|
||||
fn main() {
|
||||
let out_dir = env::var("OUT_DIR").expect("The OUT_DIR environment variable must be set");
|
||||
|
||||
println!("Build script generating files in {}", out_dir);
|
||||
|
||||
let mut cur_dir = env::current_dir().expect("Can't access current working directory");
|
||||
|
||||
// We're in src/libcretonne. Find the top-level directory.
|
||||
assert!(cur_dir.pop(), "No parent 'src' directory");
|
||||
assert!(cur_dir.pop(), "No top-level directory");
|
||||
let top_dir = cur_dir.as_path();
|
||||
|
||||
// Scripts are in $top_dir/meta.
|
||||
let meta_dir = top_dir.join("lib/cretonne/meta");
|
||||
let build_script = meta_dir.join("build.py");
|
||||
|
||||
// Launch build script with Python. We'll just find python in the path.
|
||||
let status = process::Command::new("python")
|
||||
.current_dir(top_dir)
|
||||
.arg(build_script)
|
||||
.arg("--out-dir")
|
||||
.arg(out_dir)
|
||||
.status()
|
||||
.expect("Failed to launch second-level build script");
|
||||
if !status.success() {
|
||||
process::exit(status.code().unwrap());
|
||||
}
|
||||
}
|
||||
@@ -1,229 +0,0 @@
|
||||
//! A control flow graph represented as mappings of extended basic blocks to their predecessors
//! and successors.
//!
//! Successors are represented as extended basic blocks while predecessors are represented by
//! basic blocks. Basic blocks are denoted by tuples of EBB and branch/jump instructions. Each
//! predecessor tuple corresponds to the end of a basic block.
//!
//! ```c
//! Ebb0:
//!     ... ; beginning of basic block
//!
//!     ...
//!
//!     brz vx, Ebb1 ; end of basic block
//!
//!     ... ; beginning of basic block
//!
//!     ...
//!
//!     jmp Ebb2 ; end of basic block
//! ```
//!
//! Here Ebb1 and Ebb2 would each have a single predecessor denoted as (Ebb0, `brz vx, Ebb1`)
//! and (Ebb0, `jmp Ebb2`) respectively.

use ir::{Function, Inst, Ebb};
use ir::instructions::BranchInfo;
use entity_map::{EntityMap, Keys};
use std::collections::HashSet;

/// A basic block denoted by its enclosing Ebb and last instruction.
pub type BasicBlock = (Ebb, Inst);

/// A container for the successors and predecessors of some Ebb.
#[derive(Debug, Clone, Default)]
pub struct CFGNode {
    /// EBBs reachable from the terminating branches/jumps of this EBB.
    pub successors: Vec<Ebb>,
    /// Basic blocks (EBB, terminator instruction) that can transfer control to this EBB.
    pub predecessors: Vec<BasicBlock>,
}

impl CFGNode {
    /// Create a node with empty successor and predecessor lists.
    pub fn new() -> CFGNode {
        // Both lists default to empty vectors, so the derived `Default` does the job.
        CFGNode::default()
    }
}
|
||||
|
||||
/// The Control Flow Graph maintains a mapping of ebbs to their predecessors
|
||||
/// and successors where predecessors are basic blocks and successors are
|
||||
/// extended basic blocks.
|
||||
#[derive(Debug)]
|
||||
pub struct ControlFlowGraph {
|
||||
entry_block: Option<Ebb>,
|
||||
data: EntityMap<Ebb, CFGNode>,
|
||||
}
|
||||
|
||||
impl ControlFlowGraph {
|
||||
/// During initialization mappings will be generated for any existing
|
||||
/// blocks within the CFG's associated function.
|
||||
pub fn new(func: &Function) -> ControlFlowGraph {
|
||||
|
||||
let mut cfg = ControlFlowGraph {
|
||||
data: EntityMap::with_capacity(func.dfg.num_ebbs()),
|
||||
entry_block: func.layout.entry_block(),
|
||||
};
|
||||
|
||||
for ebb in &func.layout {
|
||||
for inst in func.layout.ebb_insts(ebb) {
|
||||
match func.dfg[inst].analyze_branch() {
|
||||
BranchInfo::SingleDest(dest, _) => {
|
||||
cfg.add_edge((ebb, inst), dest);
|
||||
}
|
||||
BranchInfo::Table(jt) => {
|
||||
for (_, dest) in func.jump_tables[jt].entries() {
|
||||
cfg.add_edge((ebb, inst), dest);
|
||||
}
|
||||
}
|
||||
BranchInfo::NotABranch => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
cfg
|
||||
}
|
||||
|
||||
fn add_edge(&mut self, from: BasicBlock, to: Ebb) {
|
||||
self.data[from.0].successors.push(to);
|
||||
self.data[to].predecessors.push(from);
|
||||
}
|
||||
|
||||
pub fn get_predecessors(&self, ebb: Ebb) -> &Vec<BasicBlock> {
|
||||
&self.data[ebb].predecessors
|
||||
}
|
||||
|
||||
pub fn get_successors(&self, ebb: Ebb) -> &Vec<Ebb> {
|
||||
&self.data[ebb].successors
|
||||
}
|
||||
|
||||
/// Return [reachable] ebbs in postorder.
|
||||
pub fn postorder_ebbs(&self) -> Vec<Ebb> {
|
||||
let entry_block = match self.entry_block {
|
||||
None => {
|
||||
return Vec::new();
|
||||
}
|
||||
Some(eb) => eb,
|
||||
};
|
||||
|
||||
let mut grey = HashSet::new();
|
||||
let mut black = HashSet::new();
|
||||
let mut stack = vec![entry_block.clone()];
|
||||
let mut postorder = Vec::new();
|
||||
|
||||
while !stack.is_empty() {
|
||||
let node = stack.pop().unwrap();
|
||||
if !grey.contains(&node) {
|
||||
// This is a white node. Mark it as gray.
|
||||
grey.insert(node);
|
||||
stack.push(node);
|
||||
// Get any children we’ve never seen before.
|
||||
for child in self.get_successors(node) {
|
||||
if !grey.contains(child) {
|
||||
stack.push(child.clone());
|
||||
}
|
||||
}
|
||||
} else if !black.contains(&node) {
|
||||
postorder.push(node.clone());
|
||||
black.insert(node.clone());
|
||||
}
|
||||
}
|
||||
postorder
|
||||
}
|
||||
|
||||
/// An iterator across all of the ebbs stored in the cfg.
|
||||
pub fn ebbs(&self) -> Keys<Ebb> {
|
||||
self.data.keys()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use ir::{Function, Builder, Cursor, VariableArgs, types};

    #[test]
    fn empty() {
        // A function with no EBBs produces a CFG with no nodes.
        let func = Function::new();
        let cfg = ControlFlowGraph::new(&func);
        assert_eq!(None, cfg.ebbs().next());
    }

    #[test]
    fn no_predecessors() {
        // EBBs without terminators contribute no edges at all.
        let mut func = Function::new();
        let ebb0 = func.dfg.make_ebb();
        let ebb1 = func.dfg.make_ebb();
        let ebb2 = func.dfg.make_ebb();
        func.layout.append_ebb(ebb0);
        func.layout.append_ebb(ebb1);
        func.layout.append_ebb(ebb2);

        let cfg = ControlFlowGraph::new(&func);
        let nodes = cfg.ebbs().collect::<Vec<_>>();
        assert_eq!(nodes.len(), 3);

        // CFG nodes come back in layout order, each with empty edge lists.
        let mut fun_ebbs = func.layout.ebbs();
        for ebb in nodes {
            assert_eq!(ebb, fun_ebbs.next().unwrap());
            assert!(cfg.get_predecessors(ebb).is_empty());
            assert!(cfg.get_successors(ebb).is_empty());
        }
    }

    #[test]
    fn branches_and_jumps() {
        let mut func = Function::new();
        let ebb0 = func.dfg.make_ebb();
        let cond = func.dfg.append_ebb_arg(ebb0, types::I32);
        let ebb1 = func.dfg.make_ebb();
        let ebb2 = func.dfg.make_ebb();

        let br_ebb0_ebb2;
        let br_ebb1_ebb1;
        let jmp_ebb0_ebb1;
        let jmp_ebb1_ebb2;

        {
            let mut cursor = Cursor::new(&mut func.layout);
            let mut builder = Builder::new(&mut func.dfg, &mut cursor);

            builder.insert_ebb(ebb0);
            br_ebb0_ebb2 = builder.brnz(cond, ebb2, VariableArgs::new());
            jmp_ebb0_ebb1 = builder.jump(ebb1, VariableArgs::new());

            builder.insert_ebb(ebb1);
            br_ebb1_ebb1 = builder.brnz(cond, ebb1, VariableArgs::new());
            jmp_ebb1_ebb2 = builder.jump(ebb2, VariableArgs::new());

            builder.insert_ebb(ebb2);
        }

        let cfg = ControlFlowGraph::new(&func);

        let ebb0_predecessors = cfg.get_predecessors(ebb0);
        let ebb1_predecessors = cfg.get_predecessors(ebb1);
        let ebb2_predecessors = cfg.get_predecessors(ebb2);

        let ebb0_successors = cfg.get_successors(ebb0);
        let ebb1_successors = cfg.get_successors(ebb1);
        let ebb2_successors = cfg.get_successors(ebb2);

        assert_eq!(ebb0_predecessors.len(), 0);
        assert_eq!(ebb1_predecessors.len(), 2);
        assert_eq!(ebb2_predecessors.len(), 2);

        // Each predecessor is the (EBB, terminator) pair that jumps/branches here.
        assert!(ebb1_predecessors.contains(&(ebb0, jmp_ebb0_ebb1)));
        assert!(ebb1_predecessors.contains(&(ebb1, br_ebb1_ebb1)));
        assert!(ebb2_predecessors.contains(&(ebb0, br_ebb0_ebb2)));
        assert!(ebb2_predecessors.contains(&(ebb1, jmp_ebb1_ebb2)));

        assert_eq!(ebb0_successors.len(), 2);
        assert_eq!(ebb1_successors.len(), 2);
        assert_eq!(ebb2_successors.len(), 0);

        assert!(ebb0_successors.contains(&ebb1));
        assert!(ebb0_successors.contains(&ebb2));
        assert!(ebb1_successors.contains(&ebb1));
        assert!(ebb1_successors.contains(&ebb2));
    }
}
|
||||
@@ -1,74 +0,0 @@
|
||||
//! Runtime support for precomputed constant hash tables.
//!
//! The `meta/constant_hash.py` Python module can generate constant hash tables using open
//! addressing and quadratic probing. The hash tables are arrays that are guaranteed to:
//!
//! - Have a power-of-two size.
//! - Contain at least one empty slot.
//!
//! This module provides runtime support for lookups in these tables.

/// Trait that must be implemented by the entries in a constant hash table.
pub trait Table<K: Copy + Eq> {
    /// Get the number of entries in this table which must be a power of two.
    fn len(&self) -> usize;

    /// Get the key corresponding to the entry at `idx`, or `None` if the entry is empty.
    /// The `idx` must be in range.
    fn key(&self, idx: usize) -> Option<K>;
}

/// Look for `key` in `table`.
///
/// The provided `hash` value must have been computed from `key` using the same hash function that
/// was used to construct the table.
///
/// Returns the table index containing the found entry, or `None` if no entry could be found.
pub fn probe<K: Copy + Eq, T: Table<K> + ?Sized>(table: &T, key: K, hash: usize) -> Option<usize> {
    debug_assert!(table.len().is_power_of_two());
    let mask = table.len() - 1;

    let mut slot = hash;
    let mut stride = 0;

    loop {
        slot &= mask;

        match table.key(slot) {
            // An empty slot means the key cannot be anywhere in the table.
            None => return None,
            Some(k) if k == key => return Some(slot),
            _ => {}
        }

        // Quadratic probing.
        stride += 1;
        // When `table.len()` is a power of two, it can be proven that `slot` will visit all
        // entries. This means that this loop will always terminate if the hash table has even
        // one unused entry.
        debug_assert!(stride < table.len());
        slot += stride;
    }
}

/// A primitive hash function for matching opcodes.
/// Must match `meta/constant_hash.py`.
pub fn simple_hash(s: &str) -> usize {
    // DJB-style mixing: start from 5381, fold in one character at a time.
    let h = s.chars()
        .fold(5381u32, |h, c| (h ^ c as u32).wrapping_add(h.rotate_right(6)));
    h as usize
}

#[cfg(test)]
mod tests {
    use super::simple_hash;

    #[test]
    fn basic() {
        // c.f. meta/constant_hash.py tests.
        assert_eq!(simple_hash("Hello"), 0x2fa70c01);
        assert_eq!(simple_hash("world"), 0x5b0c31d5);
    }
}
|
||||
@@ -1,170 +0,0 @@
|
||||
/// ! A Dominator Tree represented as mappings of Ebbs to their immediate dominator.
|
||||
|
||||
use cfg::*;
|
||||
use ir::Ebb;
|
||||
use ir::entities::NO_INST;
|
||||
use entity_map::EntityMap;
|
||||
|
||||
pub struct DominatorTree {
|
||||
data: EntityMap<Ebb, Option<BasicBlock>>,
|
||||
}
|
||||
|
||||
impl DominatorTree {
|
||||
/// Build a dominator tree from a control flow graph using Keith D. Cooper's
|
||||
/// "Simple, Fast Dominator Algorithm."
|
||||
pub fn new(cfg: &ControlFlowGraph) -> DominatorTree {
|
||||
let mut ebbs = cfg.postorder_ebbs();
|
||||
ebbs.reverse();
|
||||
|
||||
let len = ebbs.len();
|
||||
|
||||
// The mappings which designate the dominator tree.
|
||||
let mut data = EntityMap::with_capacity(len);
|
||||
|
||||
let mut postorder_map = EntityMap::with_capacity(len);
|
||||
for (i, ebb) in ebbs.iter().enumerate() {
|
||||
postorder_map[ebb.clone()] = len - i;
|
||||
}
|
||||
|
||||
let mut changed = false;
|
||||
|
||||
if len > 0 {
|
||||
data[ebbs[0]] = Some((ebbs[0], NO_INST));
|
||||
changed = true;
|
||||
}
|
||||
|
||||
while changed {
|
||||
changed = false;
|
||||
for i in 1..len {
|
||||
let ebb = ebbs[i];
|
||||
let preds = cfg.get_predecessors(ebb);
|
||||
let mut new_idom = None;
|
||||
|
||||
for pred in preds {
|
||||
if new_idom == None {
|
||||
new_idom = Some(pred.clone());
|
||||
continue;
|
||||
}
|
||||
// If this predecessor has an idom available find its common
|
||||
// ancestor with the current value of new_idom.
|
||||
if let Some(_) = data[pred.0] {
|
||||
new_idom = match new_idom {
|
||||
Some(cur_idom) => {
|
||||
Some((DominatorTree::intersect(&mut data,
|
||||
&postorder_map,
|
||||
*pred,
|
||||
cur_idom)))
|
||||
}
|
||||
None => panic!("A 'current idom' should have been set!"),
|
||||
}
|
||||
}
|
||||
}
|
||||
match data[ebb] {
|
||||
None => {
|
||||
data[ebb] = new_idom;
|
||||
changed = true;
|
||||
}
|
||||
Some(idom) => {
|
||||
// Old idom != New idom
|
||||
if idom.0 != new_idom.unwrap().0 {
|
||||
data[ebb] = new_idom;
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
DominatorTree { data: data }
|
||||
}
|
||||
|
||||
/// Find the common dominator of two ebbs.
|
||||
fn intersect(data: &EntityMap<Ebb, Option<BasicBlock>>,
|
||||
ordering: &EntityMap<Ebb, usize>,
|
||||
first: BasicBlock,
|
||||
second: BasicBlock)
|
||||
-> BasicBlock {
|
||||
let mut a = first;
|
||||
let mut b = second;
|
||||
|
||||
// Here we use 'ordering', a mapping of ebbs to their postorder
|
||||
// visitation number, to ensure that we move upward through the tree.
|
||||
// Walking upward means that we may always expect self.data[a] and
|
||||
// self.data[b] to contain non-None entries.
|
||||
while a.0 != b.0 {
|
||||
while ordering[a.0] < ordering[b.0] {
|
||||
a = data[a.0].unwrap();
|
||||
}
|
||||
while ordering[b.0] < ordering[a.0] {
|
||||
b = data[b.0].unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: we can't rely on instruction numbers to always be ordered
|
||||
// from lowest to highest. Given that, it will be necessary to create
|
||||
// an abolute mapping to determine the instruction order in the future.
|
||||
if a.1 == NO_INST || a.1 < b.1 { a } else { b }
|
||||
}
|
||||
|
||||
/// Returns the immediate dominator of some ebb or None if the
|
||||
/// node is unreachable.
|
||||
pub fn idom(&self, ebb: Ebb) -> Option<BasicBlock> {
|
||||
self.data[ebb].clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use ir::{Function, Builder, Cursor, VariableArgs, types};
    use ir::entities::NO_INST;
    use cfg::ControlFlowGraph;

    #[test]
    fn empty() {
        // A function with no EBBs yields an empty dominator tree.
        let func = Function::new();
        let cfg = ControlFlowGraph::new(&func);
        let dtree = DominatorTree::new(&cfg);
        assert_eq!(0, dtree.data.keys().count());
    }

    #[test]
    fn non_zero_entry_block() {
        // The entry block need not be the lowest-numbered EBB.
        let mut func = Function::new();
        let ebb3 = func.dfg.make_ebb();
        let cond = func.dfg.append_ebb_arg(ebb3, types::I32);
        let ebb1 = func.dfg.make_ebb();
        let ebb2 = func.dfg.make_ebb();
        let ebb0 = func.dfg.make_ebb();

        let jmp_ebb3_ebb1;
        let br_ebb1_ebb0;
        let jmp_ebb1_ebb2;

        {
            let mut cursor = Cursor::new(&mut func.layout);
            let mut builder = Builder::new(&mut func.dfg, &mut cursor);

            builder.insert_ebb(ebb3);
            jmp_ebb3_ebb1 = builder.jump(ebb1, VariableArgs::new());

            builder.insert_ebb(ebb1);
            br_ebb1_ebb0 = builder.brnz(cond, ebb0, VariableArgs::new());
            jmp_ebb1_ebb2 = builder.jump(ebb2, VariableArgs::new());

            builder.insert_ebb(ebb2);
            builder.jump(ebb0, VariableArgs::new());

            builder.insert_ebb(ebb0);
        }

        let cfg = ControlFlowGraph::new(&func);
        let dt = DominatorTree::new(&cfg);

        // The entry block dominates itself through the NO_INST sentinel edge.
        assert_eq!(func.layout.entry_block().unwrap(), ebb3);
        assert_eq!(dt.idom(ebb3).unwrap(), (ebb3, NO_INST));
        assert_eq!(dt.idom(ebb1).unwrap(), (ebb3, jmp_ebb3_ebb1));
        assert_eq!(dt.idom(ebb2).unwrap(), (ebb1, jmp_ebb1_ebb2));
        assert_eq!(dt.idom(ebb0).unwrap(), (ebb1, br_ebb1_ebb0));
    }
}
|
||||
@@ -1,275 +0,0 @@
|
||||
//! Densely numbered entity references as mapping keys.
|
||||
//!
|
||||
//! This module defines an `EntityRef` trait that should be implemented by reference types wrapping
|
||||
//! a small integer index. The `EntityMap` data structure uses the dense index space to implement a
|
||||
//! map with a vector. There are primary and secondary entity maps:
|
||||
//!
|
||||
//! - A *primary* `EntityMap` contains the main definition of an entity, and it can be used to
|
||||
//! allocate new entity references with the `push` method. The values stores in a primary map
|
||||
//! must implement the `PrimaryEntityData` marker trait.
|
||||
//! - A *secondary* `EntityMap` contains additional data about entities kept in a primary map. The
|
||||
//! values need to implement `Clone + Default` traits so the map can be grown with `ensure`.
|
||||
|
||||
use std::vec::Vec;
|
||||
use std::default::Default;
|
||||
use std::marker::PhantomData;
|
||||
use std::ops::{Index, IndexMut};
|
||||
|
||||
/// A type wrapping a small integer index should implement `EntityRef` so it can be used as the key
|
||||
/// of an `EntityMap`.
|
||||
pub trait EntityRef: Copy + Eq {
|
||||
/// Create a new entity reference from a small integer.
|
||||
/// This should crash if the requested index is not representable.
|
||||
fn new(usize) -> Self;
|
||||
|
||||
/// Get the index that was used to create this entity reference.
|
||||
fn index(self) -> usize;
|
||||
|
||||
/// Convert an `EntityRef` to an `Optional<EntityRef>` by using the default value as the null
|
||||
/// reference.
|
||||
///
|
||||
/// Entity references are often used in compact data structures like linked lists where a
|
||||
/// sentinel 'null' value is needed. Normally we would use an `Optional` for that, but
|
||||
/// currently that uses twice the memory of a plain `EntityRef`.
|
||||
///
|
||||
/// This method is called `wrap()` because it is the inverse of `unwrap()`.
|
||||
fn wrap(self) -> Option<Self>
|
||||
where Self: Default
|
||||
{
|
||||
if self == Self::default() {
|
||||
None
|
||||
} else {
|
||||
Some(self)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A mapping `K -> V` for densely indexed entity references.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct EntityMap<K, V>
|
||||
where K: EntityRef
|
||||
{
|
||||
elems: Vec<V>,
|
||||
unused: PhantomData<K>,
|
||||
}
|
||||
|
||||
/// Shared `EntityMap` implementation for all value types.
|
||||
impl<K, V> EntityMap<K, V>
|
||||
where K: EntityRef
|
||||
{
|
||||
/// Create a new empty map.
|
||||
pub fn new() -> Self {
|
||||
EntityMap {
|
||||
elems: Vec::new(),
|
||||
unused: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if `k` is a valid key in the map.
|
||||
pub fn is_valid(&self, k: K) -> bool {
|
||||
k.index() < self.elems.len()
|
||||
}
|
||||
|
||||
/// Get the element at `k` if it exists.
|
||||
pub fn get(&self, k: K) -> Option<&V> {
|
||||
self.elems.get(k.index())
|
||||
}
|
||||
|
||||
/// Is this map completely empty?
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.elems.is_empty()
|
||||
}
|
||||
|
||||
/// Remove all entries from this map.
|
||||
pub fn clear(&mut self) {
|
||||
self.elems.clear()
|
||||
}
|
||||
|
||||
/// Iterate over all the keys in this map.
|
||||
pub fn keys(&self) -> Keys<K> {
|
||||
Keys {
|
||||
pos: 0,
|
||||
len: self.elems.len(),
|
||||
unused: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A marker trait for data stored in primary entity maps.
|
||||
///
|
||||
/// A primary entity map can be used to allocate new entity references with the `push` method. It
|
||||
/// is important that entity references can't be created anywhere else, so the data stored in a
|
||||
/// primary entity map must be tagged as `PrimaryEntityData` to unlock the `push` method.
|
||||
pub trait PrimaryEntityData {}
|
||||
|
||||
/// Additional methods for primary entry maps only.
|
||||
///
|
||||
/// These are identified by the `PrimaryEntityData` marker trait.
|
||||
impl<K, V> EntityMap<K, V>
|
||||
where K: EntityRef,
|
||||
V: PrimaryEntityData
|
||||
{
|
||||
/// Get the key that will be assigned to the next pushed value.
|
||||
pub fn next_key(&self) -> K {
|
||||
K::new(self.elems.len())
|
||||
}
|
||||
|
||||
/// Append `v` to the mapping, assigning a new key which is returned.
|
||||
pub fn push(&mut self, v: V) -> K {
|
||||
let k = self.next_key();
|
||||
self.elems.push(v);
|
||||
k
|
||||
}
|
||||
|
||||
/// Get the total number of entity references created.
|
||||
pub fn len(&self) -> usize {
|
||||
self.elems.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Additional methods for value types that implement `Clone` and `Default`.
|
||||
///
|
||||
/// When the value type implements these additional traits, the `EntityMap` can be resized
|
||||
/// explicitly with the `ensure` method.
|
||||
///
|
||||
/// Use this for secondary maps that are mapping keys created by another primary map.
|
||||
impl<K, V> EntityMap<K, V>
|
||||
where K: EntityRef,
|
||||
V: Clone + Default
|
||||
{
|
||||
/// Create a new secondary `EntityMap` that is prepared to hold `n` elements.
|
||||
///
|
||||
/// Use this when the length of the primary map is known:
|
||||
/// ```
|
||||
/// let secondary_map = EntityMap::with_capacity(primary_map.len());
|
||||
/// ```
|
||||
pub fn with_capacity(n: usize) -> Self {
|
||||
let mut map = EntityMap {
|
||||
elems: Vec::with_capacity(n),
|
||||
unused: PhantomData,
|
||||
};
|
||||
map.elems.resize(n, V::default());
|
||||
map
|
||||
}
|
||||
|
||||
/// Resize the map to have `n` entries by adding default entries as needed.
|
||||
pub fn resize(&mut self, n: usize) {
|
||||
self.elems.resize(n, V::default());
|
||||
}
|
||||
|
||||
/// Ensure that `k` is a valid key but adding default entries if necesssary.
|
||||
///
|
||||
/// Return a mutable reference to the corresponding entry.
|
||||
pub fn ensure(&mut self, k: K) -> &mut V {
|
||||
if !self.is_valid(k) {
|
||||
self.resize(k.index() + 1)
|
||||
}
|
||||
&mut self.elems[k.index()]
|
||||
}
|
||||
}
|
||||
|
||||
/// Immutable indexing into an `EntityMap`.
|
||||
/// The indexed value must be in the map, either because it was created by `push`, or the key was
|
||||
/// passed to `ensure`.
|
||||
impl<K, V> Index<K> for EntityMap<K, V>
|
||||
where K: EntityRef
|
||||
{
|
||||
type Output = V;
|
||||
|
||||
fn index(&self, k: K) -> &V {
|
||||
&self.elems[k.index()]
|
||||
}
|
||||
}
|
||||
|
||||
/// Mutable indexing into an `EntityMap`.
|
||||
/// Use `ensure` instead if the key is not known to be valid.
|
||||
impl<K, V> IndexMut<K> for EntityMap<K, V>
|
||||
where K: EntityRef
|
||||
{
|
||||
fn index_mut(&mut self, k: K) -> &mut V {
|
||||
&mut self.elems[k.index()]
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterate over all keys in order.
|
||||
pub struct Keys<K>
|
||||
where K: EntityRef
|
||||
{
|
||||
pos: usize,
|
||||
len: usize,
|
||||
unused: PhantomData<K>,
|
||||
}
|
||||
|
||||
impl<K> Iterator for Keys<K>
|
||||
where K: EntityRef
|
||||
{
|
||||
type Item = K;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if self.pos < self.len {
|
||||
let k = K::new(self.pos);
|
||||
self.pos += 1;
|
||||
Some(k)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
// EntityRef impl for testing.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
struct E(u32);
|
||||
|
||||
impl EntityRef for E {
|
||||
fn new(i: usize) -> Self {
|
||||
E(i as u32)
|
||||
}
|
||||
fn index(self) -> usize {
|
||||
self.0 as usize
|
||||
}
|
||||
}
|
||||
|
||||
impl PrimaryEntityData for isize {}
|
||||
|
||||
#[test]
|
||||
fn basic() {
|
||||
let r0 = E(0);
|
||||
let r1 = E(1);
|
||||
let r2 = E(2);
|
||||
let mut m = EntityMap::new();
|
||||
|
||||
let v: Vec<E> = m.keys().collect();
|
||||
assert_eq!(v, []);
|
||||
|
||||
assert!(!m.is_valid(r0));
|
||||
m.ensure(r2);
|
||||
m[r2] = 3;
|
||||
assert!(m.is_valid(r1));
|
||||
m[r1] = 5;
|
||||
|
||||
assert_eq!(m[r1], 5);
|
||||
assert_eq!(m[r2], 3);
|
||||
|
||||
let v: Vec<E> = m.keys().collect();
|
||||
assert_eq!(v, [r0, r1, r2]);
|
||||
|
||||
let shared = &m;
|
||||
assert_eq!(shared[r0], 0);
|
||||
assert_eq!(shared[r1], 5);
|
||||
assert_eq!(shared[r2], 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn push() {
|
||||
let mut m = EntityMap::new();
|
||||
let k1: E = m.push(12);
|
||||
let k2 = m.push(33);
|
||||
|
||||
assert_eq!(m[k1], 12);
|
||||
assert_eq!(m[k2], 33);
|
||||
}
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
//! Cretonne instruction builder.
|
||||
//!
|
||||
//! A `Builder` provides a convenient interface for inserting instructions into a Cretonne
|
||||
//! function. Many of its methods are generated from the meta language instruction definitions.
|
||||
|
||||
use ir::{types, instructions};
|
||||
use ir::{InstructionData, DataFlowGraph, Cursor};
|
||||
use ir::{Opcode, Type, Inst, Value, Ebb, JumpTable, VariableArgs, FuncRef};
|
||||
use ir::immediates::{Imm64, Uimm8, Ieee32, Ieee64, ImmVector};
|
||||
use ir::condcodes::{IntCC, FloatCC};
|
||||
|
||||
/// Instruction builder.
|
||||
///
|
||||
/// A `Builder` holds mutable references to a data flow graph and a layout cursor. It provides
|
||||
/// convenience method for creating and inserting instructions at the current cursor position.
|
||||
pub struct Builder<'a> {
|
||||
pub dfg: &'a mut DataFlowGraph,
|
||||
pub pos: &'a mut Cursor<'a>,
|
||||
}
|
||||
|
||||
impl<'a> Builder<'a> {
|
||||
/// Create a new builder which inserts instructions at `pos`.
|
||||
/// The `dfg` and `pos.layout` references should be from the same `Function`.
|
||||
pub fn new(dfg: &'a mut DataFlowGraph, pos: &'a mut Cursor<'a>) -> Builder<'a> {
|
||||
Builder {
|
||||
dfg: dfg,
|
||||
pos: pos,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create and insert an EBB. Further instructions will be inserted into the new EBB.
|
||||
pub fn ebb(&mut self) -> Ebb {
|
||||
let ebb = self.dfg.make_ebb();
|
||||
self.insert_ebb(ebb);
|
||||
ebb
|
||||
}
|
||||
|
||||
/// Insert an existing EBB at the current position. Further instructions will be inserted into
|
||||
/// the new EBB.
|
||||
pub fn insert_ebb(&mut self, ebb: Ebb) {
|
||||
self.pos.insert_ebb(ebb);
|
||||
}
|
||||
|
||||
// Create and insert an instruction.
|
||||
// This method is used by the generated format-specific methods.
|
||||
fn insert_inst(&mut self, data: InstructionData) -> Inst {
|
||||
let inst = self.dfg.make_inst(data);
|
||||
self.pos.insert_inst(inst);
|
||||
inst
|
||||
}
|
||||
}
|
||||
|
||||
// Include code generated by `meta/gen_instr.py`. This file includes `Builder` methods per
|
||||
// instruction format and per opcode for inserting instructions.
|
||||
include!(concat!(env!("OUT_DIR"), "/builder.rs"));
|
||||
@@ -1,325 +0,0 @@
|
||||
//! Condition codes for the Cretonne code generator.
//!
//! A condition code here is an enumerated type that determines how to compare two numbers. There
//! are different rules for comparing integers and floating point numbers, so they use different
//! condition codes.

use std::fmt::{self, Display, Formatter};
use std::str::FromStr;

/// Common traits of condition codes.
pub trait CondCode: Copy {
    /// Get the inverse condition code of `self`.
    ///
    /// The inverse condition code produces the opposite result for all comparisons.
    /// That is, `cmp CC, x, y` is true if and only if `cmp CC.inverse(), x, y` is false.
    fn inverse(self) -> Self;

    /// Get the reversed condition code for `self`.
    ///
    /// The reversed condition code produces the same result as swapping `x` and `y` in the
    /// comparison. That is, `cmp CC, x, y` is the same as `cmp CC.reverse(), y, x`.
    fn reverse(self) -> Self;
}

/// Condition code for comparing integers.
///
/// This condition code is used by the `icmp` instruction to compare integer values. There are
/// separate codes for comparing the integers as signed or unsigned numbers where it makes a
/// difference.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum IntCC {
    Equal,
    NotEqual,
    SignedLessThan,
    SignedGreaterThanOrEqual,
    SignedGreaterThan,
    SignedLessThanOrEqual,
    UnsignedLessThan,
    UnsignedGreaterThanOrEqual,
    UnsignedGreaterThan,
    UnsignedLessThanOrEqual,
}

impl CondCode for IntCC {
    fn inverse(self) -> Self {
        use self::IntCC::*;
        // Each code maps to its logical negation.
        match self {
            Equal => NotEqual,
            NotEqual => Equal,
            SignedLessThan => SignedGreaterThanOrEqual,
            SignedGreaterThanOrEqual => SignedLessThan,
            SignedGreaterThan => SignedLessThanOrEqual,
            SignedLessThanOrEqual => SignedGreaterThan,
            UnsignedLessThan => UnsignedGreaterThanOrEqual,
            UnsignedGreaterThanOrEqual => UnsignedLessThan,
            UnsignedGreaterThan => UnsignedLessThanOrEqual,
            UnsignedLessThanOrEqual => UnsignedGreaterThan,
        }
    }

    fn reverse(self) -> Self {
        use self::IntCC::*;
        // Equality codes are symmetric; ordering codes swap direction.
        match self {
            Equal => Equal,
            NotEqual => NotEqual,
            SignedGreaterThan => SignedLessThan,
            SignedGreaterThanOrEqual => SignedLessThanOrEqual,
            SignedLessThan => SignedGreaterThan,
            SignedLessThanOrEqual => SignedGreaterThanOrEqual,
            UnsignedGreaterThan => UnsignedLessThan,
            UnsignedGreaterThanOrEqual => UnsignedLessThanOrEqual,
            UnsignedLessThan => UnsignedGreaterThan,
            UnsignedLessThanOrEqual => UnsignedGreaterThanOrEqual,
        }
    }
}

impl Display for IntCC {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        use self::IntCC::*;
        // Mnemonics match the textual IL syntax accepted by `from_str` below.
        f.write_str(match *self {
            Equal => "eq",
            NotEqual => "ne",
            SignedGreaterThan => "sgt",
            SignedGreaterThanOrEqual => "sge",
            SignedLessThan => "slt",
            SignedLessThanOrEqual => "sle",
            UnsignedGreaterThan => "ugt",
            UnsignedGreaterThanOrEqual => "uge",
            UnsignedLessThan => "ult",
            UnsignedLessThanOrEqual => "ule",
        })
    }
}

impl FromStr for IntCC {
    type Err = ();

    /// Parse the condition mnemonics produced by `Display`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        use self::IntCC::*;
        match s {
            "eq" => Ok(Equal),
            "ne" => Ok(NotEqual),
            "sge" => Ok(SignedGreaterThanOrEqual),
            "sgt" => Ok(SignedGreaterThan),
            "sle" => Ok(SignedLessThanOrEqual),
            "slt" => Ok(SignedLessThan),
            "uge" => Ok(UnsignedGreaterThanOrEqual),
            "ugt" => Ok(UnsignedGreaterThan),
            "ule" => Ok(UnsignedLessThanOrEqual),
            "ult" => Ok(UnsignedLessThan),
            _ => Err(()),
        }
    }
}
|
||||
|
||||
/// Condition code for comparing floating point numbers.
|
||||
///
|
||||
/// This condition code is used by the `fcmp` instruction to compare floating point values. Two
|
||||
/// IEEE floating point values relate in exactly one of four ways:
|
||||
///
|
||||
/// 1. `UN` - unordered when either value is NaN.
|
||||
/// 2. `EQ` - equal numerical value.
|
||||
/// 3. `LT` - `x` is less than `y`.
|
||||
/// 4. `GT` - `x` is greater than `y`.
|
||||
///
|
||||
/// Note that `0.0` and `-0.0` relate as `EQ` because they both represent the number 0.
|
||||
///
|
||||
/// The condition codes described here are used to produce a single boolean value from the
|
||||
/// comparison. The 14 condition codes here cover every possible combination of the relation above
|
||||
/// except the impossible `!UN & !EQ & !LT & !GT` and the always true `UN | EQ | LT | GT`.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||
pub enum FloatCC {
|
||||
Ordered, // EQ | LT | GT
|
||||
Unordered, // UN
|
||||
|
||||
Equal, // EQ
|
||||
// The C '!=' operator is the inverse of '==': NotEqual.
|
||||
NotEqual, // UN | LT | GT
|
||||
OrderedNotEqual, // LT | GT
|
||||
UnorderedOrEqual, // UN | EQ
|
||||
|
||||
LessThan, // LT
|
||||
LessThanOrEqual, // LT | EQ
|
||||
GreaterThan, // GT
|
||||
GreaterThanOrEqual, // GT | EQ
|
||||
|
||||
UnorderedOrLessThan, // UN | LT
|
||||
UnorderedOrLessThanOrEqual, // UN | LT | EQ
|
||||
UnorderedOrGreaterThan, // UN | GT
|
||||
UnorderedOrGreaterThanOrEqual, // UN | GT | EQ
|
||||
}
|
||||
|
||||
impl CondCode for FloatCC {
|
||||
fn inverse(self) -> Self {
|
||||
use self::FloatCC::*;
|
||||
match self {
|
||||
Ordered => Unordered,
|
||||
Unordered => Ordered,
|
||||
Equal => NotEqual,
|
||||
NotEqual => Equal,
|
||||
OrderedNotEqual => UnorderedOrEqual,
|
||||
UnorderedOrEqual => OrderedNotEqual,
|
||||
LessThan => UnorderedOrGreaterThanOrEqual,
|
||||
LessThanOrEqual => UnorderedOrGreaterThan,
|
||||
GreaterThan => UnorderedOrLessThanOrEqual,
|
||||
GreaterThanOrEqual => UnorderedOrLessThan,
|
||||
UnorderedOrLessThan => GreaterThanOrEqual,
|
||||
UnorderedOrLessThanOrEqual => GreaterThan,
|
||||
UnorderedOrGreaterThan => LessThanOrEqual,
|
||||
UnorderedOrGreaterThanOrEqual => LessThan,
|
||||
}
|
||||
}
|
||||
fn reverse(self) -> Self {
|
||||
use self::FloatCC::*;
|
||||
match self {
|
||||
Ordered => Ordered,
|
||||
Unordered => Unordered,
|
||||
Equal => Equal,
|
||||
NotEqual => NotEqual,
|
||||
OrderedNotEqual => OrderedNotEqual,
|
||||
UnorderedOrEqual => UnorderedOrEqual,
|
||||
LessThan => GreaterThan,
|
||||
LessThanOrEqual => GreaterThanOrEqual,
|
||||
GreaterThan => LessThan,
|
||||
GreaterThanOrEqual => LessThanOrEqual,
|
||||
UnorderedOrLessThan => UnorderedOrGreaterThan,
|
||||
UnorderedOrLessThanOrEqual => UnorderedOrGreaterThanOrEqual,
|
||||
UnorderedOrGreaterThan => UnorderedOrLessThan,
|
||||
UnorderedOrGreaterThanOrEqual => UnorderedOrLessThanOrEqual,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for FloatCC {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
use self::FloatCC::*;
|
||||
f.write_str(match self {
|
||||
&Ordered => "ord",
|
||||
&Unordered => "uno",
|
||||
&Equal => "eq",
|
||||
&NotEqual => "ne",
|
||||
&OrderedNotEqual => "one",
|
||||
&UnorderedOrEqual => "ueq",
|
||||
&LessThan => "lt",
|
||||
&LessThanOrEqual => "le",
|
||||
&GreaterThan => "gt",
|
||||
&GreaterThanOrEqual => "ge",
|
||||
&UnorderedOrLessThan => "ult",
|
||||
&UnorderedOrLessThanOrEqual => "ule",
|
||||
&UnorderedOrGreaterThan => "ugt",
|
||||
&UnorderedOrGreaterThanOrEqual => "uge",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for FloatCC {
|
||||
type Err = ();
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
use self::FloatCC::*;
|
||||
match s {
|
||||
"ord" => Ok(Ordered),
|
||||
"uno" => Ok(Unordered),
|
||||
"eq" => Ok(Equal),
|
||||
"ne" => Ok(NotEqual),
|
||||
"one" => Ok(OrderedNotEqual),
|
||||
"ueq" => Ok(UnorderedOrEqual),
|
||||
"lt" => Ok(LessThan),
|
||||
"le" => Ok(LessThanOrEqual),
|
||||
"gt" => Ok(GreaterThan),
|
||||
"ge" => Ok(GreaterThanOrEqual),
|
||||
"ult" => Ok(UnorderedOrLessThan),
|
||||
"ule" => Ok(UnorderedOrLessThanOrEqual),
|
||||
"ugt" => Ok(UnorderedOrGreaterThan),
|
||||
"uge" => Ok(UnorderedOrGreaterThanOrEqual),
|
||||
_ => Err(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Every `IntCC` variant, for exhaustive property checks.
    static INT_ALL: [IntCC; 10] = [IntCC::Equal,
                                   IntCC::NotEqual,
                                   IntCC::SignedLessThan,
                                   IntCC::SignedGreaterThanOrEqual,
                                   IntCC::SignedGreaterThan,
                                   IntCC::SignedLessThanOrEqual,
                                   IntCC::UnsignedLessThan,
                                   IntCC::UnsignedGreaterThanOrEqual,
                                   IntCC::UnsignedGreaterThan,
                                   IntCC::UnsignedLessThanOrEqual];

    // `inverse` is a fixpoint-free involution on integer condition codes.
    #[test]
    fn int_inverse() {
        for &cc in &INT_ALL {
            let inv = cc.inverse();
            assert!(cc != inv);
            assert_eq!(inv.inverse(), cc);
        }
    }

    // `reverse` is an involution (it may have fixpoints, e.g. `Equal`).
    #[test]
    fn int_reverse() {
        for &cc in &INT_ALL {
            assert_eq!(cc.reverse().reverse(), cc);
        }
    }

    // `Display` and `FromStr` must round-trip.
    #[test]
    fn int_display() {
        for &cc in &INT_ALL {
            assert_eq!(cc.to_string().parse(), Ok(cc));
        }
    }

    // Every `FloatCC` variant, for exhaustive property checks.
    static FLOAT_ALL: [FloatCC; 14] = [FloatCC::Ordered,
                                       FloatCC::Unordered,
                                       FloatCC::Equal,
                                       FloatCC::NotEqual,
                                       FloatCC::OrderedNotEqual,
                                       FloatCC::UnorderedOrEqual,
                                       FloatCC::LessThan,
                                       FloatCC::LessThanOrEqual,
                                       FloatCC::GreaterThan,
                                       FloatCC::GreaterThanOrEqual,
                                       FloatCC::UnorderedOrLessThan,
                                       FloatCC::UnorderedOrLessThanOrEqual,
                                       FloatCC::UnorderedOrGreaterThan,
                                       FloatCC::UnorderedOrGreaterThanOrEqual];

    // `inverse` is a fixpoint-free involution on float condition codes.
    #[test]
    fn float_inverse() {
        for &cc in &FLOAT_ALL {
            let inv = cc.inverse();
            assert!(cc != inv);
            assert_eq!(inv.inverse(), cc);
        }
    }

    // `reverse` is an involution (it may have fixpoints, e.g. `Ordered`).
    #[test]
    fn float_reverse() {
        for &cc in &FLOAT_ALL {
            assert_eq!(cc.reverse().reverse(), cc);
        }
    }

    // `Display` and `FromStr` must round-trip.
    #[test]
    fn float_display() {
        for &cc in &FLOAT_ALL {
            assert_eq!(cc.to_string().parse(), Ok(cc));
        }
    }
}
|
||||
@@ -1,443 +0,0 @@
|
||||
//! Data flow graph tracking Instructions, Values, and EBBs.
|
||||
|
||||
use ir::{Ebb, Inst, Value, Type};
|
||||
use ir::entities::{NO_VALUE, ExpandedValue};
|
||||
use ir::instructions::InstructionData;
|
||||
use entity_map::{EntityMap, PrimaryEntityData};
|
||||
|
||||
use std::ops::{Index, IndexMut};
|
||||
use std::u16;
|
||||
|
||||
/// A data flow graph defines all instructions and extended basic blocks in a function as well as
/// the data flow dependencies between them. The DFG also tracks values which can be either
/// instruction results or EBB arguments.
///
/// The layout of EBBs in the function and of instructions in each EBB is recorded by the
/// `FunctionLayout` data structure which form the other half of the function representation.
///
#[derive(Clone)]
pub struct DataFlowGraph {
    /// Data about all of the instructions in the function, including opcodes and operands.
    /// The instructions in this map are not in program order. That is tracked by `Layout`, along
    /// with the EBB containing each instruction.
    insts: EntityMap<Inst, InstructionData>,

    /// Extended basic blocks in the function and their arguments.
    /// This map is not in program order. That is handled by `Layout`, and so is the sequence of
    /// instructions contained in each EBB.
    ebbs: EntityMap<Ebb, EbbData>,

    /// Extended value table. Most `Value` references refer directly to their defining instruction.
    /// Others index into this table.
    ///
    /// This is implemented directly with a `Vec` rather than an `EntityMap<Value, ...>` because
    /// the Value entity references can refer to two things -- an instruction or an extended value.
    extended_values: Vec<ValueData>,
}
|
||||
|
||||
// Marker trait impls from `entity_map`: instruction and EBB data are stored directly in the
// DFG's primary `EntityMap` tables above.
impl PrimaryEntityData for InstructionData {}
impl PrimaryEntityData for EbbData {}
|
||||
|
||||
impl DataFlowGraph {
|
||||
/// Create a new empty `DataFlowGraph`.
|
||||
pub fn new() -> DataFlowGraph {
|
||||
DataFlowGraph {
|
||||
insts: EntityMap::new(),
|
||||
ebbs: EntityMap::new(),
|
||||
extended_values: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the total number of instructions created in this function, whether they are currently
|
||||
/// inserted in the layout or not.
|
||||
///
|
||||
/// This is intended for use with `EntityMap::with_capacity`.
|
||||
pub fn num_insts(&self) -> usize {
|
||||
self.insts.len()
|
||||
}
|
||||
|
||||
/// Get the total number of extended basic blocks created in this function, whether they are
|
||||
/// currently inserted in the layout or not.
|
||||
///
|
||||
/// This is intended for use with `EntityMap::with_capacity`.
|
||||
pub fn num_ebbs(&self) -> usize {
|
||||
self.ebbs.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Handling values.
///
/// Values are either EBB arguments or instruction results.
impl DataFlowGraph {
    // Allocate an extended value entry.
    //
    // Returns a `Table` value reference pointing at the newly pushed `extended_values` entry.
    fn make_value(&mut self, data: ValueData) -> Value {
        let vref = Value::new_table(self.extended_values.len());
        self.extended_values.push(data);
        vref
    }

    /// Get the type of a value.
    ///
    /// Panics if `v` is `NO_VALUE`.
    pub fn value_type(&self, v: Value) -> Type {
        use ir::entities::ExpandedValue::*;
        match v.expand() {
            // A direct value is the first result of its defining instruction.
            Direct(i) => self.insts[i].first_type(),
            Table(i) => {
                match self.extended_values[i] {
                    ValueData::Inst { ty, .. } => ty,
                    ValueData::Arg { ty, .. } => ty,
                }
            }
            None => panic!("NO_VALUE has no type"),
        }
    }

    /// Get the definition of a value.
    ///
    /// This is either the instruction that defined it or the Ebb that has the value as an
    /// argument.
    ///
    /// Panics if `v` is `NO_VALUE`.
    pub fn value_def(&self, v: Value) -> ValueDef {
        use ir::entities::ExpandedValue::*;
        match v.expand() {
            // Direct values are always result number 0 of their instruction.
            Direct(inst) => ValueDef::Res(inst, 0),
            Table(idx) => {
                match self.extended_values[idx] {
                    ValueData::Inst { inst, num, .. } => ValueDef::Res(inst, num as usize),
                    ValueData::Arg { ebb, num, .. } => ValueDef::Arg(ebb, num as usize),
                }
            }
            None => panic!("NO_VALUE has no def"),
        }
    }
}
|
||||
|
||||
/// Where did a value come from?
#[derive(Debug, PartialEq, Eq)]
pub enum ValueDef {
    /// Value is the n'th result of an instruction.
    Res(Inst, usize),
    /// Value is the n'th argument to an EBB.
    Arg(Ebb, usize),
}

// Internal table storage for extended values.
//
// Entries form intrusive singly linked lists through their `next` fields: all secondary results
// of one instruction, or all arguments of one EBB.
#[derive(Clone)]
enum ValueData {
    // Value is defined by an instruction, but it is not the first result.
    Inst {
        ty: Type,
        num: u16, // Result number starting from 0.
        inst: Inst,
        next: Value, // Next result defined by `def`.
    },

    // Value is an EBB argument.
    Arg {
        ty: Type,
        num: u16, // Argument number, starting from 0.
        ebb: Ebb,
        next: Value, // Next argument to `ebb`.
    },
}
|
||||
|
||||
/// Iterate through a list of related value references, such as:
///
/// - All results defined by an instruction. See `DataFlowGraph::inst_results`.
/// - All arguments to an EBB. See `DataFlowGraph::ebb_args`.
///
/// A value iterator borrows a `DataFlowGraph` reference.
pub struct Values<'a> {
    // Graph owning the extended value table that `cur` may index into.
    dfg: &'a DataFlowGraph,
    // Next value to yield, or `NO_VALUE` when the iterator is exhausted.
    cur: Value,
}

impl<'a> Iterator for Values<'a> {
    type Item = Value;

    fn next(&mut self) -> Option<Self::Item> {
        let prev = self.cur;

        // Advance self.cur to the next value, or NO_VALUE.
        self.cur = match prev.expand() {
            // A direct value is an instruction's first result; its successor is the
            // instruction's second result, if any (default is NO_VALUE).
            ExpandedValue::Direct(inst) => self.dfg.insts[inst].second_result().unwrap_or_default(),
            ExpandedValue::Table(index) => {
                match self.dfg.extended_values[index] {
                    ValueData::Inst { next, .. } => next,
                    ValueData::Arg { next, .. } => next,
                }
            }
            ExpandedValue::None => return None,
        };

        Some(prev)
    }
}
|
||||
|
||||
/// Instructions.
///
impl DataFlowGraph {
    /// Create a new instruction.
    ///
    /// The type of the first result is indicated by `data.ty`. If the instruction produces
    /// multiple results, also call `make_inst_results` to allocate value table entries.
    pub fn make_inst(&mut self, data: InstructionData) -> Inst {
        self.insts.push(data)
    }

    /// Create result values for an instruction that produces multiple results.
    ///
    /// Instructions that produce 0 or 1 result values only need to be created with `make_inst`. If
    /// the instruction may produce more than 1 result, call `make_inst_results` to allocate
    /// value table entries for the additional results.
    ///
    /// The result value types are determined from the instruction's value type constraints and the
    /// provided `ctrl_typevar` type for polymorphic instructions. For non-polymorphic
    /// instructions, `ctrl_typevar` is ignored, and `VOID` can be used.
    ///
    /// The type of the first result value is also set, even if it was already set in the
    /// `InstructionData` passed to `make_inst`. If this function is called with a single-result
    /// instruction, that is the only effect.
    ///
    /// Returns the number of results produced by the instruction.
    pub fn make_inst_results(&mut self, inst: Inst, ctrl_typevar: Type) -> usize {
        let constraints = self.insts[inst].opcode().constraints();
        let fixed_results = constraints.fixed_results();

        // Additional values form a linked list starting from the second result value. Generate
        // the list backwards so we don't have to modify value table entries in place. (This
        // causes additional result values to be numbered backwards which is not the aesthetic
        // choice, but since it is only visible in extremely rare instructions with 3+ results,
        // we don't care).
        let mut head = NO_VALUE;
        let mut first_type = Type::default();

        // TBD: Function call return values for direct and indirect function calls.

        if fixed_results > 0 {
            // Results 1..n live in the extended value table; result 0 stays implicit in `inst`.
            for res_idx in (1..fixed_results).rev() {
                head = self.make_value(ValueData::Inst {
                    ty: constraints.result_type(res_idx, ctrl_typevar),
                    num: res_idx as u16,
                    inst: inst,
                    next: head,
                });
            }
            first_type = constraints.result_type(0, ctrl_typevar);
        }

        // Update the second_result pointer in `inst`.
        if head != NO_VALUE {
            *self.insts[inst]
                .second_result_mut()
                .expect("instruction format doesn't allow multiple results") = head;
        }
        *self.insts[inst].first_type_mut() = first_type;

        fixed_results
    }

    /// Get the first result of an instruction.
    ///
    /// If `Inst` doesn't produce any results, this returns a `Value` with a `VOID` type.
    pub fn first_result(&self, inst: Inst) -> Value {
        Value::new_direct(inst)
    }

    /// Iterate through all the results of an instruction.
    pub fn inst_results<'a>(&'a self, inst: Inst) -> Values<'a> {
        Values {
            dfg: self,
            // A void first result means the instruction has no results at all, so the
            // iterator starts out exhausted.
            cur: if self.insts[inst].first_type().is_void() {
                NO_VALUE
            } else {
                Value::new_direct(inst)
            },
        }
    }
}
|
||||
|
||||
/// Allow immutable access to instructions via indexing.
|
||||
impl Index<Inst> for DataFlowGraph {
|
||||
type Output = InstructionData;
|
||||
|
||||
fn index<'a>(&'a self, inst: Inst) -> &'a InstructionData {
|
||||
&self.insts[inst]
|
||||
}
|
||||
}
|
||||
|
||||
/// Allow mutable access to instructions via indexing.
|
||||
impl IndexMut<Inst> for DataFlowGraph {
|
||||
fn index_mut<'a>(&'a mut self, inst: Inst) -> &'a mut InstructionData {
|
||||
&mut self.insts[inst]
|
||||
}
|
||||
}
|
||||
|
||||
/// Extended basic blocks.
impl DataFlowGraph {
    /// Create a new basic block.
    pub fn make_ebb(&mut self) -> Ebb {
        self.ebbs.push(EbbData::new())
    }

    /// Get the number of arguments on `ebb`.
    pub fn num_ebb_args(&self, ebb: Ebb) -> usize {
        // The count is not stored separately; it is recovered from the 0-based `num` of the
        // last argument in the EBB's linked argument list.
        let last_arg = self.ebbs[ebb].last_arg;
        match last_arg.expand() {
            ExpandedValue::None => 0,
            ExpandedValue::Table(idx) => {
                if let ValueData::Arg { num, .. } = self.extended_values[idx] {
                    num as usize + 1
                } else {
                    panic!("inconsistent value table entry for EBB arg");
                }
            }
            ExpandedValue::Direct(_) => panic!("inconsistent value table entry for EBB arg"),
        }
    }

    /// Append an argument with type `ty` to `ebb`.
    ///
    /// Returns the new argument value. Panics if the EBB already has `u16::MAX + 1` arguments.
    pub fn append_ebb_arg(&mut self, ebb: Ebb, ty: Type) -> Value {
        let num_args = self.num_ebb_args(ebb);
        assert!(num_args <= u16::MAX as usize, "Too many arguments to EBB");
        let val = self.make_value(ValueData::Arg {
            ty: ty,
            ebb: ebb,
            num: num_args as u16,
            next: NO_VALUE,
        });
        let last_arg = self.ebbs[ebb].last_arg;
        match last_arg.expand() {
            // If last_arg is NO_VALUE, we're adding the first EBB argument.
            ExpandedValue::None => {
                self.ebbs[ebb].first_arg = val;
            }
            // Append to linked list of arguments.
            ExpandedValue::Table(idx) => {
                if let ValueData::Arg { ref mut next, .. } = self.extended_values[idx] {
                    *next = val;
                } else {
                    panic!("inconsistent value table entry for EBB arg");
                }
            }
            ExpandedValue::Direct(_) => panic!("inconsistent value table entry for EBB arg"),
        };
        self.ebbs[ebb].last_arg = val;
        val
    }

    /// Iterate through the arguments to an EBB.
    pub fn ebb_args<'a>(&'a self, ebb: Ebb) -> Values<'a> {
        Values {
            dfg: self,
            cur: self.ebbs[ebb].first_arg,
        }
    }
}
|
||||
|
||||
// Contents of an extended basic block.
//
// Arguments for an extended basic block are values that dominate everything in the EBB. All
// branches to this EBB must provide matching arguments, and the arguments to the entry EBB must
// match the function arguments.
#[derive(Clone)]
struct EbbData {
    // First argument to this EBB, or `NO_VALUE` if the block has no arguments.
    //
    // The arguments are all ValueData::Argument entries that form a linked list from `first_arg`
    // to `last_arg`.
    first_arg: Value,

    // Last argument to this EBB, or `NO_VALUE` if the block has no arguments.
    last_arg: Value,
}

impl EbbData {
    // Create a block with no arguments.
    fn new() -> EbbData {
        EbbData {
            first_arg: NO_VALUE,
            last_arg: NO_VALUE,
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use ir::types;
    use ir::{Opcode, InstructionData};

    // A single-result instruction: creation, indexing, and result iteration.
    #[test]
    fn make_inst() {
        let mut dfg = DataFlowGraph::new();

        let idata = InstructionData::Nullary {
            opcode: Opcode::Iconst,
            ty: types::I32,
        };
        let inst = dfg.make_inst(idata);
        assert_eq!(inst.to_string(), "inst0");

        // Immutable reference resolution.
        {
            let immdfg = &dfg;
            let ins = &immdfg[inst];
            assert_eq!(ins.opcode(), Opcode::Iconst);
            assert_eq!(ins.first_type(), types::I32);
        }

        // Result iterator.
        let mut res = dfg.inst_results(inst);
        let val = res.next().unwrap();
        assert!(res.next().is_none());

        assert_eq!(dfg.value_def(val), ValueDef::Res(inst, 0));
        assert_eq!(dfg.value_type(val), types::I32);
    }

    // An instruction with a VOID first type must yield no results.
    #[test]
    fn no_results() {
        let mut dfg = DataFlowGraph::new();

        let idata = InstructionData::Nullary {
            opcode: Opcode::Trap,
            ty: types::VOID,
        };
        let inst = dfg.make_inst(idata);

        // Result iterator should be empty.
        let mut res = dfg.inst_results(inst);
        assert_eq!(res.next(), None);
    }

    // EBB creation and the argument linked list: count, order, defs, and types.
    #[test]
    fn ebb() {
        let mut dfg = DataFlowGraph::new();

        let ebb = dfg.make_ebb();
        assert_eq!(ebb.to_string(), "ebb0");
        assert_eq!(dfg.num_ebb_args(ebb), 0);
        assert_eq!(dfg.ebb_args(ebb).next(), None);

        let arg1 = dfg.append_ebb_arg(ebb, types::F32);
        assert_eq!(arg1.to_string(), "vx0");
        assert_eq!(dfg.num_ebb_args(ebb), 1);
        {
            let mut args1 = dfg.ebb_args(ebb);
            assert_eq!(args1.next(), Some(arg1));
            assert_eq!(args1.next(), None);
        }
        let arg2 = dfg.append_ebb_arg(ebb, types::I16);
        assert_eq!(arg2.to_string(), "vx1");
        assert_eq!(dfg.num_ebb_args(ebb), 2);
        {
            let mut args2 = dfg.ebb_args(ebb);
            assert_eq!(args2.next(), Some(arg1));
            assert_eq!(args2.next(), Some(arg2));
            assert_eq!(args2.next(), None);
        }

        assert_eq!(dfg.value_def(arg1), ValueDef::Arg(ebb, 0));
        assert_eq!(dfg.value_def(arg2), ValueDef::Arg(ebb, 1));
        assert_eq!(dfg.value_type(arg1), types::F32);
        assert_eq!(dfg.value_type(arg2), types::I16);
    }
}
|
||||
@@ -1,408 +0,0 @@
|
||||
//! IL entity references.
|
||||
//!
|
||||
//! Instructions in Cretonne IL need to reference other entities in the function. This can be other
|
||||
//! parts of the function like extended basic blocks or stack slots, or it can be external entities
|
||||
//! that are declared in the function preamble in the text format.
|
||||
//!
|
||||
//! These entity references in instruction operands are not implemented as Rust references both
|
||||
//! because Rust's ownership and mutability rules make it difficult, and because 64-bit pointers
|
||||
//! take up a lot of space, and we want a compact in-memory representation. Instead, entity
|
||||
//! references are structs wrapping a `u32` index into a table in the `Function` main data
|
||||
//! structure. There is a separate index type for each entity type, so we don't lose type safety.
|
||||
//!
|
||||
//! The `entities` module defines public types for the entity references along with constants
|
||||
//! representing an invalid reference. We prefer to use `Option<EntityRef>` whenever possible, but
|
||||
//! unfortunately that type is twice as large as the 32-bit index type on its own. Thus, compact
|
||||
//! data structures use the sentinel constant, while function arguments and return values prefer
|
||||
//! the more Rust-like `Option<EntityRef>` variant.
|
||||
//!
|
||||
//! The entity references all implement the `Display` trait in a way that matches the textual IL
|
||||
//! format.
|
||||
|
||||
use entity_map::EntityRef;
|
||||
use std::default::Default;
|
||||
use std::fmt::{self, Display, Formatter};
|
||||
use std::u32;
|
||||
|
||||
/// An opaque reference to an extended basic block in a function.
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
|
||||
pub struct Ebb(u32);
|
||||
|
||||
impl EntityRef for Ebb {
|
||||
fn new(index: usize) -> Self {
|
||||
assert!(index < (u32::MAX as usize));
|
||||
Ebb(index as u32)
|
||||
}
|
||||
|
||||
fn index(self) -> usize {
|
||||
self.0 as usize
|
||||
}
|
||||
}
|
||||
|
||||
impl Ebb {
|
||||
/// Create a new EBB reference from its number. This corresponds to the ebbNN representation.
|
||||
pub fn with_number(n: u32) -> Option<Ebb> {
|
||||
if n < u32::MAX { Some(Ebb(n)) } else { None }
|
||||
}
|
||||
}
|
||||
|
||||
/// Display an `Ebb` reference as "ebb12".
|
||||
impl Display for Ebb {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
|
||||
write!(fmt, "ebb{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// A guaranteed invalid EBB reference.
|
||||
pub const NO_EBB: Ebb = Ebb(u32::MAX);
|
||||
|
||||
impl Default for Ebb {
|
||||
fn default() -> Ebb {
|
||||
NO_EBB
|
||||
}
|
||||
}
|
||||
|
||||
/// An opaque reference to an instruction in a function.
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
|
||||
pub struct Inst(u32);
|
||||
|
||||
impl EntityRef for Inst {
|
||||
fn new(index: usize) -> Self {
|
||||
assert!(index < (u32::MAX as usize));
|
||||
Inst(index as u32)
|
||||
}
|
||||
|
||||
fn index(self) -> usize {
|
||||
self.0 as usize
|
||||
}
|
||||
}
|
||||
|
||||
/// Display an `Inst` reference as "inst7".
|
||||
impl Display for Inst {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
|
||||
write!(fmt, "inst{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// A guaranteed invalid instruction reference.
|
||||
pub const NO_INST: Inst = Inst(u32::MAX);
|
||||
|
||||
impl Default for Inst {
|
||||
fn default() -> Inst {
|
||||
NO_INST
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// An opaque reference to an SSA value.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Value(u32);

// Value references can either reference an instruction directly, or they can refer to the extended
// value table.
pub enum ExpandedValue {
    // This is the first value produced by the referenced instruction.
    Direct(Inst),

    // This value is described in the extended value table.
    Table(usize),

    // This is NO_VALUE.
    None,
}

impl Value {
    /// Create a `Direct` value from its number representation.
    /// This is the number in the vNN notation.
    ///
    /// Direct values are encoded as even numbers; `u32::MAX` is reserved for `NO_VALUE`.
    pub fn direct_with_number(n: u32) -> Option<Value> {
        if n < u32::MAX / 2 {
            let encoding = n * 2;
            assert!(encoding < u32::MAX);
            Some(Value(encoding))
        } else {
            None
        }
    }

    /// Create a `Table` value from its number representation.
    /// This is the number in the vxNN notation.
    ///
    /// Table values are encoded as odd numbers.
    pub fn table_with_number(n: u32) -> Option<Value> {
        if n < u32::MAX / 2 {
            let encoding = n * 2 + 1;
            assert!(encoding < u32::MAX);
            Some(Value(encoding))
        } else {
            None
        }
    }

    /// Create a direct reference to the first result of instruction `i`.
    pub fn new_direct(i: Inst) -> Value {
        let encoding = i.index() * 2;
        assert!(encoding < u32::MAX as usize);
        Value(encoding as u32)
    }

    /// Create a reference to entry `index` in the extended value table.
    pub fn new_table(index: usize) -> Value {
        let encoding = index * 2 + 1;
        assert!(encoding < u32::MAX as usize);
        Value(encoding as u32)
    }

    // Expand the internal representation into something useful.
    pub fn expand(&self) -> ExpandedValue {
        use self::ExpandedValue::*;
        if *self == NO_VALUE {
            return None;
        }
        // The low bit distinguishes direct (even) from table (odd) references.
        let index = (self.0 / 2) as usize;
        if self.0 % 2 == 0 {
            Direct(Inst::new(index))
        } else {
            Table(index)
        }
    }
}

/// Display a `Value` reference as "v7" or "vx2".
impl Display for Value {
    fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
        use self::ExpandedValue::*;
        match self.expand() {
            Direct(i) => write!(fmt, "v{}", i.0),
            Table(i) => write!(fmt, "vx{}", i),
            None => write!(fmt, "NO_VALUE"),
        }
    }
}

/// A guaranteed invalid value reference.
pub const NO_VALUE: Value = Value(u32::MAX);

impl Default for Value {
    fn default() -> Value {
        NO_VALUE
    }
}
|
||||
|
||||
/// An opaque reference to a stack slot.
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
|
||||
pub struct StackSlot(u32);
|
||||
|
||||
impl EntityRef for StackSlot {
|
||||
fn new(index: usize) -> StackSlot {
|
||||
assert!(index < (u32::MAX as usize));
|
||||
StackSlot(index as u32)
|
||||
}
|
||||
|
||||
fn index(self) -> usize {
|
||||
self.0 as usize
|
||||
}
|
||||
}
|
||||
|
||||
/// Display a `StackSlot` reference as "ss12".
|
||||
impl Display for StackSlot {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
|
||||
write!(fmt, "ss{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// A guaranteed invalid stack slot reference.
|
||||
pub const NO_STACK_SLOT: StackSlot = StackSlot(u32::MAX);
|
||||
|
||||
impl Default for StackSlot {
|
||||
fn default() -> StackSlot {
|
||||
NO_STACK_SLOT
|
||||
}
|
||||
}
|
||||
|
||||
/// An opaque reference to a jump table.
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
|
||||
pub struct JumpTable(u32);
|
||||
|
||||
impl EntityRef for JumpTable {
|
||||
fn new(index: usize) -> JumpTable {
|
||||
assert!(index < (u32::MAX as usize));
|
||||
JumpTable(index as u32)
|
||||
}
|
||||
|
||||
fn index(self) -> usize {
|
||||
self.0 as usize
|
||||
}
|
||||
}
|
||||
|
||||
/// Display a `JumpTable` reference as "jt12".
|
||||
impl Display for JumpTable {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
|
||||
write!(fmt, "jt{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// A guaranteed invalid jump table reference.
|
||||
pub const NO_JUMP_TABLE: JumpTable = JumpTable(u32::MAX);
|
||||
|
||||
impl Default for JumpTable {
|
||||
fn default() -> JumpTable {
|
||||
NO_JUMP_TABLE
|
||||
}
|
||||
}
|
||||
|
||||
/// A reference to an external function.
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
|
||||
pub struct FuncRef(u32);
|
||||
|
||||
impl EntityRef for FuncRef {
|
||||
fn new(index: usize) -> FuncRef {
|
||||
assert!(index < (u32::MAX as usize));
|
||||
FuncRef(index as u32)
|
||||
}
|
||||
|
||||
fn index(self) -> usize {
|
||||
self.0 as usize
|
||||
}
|
||||
}
|
||||
|
||||
/// Display a `FuncRef` reference as "fn12".
|
||||
impl Display for FuncRef {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
|
||||
write!(fmt, "fn{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// A guaranteed invalid function reference.
|
||||
pub const NO_FUNC_REF: FuncRef = FuncRef(u32::MAX);
|
||||
|
||||
impl Default for FuncRef {
|
||||
fn default() -> FuncRef {
|
||||
NO_FUNC_REF
|
||||
}
|
||||
}
|
||||
|
||||
/// A reference to a function signature.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct SigRef(u32);

impl EntityRef for SigRef {
    fn new(index: usize) -> SigRef {
        // The all-ones encoding is reserved for `NO_SIG_REF`.
        assert!(index < (u32::MAX as usize));
        SigRef(index as u32)
    }

    fn index(self) -> usize {
        self.0 as usize
    }
}

/// Display a `SigRef` reference as "sig12".
impl Display for SigRef {
    fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
        write!(fmt, "sig{}", self.0)
    }
}

/// A guaranteed invalid signature reference.
pub const NO_SIG_REF: SigRef = SigRef(u32::MAX);

impl Default for SigRef {
    fn default() -> SigRef {
        NO_SIG_REF
    }
}
|
||||
|
||||
/// A reference to any of the entities defined in this module.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum AnyEntity {
    /// The whole function.
    Function,
    /// An extended basic block.
    Ebb(Ebb),
    /// An instruction.
    Inst(Inst),
    /// A value.
    Value(Value),
    /// A stack slot.
    StackSlot(StackSlot),
    /// A jump table.
    JumpTable(JumpTable),
    /// A reference to an external function.
    FuncRef(FuncRef),
    /// A reference to a function signature.
    SigRef(SigRef),
}
|
||||
|
||||
impl Display for AnyEntity {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
|
||||
match *self {
|
||||
AnyEntity::Function => write!(fmt, "function"),
|
||||
AnyEntity::Ebb(r) => r.fmt(fmt),
|
||||
AnyEntity::Inst(r) => r.fmt(fmt),
|
||||
AnyEntity::Value(r) => r.fmt(fmt),
|
||||
AnyEntity::StackSlot(r) => r.fmt(fmt),
|
||||
AnyEntity::JumpTable(r) => r.fmt(fmt),
|
||||
AnyEntity::FuncRef(r) => r.fmt(fmt),
|
||||
AnyEntity::SigRef(r) => r.fmt(fmt),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Ebb> for AnyEntity {
|
||||
fn from(r: Ebb) -> AnyEntity {
|
||||
AnyEntity::Ebb(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Inst> for AnyEntity {
|
||||
fn from(r: Inst) -> AnyEntity {
|
||||
AnyEntity::Inst(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Value> for AnyEntity {
|
||||
fn from(r: Value) -> AnyEntity {
|
||||
AnyEntity::Value(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<StackSlot> for AnyEntity {
|
||||
fn from(r: StackSlot) -> AnyEntity {
|
||||
AnyEntity::StackSlot(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<JumpTable> for AnyEntity {
|
||||
fn from(r: JumpTable) -> AnyEntity {
|
||||
AnyEntity::JumpTable(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<FuncRef> for AnyEntity {
|
||||
fn from(r: FuncRef) -> AnyEntity {
|
||||
AnyEntity::FuncRef(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SigRef> for AnyEntity {
|
||||
fn from(r: SigRef) -> AnyEntity {
|
||||
AnyEntity::SigRef(r)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::u32;
    use entity_map::EntityRef;

    // Check `Display` formatting of direct ("v12") and table ("vx12") values,
    // and that the packed value number space rejects out-of-range numbers:
    // `u32::MAX / 2` is reserved, so `*_with_number` returns `None` for it.
    #[test]
    fn value_with_number() {
        assert_eq!(Value::direct_with_number(0).unwrap().to_string(), "v0");
        assert_eq!(Value::direct_with_number(1).unwrap().to_string(), "v1");
        assert_eq!(Value::table_with_number(0).unwrap().to_string(), "vx0");
        assert_eq!(Value::table_with_number(1).unwrap().to_string(), "vx1");

        // The largest representable number round-trips through `expand()`.
        assert_eq!(Value::direct_with_number(u32::MAX / 2), None);
        assert_eq!(match Value::direct_with_number(u32::MAX / 2 - 1).unwrap().expand() {
                       ExpandedValue::Direct(i) => i.index() as u32,
                       _ => u32::MAX,
                   },
                   u32::MAX / 2 - 1);

        assert_eq!(Value::table_with_number(u32::MAX / 2), None);
        assert_eq!(match Value::table_with_number(u32::MAX / 2 - 1).unwrap().expand() {
                       ExpandedValue::Table(i) => i as u32,
                       _ => u32::MAX,
                   },
                   u32::MAX / 2 - 1);
    }
}
|
||||
@@ -1,135 +0,0 @@
|
||||
//! External function calls.
|
||||
//!
|
||||
//! To a Cretonne function, all functions are "external". Directly called functions must be
|
||||
//! declared in the preamble, and all function calls must have a signature.
|
||||
//!
|
||||
//! This module declares the data types used to represent external functions and call signatures.
|
||||
|
||||
use std::fmt::{self, Display, Formatter};
|
||||
use ir::Type;
|
||||
|
||||
/// Function signature.
///
/// The function signature describes the types of arguments and return values along with other
/// details that are needed to call a function correctly.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Signature {
    /// Types of the arguments passed to the function.
    pub argument_types: Vec<ArgumentType>,
    /// Types of the values returned by the function.
    pub return_types: Vec<ArgumentType>,
}
|
||||
|
||||
impl Signature {
|
||||
pub fn new() -> Signature {
|
||||
Signature {
|
||||
argument_types: Vec::new(),
|
||||
return_types: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn write_list(f: &mut Formatter, args: &Vec<ArgumentType>) -> fmt::Result {
|
||||
match args.split_first() {
|
||||
None => {}
|
||||
Some((first, rest)) => {
|
||||
try!(write!(f, "{}", first));
|
||||
for arg in rest {
|
||||
try!(write!(f, ", {}", arg));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl Display for Signature {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
try!(write!(f, "("));
|
||||
try!(write_list(f, &self.argument_types));
|
||||
try!(write!(f, ")"));
|
||||
if !self.return_types.is_empty() {
|
||||
try!(write!(f, " -> "));
|
||||
try!(write_list(f, &self.return_types));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Function argument or return value type.
///
/// This describes the value type being passed to or from a function along with flags that affect
/// how the argument is passed.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct ArgumentType {
    /// The value type of this argument or return value.
    pub value_type: Type,
    /// How a small integer argument is extended (none / zero / sign).
    pub extension: ArgumentExtension,
    /// Place this argument in a register if possible.
    pub inreg: bool,
}
|
||||
|
||||
impl ArgumentType {
|
||||
pub fn new(vt: Type) -> ArgumentType {
|
||||
ArgumentType {
|
||||
value_type: vt,
|
||||
extension: ArgumentExtension::None,
|
||||
inreg: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for ArgumentType {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
try!(write!(f, "{}", self.value_type));
|
||||
match self.extension {
|
||||
ArgumentExtension::None => {}
|
||||
ArgumentExtension::Uext => try!(write!(f, " uext")),
|
||||
ArgumentExtension::Sext => try!(write!(f, " sext")),
|
||||
}
|
||||
if self.inreg {
|
||||
try!(write!(f, " inreg"));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Function argument extension options.
///
/// On some architectures, small integer function arguments are extended to the width of a
/// general-purpose register.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum ArgumentExtension {
    /// No extension, high bits are indeterminate.
    None,
    /// Unsigned extension: high bits in register are 0.
    Uext,
    /// Signed extension: high bits in register replicate sign bit.
    Sext,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use ir::types::{I32, F32, B8};

    // Flags append to the type name in a fixed order: extension, then inreg.
    #[test]
    fn argument_type() {
        let mut t = ArgumentType::new(I32);
        assert_eq!(t.to_string(), "i32");
        t.extension = ArgumentExtension::Uext;
        assert_eq!(t.to_string(), "i32 uext");
        t.inreg = true;
        assert_eq!(t.to_string(), "i32 uext inreg");
    }

    // The " -> returns" section only appears once a return type is added.
    #[test]
    fn signatures() {
        let mut sig = Signature::new();
        assert_eq!(sig.to_string(), "()");
        sig.argument_types.push(ArgumentType::new(I32));
        assert_eq!(sig.to_string(), "(i32)");
        sig.return_types.push(ArgumentType::new(F32));
        assert_eq!(sig.to_string(), "(i32) -> f32");
        sig.argument_types.push(ArgumentType::new(I32.by(4).unwrap()));
        assert_eq!(sig.to_string(), "(i32, i32x4) -> f32");
        sig.return_types.push(ArgumentType::new(B8));
        assert_eq!(sig.to_string(), "(i32, i32x4) -> f32, b8");
    }
}
|
||||
@@ -1,77 +0,0 @@
|
||||
//! Function names.
|
||||
//!
|
||||
//! The name of a function doesn't have any meaning to Cretonne which compiles functions
|
||||
//! independently.
|
||||
|
||||
use std::fmt::{self, Write};
|
||||
use std::ascii::AsciiExt;
|
||||
|
||||
/// The name of a function can be any UTF-8 string.
///
/// Function names are mostly a testing and debugging tool.
/// In particular, `.cton` files use function names to identify functions.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct FunctionName(String);

impl FunctionName {
    /// Create a function name from anything that converts into a `String`,
    /// such as a `&str` or an owned `String`.
    pub fn new<S: Into<String>>(s: S) -> FunctionName {
        FunctionName(s.into())
    }
}
|
||||
|
||||
// Can `c` begin an unquoted identifier-like function name?
// Only ASCII letters and '_' qualify.
fn is_id_start(c: char) -> bool {
    match c {
        '_' => true,
        _ => c.is_ascii() && c.is_alphabetic(),
    }
}
|
||||
|
||||
// Can `c` appear after the first character of an unquoted function name?
// ASCII letters, ASCII digits, and '_' qualify.
fn is_id_continue(c: char) -> bool {
    match c {
        '_' => true,
        _ => c.is_ascii() && c.is_alphanumeric(),
    }
}
|
||||
|
||||
// The function name may need quotes if it doesn't parse as an identifier.
|
||||
fn needs_quotes(name: &str) -> bool {
|
||||
let mut iter = name.chars();
|
||||
if let Some(ch) = iter.next() {
|
||||
!is_id_start(ch) || !iter.all(is_id_continue)
|
||||
} else {
|
||||
// A blank function name needs quotes.
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for FunctionName {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
if needs_quotes(&self.0) {
|
||||
try!(f.write_char('"'));
|
||||
for c in self.0.chars().flat_map(char::escape_default) {
|
||||
try!(f.write_char(c));
|
||||
}
|
||||
f.write_char('"')
|
||||
} else {
|
||||
f.write_str(&self.0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::{needs_quotes, FunctionName};

    // Empty names, names with non-identifier characters, and names starting
    // with a digit all require quoting.
    #[test]
    fn quoting() {
        assert_eq!(needs_quotes(""), true);
        assert_eq!(needs_quotes("x"), false);
        assert_eq!(needs_quotes(" "), true);
        assert_eq!(needs_quotes("0"), true);
        assert_eq!(needs_quotes("x0"), false);
    }

    // Quoted names use `char::escape_default` escapes, so control characters
    // and non-ASCII code points appear in `\u{..}` / `\n` form.
    #[test]
    fn escaping() {
        assert_eq!(FunctionName::new("").to_string(), "\"\"");
        assert_eq!(FunctionName::new("x").to_string(), "x");
        assert_eq!(FunctionName::new(" ").to_string(), "\" \"");
        assert_eq!(FunctionName::new(" \n").to_string(), "\" \\n\"");
        assert_eq!(FunctionName::new("a\u{1000}v").to_string(),
                   "\"a\\u{1000}v\"");
    }
}
|
||||
@@ -1,80 +0,0 @@
|
||||
//! Intermediate representation of a function.
|
||||
//!
|
||||
//! The `Function` struct defined in this module owns all of its extended basic blocks and
|
||||
//! instructions.
|
||||
|
||||
use std::fmt::{self, Display, Debug, Formatter};
|
||||
use ir::{FunctionName, Signature, Inst, StackSlot, StackSlotData, JumpTable, JumpTableData,
|
||||
DataFlowGraph, Layout};
|
||||
use isa::Encoding;
|
||||
use entity_map::{EntityMap, PrimaryEntityData};
|
||||
use write::write_function;
|
||||
|
||||
/// A function.
///
/// Functions can be cloned, but it is not a very fast operation.
/// The clone will have all the same entity numbers as the original.
#[derive(Clone)]
pub struct Function {
    /// Name of this function. Mostly used by `.cton` files.
    pub name: FunctionName,

    /// Signature of this function.
    ///
    /// Kept private; read it through `own_signature()`.
    signature: Signature,

    /// Stack slots allocated in this function.
    pub stack_slots: EntityMap<StackSlot, StackSlotData>,

    /// Jump tables used in this function.
    pub jump_tables: EntityMap<JumpTable, JumpTableData>,

    /// Data flow graph containing the primary definition of all instructions, EBBs and values.
    pub dfg: DataFlowGraph,

    /// Layout of EBBs and instructions in the function body.
    pub layout: Layout,

    /// Encoding recipe and bits for the legal instructions.
    /// Illegal instructions have the `Encoding::default()` value.
    pub encodings: EntityMap<Inst, Encoding>,
}

// Mark the stack slot and jump table maps as the primary definitions of their
// entity data (see `entity_map::PrimaryEntityData`).
impl PrimaryEntityData for StackSlotData {}
impl PrimaryEntityData for JumpTableData {}
|
||||
|
||||
impl Function {
    /// Create a function with the given name and signature.
    ///
    /// All entity maps, the data flow graph, and the layout start out empty.
    pub fn with_name_signature(name: FunctionName, sig: Signature) -> Function {
        Function {
            name: name,
            signature: sig,
            stack_slots: EntityMap::new(),
            jump_tables: EntityMap::new(),
            dfg: DataFlowGraph::new(),
            layout: Layout::new(),
            encodings: EntityMap::new(),
        }
    }

    /// Create a new empty, anonymous function.
    pub fn new() -> Function {
        Self::with_name_signature(FunctionName::default(), Signature::new())
    }

    /// Get the signature of this function.
    pub fn own_signature(&self) -> &Signature {
        &self.signature
    }
}
|
||||
|
||||
impl Display for Function {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
|
||||
write_function(fmt, self, None)
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for Function {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
|
||||
write_function(fmt, self, None)
|
||||
}
|
||||
}
|
||||
@@ -1,717 +0,0 @@
|
||||
|
||||
//! Immediate operands for Cretonne instructions
|
||||
//!
|
||||
//! This module defines the types of immediate operands that can appear on Cretonne instructions.
|
||||
//! Each type here should have a corresponding definition in the `cretonne.immediates` Python
|
||||
//! module in the meta language.
|
||||
|
||||
use std::fmt::{self, Display, Formatter};
|
||||
use std::mem;
|
||||
use std::str::FromStr;
|
||||
|
||||
/// 64-bit immediate integer operand.
|
||||
///
|
||||
/// An `Imm64` operand can also be used to represent immediate values of smaller integer types by
|
||||
/// sign-extending to `i64`.
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
|
||||
pub struct Imm64(i64);
|
||||
|
||||
impl Imm64 {
|
||||
pub fn new(x: i64) -> Imm64 {
|
||||
Imm64(x)
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<i64> for Imm64 {
|
||||
fn into(self) -> i64 {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Imm64 {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
let x = self.0;
|
||||
if -10_000 < x && x < 10_000 {
|
||||
// Use decimal for small numbers.
|
||||
write!(f, "{}", x)
|
||||
} else {
|
||||
// Hexadecimal with a multiple of 4 digits and group separators:
|
||||
//
|
||||
// 0xfff0
|
||||
// 0x0001_ffff
|
||||
// 0xffff_ffff_fff8_4400
|
||||
//
|
||||
let mut pos = (64 - x.leading_zeros() - 1) & 0xf0;
|
||||
try!(write!(f, "0x{:04x}", (x >> pos) & 0xffff));
|
||||
while pos > 0 {
|
||||
pos -= 16;
|
||||
try!(write!(f, "_{:04x}", (x >> pos) & 0xffff));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for Imm64 {
|
||||
type Err = &'static str;
|
||||
|
||||
// Parse a decimal or hexadecimal Imm64, formatted as above.
|
||||
fn from_str(s: &str) -> Result<Imm64, &'static str> {
|
||||
let mut value: u64 = 0;
|
||||
let mut digits = 0;
|
||||
let negative = s.starts_with('-');
|
||||
let s2 = if negative { &s[1..] } else { s };
|
||||
|
||||
if s2.starts_with("0x") {
|
||||
// Hexadecimal.
|
||||
for ch in s2[2..].chars() {
|
||||
match ch.to_digit(16) {
|
||||
Some(digit) => {
|
||||
digits += 1;
|
||||
if digits > 16 {
|
||||
return Err("Too many hexadecimal digits in Imm64");
|
||||
}
|
||||
// This can't overflow given the digit limit.
|
||||
value = (value << 4) | digit as u64;
|
||||
}
|
||||
None => {
|
||||
// Allow embedded underscores, but fail on anything else.
|
||||
if ch != '_' {
|
||||
return Err("Invalid character in hexadecimal Imm64");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Decimal number, possibly negative.
|
||||
for ch in s2.chars() {
|
||||
match ch.to_digit(16) {
|
||||
Some(digit) => {
|
||||
digits += 1;
|
||||
match value.checked_mul(10) {
|
||||
None => return Err("Too large decimal Imm64"),
|
||||
Some(v) => value = v,
|
||||
}
|
||||
match value.checked_add(digit as u64) {
|
||||
None => return Err("Too large decimal Imm64"),
|
||||
Some(v) => value = v,
|
||||
}
|
||||
}
|
||||
None => {
|
||||
// Allow embedded underscores, but fail on anything else.
|
||||
if ch != '_' {
|
||||
return Err("Invalid character in decimal Imm64");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if digits == 0 {
|
||||
return Err("No digits in Imm64");
|
||||
}
|
||||
|
||||
// We support the range-and-a-half from -2^63 .. 2^64-1.
|
||||
if negative {
|
||||
value = value.wrapping_neg();
|
||||
// Don't allow large negative values to wrap around and become positive.
|
||||
if value as i64 > 0 {
|
||||
return Err("Negative number too small for Imm64");
|
||||
}
|
||||
}
|
||||
Ok(Imm64::new(value as i64))
|
||||
}
|
||||
}
|
||||
|
||||
/// 8-bit unsigned integer immediate operand.
///
/// This is used to indicate lane indexes typically.
/// It is a plain `u8` alias with no wrapper type.
pub type Uimm8 = u8;
|
||||
|
||||
/// An IEEE binary32 immediate floating point value.
///
/// All bit patterns are allowed, including NaN payloads.
#[derive(Copy, Clone, Debug)]
pub struct Ieee32(f32);

/// An IEEE binary64 immediate floating point value.
///
/// All bit patterns are allowed, including NaN payloads.
#[derive(Copy, Clone, Debug)]
pub struct Ieee64(f64);
|
||||
|
||||
// Format a floating point number in a way that is reasonably human-readable, and that can be
// converted back to binary without any rounding issues. The hexadecimal formatting of normal and
// subnormal numbers is compatible with C99 and the printf "%a" format specifier. The NaN and Inf
// formats are not supported by C99.
//
// The encoding parameters are:
//
// w - exponent field width in bits
// t - trailing significand field width in bits
//
fn format_float(bits: u64, w: u8, t: u8, f: &mut Formatter) -> fmt::Result {
    debug_assert!(w > 0 && w <= 16, "Invalid exponent range");
    debug_assert!(1 + w + t <= 64, "Too large IEEE format for u64");
    debug_assert!((t + w + 1).is_power_of_two(), "Unexpected IEEE format size");

    let max_e_bits = (1u64 << w) - 1;
    let t_bits = bits & ((1u64 << t) - 1); // Trailing significand.
    let e_bits = (bits >> t) & max_e_bits; // Biased exponent.
    // `+` binds tighter than `>>` in Rust, so this is `bits >> (w + t)`:
    // the sign bit sits above the exponent and trailing significand.
    let sign_bit = (bits >> w + t) & 1;

    let bias: i32 = (1 << (w - 1)) - 1;
    let e = e_bits as i32 - bias; // Unbiased exponent.
    let emin = 1 - bias; // Minimum exponent.

    // How many hexadecimal digits are needed for the trailing significand?
    let digits = (t + 3) / 4;
    // Trailing significand left-aligned in `digits` hexadecimal digits.
    let left_t_bits = t_bits << (4 * digits - t);

    // All formats share the leading sign.
    if sign_bit != 0 {
        try!(write!(f, "-"));
    }

    if e_bits == 0 {
        if t_bits == 0 {
            // Zero.
            write!(f, "0.0")
        } else {
            // Subnormal: leading digit 0, exponent pinned at `emin`.
            write!(f, "0x0.{0:01$x}p{2}", left_t_bits, digits as usize, emin)
        }
    } else if e_bits == max_e_bits {
        if t_bits == 0 {
            // Infinity.
            write!(f, "Inf")
        } else {
            // NaN. The top trailing-significand bit distinguishes quiet from signaling.
            let payload = t_bits & ((1 << (t - 1)) - 1);
            if t_bits & (1 << (t - 1)) != 0 {
                // Quiet NaN.
                if payload != 0 {
                    write!(f, "NaN:0x{:x}", payload)
                } else {
                    write!(f, "NaN")
                }
            } else {
                // Signaling NaN. The payload is always printed; a zero payload
                // here would have encoded Inf instead.
                write!(f, "sNaN:0x{:x}", payload)
            }
        }
    } else {
        // Normal number with an implicit leading 1 bit.
        write!(f, "0x1.{0:01$x}p{2}", left_t_bits, digits as usize, e)
    }
}
|
||||
|
||||
// Parse a float using the same format as `format_float` above.
//
// The encoding parameters are:
//
// w - exponent field width in bits
// t - trailing significand field width in bits
//
// On success, the returned `u64` holds the encoded IEEE bit pattern in its
// low `1 + w + t` bits.
fn parse_float(s: &str, w: u8, t: u8) -> Result<u64, &'static str> {
    debug_assert!(w > 0 && w <= 16, "Invalid exponent range");
    debug_assert!(1 + w + t <= 64, "Too large IEEE format for u64");
    debug_assert!((t + w + 1).is_power_of_two(), "Unexpected IEEE format size");

    // Strip an optional leading '-'; it becomes the encoded sign bit.
    // `+` binds tighter than `<<`, so this is `1u64 << (t + w)`.
    let (sign_bit, s2) = if s.starts_with('-') {
        (1u64 << t + w, &s[1..])
    } else {
        (0, s)
    };

    if !s2.starts_with("0x") {
        // Non-hexadecimal input: only "0.0", "Inf", and the NaN spellings.
        let max_e_bits = ((1u64 << w) - 1) << t;
        let quiet_bit = 1u64 << (t - 1);

        // The only decimal encoding allowed is 0.
        if s2 == "0.0" {
            return Ok(sign_bit);
        }

        if s2 == "Inf" {
            // +/- infinity: e = max, t = 0.
            return Ok(sign_bit | max_e_bits);
        }
        if s2 == "NaN" {
            // Canonical quiet NaN: e = max, t = quiet.
            return Ok(sign_bit | max_e_bits | quiet_bit);
        }
        if s2.starts_with("NaN:0x") {
            // Quiet NaN with payload; the payload must fit below the quiet bit.
            return match u64::from_str_radix(&s2[6..], 16) {
                Ok(payload) if payload < quiet_bit => {
                    Ok(sign_bit | max_e_bits | quiet_bit | payload)
                }
                _ => Err("Invalid NaN payload"),
            };
        }
        if s2.starts_with("sNaN:0x") {
            // Signaling NaN with payload; a zero payload would encode Inf.
            return match u64::from_str_radix(&s2[7..], 16) {
                Ok(payload) if 0 < payload && payload < quiet_bit => {
                    Ok(sign_bit | max_e_bits | payload)
                }
                _ => Err("Invalid sNaN payload"),
            };
        }

        return Err("Float must be hexadecimal");
    }
    let s3 = &s2[2..];

    // Scan hexadecimal significand digits, an optional radix point, and an
    // optional decimal exponent introduced by 'p'.
    let mut digits = 0u8;
    let mut digits_before_period: Option<u8> = None;
    let mut significand = 0u64;
    let mut exponent = 0i32;

    for (idx, ch) in s3.char_indices() {
        match ch {
            '.' => {
                // This is the radix point. There can only be one.
                if digits_before_period != None {
                    return Err("Multiple radix points");
                } else {
                    digits_before_period = Some(digits);
                }
            }
            'p' => {
                // The following exponent is a decimal number.
                let exp_str = &s3[1 + idx..];
                match exp_str.parse::<i16>() {
                    Ok(e) => {
                        exponent = e as i32;
                        break;
                    }
                    Err(_) => return Err("Bad exponent"),
                }
            }
            _ => {
                match ch.to_digit(16) {
                    Some(digit) => {
                        digits += 1;
                        if digits > 16 {
                            return Err("Too many digits");
                        }
                        significand = (significand << 4) | digit as u64;
                    }
                    None => return Err("Invalid character"),
                }
            }
        }
    }

    if digits == 0 {
        return Err("No digits");
    }

    if significand == 0 {
        // This is +/- 0.0.
        return Ok(sign_bit);
    }

    // Number of bits appearing after the radix point.
    match digits_before_period {
        None => {} // No radix point present.
        Some(d) => exponent -= 4 * (digits - d) as i32,
    };

    // Normalize the significand and exponent so the leading 1 bit lands in
    // position `t` (the implicit bit of a normal number).
    let significant_bits = (64 - significand.leading_zeros()) as u8;
    if significant_bits > t + 1 {
        let adjust = significant_bits - (t + 1);
        // Dropped bits must be zero; rounding is not performed.
        if significand & ((1u64 << adjust) - 1) != 0 {
            return Err("Too many significant bits");
        }
        // Adjust significand down.
        significand >>= adjust;
        exponent += adjust as i32;
    } else {
        let adjust = t + 1 - significant_bits;
        significand <<= adjust;
        exponent -= adjust as i32;
    }
    assert_eq!(significand >> t, 1);

    // Trailing significand excludes the high bit.
    let t_bits = significand & ((1 << t) - 1);

    let max_exp = (1i32 << w) - 2;
    let bias: i32 = (1 << (w - 1)) - 1;
    exponent += bias + t as i32;

    if exponent > max_exp {
        Err("Magnitude too large")
    } else if exponent > 0 {
        // This is a normal number.
        let e_bits = (exponent as u64) << t;
        Ok(sign_bit | e_bits | t_bits)
    } else if 1 - exponent <= t as i32 {
        // This is a subnormal number: e = 0, t = significand bits.
        // Renormalize significand for exponent = 1.
        let adjust = 1 - exponent;
        if significand & ((1u64 << adjust) - 1) != 0 {
            Err("Subnormal underflow")
        } else {
            significand >>= adjust;
            Ok(sign_bit | significand)
        }
    } else {
        Err("Magnitude too small")
    }
}
|
||||
|
||||
impl Ieee32 {
|
||||
pub fn new(x: f32) -> Ieee32 {
|
||||
Ieee32(x)
|
||||
}
|
||||
|
||||
/// Construct Ieee32 immediate from raw bits.
|
||||
pub fn from_bits(x: u32) -> Ieee32 {
|
||||
Ieee32(unsafe { mem::transmute(x) })
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Ieee32 {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
let bits: u32 = unsafe { mem::transmute(self.0) };
|
||||
format_float(bits as u64, 8, 23, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for Ieee32 {
|
||||
type Err = &'static str;
|
||||
|
||||
fn from_str(s: &str) -> Result<Ieee32, &'static str> {
|
||||
match parse_float(s, 8, 23) {
|
||||
Ok(b) => Ok(Ieee32::from_bits(b as u32)),
|
||||
Err(s) => Err(s),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Ieee64 {
|
||||
pub fn new(x: f64) -> Ieee64 {
|
||||
Ieee64(x)
|
||||
}
|
||||
|
||||
/// Construct Ieee64 immediate from raw bits.
|
||||
pub fn from_bits(x: u64) -> Ieee64 {
|
||||
Ieee64(unsafe { mem::transmute(x) })
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Ieee64 {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
let bits: u64 = unsafe { mem::transmute(self.0) };
|
||||
format_float(bits, 11, 52, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for Ieee64 {
|
||||
type Err = &'static str;
|
||||
|
||||
fn from_str(s: &str) -> Result<Ieee64, &'static str> {
|
||||
match parse_float(s, 11, 52) {
|
||||
Ok(b) => Ok(Ieee64::from_bits(b)),
|
||||
Err(s) => Err(s),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Arbitrary vector immediate.
///
/// This kind of immediate can represent any kind of SIMD vector constant.
/// The representation is simply the sequence of bytes that would be used to store the vector.
pub type ImmVector = Vec<u8>;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::{f32, f64};
|
||||
use std::str::FromStr;
|
||||
use std::fmt::Display;
|
||||
|
||||
#[test]
|
||||
fn format_imm64() {
|
||||
assert_eq!(Imm64(0).to_string(), "0");
|
||||
assert_eq!(Imm64(9999).to_string(), "9999");
|
||||
assert_eq!(Imm64(10000).to_string(), "0x2710");
|
||||
assert_eq!(Imm64(-9999).to_string(), "-9999");
|
||||
assert_eq!(Imm64(-10000).to_string(), "0xffff_ffff_ffff_d8f0");
|
||||
assert_eq!(Imm64(0xffff).to_string(), "0xffff");
|
||||
assert_eq!(Imm64(0x10000).to_string(), "0x0001_0000");
|
||||
}
|
||||
|
||||
// Verify that `text` can be parsed as a `T` into a value that displays as `want`.
|
||||
fn parse_ok<T: FromStr + Display>(text: &str, want: &str)
|
||||
where <T as FromStr>::Err: Display
|
||||
{
|
||||
match text.parse::<T>() {
|
||||
Err(s) => panic!("\"{}\".parse() error: {}", text, s),
|
||||
Ok(x) => assert_eq!(x.to_string(), want),
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that `text` fails to parse as `T` with the error `msg`.
|
||||
fn parse_err<T: FromStr + Display>(text: &str, msg: &str)
|
||||
where <T as FromStr>::Err: Display
|
||||
{
|
||||
match text.parse::<T>() {
|
||||
Err(s) => assert_eq!(s.to_string(), msg),
|
||||
Ok(x) => panic!("Wanted Err({}), but got {}", msg, x),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_imm64() {
|
||||
parse_ok::<Imm64>("0", "0");
|
||||
parse_ok::<Imm64>("1", "1");
|
||||
parse_ok::<Imm64>("-0", "0");
|
||||
parse_ok::<Imm64>("-1", "-1");
|
||||
parse_ok::<Imm64>("0x0", "0");
|
||||
parse_ok::<Imm64>("0xf", "15");
|
||||
parse_ok::<Imm64>("-0x9", "-9");
|
||||
|
||||
// Probe limits.
|
||||
parse_ok::<Imm64>("0xffffffff_ffffffff", "-1");
|
||||
parse_ok::<Imm64>("0x80000000_00000000", "0x8000_0000_0000_0000");
|
||||
parse_ok::<Imm64>("-0x80000000_00000000", "0x8000_0000_0000_0000");
|
||||
parse_err::<Imm64>("-0x80000000_00000001",
|
||||
"Negative number too small for Imm64");
|
||||
parse_ok::<Imm64>("18446744073709551615", "-1");
|
||||
parse_ok::<Imm64>("-9223372036854775808", "0x8000_0000_0000_0000");
|
||||
// Overflow both the checked_add and checked_mul.
|
||||
parse_err::<Imm64>("18446744073709551616", "Too large decimal Imm64");
|
||||
parse_err::<Imm64>("184467440737095516100", "Too large decimal Imm64");
|
||||
parse_err::<Imm64>("-9223372036854775809",
|
||||
"Negative number too small for Imm64");
|
||||
|
||||
// Underscores are allowed where digits go.
|
||||
parse_ok::<Imm64>("0_0", "0");
|
||||
parse_ok::<Imm64>("-_10_0", "-100");
|
||||
parse_ok::<Imm64>("_10_", "10");
|
||||
parse_ok::<Imm64>("0x97_88_bb", "0x0097_88bb");
|
||||
parse_ok::<Imm64>("0x_97_", "151");
|
||||
|
||||
parse_err::<Imm64>("", "No digits in Imm64");
|
||||
parse_err::<Imm64>("-", "No digits in Imm64");
|
||||
parse_err::<Imm64>("_", "No digits in Imm64");
|
||||
parse_err::<Imm64>("0x", "No digits in Imm64");
|
||||
parse_err::<Imm64>("0x_", "No digits in Imm64");
|
||||
parse_err::<Imm64>("-0x", "No digits in Imm64");
|
||||
parse_err::<Imm64>(" ", "Invalid character in decimal Imm64");
|
||||
parse_err::<Imm64>("0 ", "Invalid character in decimal Imm64");
|
||||
parse_err::<Imm64>(" 0", "Invalid character in decimal Imm64");
|
||||
parse_err::<Imm64>("--", "Invalid character in decimal Imm64");
|
||||
parse_err::<Imm64>("-0x-", "Invalid character in hexadecimal Imm64");
|
||||
|
||||
// Hex count overflow.
|
||||
parse_err::<Imm64>("0x0_0000_0000_0000_0000",
|
||||
"Too many hexadecimal digits in Imm64");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn format_ieee32() {
|
||||
assert_eq!(Ieee32::new(0.0).to_string(), "0.0");
|
||||
assert_eq!(Ieee32::new(-0.0).to_string(), "-0.0");
|
||||
assert_eq!(Ieee32::new(1.0).to_string(), "0x1.000000p0");
|
||||
assert_eq!(Ieee32::new(1.5).to_string(), "0x1.800000p0");
|
||||
assert_eq!(Ieee32::new(0.5).to_string(), "0x1.000000p-1");
|
||||
assert_eq!(Ieee32::new(f32::EPSILON).to_string(), "0x1.000000p-23");
|
||||
assert_eq!(Ieee32::new(f32::MIN).to_string(), "-0x1.fffffep127");
|
||||
assert_eq!(Ieee32::new(f32::MAX).to_string(), "0x1.fffffep127");
|
||||
// Smallest positive normal number.
|
||||
assert_eq!(Ieee32::new(f32::MIN_POSITIVE).to_string(),
|
||||
"0x1.000000p-126");
|
||||
// Subnormals.
|
||||
assert_eq!(Ieee32::new(f32::MIN_POSITIVE / 2.0).to_string(),
|
||||
"0x0.800000p-126");
|
||||
assert_eq!(Ieee32::new(f32::MIN_POSITIVE * f32::EPSILON).to_string(),
|
||||
"0x0.000002p-126");
|
||||
assert_eq!(Ieee32::new(f32::INFINITY).to_string(), "Inf");
|
||||
assert_eq!(Ieee32::new(f32::NEG_INFINITY).to_string(), "-Inf");
|
||||
assert_eq!(Ieee32::new(f32::NAN).to_string(), "NaN");
|
||||
assert_eq!(Ieee32::new(-f32::NAN).to_string(), "-NaN");
|
||||
// Construct some qNaNs with payloads.
|
||||
assert_eq!(Ieee32::from_bits(0x7fc00001).to_string(), "NaN:0x1");
|
||||
assert_eq!(Ieee32::from_bits(0x7ff00001).to_string(), "NaN:0x300001");
|
||||
// Signaling NaNs.
|
||||
assert_eq!(Ieee32::from_bits(0x7f800001).to_string(), "sNaN:0x1");
|
||||
assert_eq!(Ieee32::from_bits(0x7fa00001).to_string(), "sNaN:0x200001");
|
||||
}
|
||||
|
||||
#[test]
fn parse_ieee32() {
    // Basic syntax: zeros, optional sign, optional radix point.
    parse_ok::<Ieee32>("0.0", "0.0");
    parse_ok::<Ieee32>("-0.0", "-0.0");
    parse_ok::<Ieee32>("0x0", "0.0");
    parse_ok::<Ieee32>("0x0.0", "0.0");
    parse_ok::<Ieee32>("0x.0", "0.0");
    parse_ok::<Ieee32>("0x0.", "0.0");
    parse_ok::<Ieee32>("0x1", "0x1.000000p0");
    parse_ok::<Ieee32>("-0x1", "-0x1.000000p0");
    parse_ok::<Ieee32>("0x10", "0x1.000000p4");
    parse_ok::<Ieee32>("0x10.0", "0x1.000000p4");
    // Decimal (non-hex) and malformed inputs are rejected.
    parse_err::<Ieee32>("0.", "Float must be hexadecimal");
    parse_err::<Ieee32>(".0", "Float must be hexadecimal");
    parse_err::<Ieee32>("0", "Float must be hexadecimal");
    parse_err::<Ieee32>("-0", "Float must be hexadecimal");
    parse_err::<Ieee32>(".", "Float must be hexadecimal");
    parse_err::<Ieee32>("", "Float must be hexadecimal");
    parse_err::<Ieee32>("-", "Float must be hexadecimal");
    parse_err::<Ieee32>("0x", "No digits");
    parse_err::<Ieee32>("0x..", "Multiple radix points");

    // Check significant bits.
    parse_ok::<Ieee32>("0x0.ffffff", "0x1.fffffep-1");
    parse_ok::<Ieee32>("0x1.fffffe", "0x1.fffffep0");
    parse_ok::<Ieee32>("0x3.fffffc", "0x1.fffffep1");
    parse_ok::<Ieee32>("0x7.fffff8", "0x1.fffffep2");
    parse_ok::<Ieee32>("0xf.fffff0", "0x1.fffffep3");
    parse_err::<Ieee32>("0x1.ffffff", "Too many significant bits");
    parse_err::<Ieee32>("0x1.fffffe0000000000", "Too many digits");

    // Exponents.
    parse_ok::<Ieee32>("0x1p3", "0x1.000000p3");
    parse_ok::<Ieee32>("0x1p-3", "0x1.000000p-3");
    parse_ok::<Ieee32>("0x1.0p3", "0x1.000000p3");
    parse_ok::<Ieee32>("0x2.0p3", "0x1.000000p4");
    parse_ok::<Ieee32>("0x1.0p127", "0x1.000000p127");
    parse_ok::<Ieee32>("0x1.0p-126", "0x1.000000p-126");
    parse_ok::<Ieee32>("0x0.1p-122", "0x1.000000p-126");
    parse_err::<Ieee32>("0x2.0p127", "Magnitude too large");

    // Subnormals.
    parse_ok::<Ieee32>("0x1.0p-127", "0x0.800000p-126");
    parse_ok::<Ieee32>("0x1.0p-149", "0x0.000002p-126");
    parse_ok::<Ieee32>("0x0.000002p-126", "0x0.000002p-126");
    parse_err::<Ieee32>("0x0.100001p-126", "Subnormal underflow");
    parse_err::<Ieee32>("0x1.8p-149", "Subnormal underflow");
    parse_err::<Ieee32>("0x1.0p-150", "Magnitude too small");

    // NaNs and Infs.
    parse_ok::<Ieee32>("Inf", "Inf");
    parse_ok::<Ieee32>("-Inf", "-Inf");
    parse_ok::<Ieee32>("NaN", "NaN");
    parse_ok::<Ieee32>("-NaN", "-NaN");
    parse_ok::<Ieee32>("NaN:0x0", "NaN");
    parse_err::<Ieee32>("NaN:", "Float must be hexadecimal");
    parse_err::<Ieee32>("NaN:0", "Float must be hexadecimal");
    parse_err::<Ieee32>("NaN:0x", "Invalid NaN payload");
    parse_ok::<Ieee32>("NaN:0x000001", "NaN:0x1");
    parse_ok::<Ieee32>("NaN:0x300001", "NaN:0x300001");
    // Payload exceeds the 22 available quiet-NaN payload bits.
    parse_err::<Ieee32>("NaN:0x400001", "Invalid NaN payload");
    parse_ok::<Ieee32>("sNaN:0x1", "sNaN:0x1");
    // A signaling NaN with a zero payload would be an infinity bit pattern.
    parse_err::<Ieee32>("sNaN:0x0", "Invalid sNaN payload");
    parse_ok::<Ieee32>("sNaN:0x200001", "sNaN:0x200001");
    parse_err::<Ieee32>("sNaN:0x400001", "Invalid sNaN payload");
}
|
||||
|
||||
#[test]
fn format_ieee64() {
    // Exact formatting of `Ieee64` values as hexadecimal floats with a
    // 13-digit (52-bit) mantissa.
    assert_eq!(Ieee64::new(0.0).to_string(), "0.0");
    assert_eq!(Ieee64::new(-0.0).to_string(), "-0.0");
    assert_eq!(Ieee64::new(1.0).to_string(), "0x1.0000000000000p0");
    assert_eq!(Ieee64::new(1.5).to_string(), "0x1.8000000000000p0");
    assert_eq!(Ieee64::new(0.5).to_string(), "0x1.0000000000000p-1");
    assert_eq!(Ieee64::new(f64::EPSILON).to_string(),
               "0x1.0000000000000p-52");
    assert_eq!(Ieee64::new(f64::MIN).to_string(), "-0x1.fffffffffffffp1023");
    assert_eq!(Ieee64::new(f64::MAX).to_string(), "0x1.fffffffffffffp1023");
    // Smallest positive normal number.
    assert_eq!(Ieee64::new(f64::MIN_POSITIVE).to_string(),
               "0x1.0000000000000p-1022");
    // Subnormals.
    assert_eq!(Ieee64::new(f64::MIN_POSITIVE / 2.0).to_string(),
               "0x0.8000000000000p-1022");
    assert_eq!(Ieee64::new(f64::MIN_POSITIVE * f64::EPSILON).to_string(),
               "0x0.0000000000001p-1022");
    assert_eq!(Ieee64::new(f64::INFINITY).to_string(), "Inf");
    assert_eq!(Ieee64::new(f64::NEG_INFINITY).to_string(), "-Inf");
    assert_eq!(Ieee64::new(f64::NAN).to_string(), "NaN");
    assert_eq!(Ieee64::new(-f64::NAN).to_string(), "-NaN");
    // Construct some qNaNs with payloads.
    assert_eq!(Ieee64::from_bits(0x7ff8000000000001).to_string(), "NaN:0x1");
    assert_eq!(Ieee64::from_bits(0x7ffc000000000001).to_string(),
               "NaN:0x4000000000001");
    // Signaling NaNs.
    assert_eq!(Ieee64::from_bits(0x7ff0000000000001).to_string(),
               "sNaN:0x1");
    assert_eq!(Ieee64::from_bits(0x7ff4000000000001).to_string(),
               "sNaN:0x4000000000001");
}
|
||||
|
||||
#[test]
fn parse_ieee64() {
    // Basic syntax: zeros, optional sign, optional radix point.
    parse_ok::<Ieee64>("0.0", "0.0");
    parse_ok::<Ieee64>("-0.0", "-0.0");
    parse_ok::<Ieee64>("0x0", "0.0");
    parse_ok::<Ieee64>("0x0.0", "0.0");
    parse_ok::<Ieee64>("0x.0", "0.0");
    parse_ok::<Ieee64>("0x0.", "0.0");
    parse_ok::<Ieee64>("0x1", "0x1.0000000000000p0");
    parse_ok::<Ieee64>("-0x1", "-0x1.0000000000000p0");
    parse_ok::<Ieee64>("0x10", "0x1.0000000000000p4");
    parse_ok::<Ieee64>("0x10.0", "0x1.0000000000000p4");
    // Decimal (non-hex) and malformed inputs are rejected.
    parse_err::<Ieee64>("0.", "Float must be hexadecimal");
    parse_err::<Ieee64>(".0", "Float must be hexadecimal");
    parse_err::<Ieee64>("0", "Float must be hexadecimal");
    parse_err::<Ieee64>("-0", "Float must be hexadecimal");
    parse_err::<Ieee64>(".", "Float must be hexadecimal");
    parse_err::<Ieee64>("", "Float must be hexadecimal");
    parse_err::<Ieee64>("-", "Float must be hexadecimal");
    parse_err::<Ieee64>("0x", "No digits");
    parse_err::<Ieee64>("0x..", "Multiple radix points");

    // Check significant bits.
    parse_ok::<Ieee64>("0x0.fffffffffffff8", "0x1.fffffffffffffp-1");
    parse_ok::<Ieee64>("0x1.fffffffffffff", "0x1.fffffffffffffp0");
    parse_ok::<Ieee64>("0x3.ffffffffffffe", "0x1.fffffffffffffp1");
    parse_ok::<Ieee64>("0x7.ffffffffffffc", "0x1.fffffffffffffp2");
    parse_ok::<Ieee64>("0xf.ffffffffffff8", "0x1.fffffffffffffp3");
    parse_err::<Ieee64>("0x3.fffffffffffff", "Too many significant bits");
    parse_err::<Ieee64>("0x001.fffffe00000000", "Too many digits");

    // Exponents.
    parse_ok::<Ieee64>("0x1p3", "0x1.0000000000000p3");
    parse_ok::<Ieee64>("0x1p-3", "0x1.0000000000000p-3");
    parse_ok::<Ieee64>("0x1.0p3", "0x1.0000000000000p3");
    parse_ok::<Ieee64>("0x2.0p3", "0x1.0000000000000p4");
    parse_ok::<Ieee64>("0x1.0p1023", "0x1.0000000000000p1023");
    parse_ok::<Ieee64>("0x1.0p-1022", "0x1.0000000000000p-1022");
    parse_ok::<Ieee64>("0x0.1p-1018", "0x1.0000000000000p-1022");
    parse_err::<Ieee64>("0x2.0p1023", "Magnitude too large");

    // Subnormals.
    parse_ok::<Ieee64>("0x1.0p-1023", "0x0.8000000000000p-1022");
    parse_ok::<Ieee64>("0x1.0p-1074", "0x0.0000000000001p-1022");
    parse_ok::<Ieee64>("0x0.0000000000001p-1022", "0x0.0000000000001p-1022");
    parse_err::<Ieee64>("0x0.10000000000008p-1022", "Subnormal underflow");
    parse_err::<Ieee64>("0x1.8p-1074", "Subnormal underflow");
    parse_err::<Ieee64>("0x1.0p-1075", "Magnitude too small");

    // NaNs and Infs.
    parse_ok::<Ieee64>("Inf", "Inf");
    parse_ok::<Ieee64>("-Inf", "-Inf");
    parse_ok::<Ieee64>("NaN", "NaN");
    parse_ok::<Ieee64>("-NaN", "-NaN");
    parse_ok::<Ieee64>("NaN:0x0", "NaN");
    parse_err::<Ieee64>("NaN:", "Float must be hexadecimal");
    parse_err::<Ieee64>("NaN:0", "Float must be hexadecimal");
    parse_err::<Ieee64>("NaN:0x", "Invalid NaN payload");
    parse_ok::<Ieee64>("NaN:0x000001", "NaN:0x1");
    parse_ok::<Ieee64>("NaN:0x4000000000001", "NaN:0x4000000000001");
    // Payload exceeds the 51 available quiet-NaN payload bits.
    parse_err::<Ieee64>("NaN:0x8000000000001", "Invalid NaN payload");
    parse_ok::<Ieee64>("sNaN:0x1", "sNaN:0x1");
    // A signaling NaN with a zero payload would be an infinity bit pattern.
    parse_err::<Ieee64>("sNaN:0x0", "Invalid sNaN payload");
    parse_ok::<Ieee64>("sNaN:0x4000000000001", "sNaN:0x4000000000001");
    parse_err::<Ieee64>("sNaN:0x8000000000001", "Invalid sNaN payload");
}
|
||||
}
|
||||
@@ -1,695 +0,0 @@
|
||||
//! Instruction formats and opcodes.
|
||||
//!
|
||||
//! The `instructions` module contains definitions for instruction formats, opcodes, and the
|
||||
//! in-memory representation of IL instructions.
|
||||
//!
|
||||
//! A large part of this module is auto-generated from the instruction descriptions in the meta
|
||||
//! directory.
|
||||
|
||||
use std::fmt::{self, Display, Formatter};
|
||||
use std::str::FromStr;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
|
||||
use ir::{Value, Type, Ebb, JumpTable, FuncRef};
|
||||
use ir::immediates::{Imm64, Uimm8, Ieee32, Ieee64, ImmVector};
|
||||
use ir::condcodes::*;
|
||||
use ir::types;
|
||||
|
||||
// Include code generated by `meta/gen_instr.py`. This file contains:
|
||||
//
|
||||
// - The `pub enum InstructionFormat` enum with all the instruction formats.
|
||||
// - The `pub enum Opcode` definition with all known opcodes,
|
||||
// - The `const OPCODE_FORMAT: [InstructionFormat; N]` table.
|
||||
// - The private `fn opcode_name(Opcode) -> &'static str` function, and
|
||||
// - The hash table `const OPCODE_HASH_TABLE: [Opcode; N]`.
|
||||
//
|
||||
// For value type constraints:
|
||||
//
|
||||
// - The `const OPCODE_CONSTRAINTS : [OpcodeConstraints; N]` table.
|
||||
// - The `const TYPE_SETS : [ValueTypeSet; N]` table.
|
||||
// - The `const OPERAND_CONSTRAINTS : [OperandConstraint; N]` table.
|
||||
//
|
||||
include!(concat!(env!("OUT_DIR"), "/opcodes.rs"));
|
||||
|
||||
impl Display for Opcode {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
write!(f, "{}", opcode_name(*self))
|
||||
}
|
||||
}
|
||||
|
||||
impl Opcode {
|
||||
/// Get the instruction format for this opcode.
|
||||
pub fn format(self) -> Option<InstructionFormat> {
|
||||
if self == Opcode::NotAnOpcode {
|
||||
None
|
||||
} else {
|
||||
Some(OPCODE_FORMAT[self as usize - 1])
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the constraint descriptor for this opcode.
|
||||
/// Panic if this is called on `NotAnOpcode`.
|
||||
pub fn constraints(self) -> OpcodeConstraints {
|
||||
OPCODE_CONSTRAINTS[self as usize - 1]
|
||||
}
|
||||
}
|
||||
|
||||
// This trait really belongs in libreader where it is used by the .cton file parser, but since it
// critically depends on the `opcode_name()` function which is needed here anyway, it lives in this
// module. This also saves us from running the build script twice to generate code for the two
// separate crates.
impl FromStr for Opcode {
    type Err = &'static str;

    /// Parse an Opcode name from a string.
    fn from_str(s: &str) -> Result<Opcode, &'static str> {
        use constant_hash::{Table, simple_hash, probe};

        // Adapt the generated `OPCODE_HASH_TABLE` to the generic probing interface.
        // Slots holding `NotAnOpcode` have no key, so `probe` treats them as empty.
        impl<'a> Table<&'a str> for [Opcode] {
            fn len(&self) -> usize {
                self.len()
            }

            fn key(&self, idx: usize) -> Option<&'a str> {
                if self[idx] == Opcode::NotAnOpcode {
                    None
                } else {
                    Some(opcode_name(self[idx]))
                }
            }
        }

        match probe::<&str, [Opcode]>(&OPCODE_HASH_TABLE, s, simple_hash(s)) {
            None => Err("Unknown opcode"),
            Some(i) => Ok(OPCODE_HASH_TABLE[i]),
        }
    }
}
|
||||
|
||||
/// Contents of an instruction.
///
/// Every variant must contain `opcode` and `ty` fields. An instruction that doesn't produce a
/// value should have its `ty` field set to `VOID`. The size of `InstructionData` should be kept at
/// 16 bytes on 64-bit architectures. If more space is needed to represent an instruction, use a
/// `Box<AuxData>` to store the additional information out of line.
#[derive(Clone, Debug)]
pub enum InstructionData {
    /// No operands beyond the opcode itself.
    Nullary { opcode: Opcode, ty: Type },
    /// A single value operand.
    Unary {
        opcode: Opcode,
        ty: Type,
        arg: Value,
    },
    /// A single 64-bit integer immediate.
    UnaryImm {
        opcode: Opcode,
        ty: Type,
        imm: Imm64,
    },
    /// A single 32-bit float immediate.
    UnaryIeee32 {
        opcode: Opcode,
        ty: Type,
        imm: Ieee32,
    },
    /// A single 64-bit float immediate.
    UnaryIeee64 {
        opcode: Opcode,
        ty: Type,
        imm: Ieee64,
    },
    /// A single immediate vector constant, boxed to stay within the 16-byte budget.
    UnaryImmVector {
        opcode: Opcode,
        ty: Type,
        data: Box<UnaryImmVectorData>,
    },
    /// One value operand, producing a second result value in addition to the first.
    UnarySplit {
        opcode: Opcode,
        ty: Type,
        second_result: Value,
        arg: Value,
    },
    /// Two value operands.
    Binary {
        opcode: Opcode,
        ty: Type,
        args: [Value; 2],
    },
    /// One value operand and one `Imm64` immediate (immediate is the rhs).
    BinaryImm {
        opcode: Opcode,
        ty: Type,
        arg: Value,
        imm: Imm64,
    },
    // Same as BinaryImm, but the immediate is the lhs operand.
    BinaryImmRev {
        opcode: Opcode,
        ty: Type,
        arg: Value,
        imm: Imm64,
    },
    /// Two value operands plus a second result value (e.g. an overflow flag).
    BinaryOverflow {
        opcode: Opcode,
        ty: Type,
        second_result: Value,
        args: [Value; 2],
    },
    /// Three value operands.
    Ternary {
        opcode: Opcode,
        ty: Type,
        args: [Value; 3],
    },
    /// Three value operands (boxed out of line) plus a second result value.
    TernaryOverflow {
        opcode: Opcode,
        ty: Type,
        second_result: Value,
        data: Box<TernaryOverflowData>,
    },
    /// Two value operands and a lane index immediate.
    InsertLane {
        opcode: Opcode,
        ty: Type,
        lane: Uimm8,
        args: [Value; 2],
    },
    /// One value operand and a lane index immediate.
    ExtractLane {
        opcode: Opcode,
        ty: Type,
        lane: Uimm8,
        arg: Value,
    },
    /// Two value operands compared with an integer condition code.
    IntCompare {
        opcode: Opcode,
        ty: Type,
        cond: IntCC,
        args: [Value; 2],
    },
    /// Two value operands compared with a floating point condition code.
    FloatCompare {
        opcode: Opcode,
        ty: Type,
        cond: FloatCC,
        args: [Value; 2],
    },
    /// Unconditional jump; destination and EBB arguments are boxed out of line.
    Jump {
        opcode: Opcode,
        ty: Type,
        data: Box<JumpData>,
    },
    /// Conditional branch; condition, destination and EBB arguments are boxed out of line.
    Branch {
        opcode: Opcode,
        ty: Type,
        data: Box<BranchData>,
    },
    /// Indirect branch through a jump table, indexed by the value operand.
    BranchTable {
        opcode: Opcode,
        ty: Type,
        arg: Value,
        table: JumpTable,
    },
    /// Function call; callee and argument list are boxed out of line.
    Call {
        opcode: Opcode,
        ty: Type,
        second_result: Value,
        data: Box<CallData>,
    },
    /// Return; the list of returned values is boxed out of line.
    Return {
        opcode: Opcode,
        ty: Type,
        data: Box<ReturnData>,
    },
}
|
||||
|
||||
/// A variable list of `Value` operands used for function call arguments and passing arguments to
/// basic blocks.
#[derive(Clone, Debug)]
pub struct VariableArgs(Vec<Value>);

impl VariableArgs {
    /// Create an empty argument list.
    pub fn new() -> VariableArgs {
        VariableArgs(Vec::new())
    }

    /// Append a value to the end of the list.
    pub fn push(&mut self, v: Value) {
        self.0.push(v)
    }

    /// Check whether the list contains no values.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
|
||||
|
||||
// Coerce VariableArgs into a &[Value] slice.
|
||||
impl Deref for VariableArgs {
|
||||
type Target = [Value];
|
||||
|
||||
fn deref<'a>(&'a self) -> &'a [Value] {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for VariableArgs {
|
||||
fn deref_mut<'a>(&'a mut self) -> &'a mut [Value] {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for VariableArgs {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
|
||||
for (i, val) in self.0.iter().enumerate() {
|
||||
if i == 0 {
|
||||
try!(write!(fmt, "{}", val));
|
||||
} else {
|
||||
try!(write!(fmt, ", {}", val));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for VariableArgs {
|
||||
fn default() -> VariableArgs {
|
||||
VariableArgs::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Payload data for `vconst`.
#[derive(Clone, Debug)]
pub struct UnaryImmVectorData {
    /// Raw immediate bytes of the vector constant.
    pub imm: ImmVector,
}

impl Display for UnaryImmVectorData {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // Rendered as `#` followed by two lowercase hex digits per byte.
        try!(write!(f, "#"));
        for b in &self.imm {
            try!(write!(f, "{:02x}", b));
        }
        Ok(())
    }
}
|
||||
|
||||
/// Payload data for ternary instructions with multiple results, such as `iadd_carry`.
#[derive(Clone, Debug)]
pub struct TernaryOverflowData {
    /// The three value operands, stored out of line to keep `InstructionData` small.
    pub args: [Value; 3],
}

impl Display for TernaryOverflowData {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "{}, {}, {}", self.args[0], self.args[1], self.args[2])
    }
}
|
||||
|
||||
/// Payload data for jump instructions. These need to carry lists of EBB arguments that won't fit
/// in the allowed InstructionData size.
#[derive(Clone, Debug)]
pub struct JumpData {
    /// EBB the jump transfers control to.
    pub destination: Ebb,
    /// Arguments passed to the destination EBB.
    pub varargs: VariableArgs,
}

impl Display for JumpData {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // With arguments: `ebb1(v1, v2)`; without: just `ebb1`.
        if self.varargs.is_empty() {
            write!(f, "{}", self.destination)
        } else {
            write!(f, "{}({})", self.destination, self.varargs)
        }
    }
}
|
||||
|
||||
/// Payload data for branch instructions. These need to carry lists of EBB arguments that won't fit
/// in the allowed InstructionData size.
#[derive(Clone, Debug)]
pub struct BranchData {
    /// Value controlling the branch.
    pub arg: Value,
    /// EBB the branch may transfer control to.
    pub destination: Ebb,
    /// Arguments passed to the destination EBB.
    pub varargs: VariableArgs,
}

impl Display for BranchData {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // `v1, ebb2` optionally followed by `(args…)`.
        try!(write!(f, "{}, {}", self.arg, self.destination));
        if !self.varargs.is_empty() {
            try!(write!(f, "({})", self.varargs));
        }
        Ok(())
    }
}
|
||||
|
||||
/// Payload of a call instruction.
#[derive(Clone, Debug)]
pub struct CallData {
    /// Callee function.
    pub func_ref: FuncRef,

    /// Dynamically sized array containing call argument values.
    pub varargs: VariableArgs,
}

impl Display for CallData {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // "TBD" is a placeholder: the callee is not rendered here yet, only the arguments.
        write!(f, "TBD({})", self.varargs)
    }
}
|
||||
|
||||
/// Payload of a return instruction.
#[derive(Clone, Debug)]
pub struct ReturnData {
    // Dynamically sized array containing return values.
    pub varargs: VariableArgs,
}
|
||||
|
||||
/// Analyzing an instruction.
///
/// Avoid large matches on instruction formats by using the methods defined here to examine
/// instructions.
impl InstructionData {
    /// Return information about the destination of a branch or jump instruction.
    ///
    /// Any instruction that can transfer control to another EBB reveals its possible destinations
    /// here.
    pub fn analyze_branch<'a>(&'a self) -> BranchInfo<'a> {
        match self {
            &InstructionData::Jump { ref data, .. } => {
                BranchInfo::SingleDest(data.destination, &data.varargs)
            }
            &InstructionData::Branch { ref data, .. } => {
                BranchInfo::SingleDest(data.destination, &data.varargs)
            }
            &InstructionData::BranchTable { table, .. } => BranchInfo::Table(table),
            _ => BranchInfo::NotABranch,
        }
    }

    /// Return true if an instruction is terminating, or false otherwise.
    ///
    /// NOTE(review): `Nullary` instructions are classified as terminating here, presumably to
    /// cover a trap-like opcode — confirm against the opcode definitions in the meta directory.
    pub fn is_terminating<'a>(&'a self) -> bool {
        match self {
            &InstructionData::Jump { .. } => true,
            &InstructionData::Return { .. } => true,
            &InstructionData::Nullary { .. } => true,
            _ => false,
        }
    }
}
|
||||
|
||||
/// Information about branch and jump instructions, as returned by
/// `InstructionData::analyze_branch`.
pub enum BranchInfo<'a> {
    /// This is not a branch or jump instruction.
    /// This instruction will not transfer control to another EBB in the function, but it may still
    /// affect control flow by returning or trapping.
    NotABranch,

    /// This is a branch or jump to a single destination EBB, possibly taking value arguments.
    SingleDest(Ebb, &'a [Value]),

    /// This is a jump table branch which can have many destination EBBs.
    Table(JumpTable),
}
|
||||
|
||||
/// Value type constraints for a given opcode.
///
/// The `InstructionFormat` determines the constraints on most operands, but `Value` operands and
/// results are not determined by the format. Every `Opcode` has an associated
/// `OpcodeConstraints` object that provides the missing details.
#[derive(Clone, Copy)]
pub struct OpcodeConstraints {
    /// Flags for this opcode encoded as a bit field:
    ///
    /// Bits 0-2:
    ///     Number of fixed result values. This does not include `variable_args` results as are
    ///     produced by call instructions.
    ///
    /// Bit 3:
    ///     This opcode is polymorphic and the controlling type variable can be inferred from the
    ///     designated input operand. This is the `typevar_operand` index given to the
    ///     `InstructionFormat` meta language object. When bit 0 is not set, the controlling type
    ///     variable must be the first output value instead.
    flags: u8,

    /// Permitted set of types for the controlling type variable as an index into `TYPE_SETS`.
    /// An out-of-range index means the opcode is not polymorphic (see `typeset_offset()`).
    typeset_offset: u8,

    /// Offset into `OPERAND_CONSTRAINT` table of the descriptors for this opcode. The first
    /// `fixed_results()` entries describe the result constraints, then follows constraints for the
    /// fixed `Value` input operands. The number of `Value` inputs is determined by the instruction
    /// format.
    constraint_offset: u16,
}
|
||||
|
||||
impl OpcodeConstraints {
    /// Can the controlling type variable for this opcode be inferred from the designated value
    /// input operand?
    /// This also implies that this opcode is polymorphic.
    pub fn use_typevar_operand(self) -> bool {
        // Bit 3 of the flags field.
        (self.flags & 0x8) != 0
    }

    /// Get the number of *fixed* result values produced by this opcode.
    /// This does not include `variable_args` produced by calls.
    pub fn fixed_results(self) -> usize {
        // Bits 0-2 of the flags field.
        (self.flags & 0x7) as usize
    }

    /// Get the offset into `TYPE_SETS` for the controlling type variable.
    /// Returns `None` if the instruction is not polymorphic.
    fn typeset_offset(self) -> Option<usize> {
        // Non-polymorphic opcodes are encoded with an out-of-range offset.
        let offset = self.typeset_offset as usize;
        if offset < TYPE_SETS.len() {
            Some(offset)
        } else {
            None
        }
    }

    /// Get the offset into OPERAND_CONSTRAINTS where the descriptors for this opcode begin.
    fn constraint_offset(self) -> usize {
        self.constraint_offset as usize
    }

    /// Get the value type of result number `n`, having resolved the controlling type variable to
    /// `ctrl_type`.
    ///
    /// Panics if `n` is not a valid result index or the constraint is `Free`.
    pub fn result_type(self, n: usize, ctrl_type: Type) -> Type {
        assert!(n < self.fixed_results(), "Invalid result index");
        OPERAND_CONSTRAINTS[self.constraint_offset() + n]
            .resolve(ctrl_type)
            .expect("Result constraints can't be free")
    }

    /// Get the typeset of allowed types for the controlling type variable in a polymorphic
    /// instruction.
    pub fn ctrl_typeset(self) -> Option<ValueTypeSet> {
        self.typeset_offset().map(|offset| TYPE_SETS[offset])
    }

    /// Is this instruction polymorphic?
    pub fn is_polymorphic(self) -> bool {
        self.ctrl_typeset().is_some()
    }
}
|
||||
|
||||
/// A value type set describes the permitted set of types for a type variable.
///
/// All bounds are expressed in log2 units and are half-open: a value `x` is in range when
/// `min <= log2(x) < max` (see `is_base_type` and `contains`). A `min == max == 0` pair
/// therefore excludes the corresponding base type class entirely.
#[derive(Clone, Copy)]
pub struct ValueTypeSet {
    // Bounds on log2(lane count).
    min_lanes: u8,
    max_lanes: u8,
    // Bounds on log2(bit width) of integer lane types.
    min_int: u8,
    max_int: u8,
    // Bounds on log2(bit width) of floating point lane types.
    min_float: u8,
    max_float: u8,
    // Bounds on log2(bit width) of boolean lane types.
    min_bool: u8,
    max_bool: u8,
}
|
||||
|
||||
impl ValueTypeSet {
    /// Is `scalar` part of the base type set?
    ///
    /// Note that the base type set does not have to be included in the type set proper.
    fn is_base_type(&self, scalar: Type) -> bool {
        // Compare log2(lane bits) against the half-open per-class range.
        let l2b = scalar.log2_lane_bits();
        if scalar.is_int() {
            self.min_int <= l2b && l2b < self.max_int
        } else if scalar.is_float() {
            self.min_float <= l2b && l2b < self.max_float
        } else if scalar.is_bool() {
            self.min_bool <= l2b && l2b < self.max_bool
        } else {
            false
        }
    }

    /// Does `typ` belong to this set?
    pub fn contains(&self, typ: Type) -> bool {
        // Both the lane count and the lane type must be in range.
        let l2l = typ.log2_lane_count();
        self.min_lanes <= l2l && l2l < self.max_lanes && self.is_base_type(typ.lane_type())
    }

    /// Get an example member of this type set.
    ///
    /// This is used for error messages to avoid suggesting invalid types.
    pub fn example(&self) -> Type {
        // A max bound above 5 means the 32-bit member (log2(32) == 5) of that class is allowed.
        let t = if self.max_int > 5 {
            types::I32
        } else if self.max_float > 5 {
            types::F32
        } else if self.max_bool > 5 {
            types::B32
        } else {
            types::B1
        };
        // Widen to the smallest permitted lane count.
        t.by(1 << self.min_lanes).unwrap()
    }
}
|
||||
|
||||
/// Operand constraints. This describes the value type constraints on a single `Value` operand.
enum OperandConstraint {
    /// This operand has a concrete value type.
    Concrete(Type),

    /// This operand can vary freely within the given type set.
    /// The type set is identified by its index into the TYPE_SETS constant table.
    Free(u8),

    /// This operand is the same type as the controlling type variable.
    Same,

    /// This operand is `ctrlType.lane_type()`.
    LaneOf,

    /// This operand is `ctrlType.as_bool()`.
    AsBool,

    /// This operand is `ctrlType.half_width()`.
    HalfWidth,

    /// This operand is `ctrlType.double_width()`.
    DoubleWidth,
}
|
||||
|
||||
impl OperandConstraint {
    /// Resolve this operand constraint into a concrete value type, given the value of the
    /// controlling type variable.
    /// Returns `None` if this is a free operand which is independent of the controlling type
    /// variable.
    pub fn resolve(&self, ctrl_type: Type) -> Option<Type> {
        use self::OperandConstraint::*;
        match *self {
            Concrete(t) => Some(t),
            Free(_) => None,
            Same => Some(ctrl_type),
            LaneOf => Some(ctrl_type.lane_type()),
            AsBool => Some(ctrl_type.as_bool()),
            // The width-changing constraints panic (rather than return `None`) when `ctrl_type`
            // has no half/double-width counterpart.
            HalfWidth => Some(ctrl_type.half_width().expect("invalid type for half_width")),
            DoubleWidth => Some(ctrl_type.double_width().expect("invalid type for double_width")),
        }
    }
}
|
||||
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn opcodes() {
        // Basic equality, format lookup and name round-tripping for opcodes.
        let x = Opcode::Iadd;
        let mut y = Opcode::Isub;

        assert!(x != y);
        y = Opcode::Iadd;
        assert_eq!(x, y);
        assert_eq!(x.format(), Some(InstructionFormat::Binary));

        assert_eq!(format!("{:?}", Opcode::IaddImm), "IaddImm");
        assert_eq!(Opcode::IaddImm.to_string(), "iadd_imm");

        // Check the matcher.
        assert_eq!("iadd".parse::<Opcode>(), Ok(Opcode::Iadd));
        assert_eq!("iadd_imm".parse::<Opcode>(), Ok(Opcode::IaddImm));
        assert_eq!("iadd\0".parse::<Opcode>(), Err("Unknown opcode"));
        assert_eq!("".parse::<Opcode>(), Err("Unknown opcode"));
        assert_eq!("\0".parse::<Opcode>(), Err("Unknown opcode"));
    }

    #[test]
    fn instruction_data() {
        use std::mem;
        // The size of the InstructionData enum is important for performance. It should not exceed
        // 16 bytes. Use `Box<FooData>` out-of-line payloads for instruction formats that require
        // more space than that.
        // It would be fine with a data structure smaller than 16 bytes, but what are the odds of
        // that?
        assert_eq!(mem::size_of::<InstructionData>(), 16);
    }

    #[test]
    fn value_set() {
        use ir::types::*;

        // Ints and bools of 8-64 bits, 1-128 lanes; no floats.
        let vts = ValueTypeSet {
            min_lanes: 0,
            max_lanes: 8,
            min_int: 3,
            max_int: 7,
            min_float: 0,
            max_float: 0,
            min_bool: 3,
            max_bool: 7,
        };
        assert!(vts.contains(I32));
        assert!(vts.contains(I64));
        assert!(vts.contains(I32X4));
        assert!(!vts.contains(F32));
        assert!(!vts.contains(B1));
        assert!(vts.contains(B8));
        assert!(vts.contains(B64));
        assert_eq!(vts.example().to_string(), "i32");

        // Floats take precedence over bools in `example()` when ints are excluded.
        let vts = ValueTypeSet {
            min_lanes: 0,
            max_lanes: 8,
            min_int: 0,
            max_int: 0,
            min_float: 5,
            max_float: 7,
            min_bool: 3,
            max_bool: 7,
        };
        assert_eq!(vts.example().to_string(), "f32");

        // min_lanes = 1 (log2) forces at least 2 lanes in the example.
        let vts = ValueTypeSet {
            min_lanes: 1,
            max_lanes: 8,
            min_int: 0,
            max_int: 0,
            min_float: 5,
            max_float: 7,
            min_bool: 3,
            max_bool: 7,
        };
        assert_eq!(vts.example().to_string(), "f32x2");

        // Bool-only set with at least 4 lanes.
        let vts = ValueTypeSet {
            min_lanes: 2,
            max_lanes: 8,
            min_int: 0,
            max_int: 0,
            min_float: 0,
            max_float: 0,
            min_bool: 3,
            max_bool: 7,
        };
        assert!(!vts.contains(B32X2));
        assert!(vts.contains(B32X4));
        assert_eq!(vts.example().to_string(), "b32x4");

        let vts = ValueTypeSet {
            // TypeSet(lanes=(1, 256), ints=(8, 64))
            min_lanes: 0,
            max_lanes: 9,
            min_int: 3,
            max_int: 7,
            min_float: 0,
            max_float: 0,
            min_bool: 0,
            max_bool: 0,
        };
        assert!(vts.contains(I32));
        assert!(vts.contains(I32X4));
    }
}
|
||||
@@ -1,157 +0,0 @@
|
||||
//! Jump table representation.
|
||||
//!
|
||||
//! Jump tables are declared in the preamble and assigned an `ir::entities::JumpTable` reference.
|
||||
//! The actual table of destinations is stored in a `JumpTableData` struct defined in this module.
|
||||
|
||||
use ir::entities::{Ebb, NO_EBB};
|
||||
use std::iter;
|
||||
use std::slice;
|
||||
use std::fmt::{self, Display, Formatter};
|
||||
|
||||
/// Contents of a jump table.
///
/// All jump tables use 0-based indexing and are expected to be densely populated. They don't need
/// to be completely populated, though. Individual entries can be missing.
#[derive(Clone)]
pub struct JumpTableData {
    // Table entries, using NO_EBB as a placeholder for missing entries.
    table: Vec<Ebb>,

    // How many `NO_EBB` holes in table?
    holes: usize,
}
|
||||
|
||||
impl JumpTableData {
    /// Create a new empty jump table.
    pub fn new() -> JumpTableData {
        JumpTableData {
            table: Vec::new(),
            holes: 0,
        }
    }

    /// Set a table entry.
    ///
    /// The table will grow as needed to fit 'idx'.
    ///
    /// Panics if `dest` is `NO_EBB`, which is reserved as the hole marker.
    pub fn set_entry(&mut self, idx: usize, dest: Ebb) {
        assert!(dest != NO_EBB);
        // Resize table to fit `idx`.
        if idx >= self.table.len() {
            // Every slot between the old end and `idx` starts out as a hole.
            self.holes += idx - self.table.len();
            self.table.resize(idx + 1, NO_EBB);
        } else if self.table[idx] == NO_EBB {
            // We're filling in an existing hole.
            self.holes -= 1;
        }
        self.table[idx] = dest;
    }

    /// Clear a table entry.
    ///
    /// The `br_table` instruction will fall through if given an index corresponding to a cleared
    /// table entry.
    pub fn clear_entry(&mut self, idx: usize) {
        // Out-of-range or already-cleared indices are a no-op.
        if idx < self.table.len() && self.table[idx] != NO_EBB {
            self.holes += 1;
            self.table[idx] = NO_EBB;
        }
    }

    /// Get the entry for `idx`, or `None` if the index is out of range or is a hole.
    pub fn get_entry(&self, idx: usize) -> Option<Ebb> {
        if idx < self.table.len() && self.table[idx] != NO_EBB {
            Some(self.table[idx])
        } else {
            None
        }
    }

    /// Enumerate over all `(idx, dest)` pairs in the table in order.
    ///
    /// This returns an iterator that skips any empty slots in the table.
    pub fn entries<'a>(&'a self) -> Entries {
        Entries(self.table.iter().cloned().enumerate())
    }

    /// Access the whole table as a mutable slice.
    ///
    /// NOTE(review): callers can write `NO_EBB` through this slice without updating the `holes`
    /// counter — verify callers preserve that invariant.
    pub fn as_mut_slice(&mut self) -> &mut [Ebb] {
        self.table.as_mut_slice()
    }
}
|
||||
|
||||
/// Enumerate `(idx, dest)` pairs in order.
|
||||
pub struct Entries<'a>(iter::Enumerate<iter::Cloned<slice::Iter<'a, Ebb>>>);
|
||||
|
||||
impl<'a> Iterator for Entries<'a> {
|
||||
type Item = (usize, Ebb);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
loop {
|
||||
if let Some((idx, dest)) = self.0.next() {
|
||||
if dest != NO_EBB {
|
||||
return Some((idx, dest));
|
||||
}
|
||||
} else {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for JumpTableData {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
|
||||
let first = self.table.first().cloned().unwrap_or_default();
|
||||
if first == NO_EBB {
|
||||
try!(write!(fmt, "jump_table 0"));
|
||||
} else {
|
||||
try!(write!(fmt, "jump_table {}", first));
|
||||
}
|
||||
|
||||
for dest in self.table.iter().cloned().skip(1) {
|
||||
if dest == NO_EBB {
|
||||
try!(write!(fmt, ", 0"));
|
||||
} else {
|
||||
try!(write!(fmt, ", {}", dest));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::JumpTableData;
|
||||
use ir::Ebb;
|
||||
use entity_map::EntityRef;
|
||||
|
||||
#[test]
|
||||
fn empty() {
|
||||
let jt = JumpTableData::new();
|
||||
|
||||
assert_eq!(jt.get_entry(0), None);
|
||||
assert_eq!(jt.get_entry(10), None);
|
||||
|
||||
assert_eq!(jt.to_string(), "jump_table 0");
|
||||
|
||||
let v: Vec<(usize, Ebb)> = jt.entries().collect();
|
||||
assert_eq!(v, []);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn insert() {
|
||||
let e1 = Ebb::new(1);
|
||||
let e2 = Ebb::new(2);
|
||||
|
||||
let mut jt = JumpTableData::new();
|
||||
|
||||
jt.set_entry(0, e1);
|
||||
jt.set_entry(0, e2);
|
||||
jt.set_entry(10, e1);
|
||||
|
||||
assert_eq!(jt.to_string(),
|
||||
"jump_table ebb2, 0, 0, 0, 0, 0, 0, 0, 0, 0, ebb1");
|
||||
|
||||
let v: Vec<(usize, Ebb)> = jt.entries().collect();
|
||||
assert_eq!(v, [(0, e2), (10, e1)]);
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,27 +0,0 @@
|
||||
//! Representation of Cretonne IL functions.
|
||||
|
||||
pub mod types;
|
||||
pub mod entities;
|
||||
pub mod condcodes;
|
||||
pub mod immediates;
|
||||
pub mod instructions;
|
||||
pub mod stackslot;
|
||||
pub mod jumptable;
|
||||
pub mod dfg;
|
||||
pub mod layout;
|
||||
pub mod function;
|
||||
mod funcname;
|
||||
mod extfunc;
|
||||
mod builder;
|
||||
|
||||
pub use ir::funcname::FunctionName;
|
||||
pub use ir::extfunc::{Signature, ArgumentType, ArgumentExtension};
|
||||
pub use ir::types::Type;
|
||||
pub use ir::entities::{Ebb, Inst, Value, StackSlot, JumpTable, FuncRef, SigRef};
|
||||
pub use ir::instructions::{Opcode, InstructionData, VariableArgs};
|
||||
pub use ir::stackslot::StackSlotData;
|
||||
pub use ir::jumptable::JumpTableData;
|
||||
pub use ir::dfg::{DataFlowGraph, ValueDef};
|
||||
pub use ir::layout::{Layout, Cursor};
|
||||
pub use ir::function::Function;
|
||||
pub use ir::builder::Builder;
|
||||
@@ -1,45 +0,0 @@
|
||||
//! Stack slots.
|
||||
//!
|
||||
//! The `StackSlotData` struct keeps track of a single stack slot in a function.
|
||||
//!
|
||||
|
||||
use std::fmt::{self, Display, Formatter};
|
||||
|
||||
/// Contents of a stack slot.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct StackSlotData {
|
||||
/// Size of stack slot in bytes.
|
||||
pub size: u32,
|
||||
}
|
||||
|
||||
impl StackSlotData {
|
||||
/// Create a stack slot with the specified byte size.
|
||||
pub fn new(size: u32) -> StackSlotData {
|
||||
StackSlotData { size: size }
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for StackSlotData {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
|
||||
write!(fmt, "stack_slot {}", self.size)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use ir::Function;
|
||||
use super::StackSlotData;
|
||||
|
||||
#[test]
|
||||
fn stack_slot() {
|
||||
let mut func = Function::new();
|
||||
|
||||
let ss0 = func.stack_slots.push(StackSlotData::new(4));
|
||||
let ss1 = func.stack_slots.push(StackSlotData::new(8));
|
||||
assert_eq!(ss0.to_string(), "ss0");
|
||||
assert_eq!(ss1.to_string(), "ss1");
|
||||
|
||||
assert_eq!(func.stack_slots[ss0].size, 4);
|
||||
assert_eq!(func.stack_slots[ss1].size, 8);
|
||||
}
|
||||
}
|
||||
@@ -1,342 +0,0 @@
|
||||
//! Common types for the Cretonne code generator.
|
||||
|
||||
use std::default::Default;
|
||||
use std::fmt::{self, Display, Formatter};
|
||||
|
||||
// ====--------------------------------------------------------------------------------------====//
|
||||
//
|
||||
// Value types
|
||||
//
|
||||
// ====--------------------------------------------------------------------------------------====//
|
||||
|
||||
/// The type of an SSA value.
|
||||
///
|
||||
/// The `VOID` type is only used for instructions that produce no value. It can't be part of a SIMD
|
||||
/// vector.
|
||||
///
|
||||
/// Basic integer types: `I8`, `I16`, `I32`, and `I64`. These types are sign-agnostic.
|
||||
///
|
||||
/// Basic floating point types: `F32` and `F64`. IEEE single and double precision.
|
||||
///
|
||||
/// Boolean types: `B1`, `B8`, `B16`, `B32`, and `B64`. These all encode 'true' or 'false'. The
|
||||
/// larger types use redundant bits.
|
||||
///
|
||||
/// SIMD vector types have power-of-two lanes, up to 256. Lanes can be any int/float/bool type.
|
||||
///
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
|
||||
pub struct Type(u8);
|
||||
|
||||
/// No type. Used for functions without a return value. Can't be loaded or stored. Can't be part of
|
||||
/// a SIMD vector.
|
||||
pub const VOID: Type = Type(0);
|
||||
|
||||
// Include code generated by `meta/gen_types.py`. This file contains constant definitions for all
|
||||
// the scalar types as well as common vector types for 64, 128, 256, and 512-bit SID vectors.
|
||||
include!(concat!(env!("OUT_DIR"), "/types.rs"));
|
||||
|
||||
impl Type {
|
||||
/// Get the lane type of this SIMD vector type.
|
||||
///
|
||||
/// A scalar type is the same as a SIMD vector type with one lane, so it returns itself.
|
||||
pub fn lane_type(self) -> Type {
|
||||
Type(self.0 & 0x0f)
|
||||
}
|
||||
|
||||
/// Get log2 of the number of bits in a lane.
|
||||
pub fn log2_lane_bits(self) -> u8 {
|
||||
match self.lane_type() {
|
||||
B1 => 0,
|
||||
B8 | I8 => 3,
|
||||
B16 | I16 => 4,
|
||||
B32 | I32 | F32 => 5,
|
||||
B64 | I64 | F64 => 6,
|
||||
_ => 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the number of bits in a lane.
|
||||
pub fn lane_bits(self) -> u8 {
|
||||
match self.lane_type() {
|
||||
B1 => 1,
|
||||
B8 | I8 => 8,
|
||||
B16 | I16 => 16,
|
||||
B32 | I32 | F32 => 32,
|
||||
B64 | I64 | F64 => 64,
|
||||
_ => 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a type with the same number of lanes as this type, but with the lanes replaced by
|
||||
/// booleans of the same size.
|
||||
pub fn as_bool(self) -> Type {
|
||||
// Replace the low 4 bits with the boolean version, preserve the high 4 bits.
|
||||
let lane = match self.lane_type() {
|
||||
B8 | I8 => B8,
|
||||
B16 | I16 => B16,
|
||||
B32 | I32 | F32 => B32,
|
||||
B64 | I64 | F64 => B64,
|
||||
_ => B1,
|
||||
};
|
||||
Type(lane.0 | (self.0 & 0xf0))
|
||||
}
|
||||
|
||||
/// Get a type with the same number of lanes as this type, but with lanes that are half the
|
||||
/// number of bits.
|
||||
pub fn half_width(self) -> Option<Type> {
|
||||
let lane = match self.lane_type() {
|
||||
I16 => I8,
|
||||
I32 => I16,
|
||||
I64 => I32,
|
||||
F64 => F32,
|
||||
B16 => B8,
|
||||
B32 => B16,
|
||||
B64 => B32,
|
||||
_ => return None,
|
||||
};
|
||||
Some(Type(lane.0 | (self.0 & 0xf0)))
|
||||
}
|
||||
|
||||
/// Get a type with the same number of lanes as this type, but with lanes that are twice the
|
||||
/// number of bits.
|
||||
pub fn double_width(self) -> Option<Type> {
|
||||
let lane = match self.lane_type() {
|
||||
I8 => I16,
|
||||
I16 => I32,
|
||||
I32 => I64,
|
||||
F32 => F64,
|
||||
B8 => B16,
|
||||
B16 => B32,
|
||||
B32 => B64,
|
||||
_ => return None,
|
||||
};
|
||||
Some(Type(lane.0 | (self.0 & 0xf0)))
|
||||
}
|
||||
|
||||
/// Is this the VOID type?
|
||||
pub fn is_void(self) -> bool {
|
||||
self == VOID
|
||||
}
|
||||
|
||||
/// Is this a scalar boolean type?
|
||||
pub fn is_bool(self) -> bool {
|
||||
match self {
|
||||
B1 | B8 | B16 | B32 | B64 => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Is this a scalar integer type?
|
||||
pub fn is_int(self) -> bool {
|
||||
match self {
|
||||
I8 | I16 | I32 | I64 => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Is this a scalar floating point type?
|
||||
pub fn is_float(self) -> bool {
|
||||
match self {
|
||||
F32 | F64 => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get log2 of the number of lanes in this SIMD vector type.
|
||||
///
|
||||
/// All SIMD types have a lane count that is a power of two and no larger than 256, so this
|
||||
/// will be a number in the range 0-8.
|
||||
///
|
||||
/// A scalar type is the same as a SIMD vector type with one lane, so it return 0.
|
||||
pub fn log2_lane_count(self) -> u8 {
|
||||
self.0 >> 4
|
||||
}
|
||||
|
||||
/// Is this a scalar type? (That is, not a SIMD vector type).
|
||||
///
|
||||
/// A scalar type is the same as a SIMD vector type with one lane.
|
||||
pub fn is_scalar(self) -> bool {
|
||||
self.log2_lane_count() == 0
|
||||
}
|
||||
|
||||
/// Get the number of lanes in this SIMD vector type.
|
||||
///
|
||||
/// A scalar type is the same as a SIMD vector type with one lane, so it returns 1.
|
||||
pub fn lane_count(self) -> u16 {
|
||||
1 << self.log2_lane_count()
|
||||
}
|
||||
|
||||
/// Get the total number of bits used to represent this type.
|
||||
pub fn bits(self) -> u16 {
|
||||
self.lane_bits() as u16 * self.lane_count()
|
||||
}
|
||||
|
||||
/// Get a SIMD vector type with `n` times more lanes than this one.
|
||||
///
|
||||
/// If this is a scalar type, this produces a SIMD type with this as a lane type and `n` lanes.
|
||||
///
|
||||
/// If this is already a SIMD vector type, this produces a SIMD vector type with `n *
|
||||
/// self.lane_count()` lanes.
|
||||
pub fn by(self, n: u16) -> Option<Type> {
|
||||
if self.lane_bits() == 0 || !n.is_power_of_two() {
|
||||
return None;
|
||||
}
|
||||
let log2_lanes: u32 = n.trailing_zeros();
|
||||
let new_type = self.0 as u32 + (log2_lanes << 4);
|
||||
if new_type < 0x90 {
|
||||
Some(Type(new_type as u8))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a SIMD vector with half the number of lanes.
|
||||
pub fn half_vector(self) -> Option<Type> {
|
||||
if self.is_scalar() {
|
||||
None
|
||||
} else {
|
||||
Some(Type(self.0 - 0x10))
|
||||
}
|
||||
}
|
||||
|
||||
/// Index of this type, for use with hash tables etc.
|
||||
pub fn index(self) -> usize {
|
||||
self.0 as usize
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Type {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
if self.is_void() {
|
||||
write!(f, "void")
|
||||
} else if self.is_bool() {
|
||||
write!(f, "b{}", self.lane_bits())
|
||||
} else if self.is_int() {
|
||||
write!(f, "i{}", self.lane_bits())
|
||||
} else if self.is_float() {
|
||||
write!(f, "f{}", self.lane_bits())
|
||||
} else if !self.is_scalar() {
|
||||
write!(f, "{}x{}", self.lane_type(), self.lane_count())
|
||||
} else {
|
||||
panic!("Invalid Type(0x{:x})", self.0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Type {
|
||||
fn default() -> Type {
|
||||
VOID
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn basic_scalars() {
|
||||
assert_eq!(VOID, VOID.lane_type());
|
||||
assert_eq!(0, VOID.bits());
|
||||
assert_eq!(B1, B1.lane_type());
|
||||
assert_eq!(B8, B8.lane_type());
|
||||
assert_eq!(B16, B16.lane_type());
|
||||
assert_eq!(B32, B32.lane_type());
|
||||
assert_eq!(B64, B64.lane_type());
|
||||
assert_eq!(I8, I8.lane_type());
|
||||
assert_eq!(I16, I16.lane_type());
|
||||
assert_eq!(I32, I32.lane_type());
|
||||
assert_eq!(I64, I64.lane_type());
|
||||
assert_eq!(F32, F32.lane_type());
|
||||
assert_eq!(F64, F64.lane_type());
|
||||
|
||||
assert_eq!(VOID.lane_bits(), 0);
|
||||
assert_eq!(B1.lane_bits(), 1);
|
||||
assert_eq!(B8.lane_bits(), 8);
|
||||
assert_eq!(B16.lane_bits(), 16);
|
||||
assert_eq!(B32.lane_bits(), 32);
|
||||
assert_eq!(B64.lane_bits(), 64);
|
||||
assert_eq!(I8.lane_bits(), 8);
|
||||
assert_eq!(I16.lane_bits(), 16);
|
||||
assert_eq!(I32.lane_bits(), 32);
|
||||
assert_eq!(I64.lane_bits(), 64);
|
||||
assert_eq!(F32.lane_bits(), 32);
|
||||
assert_eq!(F64.lane_bits(), 64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn typevar_functions() {
|
||||
assert_eq!(VOID.half_width(), None);
|
||||
assert_eq!(B1.half_width(), None);
|
||||
assert_eq!(B8.half_width(), None);
|
||||
assert_eq!(B16.half_width(), Some(B8));
|
||||
assert_eq!(B32.half_width(), Some(B16));
|
||||
assert_eq!(B64.half_width(), Some(B32));
|
||||
assert_eq!(I8.half_width(), None);
|
||||
assert_eq!(I16.half_width(), Some(I8));
|
||||
assert_eq!(I32.half_width(), Some(I16));
|
||||
assert_eq!(I32X4.half_width(), Some(I16X4));
|
||||
assert_eq!(I64.half_width(), Some(I32));
|
||||
assert_eq!(F32.half_width(), None);
|
||||
assert_eq!(F64.half_width(), Some(F32));
|
||||
|
||||
assert_eq!(VOID.double_width(), None);
|
||||
assert_eq!(B1.double_width(), None);
|
||||
assert_eq!(B8.double_width(), Some(B16));
|
||||
assert_eq!(B16.double_width(), Some(B32));
|
||||
assert_eq!(B32.double_width(), Some(B64));
|
||||
assert_eq!(B64.double_width(), None);
|
||||
assert_eq!(I8.double_width(), Some(I16));
|
||||
assert_eq!(I16.double_width(), Some(I32));
|
||||
assert_eq!(I32.double_width(), Some(I64));
|
||||
assert_eq!(I32X4.double_width(), Some(I64X4));
|
||||
assert_eq!(I64.double_width(), None);
|
||||
assert_eq!(F32.double_width(), Some(F64));
|
||||
assert_eq!(F64.double_width(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn vectors() {
|
||||
let big = F64.by(256).unwrap();
|
||||
assert_eq!(big.lane_bits(), 64);
|
||||
assert_eq!(big.lane_count(), 256);
|
||||
assert_eq!(big.bits(), 64 * 256);
|
||||
|
||||
assert_eq!(big.half_vector().unwrap().to_string(), "f64x128");
|
||||
assert_eq!(B1.by(2).unwrap().half_vector().unwrap().to_string(), "b1");
|
||||
assert_eq!(I32.half_vector(), None);
|
||||
assert_eq!(VOID.half_vector(), None);
|
||||
|
||||
// Check that the generated constants match the computed vector types.
|
||||
assert_eq!(I32.by(4), Some(I32X4));
|
||||
assert_eq!(F64.by(8), Some(F64X8));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn format_scalars() {
|
||||
assert_eq!(VOID.to_string(), "void");
|
||||
assert_eq!(B1.to_string(), "b1");
|
||||
assert_eq!(B8.to_string(), "b8");
|
||||
assert_eq!(B16.to_string(), "b16");
|
||||
assert_eq!(B32.to_string(), "b32");
|
||||
assert_eq!(B64.to_string(), "b64");
|
||||
assert_eq!(I8.to_string(), "i8");
|
||||
assert_eq!(I16.to_string(), "i16");
|
||||
assert_eq!(I32.to_string(), "i32");
|
||||
assert_eq!(I64.to_string(), "i64");
|
||||
assert_eq!(F32.to_string(), "f32");
|
||||
assert_eq!(F64.to_string(), "f64");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn format_vectors() {
|
||||
assert_eq!(B1.by(8).unwrap().to_string(), "b1x8");
|
||||
assert_eq!(B8.by(1).unwrap().to_string(), "b8");
|
||||
assert_eq!(B16.by(256).unwrap().to_string(), "b16x256");
|
||||
assert_eq!(B32.by(4).unwrap().by(2).unwrap().to_string(), "b32x8");
|
||||
assert_eq!(B64.by(8).unwrap().to_string(), "b64x8");
|
||||
assert_eq!(I8.by(64).unwrap().to_string(), "i8x64");
|
||||
assert_eq!(F64.by(2).unwrap().to_string(), "f64x2");
|
||||
assert_eq!(I8.by(3), None);
|
||||
assert_eq!(I8.by(512), None);
|
||||
assert_eq!(VOID.by(4), None);
|
||||
}
|
||||
}
|
||||
@@ -1,152 +0,0 @@
|
||||
//! Support types for generated encoding tables.
|
||||
//!
|
||||
//! This module contains types and functions for working with the encoding tables generated by
|
||||
//! `meta/gen_encoding.py`.
|
||||
use ir::{Type, Opcode};
|
||||
use isa::Encoding;
|
||||
use constant_hash::{Table, probe};
|
||||
|
||||
/// Level 1 hash table entry.
|
||||
///
|
||||
/// One level 1 hash table is generated per CPU mode. This table is keyed by the controlling type
|
||||
/// variable, using `VOID` for non-polymorphic instructions.
|
||||
///
|
||||
/// The hash table values are references to level 2 hash tables, encoded as an offset in `LEVEL2`
|
||||
/// where the table begins, and the binary logarithm of its length. All the level 2 hash tables
|
||||
/// have a power-of-two size.
|
||||
///
|
||||
/// Entries are generic over the offset type. It will typically be `u32` or `u16`, depending on the
|
||||
/// size of the `LEVEL2` table. A `u16` offset allows entries to shrink to 32 bits each, but some
|
||||
/// ISAs may have tables so large that `u32` offsets are needed.
|
||||
///
|
||||
/// Empty entries are encoded with a 0 `log2len`. This is on the assumption that no level 2 tables
|
||||
/// have only a single entry.
|
||||
pub struct Level1Entry<OffT: Into<u32> + Copy> {
|
||||
pub ty: Type,
|
||||
pub log2len: u8,
|
||||
pub offset: OffT,
|
||||
}
|
||||
|
||||
impl<OffT: Into<u32> + Copy> Table<Type> for [Level1Entry<OffT>] {
|
||||
fn len(&self) -> usize {
|
||||
self.len()
|
||||
}
|
||||
|
||||
fn key(&self, idx: usize) -> Option<Type> {
|
||||
if self[idx].log2len != 0 {
|
||||
Some(self[idx].ty)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Level 2 hash table entry.
|
||||
///
|
||||
/// The second level hash tables are keyed by `Opcode`, and contain an offset into the `ENCLISTS`
|
||||
/// table where the encoding recipes for the instrution are stored.
|
||||
///
|
||||
/// Entries are generic over the offset type which depends on the size of `ENCLISTS`. A `u16`
|
||||
/// offset allows the entries to be only 32 bits each. There is no benefit to dropping down to `u8`
|
||||
/// for tiny ISAs. The entries won't shrink below 32 bits since the opcode is expected to be 16
|
||||
/// bits.
|
||||
///
|
||||
/// Empty entries are encoded with a `NotAnOpcode` `opcode` field.
|
||||
pub struct Level2Entry<OffT: Into<u32> + Copy> {
|
||||
pub opcode: Opcode,
|
||||
pub offset: OffT,
|
||||
}
|
||||
|
||||
impl<OffT: Into<u32> + Copy> Table<Opcode> for [Level2Entry<OffT>] {
|
||||
fn len(&self) -> usize {
|
||||
self.len()
|
||||
}
|
||||
|
||||
fn key(&self, idx: usize) -> Option<Opcode> {
|
||||
let opc = self[idx].opcode;
|
||||
if opc != Opcode::NotAnOpcode {
|
||||
Some(opc)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Two-level hash table lookup.
|
||||
///
|
||||
/// Given the controlling type variable and instruction opcode, find the corresponding encoding
|
||||
/// list.
|
||||
///
|
||||
/// Returns an offset into the ISA's `ENCLIST` table, or `None` if the opcode/type combination is
|
||||
/// not legal.
|
||||
pub fn lookup_enclist<OffT1, OffT2>(ctrl_typevar: Type,
|
||||
opcode: Opcode,
|
||||
level1_table: &[Level1Entry<OffT1>],
|
||||
level2_table: &[Level2Entry<OffT2>])
|
||||
-> Option<usize>
|
||||
where OffT1: Into<u32> + Copy,
|
||||
OffT2: Into<u32> + Copy
|
||||
{
|
||||
probe(level1_table, ctrl_typevar, ctrl_typevar.index()).and_then(|l1idx| {
|
||||
let l1ent = &level1_table[l1idx];
|
||||
let l2off = l1ent.offset.into() as usize;
|
||||
let l2tab = &level2_table[l2off..l2off + (1 << l1ent.log2len)];
|
||||
probe(l2tab, opcode, opcode as usize).map(|l2idx| l2tab[l2idx].offset.into() as usize)
|
||||
})
|
||||
}
|
||||
|
||||
/// Encoding list entry.
|
||||
///
|
||||
/// Encoding lists are represented as sequences of u16 words.
|
||||
pub type EncListEntry = u16;
|
||||
|
||||
/// Number of bits used to represent a predicate. c.f. `meta.gen_encoding.py`.
|
||||
const PRED_BITS: u8 = 12;
|
||||
const PRED_MASK: EncListEntry = (1 << PRED_BITS) - 1;
|
||||
|
||||
/// The match-always instruction predicate. c.f. `meta.gen_encoding.py`.
|
||||
const CODE_ALWAYS: EncListEntry = PRED_MASK;
|
||||
|
||||
/// The encoding list terminator.
|
||||
const CODE_FAIL: EncListEntry = 0xffff;
|
||||
|
||||
/// Find the most general encoding of `inst`.
|
||||
///
|
||||
/// Given an encoding list offset as returned by `lookup_enclist` above, search the encoding list
|
||||
/// for the most general encoding that applies to `inst`. The encoding lists are laid out such that
|
||||
/// this is the last valid entry in the list.
|
||||
///
|
||||
/// This function takes two closures that are used to evaluate predicates:
|
||||
/// - `instp` is passed an instruction predicate number to be evaluated on the current instruction.
|
||||
/// - `isap` is passed an ISA predicate number to evaluate.
|
||||
///
|
||||
/// Returns the corresponding encoding, or `None` if no list entries are satisfied by `inst`.
|
||||
pub fn general_encoding<InstP, IsaP>(offset: usize,
|
||||
enclist: &[EncListEntry],
|
||||
instp: InstP,
|
||||
isap: IsaP)
|
||||
-> Option<Encoding>
|
||||
where InstP: Fn(EncListEntry) -> bool,
|
||||
IsaP: Fn(EncListEntry) -> bool
|
||||
{
|
||||
let mut found = None;
|
||||
let mut pos = offset;
|
||||
while enclist[pos] != CODE_FAIL {
|
||||
let pred = enclist[pos];
|
||||
if pred <= CODE_ALWAYS {
|
||||
// This is an instruction predicate followed by recipe and encbits entries.
|
||||
if pred == CODE_ALWAYS || instp(pred) {
|
||||
found = Some(Encoding::new(enclist[pos + 1], enclist[pos + 2]))
|
||||
}
|
||||
pos += 3;
|
||||
} else {
|
||||
// This is an ISA predicate entry.
|
||||
pos += 1;
|
||||
if !isap(pred & PRED_MASK) {
|
||||
// ISA predicate failed, skip the next N entries.
|
||||
pos += 3 * (pred >> PRED_BITS) as usize;
|
||||
}
|
||||
}
|
||||
}
|
||||
found
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
//! The `Encoding` struct.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
/// Bits needed to encode an instruction as binary machine code.
|
||||
///
|
||||
/// The encoding consists of two parts, both specific to the target ISA: An encoding *recipe*, and
|
||||
/// encoding *bits*. The recipe determines the native instruction format and the mapping of
|
||||
/// operands to encoded bits. The encoding bits provide additional information to the recipe,
|
||||
/// typically parts of the opcode.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub struct Encoding {
|
||||
recipe: u16,
|
||||
bits: u16,
|
||||
}
|
||||
|
||||
impl Encoding {
|
||||
/// Create a new `Encoding` containing `(recipe, bits)`.
|
||||
pub fn new(recipe: u16, bits: u16) -> Encoding {
|
||||
Encoding {
|
||||
recipe: recipe,
|
||||
bits: bits,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the recipe number in this encoding.
|
||||
pub fn recipe(self) -> usize {
|
||||
self.recipe as usize
|
||||
}
|
||||
|
||||
/// Get the recipe-specific encoding bits.
|
||||
pub fn bits(self) -> u16 {
|
||||
self.bits
|
||||
}
|
||||
|
||||
/// Is this a legal encoding, or the default placeholder?
|
||||
pub fn is_legal(self) -> bool {
|
||||
self != Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// The default encoding is the illegal one.
|
||||
impl Default for Encoding {
|
||||
fn default() -> Self {
|
||||
Self::new(0xffff, 0xffff)
|
||||
}
|
||||
}
|
||||
|
||||
/// ISA-independent display of an encoding.
|
||||
impl fmt::Display for Encoding {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
if self.is_legal() {
|
||||
write!(f, "{}#{:02x}", self.recipe, self.bits)
|
||||
} else {
|
||||
write!(f, "-")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Temporary object that holds enough context to properly display an encoding.
|
||||
/// This is meant to be created by `TargetIsa::display_enc()`.
|
||||
pub struct DisplayEncoding {
|
||||
pub encoding: Encoding,
|
||||
pub recipe_names: &'static [&'static str],
|
||||
}
|
||||
|
||||
impl fmt::Display for DisplayEncoding {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
if self.encoding.is_legal() {
|
||||
write!(f,
|
||||
"{}#{:02x}",
|
||||
self.recipe_names[self.encoding.recipe()],
|
||||
self.encoding.bits)
|
||||
} else {
|
||||
write!(f, "-")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,118 +0,0 @@
|
||||
//! Instruction Set Architectures.
|
||||
//!
|
||||
//! The `isa` module provides a `TargetIsa` trait which provides the behavior specialization needed
|
||||
//! by the ISA-independent code generator. The sub-modules of this module provide definitions for
|
||||
//! the instruction sets that Cretonne can target. Each sub-module has it's own implementation of
|
||||
//! `TargetIsa`.
|
||||
//!
|
||||
//! # Constructing a `TargetIsa` instance
|
||||
//!
|
||||
//! The target ISA is built from the following information:
|
||||
//!
|
||||
//! - The name of the target ISA as a string. Cretonne is a cross-compiler, so the ISA to target
|
||||
//! can be selected dynamically. Individual ISAs can be left out when Cretonne is compiled, so a
|
||||
//! string is used to identify the proper sub-module.
|
||||
//! - Values for settings that apply to all ISAs. This is represented by a `settings::Flags`
|
||||
//! instance.
|
||||
//! - Values for ISA-specific settings.
|
||||
//!
|
||||
//! The `isa::lookup()` function is the main entry point which returns an `isa::Builder`
|
||||
//! appropriate for the requested ISA:
|
||||
//!
|
||||
//! ```
|
||||
//! use cretonne::settings::{self, Configurable};
|
||||
//! use cretonne::isa;
|
||||
//!
|
||||
//! let shared_builder = settings::builder();
|
||||
//! let shared_flags = settings::Flags::new(&shared_builder);
|
||||
//!
|
||||
//! match isa::lookup("riscv") {
|
||||
//! None => {
|
||||
//! // The RISC-V target ISA is not available.
|
||||
//! }
|
||||
//! Some(mut isa_builder) => {
|
||||
//! isa_builder.set("supports_m", "on");
|
||||
//! let isa = isa_builder.finish(shared_flags);
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! The configured target ISA trait object is a `Box<TargetIsa>` which can be used for multiple
|
||||
//! concurrent function compilations.
|
||||
|
||||
pub use isa::encoding::Encoding;
|
||||
use settings;
|
||||
use ir::{InstructionData, DataFlowGraph};
|
||||
|
||||
pub mod riscv;
|
||||
mod encoding;
|
||||
mod enc_tables;
|
||||
|
||||
/// Look for a supported ISA with the given `name`.
|
||||
/// Return a builder that can create a corresponding `TargetIsa`.
|
||||
pub fn lookup(name: &str) -> Option<Builder> {
|
||||
match name {
|
||||
"riscv" => riscv_builder(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
// Make a builder for RISC-V.
|
||||
fn riscv_builder() -> Option<Builder> {
|
||||
Some(riscv::isa_builder())
|
||||
}
|
||||
|
||||
/// Builder for a `TargetIsa`.
|
||||
/// Modify the ISA-specific settings before creating the `TargetIsa` trait object with `finish`.
|
||||
pub struct Builder {
|
||||
setup: settings::Builder,
|
||||
constructor: fn(settings::Flags, &settings::Builder) -> Box<TargetIsa>,
|
||||
}
|
||||
|
||||
impl Builder {
|
||||
/// Combine the ISA-specific settings with the provided ISA-independent settings and allocate a
|
||||
/// fully configured `TargetIsa` trait object.
|
||||
pub fn finish(self, shared_flags: settings::Flags) -> Box<TargetIsa> {
|
||||
(self.constructor)(shared_flags, &self.setup)
|
||||
}
|
||||
}
|
||||
|
||||
impl settings::Configurable for Builder {
|
||||
fn set(&mut self, name: &str, value: &str) -> settings::Result<()> {
|
||||
self.setup.set(name, value)
|
||||
}
|
||||
|
||||
fn set_bool(&mut self, name: &str, value: bool) -> settings::Result<()> {
|
||||
self.setup.set_bool(name, value)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait TargetIsa {
|
||||
/// Get the name of this ISA.
|
||||
fn name(&self) -> &'static str;
|
||||
|
||||
/// Get the ISA-independent flags that were used to make this trait object.
|
||||
fn flags(&self) -> &settings::Flags;
|
||||
|
||||
/// Encode an instruction after determining it is legal.
|
||||
///
|
||||
/// If `inst` can legally be encoded in this ISA, produce the corresponding `Encoding` object.
|
||||
/// Otherwise, return `None`.
|
||||
///
|
||||
/// This is also the main entry point for determining if an instruction is legal.
|
||||
fn encode(&self, dfg: &DataFlowGraph, inst: &InstructionData) -> Option<Encoding>;
|
||||
|
||||
/// Get a static array of names associated with encoding recipes in this ISA. Encoding recipes
|
||||
/// are numbered starting from 0, corresponding to indexes into th name array.
|
||||
///
|
||||
/// This is just used for printing and parsing encodings in the textual IL format.
|
||||
fn recipe_names(&self) -> &'static [&'static str];
|
||||
|
||||
/// Create an object that can display an ISA-dependent encoding properly.
|
||||
fn display_enc(&self, enc: Encoding) -> encoding::DisplayEncoding {
|
||||
encoding::DisplayEncoding {
|
||||
encoding: enc,
|
||||
recipe_names: self.recipe_names(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
//! Encoding tables for RISC-V.
|
||||
|
||||
use ir::{Opcode, InstructionData};
|
||||
use ir::instructions::InstructionFormat;
|
||||
use ir::types;
|
||||
use predicates;
|
||||
use isa::enc_tables::{Level1Entry, Level2Entry};
|
||||
|
||||
// Include the generated encoding tables:
|
||||
// - `LEVEL1_RV32`
|
||||
// - `LEVEL1_RV64`
|
||||
// - `LEVEL2`
|
||||
// - `ENCLIST`
|
||||
include!(concat!(env!("OUT_DIR"), "/encoding-riscv.rs"));
|
||||
@@ -1,206 +0,0 @@
|
||||
//! RISC-V Instruction Set Architecture.
|
||||
|
||||
pub mod settings;
|
||||
mod enc_tables;
|
||||
|
||||
use super::super::settings as shared_settings;
|
||||
use isa::enc_tables::{self as shared_enc_tables, lookup_enclist, general_encoding};
|
||||
use isa::Builder as IsaBuilder;
|
||||
use isa::{TargetIsa, Encoding};
|
||||
use ir::{InstructionData, DataFlowGraph};
|
||||
|
||||
#[allow(dead_code)]
|
||||
struct Isa {
|
||||
shared_flags: shared_settings::Flags,
|
||||
isa_flags: settings::Flags,
|
||||
cpumode: &'static [shared_enc_tables::Level1Entry<u16>],
|
||||
}
|
||||
|
||||
pub fn isa_builder() -> IsaBuilder {
|
||||
IsaBuilder {
|
||||
setup: settings::builder(),
|
||||
constructor: isa_constructor,
|
||||
}
|
||||
}
|
||||
|
||||
fn isa_constructor(shared_flags: shared_settings::Flags,
|
||||
builder: &shared_settings::Builder)
|
||||
-> Box<TargetIsa> {
|
||||
let level1 = if shared_flags.is_64bit() {
|
||||
&enc_tables::LEVEL1_RV64[..]
|
||||
} else {
|
||||
&enc_tables::LEVEL1_RV32[..]
|
||||
};
|
||||
Box::new(Isa {
|
||||
isa_flags: settings::Flags::new(&shared_flags, builder),
|
||||
shared_flags: shared_flags,
|
||||
cpumode: level1,
|
||||
})
|
||||
}
|
||||
|
||||
impl TargetIsa for Isa {
|
||||
fn name(&self) -> &'static str {
|
||||
"riscv"
|
||||
}
|
||||
|
||||
fn flags(&self) -> &shared_settings::Flags {
|
||||
&self.shared_flags
|
||||
}
|
||||
|
||||
fn encode(&self, _: &DataFlowGraph, inst: &InstructionData) -> Option<Encoding> {
|
||||
lookup_enclist(inst.first_type(),
|
||||
inst.opcode(),
|
||||
self.cpumode,
|
||||
&enc_tables::LEVEL2[..])
|
||||
.and_then(|enclist_offset| {
|
||||
general_encoding(enclist_offset,
|
||||
&enc_tables::ENCLISTS[..],
|
||||
|instp| enc_tables::check_instp(inst, instp),
|
||||
|isap| self.isa_flags.numbered_predicate(isap as usize))
|
||||
})
|
||||
}
|
||||
|
||||
fn recipe_names(&self) -> &'static [&'static str] {
|
||||
&enc_tables::RECIPE_NAMES[..]
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use settings::{self, Configurable};
    use isa;
    use ir::{DataFlowGraph, InstructionData, Opcode};
    use ir::{types, immediates};

    /// Render `enc` through the ISA's display hook as its textual form.
    fn encstr(isa: &isa::TargetIsa, enc: isa::Encoding) -> String {
        isa.display_enc(enc).to_string()
    }

    #[test]
    fn test_64bitenc() {
        let mut shared_builder = settings::builder();
        shared_builder.set_bool("is_64bit", true).unwrap();
        let shared_flags = settings::Flags::new(&shared_builder);
        let isa = isa::lookup("riscv").unwrap().finish(shared_flags);

        let mut dfg = DataFlowGraph::new();
        let ebb = dfg.make_ebb();
        let arg64 = dfg.append_ebb_arg(ebb, types::I64);
        let arg32 = dfg.append_ebb_arg(ebb, types::I32);

        // Try to encode iadd_imm.i64 vx1, -10.
        let inst64 = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            ty: types::I64,
            arg: arg64,
            imm: immediates::Imm64::new(-10),
        };

        // ADDI is I/0b00100
        assert_eq!(encstr(&*isa, isa.encode(&dfg, &inst64).unwrap()), "I#04");

        // Try to encode iadd_imm.i64 vx1, -10000.
        let inst64_large = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            ty: types::I64,
            arg: arg64,
            imm: immediates::Imm64::new(-10000),
        };

        // Immediate is out of range for ADDI.
        assert_eq!(isa.encode(&dfg, &inst64_large), None);

        // Create an iadd_imm.i32 which is encodable in RV64.
        let inst32 = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            ty: types::I32,
            arg: arg32,
            imm: immediates::Imm64::new(10),
        };

        // ADDIW is I/0b00110
        assert_eq!(encstr(&*isa, isa.encode(&dfg, &inst32).unwrap()), "I#06");
    }

    // Same as above, but for RV32.
    #[test]
    fn test_32bitenc() {
        let mut shared_builder = settings::builder();
        shared_builder.set_bool("is_64bit", false).unwrap();
        let shared_flags = settings::Flags::new(&shared_builder);
        let isa = isa::lookup("riscv").unwrap().finish(shared_flags);

        let mut dfg = DataFlowGraph::new();
        let ebb = dfg.make_ebb();
        let arg64 = dfg.append_ebb_arg(ebb, types::I64);
        let arg32 = dfg.append_ebb_arg(ebb, types::I32);

        // Try to encode iadd_imm.i64 vx1, -10.
        let inst64 = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            ty: types::I64,
            arg: arg64,
            imm: immediates::Imm64::new(-10),
        };

        // An i64 add immediate is not encodable in RV32.
        assert_eq!(isa.encode(&dfg, &inst64), None);

        // Try to encode iadd_imm.i64 vx1, -10000.
        let inst64_large = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            ty: types::I64,
            arg: arg64,
            imm: immediates::Imm64::new(-10000),
        };

        // Immediate is out of range for ADDI, and i64 isn't encodable anyway.
        assert_eq!(isa.encode(&dfg, &inst64_large), None);

        // Create an iadd_imm.i32 which is encodable in RV32.
        let inst32 = InstructionData::BinaryImm {
            opcode: Opcode::IaddImm,
            ty: types::I32,
            arg: arg32,
            imm: immediates::Imm64::new(10),
        };

        // ADDI is I/0b00100
        assert_eq!(encstr(&*isa, isa.encode(&dfg, &inst32).unwrap()), "I#04");

        // Create an imul.i32 which is encodable in RV32, but only when use_m is true.
        let mul32 = InstructionData::Binary {
            opcode: Opcode::Imul,
            ty: types::I32,
            args: [arg32, arg32],
        };

        assert_eq!(isa.encode(&dfg, &mul32), None);
    }

    #[test]
    fn test_rv32m() {
        let mut shared_builder = settings::builder();
        shared_builder.set_bool("is_64bit", false).unwrap();
        let shared_flags = settings::Flags::new(&shared_builder);

        // Set the supports_m setting which in turn enables the use_m predicate that unlocks
        // encodings for imul.
        let mut isa_builder = isa::lookup("riscv").unwrap();
        isa_builder.set_bool("supports_m", true).unwrap();

        let isa = isa_builder.finish(shared_flags);

        let mut dfg = DataFlowGraph::new();
        let ebb = dfg.make_ebb();
        let arg32 = dfg.append_ebb_arg(ebb, types::I32);

        // Create an imul.i32 which is encodable in RV32M.
        let mul32 = InstructionData::Binary {
            opcode: Opcode::Imul,
            ty: types::I32,
            args: [arg32, arg32],
        };
        assert_eq!(encstr(&*isa, isa.encode(&dfg, &mul32).unwrap()), "R#10c");
    }
}
|
||||
@@ -1,49 +0,0 @@
|
||||
//! RISC-V Settings.
|
||||
|
||||
use settings::{self, detail, Builder};
|
||||
use std::fmt;
|
||||
|
||||
// Include code generated by `meta/gen_settings.py`. This file contains a public `Flags` struct
|
||||
// with an impl for all of the settings defined in `meta/cretonne/settings.py`.
|
||||
include!(concat!(env!("OUT_DIR"), "/settings-riscv.rs"));
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::{builder, Flags};
    use settings::{self, Configurable};

    #[test]
    fn display_default() {
        // Default shared and ISA flags: all `supports_*` settings off.
        let shared = settings::Flags::new(&settings::builder());
        let isa_builder = builder();
        let flags = Flags::new(&shared, &isa_builder);
        assert_eq!(flags.to_string(),
                   "[riscv]\n\
                    supports_m = false\n\
                    supports_a = false\n\
                    supports_f = false\n\
                    supports_d = false\n\
                    enable_m = true\n");
        // Predicates are not part of the Display output.
        assert_eq!(flags.full_float(), false);
    }

    #[test]
    fn predicates() {
        // Enabling both supports_f and supports_d makes full_float true under
        // the default shared flags.
        let shared = settings::Flags::new(&settings::builder());
        let mut isa_builder = builder();
        isa_builder.set_bool("supports_f", true).unwrap();
        isa_builder.set_bool("supports_d", true).unwrap();
        let flags = Flags::new(&shared, &isa_builder);
        assert_eq!(flags.full_float(), true);

        // Turning off the shared enable_simd flag makes full_float false
        // even with the same ISA settings.
        let mut shared_builder = settings::builder();
        shared_builder.set_bool("enable_simd", false).unwrap();
        let shared = settings::Flags::new(&shared_builder);
        let mut isa_builder = builder();
        isa_builder.set_bool("supports_f", true).unwrap();
        isa_builder.set_bool("supports_d", true).unwrap();
        let flags = Flags::new(&shared, &isa_builder);
        assert_eq!(flags.full_float(), false);
    }
}
|
||||
@@ -1,54 +0,0 @@
|
||||
//! Legalize instructions.
|
||||
//!
|
||||
//! A legal instruction is one that can be mapped directly to a machine code instruction for the
|
||||
//! target ISA. The `legalize_function()` function takes as input any function and transforms it
|
||||
//! into an equivalent function using only legal instructions.
|
||||
//!
|
||||
//! The characteristics of legal instructions depend on the target ISA, so any given instruction
|
||||
//! can be legal for one ISA and illegal for another.
|
||||
//!
|
||||
//! Besides transforming instructions, the legalizer also fills out the `function.encodings` map
|
||||
//! which provides a legal encoding recipe for every instruction.
|
||||
//!
|
||||
//! The legalizer does not deal with register allocation constraints. These constraints are derived
|
||||
//! from the encoding recipes, and solved later by the register allocator.
|
||||
|
||||
use ir::Function;
|
||||
use isa::TargetIsa;
|
||||
|
||||
/// Legalize `func` for `isa`.
|
||||
///
|
||||
/// - Transform any instructions that don't have a legal representation in `isa`.
|
||||
/// - Fill out `func.encodings`.
|
||||
///
|
||||
pub fn legalize_function(func: &mut Function, isa: &TargetIsa) {
|
||||
// TODO: This is very simplified and incomplete.
|
||||
func.encodings.resize(func.dfg.num_insts());
|
||||
for ebb in func.layout.ebbs() {
|
||||
for inst in func.layout.ebb_insts(ebb) {
|
||||
match isa.encode(&func.dfg, &func.dfg[inst]) {
|
||||
Some(encoding) => func.encodings[inst] = encoding,
|
||||
None => {
|
||||
// TODO: We should transform the instruction into legal equivalents.
|
||||
// Possible strategies are:
|
||||
// 1. Expand instruction into sequence of legal instructions. Possibly
|
||||
// iteratively.
|
||||
// 2. Split the controlling type variable into high and low parts. This applies
|
||||
// both to SIMD vector types which can be halved and to integer types such
|
||||
// as `i64` used on a 32-bit ISA.
|
||||
// 3. Promote the controlling type variable to a larger type. This typically
|
||||
// means expressing `i8` and `i16` arithmetic in terms if `i32` operations
|
||||
// on RISC targets. (It may or may not be beneficial to promote small vector
|
||||
// types versus splitting them.)
|
||||
// 4. Convert to library calls. For example, floating point operations on an
|
||||
// ISA with no IEEE 754 support.
|
||||
//
|
||||
// The iteration scheme used here is not going to cut it. Transforming
|
||||
// instructions involves changing `function.layout` which is impossiblr while
|
||||
// it is referenced by the two iterators. We need a layout cursor that can
|
||||
// maintain a position *and* permit inserting and replacing instructions.
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
|
||||
// ====------------------------------------------------------------------------------------==== //
|
||||
//
|
||||
// Cretonne code generation library.
|
||||
//
|
||||
// ====------------------------------------------------------------------------------------==== //
|
||||
|
||||
pub use verifier::verify_function;
|
||||
pub use write::write_function;
|
||||
pub use legalizer::legalize_function;
|
||||
|
||||
pub const VERSION: &'static str = env!("CARGO_PKG_VERSION");
|
||||
|
||||
pub mod ir;
|
||||
pub mod isa;
|
||||
pub mod cfg;
|
||||
pub mod dominator_tree;
|
||||
pub mod entity_map;
|
||||
pub mod settings;
|
||||
pub mod verifier;
|
||||
|
||||
mod write;
|
||||
mod constant_hash;
|
||||
mod predicates;
|
||||
mod legalizer;
|
||||
@@ -1,66 +0,0 @@
|
||||
//! Predicate functions for testing instruction fields.
|
||||
//!
|
||||
//! This module defines functions that are used by the instruction predicates defined by
|
||||
//! `meta/cretonne/predicates.py` classes.
|
||||
//!
|
||||
//! The predicates that operate on integer fields use `Into<i64>` as a shared trait bound. This
|
||||
//! bound is implemented by all the native integer types as well as `Imm64`.
|
||||
//!
|
||||
//! Some of these predicates may be unused in certain ISA configurations, so we suppress the
|
||||
//! dead_code warning.
|
||||
|
||||
/// Check that `x` can be represented as a `wd`-bit signed integer with `sc` low zero bits.
#[allow(dead_code)]
pub fn is_signed_int<T: Into<i64>>(x: T, wd: u8, sc: u8) -> bool {
    let value = x.into();
    // Drop the `sc` low bits, then sign-extend from bit `wd - 1`; the input is
    // representable exactly when the round trip reproduces it.
    let shifted = (value >> sc) << (64 - wd + sc);
    value == shifted >> (64 - wd)
}
|
||||
|
||||
/// Check that `x` can be represented as a `wd`-bit unsigned integer with `sc` low zero bits.
#[allow(dead_code)]
pub fn is_unsigned_int<T: Into<i64>>(x: T, wd: u8, sc: u8) -> bool {
    let u = x.into() as u64;
    // Bitmask of the permitted bits: bits `sc` (inclusive) up to `wd` (exclusive).
    let mask: u64 = (1 << wd) - (1 << sc);
    // Representable exactly when no bit outside the mask is set.
    u & !mask == 0
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn cvt_u32() {
        let zero = 0u32;
        let one = 1u32;
        let big = 0xffff_fff0u32;

        assert!(is_signed_int(zero, 1, 0));
        assert!(is_signed_int(zero, 2, 1));
        assert!(is_signed_int(one, 2, 0));
        assert!(!is_signed_int(one, 2, 1));

        // u32 doesn't sign-extend when converted to i64.
        assert!(!is_signed_int(big, 8, 0));

        assert!(is_unsigned_int(zero, 1, 0));
        assert!(is_unsigned_int(zero, 8, 4));
        assert!(is_unsigned_int(one, 1, 0));
        assert!(!is_unsigned_int(one, 8, 4));
        assert!(!is_unsigned_int(big, 1, 0));
        assert!(is_unsigned_int(big, 32, 4));
    }

    #[test]
    fn cvt_imm64() {
        use ir::immediates::Imm64;

        let minus_eight = Imm64::new(-8);
        let eight = Imm64::new(8);

        assert!(is_signed_int(minus_eight, 16, 2));
        assert!(is_signed_int(eight, 16, 2));
        assert!(!is_signed_int(minus_eight, 16, 4));
        assert!(!is_signed_int(eight, 16, 4));
    }
}
|
||||
@@ -1,296 +0,0 @@
|
||||
//! Shared settings module.
|
||||
//!
|
||||
//! This module defines data structures to access the settings defined in the meta language.
|
||||
//!
|
||||
//! Each settings group is translated to a `Flags` struct either in this module or in its
|
||||
//! ISA-specific `settings` module. The struct provides individual getter methods for all of the
|
||||
//! settings as well as computed predicate flags.
|
||||
//!
|
||||
//! The `Flags` struct is immutable once it has been created. A `Builder` instance is used to
|
||||
//! create it.
|
||||
//!
|
||||
//! # Example
|
||||
//! ```
|
||||
//! use cretonne::settings::{self, Configurable};
|
||||
//!
|
||||
//! let mut b = settings::builder();
|
||||
//! b.set("opt_level", "fastest");
|
||||
//!
|
||||
//! let f = settings::Flags::new(&b);
|
||||
//! assert_eq!(f.opt_level(), settings::OptLevel::Fastest);
|
||||
//! ```
|
||||
|
||||
use std::fmt;
|
||||
use std::result;
|
||||
|
||||
use constant_hash::{probe, simple_hash};
|
||||
|
||||
/// A string-based configurator for settings groups.
|
||||
///
|
||||
/// The `Configurable` protocol allows settings to be modified by name before a finished `Flags`
|
||||
/// struct is created.
|
||||
pub trait Configurable {
|
||||
/// Set the string value of any setting by name.
|
||||
///
|
||||
/// This can set any type of setting whether it is numeric, boolean, or enumerated.
|
||||
fn set(&mut self, name: &str, value: &str) -> Result<()>;
|
||||
|
||||
/// Set the value of a boolean setting by name.
|
||||
///
|
||||
/// If the identified setting isn't a boolean, a `BadType` error is returned.
|
||||
fn set_bool(&mut self, name: &str, value: bool) -> Result<()>;
|
||||
}
|
||||
|
||||
/// Collect settings values based on a template.
|
||||
pub struct Builder {
|
||||
template: &'static detail::Template,
|
||||
bytes: Vec<u8>,
|
||||
}
|
||||
|
||||
impl Builder {
|
||||
/// Create a new builder with defaults and names from the given template.
|
||||
pub fn new(tmpl: &'static detail::Template) -> Builder {
|
||||
Builder {
|
||||
template: tmpl,
|
||||
bytes: tmpl.defaults.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Extract contents of builder once everything is configured.
|
||||
pub fn state_for(&self, name: &str) -> &[u8] {
|
||||
assert_eq!(name, self.template.name);
|
||||
&self.bytes[..]
|
||||
}
|
||||
|
||||
/// Set the value of a single bit.
|
||||
fn set_bit(&mut self, offset: usize, bit: u8, value: bool) {
|
||||
let byte = &mut self.bytes[offset];
|
||||
let mask = 1 << bit;
|
||||
if value {
|
||||
*byte |= mask;
|
||||
} else {
|
||||
*byte &= !mask;
|
||||
}
|
||||
}
|
||||
|
||||
/// Look up a descriptor by name.
|
||||
fn lookup(&self, name: &str) -> Result<(usize, detail::Detail)> {
|
||||
match probe(self.template, name, simple_hash(name)) {
|
||||
None => Err(Error::BadName),
|
||||
Some(entry) => {
|
||||
let d = &self.template.descriptors[self.template.hash_table[entry] as usize];
|
||||
Ok((d.offset as usize, d.detail))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_bool_value(value: &str) -> Result<bool> {
|
||||
match value {
|
||||
"true" | "on" | "yes" | "1" => Ok(true),
|
||||
"false" | "off" | "no" | "0" => Ok(false),
|
||||
_ => Err(Error::BadValue),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_enum_value(value: &str, choices: &[&str]) -> Result<u8> {
|
||||
match choices.iter().position(|&tag| tag == value) {
|
||||
Some(idx) => Ok(idx as u8),
|
||||
None => Err(Error::BadValue),
|
||||
}
|
||||
}
|
||||
|
||||
impl Configurable for Builder {
|
||||
fn set_bool(&mut self, name: &str, value: bool) -> Result<()> {
|
||||
use self::detail::Detail;
|
||||
let (offset, detail) = try!(self.lookup(name));
|
||||
if let Detail::Bool { bit } = detail {
|
||||
self.set_bit(offset, bit, value);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::BadType)
|
||||
}
|
||||
}
|
||||
|
||||
fn set(&mut self, name: &str, value: &str) -> Result<()> {
|
||||
use self::detail::Detail;
|
||||
let (offset, detail) = try!(self.lookup(name));
|
||||
match detail {
|
||||
Detail::Bool { bit } => {
|
||||
self.set_bit(offset, bit, try!(parse_bool_value(value)));
|
||||
}
|
||||
Detail::Num => {
|
||||
self.bytes[offset] = try!(value.parse().map_err(|_| Error::BadValue));
|
||||
}
|
||||
Detail::Enum { last, enumerators } => {
|
||||
self.bytes[offset] = try!(parse_enum_value(value,
|
||||
self.template.enums(last, enumerators)));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// An error produced when changing a setting.
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum Error {
|
||||
/// No setting by this name exists.
|
||||
BadName,
|
||||
|
||||
/// Type mismatch for setting (e.g., setting an enum setting as a bool).
|
||||
BadType,
|
||||
|
||||
/// This is not a valid value for this setting.
|
||||
BadValue,
|
||||
}
|
||||
|
||||
pub type Result<T> = result::Result<T, Error>;
|
||||
|
||||
/// Implementation details for generated code.
|
||||
///
|
||||
/// This module holds definitions that need to be public so the can be instantiated by generated
|
||||
/// code in other modules.
|
||||
pub mod detail {
|
||||
use std::fmt;
|
||||
use constant_hash;
|
||||
|
||||
/// An instruction group template.
|
||||
pub struct Template {
|
||||
pub name: &'static str,
|
||||
pub descriptors: &'static [Descriptor],
|
||||
pub enumerators: &'static [&'static str],
|
||||
pub hash_table: &'static [u16],
|
||||
pub defaults: &'static [u8],
|
||||
}
|
||||
|
||||
impl Template {
|
||||
/// Get enumerators corresponding to a `Details::Enum`.
|
||||
pub fn enums(&self, last: u8, enumerators: u16) -> &[&'static str] {
|
||||
let from = enumerators as usize;
|
||||
let len = last as usize + 1;
|
||||
&self.enumerators[from..from + len]
|
||||
}
|
||||
|
||||
/// Format a setting value as a TOML string. This is mostly for use by the generated
|
||||
/// `Display` implementation.
|
||||
pub fn format_toml_value(&self,
|
||||
detail: Detail,
|
||||
byte: u8,
|
||||
f: &mut fmt::Formatter)
|
||||
-> fmt::Result {
|
||||
match detail {
|
||||
Detail::Bool { bit } => write!(f, "{}", (byte & (1 << bit)) != 0),
|
||||
Detail::Num => write!(f, "{}", byte),
|
||||
Detail::Enum { last, enumerators } => {
|
||||
if byte <= last {
|
||||
let tags = self.enums(last, enumerators);
|
||||
write!(f, "\"{}\"", tags[byte as usize])
|
||||
} else {
|
||||
write!(f, "{}", byte)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The template contains a hash table for by-name lookup.
|
||||
impl<'a> constant_hash::Table<&'a str> for Template {
|
||||
fn len(&self) -> usize {
|
||||
self.hash_table.len()
|
||||
}
|
||||
|
||||
fn key(&self, idx: usize) -> Option<&'a str> {
|
||||
let e = self.hash_table[idx] as usize;
|
||||
if e < self.descriptors.len() {
|
||||
Some(self.descriptors[e].name)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A setting descriptor holds the information needed to generically set and print a setting.
|
||||
///
|
||||
/// Each settings group will be represented as a constant DESCRIPTORS array.
|
||||
pub struct Descriptor {
|
||||
/// Lower snake-case name of setting as defined in meta.
|
||||
pub name: &'static str,
|
||||
|
||||
/// Offset of byte containing this setting.
|
||||
pub offset: u32,
|
||||
|
||||
/// Additional details, depending on the kind of setting.
|
||||
pub detail: Detail,
|
||||
}
|
||||
|
||||
/// The different kind of settings along with descriptor bits that depend on the kind.
|
||||
#[derive(Clone, Copy)]
|
||||
pub enum Detail {
|
||||
/// A boolean setting only uses one bit, numbered from LSB.
|
||||
Bool { bit: u8 },
|
||||
|
||||
/// A numerical setting uses the whole byte.
|
||||
Num,
|
||||
|
||||
/// An Enum setting uses a range of enumerators.
|
||||
Enum {
|
||||
/// Numerical value of last enumerator, allowing for 1-256 enumerators.
|
||||
last: u8,
|
||||
|
||||
/// First enumerator in the ENUMERATORS table.
|
||||
enumerators: u16,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Include code generated by `meta/gen_settings.py`. This file contains a public `Flags` struct
|
||||
// with an impl for all of the settings defined in `meta/cretonne/settings.py`.
|
||||
include!(concat!(env!("OUT_DIR"), "/settings.rs"));
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::{builder, Flags};
    use super::Error::*;
    use super::Configurable;

    #[test]
    fn display_default() {
        // An untouched builder yields the documented default flag values.
        let setting_builder = builder();
        let flags = Flags::new(&setting_builder);
        assert_eq!(flags.to_string(),
                   "[shared]\n\
                    opt_level = \"default\"\n\
                    is_64bit = false\n\
                    enable_float = true\n\
                    enable_simd = true\n\
                    enable_atomics = true\n");
        assert_eq!(flags.opt_level(), super::OptLevel::Default);
        assert_eq!(flags.enable_simd(), true);
    }

    #[test]
    fn modify_bool() {
        let mut setting_builder = builder();
        // Unknown names are rejected; valid boolean settings can be flipped.
        assert_eq!(setting_builder.set_bool("not_there", true), Err(BadName));
        assert_eq!(setting_builder.set_bool("enable_simd", true), Ok(()));
        assert_eq!(setting_builder.set_bool("enable_simd", false), Ok(()));

        let flags = Flags::new(&setting_builder);
        assert_eq!(flags.enable_simd(), false);
    }

    #[test]
    fn modify_string() {
        let mut setting_builder = builder();
        // Bad names and values that don't parse for the setting's kind fail.
        assert_eq!(setting_builder.set("not_there", "true"), Err(BadName));
        assert_eq!(setting_builder.set("enable_simd", ""), Err(BadValue));
        assert_eq!(setting_builder.set("enable_simd", "best"), Err(BadValue));
        assert_eq!(setting_builder.set("opt_level", "true"), Err(BadValue));
        assert_eq!(setting_builder.set("opt_level", "best"), Ok(()));
        assert_eq!(setting_builder.set("enable_simd", "0"), Ok(()));

        let flags = Flags::new(&setting_builder);
        assert_eq!(flags.enable_simd(), false);
        assert_eq!(flags.opt_level(), super::OptLevel::Best);
    }
}
|
||||
@@ -1,206 +0,0 @@
|
||||
//! A verifier for ensuring that functions are well formed.
|
||||
//! It verifies:
|
||||
//!
|
||||
//! EBB integrity
|
||||
//!
|
||||
//! - All instructions reached from the ebb_insts iterator must belong to
|
||||
//! the EBB as reported by inst_ebb().
|
||||
//! - Every EBB must end in a terminator instruction, and no other instruction
|
||||
//! can be a terminator.
|
||||
//! - Every value in the ebb_args iterator belongs to the EBB as reported by value_ebb.
|
||||
//!
|
||||
//! Instruction integrity
|
||||
//!
|
||||
//! - The instruction format must match the opcode.
|
||||
//! TODO:
|
||||
//! - All result values must be created for multi-valued instructions.
|
||||
//! - Instructions with no results must have a VOID first_type().
|
||||
//! - All referenced entities must exist. (Values, EBBs, stack slots, ...)
|
||||
//!
|
||||
//! SSA form
|
||||
//!
|
||||
//! - Values must be defined by an instruction that exists and that is inserted in
|
||||
//! an EBB, or be an argument of an existing EBB.
|
||||
//! - Values used by an instruction must dominate the instruction.
|
||||
//! Control flow graph and dominator tree integrity:
|
||||
//!
|
||||
//! - All predecessors in the CFG must be branches to the EBB.
|
||||
//! - All branches to an EBB must be present in the CFG.
|
||||
//! - A recomputed dominator tree is identical to the existing one.
|
||||
//!
|
||||
//! Type checking
|
||||
//!
|
||||
//! - Compare input and output values against the opcode's type constraints.
|
||||
//! For polymorphic opcodes, determine the controlling type variable first.
|
||||
//! - Branches and jumps must pass arguments to destination EBBs that match the
|
||||
//! expected types exactly. The number of arguments must match.
|
||||
//! - All EBBs in a jump_table must take no arguments.
|
||||
//! - Function calls are type checked against their signature.
|
||||
//! - The entry block must take arguments that match the signature of the current
|
||||
//! function.
|
||||
//! - All return instructions must have return value operands matching the current
|
||||
//! function signature.
|
||||
//!
|
||||
//! Ad hoc checking
|
||||
//!
|
||||
//! - Stack slot loads and stores must be in-bounds.
|
||||
//! - Immediate constraints for certain opcodes, like udiv_imm v3, 0.
|
||||
//! - Extend / truncate instructions have more type constraints: Source type can't be
|
||||
//! larger / smaller than result type.
|
||||
//! - Insertlane and extractlane instructions have immediate lane numbers that must be in
|
||||
//! range for their polymorphic type.
|
||||
//! - Swizzle and shuffle instructions take a variable number of lane arguments. The number
|
||||
//! of arguments must match the destination type, and the lane indexes must be in range.
|
||||
|
||||
use ir::{Function, ValueDef, Ebb, Inst};
|
||||
use ir::instructions::InstructionFormat;
|
||||
use ir::entities::AnyEntity;
|
||||
use std::fmt::{self, Display, Formatter};
|
||||
use std::result;
|
||||
|
||||
/// A verifier error.
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub struct Error {
|
||||
pub location: AnyEntity,
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
impl Display for Error {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
write!(f, "{}: {}", self.location, self.message)
|
||||
}
|
||||
}
|
||||
|
||||
pub type Result<T> = result::Result<T, Error>;
|
||||
|
||||
// Create an `Err` variant of `Result<X>` from a location and `format!` args.
|
||||
macro_rules! err {
|
||||
( $loc:expr, $msg:expr ) => {
|
||||
Err(Error {
|
||||
location: $loc.into(),
|
||||
message: String::from($msg),
|
||||
})
|
||||
};
|
||||
|
||||
( $loc:expr, $fmt:expr, $( $arg:expr ),+ ) => {
|
||||
Err(Error {
|
||||
location: $loc.into(),
|
||||
message: format!( $fmt, $( $arg ),+ ),
|
||||
})
|
||||
};
|
||||
}
|
||||
|
||||
pub fn verify_function(func: &Function) -> Result<()> {
|
||||
Verifier::new(func).run()
|
||||
}
|
||||
|
||||
pub struct Verifier<'a> {
|
||||
func: &'a Function,
|
||||
}
|
||||
|
||||
impl<'a> Verifier<'a> {
|
||||
pub fn new(func: &'a Function) -> Verifier {
|
||||
Verifier { func: func }
|
||||
}
|
||||
|
||||
fn ebb_integrity(&self, ebb: Ebb, inst: Inst) -> Result<()> {
|
||||
|
||||
let is_terminator = self.func.dfg[inst].is_terminating();
|
||||
let is_last_inst = self.func.layout.last_inst(ebb) == inst;
|
||||
|
||||
if is_terminator && !is_last_inst {
|
||||
// Terminating instructions only occur at the end of blocks.
|
||||
return err!(inst,
|
||||
"a terminator instruction was encountered before the end of {}",
|
||||
ebb);
|
||||
}
|
||||
if is_last_inst && !is_terminator {
|
||||
return err!(ebb, "block does not end in a terminator instruction!");
|
||||
}
|
||||
|
||||
// Instructions belong to the correct ebb.
|
||||
let inst_ebb = self.func.layout.inst_ebb(inst);
|
||||
if inst_ebb != Some(ebb) {
|
||||
return err!(inst, "should belong to {} not {:?}", ebb, inst_ebb);
|
||||
}
|
||||
|
||||
// Arguments belong to the correct ebb.
|
||||
for arg in self.func.dfg.ebb_args(ebb) {
|
||||
match self.func.dfg.value_def(arg) {
|
||||
ValueDef::Arg(arg_ebb, _) => {
|
||||
if ebb != arg_ebb {
|
||||
return err!(arg, "does not belong to {}", ebb);
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return err!(arg, "expected an argument, found a result");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn instruction_integrity(&self, inst: Inst) -> Result<()> {
|
||||
let inst_data = &self.func.dfg[inst];
|
||||
|
||||
// The instruction format matches the opcode
|
||||
if inst_data.opcode().format() != Some(InstructionFormat::from(inst_data)) {
|
||||
return err!(inst, "instruction opcode doesn't match instruction format");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn run(&self) -> Result<()> {
|
||||
for ebb in self.func.layout.ebbs() {
|
||||
for inst in self.func.layout.ebb_insts(ebb) {
|
||||
try!(self.ebb_integrity(ebb, inst));
|
||||
try!(self.instruction_integrity(inst));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use ir::Function;
    use ir::instructions::{InstructionData, Opcode};
    use ir::types;

    // Assert that an expression fails verification with a message containing
    // the given substring.
    macro_rules! assert_err_with_msg {
        ($e:expr, $msg:expr) => (
            match $e {
                Ok(_) => { panic!("Expected an error!") },
                Err(Error { message, .. } ) => {
                    if !message.contains($msg) {
                        panic!(format!("'{}' did not contain the substring '{}'", message, $msg));
                    }
                }
            }
        )
    }

    #[test]
    fn empty() {
        // An empty function trivially verifies.
        let func = Function::new();
        assert_eq!(Verifier::new(&func).run(), Ok(()));
    }

    #[test]
    fn bad_instruction_format() {
        let mut func = Function::new();
        let ebb0 = func.dfg.make_ebb();
        func.layout.append_ebb(ebb0);
        // Jump is not a Nullary opcode, so the format check must trip.
        let nullary_with_bad_opcode = func.dfg.make_inst(InstructionData::Nullary {
            opcode: Opcode::Jump,
            ty: types::VOID,
        });
        func.layout.append_inst(nullary_with_bad_opcode, ebb0);
        assert_err_with_msg!(Verifier::new(&func).run(), "instruction format");
    }
}
|
||||
@@ -1,243 +0,0 @@
|
||||
//! Converting Cretonne IL to text.
|
||||
//!
|
||||
//! The `write` module provides the `write_function` function which converts an IL `Function` to an
|
||||
//! equivalent textual representation. This textual representation can be read back by the
|
||||
//! `cretonne-reader` crate.
|
||||
|
||||
use ir::{Function, Ebb, Inst, Value, Type};
|
||||
use isa::TargetIsa;
|
||||
use std::fmt::{Result, Error, Write};
|
||||
use std::result;
|
||||
|
||||
/// Write `func` to `w` as equivalent text.
|
||||
/// Use `isa` to emit ISA-dependent annotations.
|
||||
pub fn write_function(w: &mut Write, func: &Function, isa: Option<&TargetIsa>) -> Result {
|
||||
try!(write_spec(w, func));
|
||||
try!(writeln!(w, " {{"));
|
||||
let mut any = try!(write_preamble(w, func));
|
||||
for ebb in &func.layout {
|
||||
if any {
|
||||
try!(writeln!(w, ""));
|
||||
}
|
||||
try!(write_ebb(w, func, isa, ebb));
|
||||
any = true;
|
||||
}
|
||||
writeln!(w, "}}")
|
||||
}
|
||||
|
||||
// ====--------------------------------------------------------------------------------------====//
|
||||
//
|
||||
// Function spec.
|
||||
//
|
||||
// ====--------------------------------------------------------------------------------------====//
|
||||
|
||||
// Write the `function name(signature)` line that opens the textual form.
fn write_spec(w: &mut Write, func: &Function) -> Result {
    let sig = func.own_signature();
    write!(w, "function {}{}", func.name, sig)
}
|
||||
|
||||
fn write_preamble(w: &mut Write, func: &Function) -> result::Result<bool, Error> {
|
||||
let mut any = false;
|
||||
|
||||
for ss in func.stack_slots.keys() {
|
||||
any = true;
|
||||
try!(writeln!(w, " {} = {}", ss, func.stack_slots[ss]));
|
||||
}
|
||||
|
||||
for jt in func.jump_tables.keys() {
|
||||
any = true;
|
||||
try!(writeln!(w, " {} = {}", jt, func.jump_tables[jt]));
|
||||
}
|
||||
|
||||
Ok(any)
|
||||
}
|
||||
|
||||
// ====--------------------------------------------------------------------------------------====//
|
||||
//
|
||||
// Basic blocks
|
||||
//
|
||||
// ====--------------------------------------------------------------------------------------====//
|
||||
|
||||
// Write a single EBB argument as `value: type`.
pub fn write_arg(w: &mut Write, func: &Function, arg: Value) -> Result {
    let ty = func.dfg.value_type(arg);
    write!(w, "{}: {}", arg, ty)
}
|
||||
|
||||
pub fn write_ebb_header(w: &mut Write, func: &Function, ebb: Ebb) -> Result {
|
||||
// Write out the basic block header, outdented:
|
||||
//
|
||||
// ebb1:
|
||||
// ebb1(vx1: i32):
|
||||
// ebb10(vx4: f64, vx5: b1):
|
||||
//
|
||||
|
||||
// If we're writing encoding annotations, shift by 20.
|
||||
if !func.encodings.is_empty() {
|
||||
try!(write!(w, " "));
|
||||
}
|
||||
|
||||
let mut args = func.dfg.ebb_args(ebb);
|
||||
match args.next() {
|
||||
None => return writeln!(w, "{}:", ebb),
|
||||
Some(arg) => {
|
||||
try!(write!(w, "{}(", ebb));
|
||||
try!(write_arg(w, func, arg));
|
||||
}
|
||||
}
|
||||
// Remaining args.
|
||||
for arg in args {
|
||||
try!(write!(w, ", "));
|
||||
try!(write_arg(w, func, arg));
|
||||
}
|
||||
writeln!(w, "):")
|
||||
}
|
||||
|
||||
pub fn write_ebb(w: &mut Write, func: &Function, isa: Option<&TargetIsa>, ebb: Ebb) -> Result {
|
||||
try!(write_ebb_header(w, func, ebb));
|
||||
for inst in func.layout.ebb_insts(ebb) {
|
||||
try!(write_instruction(w, func, isa, inst));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
// ====--------------------------------------------------------------------------------------====//
|
||||
//
|
||||
// Instructions
|
||||
//
|
||||
// ====--------------------------------------------------------------------------------------====//
|
||||
|
||||
// Should `inst` be printed with a type suffix?
|
||||
//
|
||||
// Polymorphic instructions may need a suffix indicating the value of the controlling type variable
|
||||
// if it can't be trivially inferred.
|
||||
//
|
||||
fn type_suffix(func: &Function, inst: Inst) -> Option<Type> {
|
||||
let constraints = func.dfg[inst].opcode().constraints();
|
||||
|
||||
if !constraints.is_polymorphic() {
|
||||
return None;
|
||||
}
|
||||
|
||||
// If the controlling type variable can be inferred from the type of the designated value input
|
||||
// operand, we don't need the type suffix.
|
||||
// TODO: Should we include the suffix when the input value is defined in another block? The
|
||||
// parser needs to know the type of the value, so it must be defined in a block that lexically
|
||||
// comes before this one.
|
||||
if constraints.use_typevar_operand() {
|
||||
return None;
|
||||
}
|
||||
|
||||
// This polymorphic instruction doesn't support basic type inference.
|
||||
// The controlling type variable is required to be the type of the first result.
|
||||
let rtype = func.dfg.value_type(func.dfg.first_result(inst));
|
||||
assert!(!rtype.is_void(),
|
||||
"Polymorphic instruction must produce a result");
|
||||
Some(rtype)
|
||||
}
|
||||
|
||||
fn write_instruction(w: &mut Write,
|
||||
func: &Function,
|
||||
isa: Option<&TargetIsa>,
|
||||
inst: Inst)
|
||||
-> Result {
|
||||
// Write out encoding info.
|
||||
if let Some(enc) = func.encodings.get(inst).cloned() {
|
||||
let mut s = String::with_capacity(16);
|
||||
if let Some(isa) = isa {
|
||||
try!(write!(s, "[{}]", isa.display_enc(enc)));
|
||||
} else {
|
||||
try!(write!(s, "[{}]", enc));
|
||||
}
|
||||
// Align instruction following ISA annotation to col 24.
|
||||
try!(write!(w, "{:23} ", s));
|
||||
} else {
|
||||
// No annotations, simply indent by 4.
|
||||
try!(write!(w, " "));
|
||||
}
|
||||
|
||||
// Write out the result values, if any.
|
||||
let mut has_results = false;
|
||||
for r in func.dfg.inst_results(inst) {
|
||||
if !has_results {
|
||||
has_results = true;
|
||||
try!(write!(w, "{}", r));
|
||||
} else {
|
||||
try!(write!(w, ", {}", r));
|
||||
}
|
||||
}
|
||||
if has_results {
|
||||
try!(write!(w, " = "));
|
||||
}
|
||||
|
||||
// Then the opcode, possibly with a '.type' suffix.
|
||||
let opcode = func.dfg[inst].opcode();
|
||||
|
||||
match type_suffix(func, inst) {
|
||||
Some(suf) => try!(write!(w, "{}.{}", opcode, suf)),
|
||||
None => try!(write!(w, "{}", opcode)),
|
||||
}
|
||||
|
||||
// Then the operands, depending on format.
|
||||
use ir::instructions::InstructionData::*;
|
||||
match func.dfg[inst] {
|
||||
Nullary { .. } => writeln!(w, ""),
|
||||
Unary { arg, .. } => writeln!(w, " {}", arg),
|
||||
UnaryImm { imm, .. } => writeln!(w, " {}", imm),
|
||||
UnaryIeee32 { imm, .. } => writeln!(w, " {}", imm),
|
||||
UnaryIeee64 { imm, .. } => writeln!(w, " {}", imm),
|
||||
UnaryImmVector { ref data, .. } => writeln!(w, " {}", data),
|
||||
UnarySplit { arg, .. } => writeln!(w, " {}", arg),
|
||||
Binary { args, .. } => writeln!(w, " {}, {}", args[0], args[1]),
|
||||
BinaryImm { arg, imm, .. } => writeln!(w, " {}, {}", arg, imm),
|
||||
BinaryImmRev { imm, arg, .. } => writeln!(w, " {}, {}", imm, arg),
|
||||
BinaryOverflow { args, .. } => writeln!(w, " {}, {}", args[0], args[1]),
|
||||
Ternary { args, .. } => writeln!(w, " {}, {}, {}", args[0], args[1], args[2]),
|
||||
TernaryOverflow { ref data, .. } => writeln!(w, " {}", data),
|
||||
InsertLane { lane, args, .. } => writeln!(w, " {}, {}, {}", args[0], lane, args[1]),
|
||||
ExtractLane { lane, arg, .. } => writeln!(w, " {}, {}", arg, lane),
|
||||
IntCompare { cond, args, .. } => writeln!(w, " {}, {}, {}", cond, args[0], args[1]),
|
||||
FloatCompare { cond, args, .. } => writeln!(w, " {}, {}, {}", cond, args[0], args[1]),
|
||||
Jump { ref data, .. } => writeln!(w, " {}", data),
|
||||
Branch { ref data, .. } => writeln!(w, " {}", data),
|
||||
BranchTable { arg, table, .. } => writeln!(w, " {}, {}", arg, table),
|
||||
Call { ref data, .. } => writeln!(w, " {}", data),
|
||||
Return { ref data, .. } => {
|
||||
if data.varargs.is_empty() {
|
||||
writeln!(w, "")
|
||||
} else {
|
||||
writeln!(w, " {}", data.varargs)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use ir::{Function, FunctionName, StackSlotData};
|
||||
use ir::types;
|
||||
|
||||
#[test]
|
||||
fn basic() {
|
||||
let mut f = Function::new();
|
||||
assert_eq!(f.to_string(), "function \"\"() {\n}\n");
|
||||
|
||||
f.name = FunctionName::new("foo".to_string());
|
||||
assert_eq!(f.to_string(), "function foo() {\n}\n");
|
||||
|
||||
f.stack_slots.push(StackSlotData::new(4));
|
||||
assert_eq!(f.to_string(),
|
||||
"function foo() {\n ss0 = stack_slot 4\n}\n");
|
||||
|
||||
let ebb = f.dfg.make_ebb();
|
||||
f.layout.append_ebb(ebb);
|
||||
assert_eq!(f.to_string(),
|
||||
"function foo() {\n ss0 = stack_slot 4\n\nebb0:\n}\n");
|
||||
|
||||
f.dfg.append_ebb_arg(ebb, types::I8);
|
||||
assert_eq!(f.to_string(),
|
||||
"function foo() {\n ss0 = stack_slot 4\n\nebb0(vx0: i8):\n}\n");
|
||||
|
||||
f.dfg.append_ebb_arg(ebb, types::F32.by(4).unwrap());
|
||||
assert_eq!(f.to_string(),
|
||||
"function foo() {\n ss0 = stack_slot 4\n\nebb0(vx0: i8, vx1: f32x4):\n}\n");
|
||||
}
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
[package]
|
||||
authors = ["The Cretonne Project Developers"]
|
||||
name = "filecheck"
|
||||
version = "0.0.0"
|
||||
publish = false
|
||||
|
||||
[lib]
|
||||
name = "filecheck"
|
||||
path = "lib.rs"
|
||||
|
||||
[dependencies]
|
||||
regex = "0.1.71"
|
||||
@@ -1,419 +0,0 @@
|
||||
use error::{Error, Result};
|
||||
use variable::{VariableMap, Value, varname_prefix};
|
||||
use pattern::Pattern;
|
||||
use regex::{Regex, Captures};
|
||||
use std::borrow::Cow;
|
||||
use std::collections::HashMap;
|
||||
use std::cmp::max;
|
||||
use std::fmt::{self, Display, Formatter};
|
||||
use MatchRange;
|
||||
use explain::{Recorder, Explainer};
|
||||
|
||||
// The different kinds of directives we support.
|
||||
enum Directive {
|
||||
Check(Pattern),
|
||||
SameLn(Pattern),
|
||||
NextLn(Pattern),
|
||||
Unordered(Pattern),
|
||||
Not(Pattern),
|
||||
Regex(String, String),
|
||||
}
|
||||
|
||||
// Regular expression matching a directive.
|
||||
// The match groups are:
|
||||
//
|
||||
// 1. Keyword.
|
||||
// 2. Rest of line / pattern.
|
||||
//
|
||||
const DIRECTIVE_RX: &'static str = r"\b(check|sameln|nextln|unordered|not|regex):\s+(.*)";
|
||||
|
||||
impl Directive {
|
||||
/// Create a new directive from a `DIRECTIVE_RX` match.
|
||||
fn new(caps: Captures) -> Result<Directive> {
|
||||
let cmd = caps.at(1).expect("group 1 must match");
|
||||
let rest = caps.at(2).expect("group 2 must match");
|
||||
|
||||
if cmd == "regex" {
|
||||
return Directive::regex(rest);
|
||||
}
|
||||
|
||||
// All other commands are followed by a pattern.
|
||||
let pat = try!(rest.parse());
|
||||
|
||||
match cmd {
|
||||
"check" => Ok(Directive::Check(pat)),
|
||||
"sameln" => Ok(Directive::SameLn(pat)),
|
||||
"nextln" => Ok(Directive::NextLn(pat)),
|
||||
"unordered" => Ok(Directive::Unordered(pat)),
|
||||
"not" => {
|
||||
if !pat.defs().is_empty() {
|
||||
let msg = format!("can't define variables '$({}=...' in not: {}",
|
||||
pat.defs()[0],
|
||||
rest);
|
||||
Err(Error::DuplicateDef(msg))
|
||||
} else {
|
||||
Ok(Directive::Not(pat))
|
||||
}
|
||||
}
|
||||
_ => panic!("unexpected command {} in regex match", cmd),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a `regex:` directive from a `VAR=...` string.
|
||||
fn regex(rest: &str) -> Result<Directive> {
|
||||
let varlen = varname_prefix(rest);
|
||||
if varlen == 0 {
|
||||
return Err(Error::Syntax(format!("invalid variable name in regex: {}", rest)));
|
||||
}
|
||||
let var = rest[0..varlen].to_string();
|
||||
if !rest[varlen..].starts_with("=") {
|
||||
return Err(Error::Syntax(format!("expected '=' after variable '{}' in regex: {}",
|
||||
var,
|
||||
rest)));
|
||||
}
|
||||
Ok(Directive::Regex(var, rest[varlen + 1..].to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Builder for constructing a `Checker` instance.
|
||||
pub struct CheckerBuilder {
|
||||
directives: Vec<Directive>,
|
||||
linerx: Regex,
|
||||
}
|
||||
|
||||
impl CheckerBuilder {
|
||||
/// Create a new, blank `CheckerBuilder`.
|
||||
pub fn new() -> CheckerBuilder {
|
||||
CheckerBuilder {
|
||||
directives: Vec::new(),
|
||||
linerx: Regex::new(DIRECTIVE_RX).unwrap(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a potential directive line.
|
||||
///
|
||||
/// Returns true if this is a a directive with one of the known prefixes.
|
||||
/// Returns false if no known directive was found.
|
||||
/// Returns an error if there is a problem with the directive.
|
||||
pub fn directive(&mut self, l: &str) -> Result<bool> {
|
||||
match self.linerx.captures(l) {
|
||||
Some(caps) => {
|
||||
self.directives.push(try!(Directive::new(caps)));
|
||||
Ok(true)
|
||||
}
|
||||
None => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Add multiple directives.
|
||||
///
|
||||
/// The text is split into lines that are added individually as potential directives.
|
||||
/// This method can be used to parse a whole test file containing multiple directives.
|
||||
pub fn text(&mut self, t: &str) -> Result<&mut Self> {
|
||||
for caps in self.linerx.captures_iter(t) {
|
||||
self.directives.push(try!(Directive::new(caps)));
|
||||
}
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Get the finished `Checker`.
|
||||
pub fn finish(&mut self) -> Checker {
|
||||
// Move directives into the new checker, leaving `self.directives` empty and ready for
|
||||
// building a new checker.
|
||||
Checker::new(self.directives.split_off(0))
|
||||
}
|
||||
}
|
||||
|
||||
/// Verify a list of directives against a test input.
|
||||
///
|
||||
/// Use a `CheckerBuilder` to construct a `Checker`. Then use the `test` method to verify the list
|
||||
/// of directives against a test input.
|
||||
pub struct Checker {
|
||||
directives: Vec<Directive>,
|
||||
}
|
||||
|
||||
impl Checker {
|
||||
fn new(directives: Vec<Directive>) -> Checker {
|
||||
Checker { directives: directives }
|
||||
}
|
||||
|
||||
/// An empty checker contains no directives, and will match any input string.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.directives.is_empty()
|
||||
}
|
||||
|
||||
/// Verify directives against the input text.
|
||||
///
|
||||
/// This returns `true` if the text matches all the directives, `false` if it doesn't.
|
||||
/// An error is only returned if there is a problem with the directives.
|
||||
pub fn check(&self, text: &str, vars: &VariableMap) -> Result<bool> {
|
||||
self.run(text, vars, &mut ())
|
||||
}
|
||||
|
||||
/// Explain how directives are matched against the input text.
|
||||
pub fn explain(&self, text: &str, vars: &VariableMap) -> Result<(bool, String)> {
|
||||
let mut expl = Explainer::new(text);
|
||||
let success = try!(self.run(text, vars, &mut expl));
|
||||
expl.finish();
|
||||
Ok((success, expl.to_string()))
|
||||
}
|
||||
|
||||
fn run(&self, text: &str, vars: &VariableMap, recorder: &mut Recorder) -> Result<bool> {
|
||||
let mut state = State::new(text, vars, recorder);
|
||||
|
||||
// For each pending `not:` check, store (begin-offset, regex).
|
||||
let mut nots = Vec::new();
|
||||
|
||||
for (dct_idx, dct) in self.directives.iter().enumerate() {
|
||||
let (pat, range) = match *dct {
|
||||
Directive::Check(ref pat) => (pat, state.check()),
|
||||
Directive::SameLn(ref pat) => (pat, state.sameln()),
|
||||
Directive::NextLn(ref pat) => (pat, state.nextln()),
|
||||
Directive::Unordered(ref pat) => (pat, state.unordered(pat)),
|
||||
Directive::Not(ref pat) => {
|
||||
// Resolve `not:` directives immediately to get the right variable values, but
|
||||
// don't match it until we know the end of the range.
|
||||
//
|
||||
// The `not:` directives test the same range as `unordered:` directives. In
|
||||
// particular, if they refer to defined variables, their range is restricted to
|
||||
// the text following the match that defined the variable.
|
||||
nots.push((dct_idx, state.unordered_begin(pat), try!(pat.resolve(&state))));
|
||||
continue;
|
||||
}
|
||||
Directive::Regex(ref var, ref rx) => {
|
||||
state.vars.insert(var.clone(),
|
||||
VarDef {
|
||||
value: Value::Regex(Cow::Borrowed(rx)),
|
||||
offset: 0,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
};
|
||||
// Check if `pat` matches in `range`.
|
||||
state.recorder.directive(dct_idx);
|
||||
if let Some((match_begin, match_end)) = try!(state.match_positive(pat, range)) {
|
||||
if let &Directive::Unordered(_) = dct {
|
||||
// This was an unordered unordered match.
|
||||
// Keep track of the largest matched position, but leave `last_ordered` alone.
|
||||
state.max_match = max(state.max_match, match_end);
|
||||
} else {
|
||||
// Ordered match.
|
||||
state.last_ordered = match_end;
|
||||
state.max_match = match_end;
|
||||
|
||||
// Verify any pending `not:` directives now that we know their range.
|
||||
for (not_idx, not_begin, rx) in nots.drain(..) {
|
||||
state.recorder.directive(not_idx);
|
||||
if let Some((s, e)) = rx.find(&text[not_begin..match_begin]) {
|
||||
// Matched `not:` pattern.
|
||||
state.recorder.matched_not(rx.as_str(), (not_begin + s, not_begin + e));
|
||||
return Ok(false);
|
||||
} else {
|
||||
state.recorder.missed_not(rx.as_str(), (not_begin, match_begin));
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No match!
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
|
||||
// Verify any pending `not:` directives after the last ordered directive.
|
||||
for (not_idx, not_begin, rx) in nots.drain(..) {
|
||||
state.recorder.directive(not_idx);
|
||||
if let Some(_) = rx.find(&text[not_begin..]) {
|
||||
// Matched `not:` pattern.
|
||||
// TODO: Use matched range for an error message.
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
|
||||
/// A local definition of a variable.
|
||||
pub struct VarDef<'a> {
|
||||
/// The value given to the variable.
|
||||
value: Value<'a>,
|
||||
/// Offset in input text from where the variable is available.
|
||||
offset: usize,
|
||||
}
|
||||
|
||||
struct State<'a> {
|
||||
text: &'a str,
|
||||
env_vars: &'a VariableMap,
|
||||
recorder: &'a mut Recorder,
|
||||
|
||||
vars: HashMap<String, VarDef<'a>>,
|
||||
// Offset after the last ordered match. This does not include recent unordered matches.
|
||||
last_ordered: usize,
|
||||
// Largest offset following a positive match, including unordered matches.
|
||||
max_match: usize,
|
||||
}
|
||||
|
||||
impl<'a> State<'a> {
|
||||
fn new(text: &'a str, env_vars: &'a VariableMap, recorder: &'a mut Recorder) -> State<'a> {
|
||||
State {
|
||||
text: text,
|
||||
env_vars: env_vars,
|
||||
recorder: recorder,
|
||||
vars: HashMap::new(),
|
||||
last_ordered: 0,
|
||||
max_match: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Get the offset following the match that defined `var`, or 0 if var is an environment
|
||||
// variable or unknown.
|
||||
fn def_offset(&self, var: &str) -> usize {
|
||||
self.vars.get(var).map(|&VarDef { offset, .. }| offset).unwrap_or(0)
|
||||
}
|
||||
|
||||
// Get the offset of the beginning of the next line after `pos`.
|
||||
fn bol(&self, pos: usize) -> usize {
|
||||
if let Some(offset) = self.text[pos..].find('\n') {
|
||||
pos + offset + 1
|
||||
} else {
|
||||
self.text.len()
|
||||
}
|
||||
}
|
||||
|
||||
// Get the range in text to be matched by a `check:`.
|
||||
fn check(&self) -> MatchRange {
|
||||
(self.max_match, self.text.len())
|
||||
}
|
||||
|
||||
// Get the range in text to be matched by a `sameln:`.
|
||||
fn sameln(&self) -> MatchRange {
|
||||
let b = self.max_match;
|
||||
let e = self.bol(b);
|
||||
(b, e)
|
||||
}
|
||||
|
||||
// Get the range in text to be matched by a `nextln:`.
|
||||
fn nextln(&self) -> MatchRange {
|
||||
let b = self.bol(self.max_match);
|
||||
let e = self.bol(b);
|
||||
(b, e)
|
||||
}
|
||||
|
||||
// Get the beginning of the range in text to be matched by a `unordered:` or `not:` directive.
|
||||
// The unordered directive must match after the directives that define the variables used.
|
||||
fn unordered_begin(&self, pat: &Pattern) -> usize {
|
||||
pat.parts()
|
||||
.iter()
|
||||
.filter_map(|part| part.ref_var())
|
||||
.map(|var| self.def_offset(var))
|
||||
.fold(self.last_ordered, max)
|
||||
}
|
||||
|
||||
// Get the range in text to be matched by a `unordered:` directive.
|
||||
fn unordered(&self, pat: &Pattern) -> MatchRange {
|
||||
(self.unordered_begin(pat), self.text.len())
|
||||
}
|
||||
|
||||
// Search for `pat` in `range`, return the range matched.
|
||||
// After a positive match, update variable definitions, if any.
|
||||
fn match_positive(&mut self, pat: &Pattern, range: MatchRange) -> Result<Option<MatchRange>> {
|
||||
let rx = try!(pat.resolve(self));
|
||||
let txt = &self.text[range.0..range.1];
|
||||
let defs = pat.defs();
|
||||
let matched_range = if defs.is_empty() {
|
||||
// Pattern defines no variables. Fastest search is `find`.
|
||||
rx.find(txt)
|
||||
} else {
|
||||
// We need the captures to define variables.
|
||||
rx.captures(txt).map(|caps| {
|
||||
let matched_range = caps.pos(0).expect("whole expression must match");
|
||||
for var in defs {
|
||||
let txtval = caps.name(var).unwrap_or("");
|
||||
self.recorder.defined_var(var, txtval);
|
||||
let vardef = VarDef {
|
||||
value: Value::Text(Cow::Borrowed(txtval)),
|
||||
// This offset is the end of the whole matched pattern, not just the text
|
||||
// defining the variable.
|
||||
offset: range.0 + matched_range.1,
|
||||
};
|
||||
self.vars.insert(var.clone(), vardef);
|
||||
}
|
||||
matched_range
|
||||
})
|
||||
};
|
||||
Ok(if let Some((b, e)) = matched_range {
|
||||
let r = (range.0 + b, range.0 + e);
|
||||
self.recorder.matched_check(rx.as_str(), r);
|
||||
Some(r)
|
||||
} else {
|
||||
self.recorder.missed_check(rx.as_str(), range);
|
||||
None
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> VariableMap for State<'a> {
|
||||
fn lookup(&self, varname: &str) -> Option<Value> {
|
||||
// First look for a local define.
|
||||
if let Some(&VarDef { ref value, .. }) = self.vars.get(varname) {
|
||||
Some(value.clone())
|
||||
} else {
|
||||
// No local, maybe an environment variable?
|
||||
self.env_vars.lookup(varname)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Directive {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
use self::Directive::*;
|
||||
match *self {
|
||||
Check(ref pat) => writeln!(f, "check: {}", pat),
|
||||
SameLn(ref pat) => writeln!(f, "sameln: {}", pat),
|
||||
NextLn(ref pat) => writeln!(f, "nextln: {}", pat),
|
||||
Unordered(ref pat) => writeln!(f, "unordered: {}", pat),
|
||||
Not(ref pat) => writeln!(f, "not: {}", pat),
|
||||
Regex(ref var, ref rx) => writeln!(f, "regex: {}={}", var, rx),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Checker {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
for (idx, dir) in self.directives.iter().enumerate() {
|
||||
try!(write!(f, "#{} {}", idx, dir));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::CheckerBuilder;
|
||||
use error::Error;
|
||||
|
||||
fn e2s(e: Error) -> String {
|
||||
e.to_string()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn directive() {
|
||||
let mut b = CheckerBuilder::new();
|
||||
|
||||
assert_eq!(b.directive("not here: more text").map_err(e2s), Ok(false));
|
||||
assert_eq!(b.directive("not here: regex: X=more text").map_err(e2s),
|
||||
Ok(true));
|
||||
assert_eq!(b.directive("regex: X = tommy").map_err(e2s),
|
||||
Err("expected '=' after variable 'X' in regex: X = tommy".to_string()));
|
||||
assert_eq!(b.directive("[arm]not: patt $x $(y) here").map_err(e2s),
|
||||
Ok(true));
|
||||
assert_eq!(b.directive("[x86]sameln: $x $(y=[^]]*) there").map_err(e2s),
|
||||
Ok(true));
|
||||
|
||||
let c = b.finish();
|
||||
assert_eq!(c.to_string(),
|
||||
"#0 regex: X=more text\n#1 not: patt $(x) $(y) here\n#2 sameln: $(x) \
|
||||
$(y=[^]]*) there\n");
|
||||
}
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
use std::result;
|
||||
use std::convert::From;
|
||||
use std::error::Error as StdError;
|
||||
use std::fmt;
|
||||
use regex;
|
||||
|
||||
/// A result from the filecheck library.
|
||||
pub type Result<T> = result::Result<T, Error>;
|
||||
|
||||
/// A filecheck error.
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
/// A syntax error in a check line.
|
||||
Syntax(String),
|
||||
/// A check refers to an undefined variable.
|
||||
///
|
||||
/// The pattern contains `$foo` where the `foo` variable has not yet been defined.
|
||||
/// Use `$$` to match a literal dollar sign.
|
||||
UndefVariable(String),
|
||||
/// A pattern contains a back-reference to a variable that was defined in the same pattern.
|
||||
///
|
||||
/// For example, `check: Hello $(world=.*) $world`. Backreferences are not support. Often the
|
||||
/// desired effect can be achieved with the `sameln` check:
|
||||
///
|
||||
/// ```text
|
||||
/// check: Hello $(world=[^ ]*)
|
||||
/// sameln: $world
|
||||
/// ```
|
||||
Backref(String),
|
||||
/// A pattern contains multiple definitions of the same variable.
|
||||
DuplicateDef(String),
|
||||
/// An error in a regular expression.
|
||||
///
|
||||
/// Use `cause()` to get the underlying `Regex` library error.
|
||||
Regex(regex::Error),
|
||||
}
|
||||
|
||||
impl StdError for Error {
|
||||
fn description(&self) -> &str {
|
||||
use Error::*;
|
||||
match *self {
|
||||
Syntax(ref s) => s,
|
||||
UndefVariable(ref s) => s,
|
||||
Backref(ref s) => s,
|
||||
DuplicateDef(ref s) => s,
|
||||
Regex(ref err) => err.description(),
|
||||
}
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&StdError> {
|
||||
use Error::*;
|
||||
match *self {
|
||||
Regex(ref err) => Some(err),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(fmt, "{}", self.description())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<regex::Error> for Error {
|
||||
fn from(e: regex::Error) -> Error {
|
||||
Error::Regex(e)
|
||||
}
|
||||
}
|
||||
@@ -1,196 +0,0 @@
|
||||
//! Explaining how *filecheck* matched or failed to match a file.
|
||||
|
||||
use MatchRange;
|
||||
use std::fmt::{self, Display, Formatter};
|
||||
use std::cmp::min;
|
||||
|
||||
/// Record events during matching.
|
||||
pub trait Recorder {
|
||||
/// Set the directive we're talking about now.
|
||||
fn directive(&mut self, dct: usize);
|
||||
|
||||
/// Matched a positive check directive (check/sameln/nextln/unordered).
|
||||
fn matched_check(&mut self, regex: &str, matched: MatchRange);
|
||||
|
||||
/// Matched a `not:` directive. This means the match will fail.
|
||||
fn matched_not(&mut self, regex: &str, matched: MatchRange);
|
||||
|
||||
/// Missed a positive check directive. The range given is the range searched for a match.
|
||||
fn missed_check(&mut self, regex: &str, searched: MatchRange);
|
||||
|
||||
/// Missed `not:` directive (as intended).
|
||||
fn missed_not(&mut self, regex: &str, searched: MatchRange);
|
||||
|
||||
/// The directive defined a variable.
|
||||
fn defined_var(&mut self, varname: &str, value: &str);
|
||||
}
|
||||
|
||||
/// The null recorder just doesn't listen to anything you say.
|
||||
impl Recorder for () {
|
||||
fn directive(&mut self, _: usize) {}
|
||||
fn matched_check(&mut self, _: &str, _: MatchRange) {}
|
||||
fn matched_not(&mut self, _: &str, _: MatchRange) {}
|
||||
fn defined_var(&mut self, _: &str, _: &str) {}
|
||||
fn missed_check(&mut self, _: &str, _: MatchRange) {}
|
||||
fn missed_not(&mut self, _: &str, _: MatchRange) {}
|
||||
}
|
||||
|
||||
struct Match {
|
||||
directive: usize,
|
||||
is_match: bool,
|
||||
is_not: bool,
|
||||
regex: String,
|
||||
range: MatchRange,
|
||||
}
|
||||
|
||||
struct VarDef {
|
||||
directive: usize,
|
||||
varname: String,
|
||||
value: String,
|
||||
}
|
||||
|
||||
/// Record an explanation for the matching process, success or failure.
|
||||
pub struct Explainer<'a> {
|
||||
text: &'a str,
|
||||
directive: usize,
|
||||
matches: Vec<Match>,
|
||||
vardefs: Vec<VarDef>,
|
||||
}
|
||||
|
||||
impl<'a> Explainer<'a> {
|
||||
pub fn new(text: &'a str) -> Explainer {
|
||||
Explainer {
|
||||
text: text,
|
||||
directive: 0,
|
||||
matches: Vec::new(),
|
||||
vardefs: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Finish up after recording all events in a match.
|
||||
pub fn finish(&mut self) {
|
||||
self.matches.sort_by_key(|m| (m.range, m.directive));
|
||||
self.vardefs.sort_by_key(|v| v.directive);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Display for Explainer<'a> {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
// Offset of beginning of the last line printed.
|
||||
let mut curln = 0;
|
||||
// Offset of beginning of the next line to be printed.
|
||||
let mut nextln = 0;
|
||||
|
||||
for m in &self.matches {
|
||||
// Emit lines until m.range.0 is visible.
|
||||
while nextln <= m.range.0 && nextln < self.text.len() {
|
||||
let newln = self.text[nextln..]
|
||||
.find('\n')
|
||||
.map(|d| nextln + d + 1)
|
||||
.unwrap_or(self.text.len());
|
||||
assert!(newln > nextln);
|
||||
try!(writeln!(f, "> {}", &self.text[nextln..newln - 1]));
|
||||
curln = nextln;
|
||||
nextln = newln;
|
||||
}
|
||||
|
||||
// Emit ~~~ under the part of the match in curln.
|
||||
if m.is_match {
|
||||
try!(write!(f, " "));
|
||||
let mend = min(m.range.1, nextln - 1);
|
||||
for pos in curln..mend {
|
||||
try!(if pos < m.range.0 {
|
||||
write!(f, " ")
|
||||
} else if pos == m.range.0 {
|
||||
write!(f, "^")
|
||||
} else {
|
||||
write!(f, "~")
|
||||
});
|
||||
}
|
||||
try!(writeln!(f, ""));
|
||||
}
|
||||
|
||||
// Emit the match message itself.
|
||||
try!(writeln!(f,
|
||||
"{} #{}{}: {}",
|
||||
if m.is_match { "Matched" } else { "Missed" },
|
||||
m.directive,
|
||||
if m.is_not { " not" } else { "" },
|
||||
m.regex));
|
||||
|
||||
// Emit any variable definitions.
|
||||
if let Ok(found) = self.vardefs.binary_search_by_key(&m.directive, |v| v.directive) {
|
||||
let mut first = found;
|
||||
while first > 0 && self.vardefs[first - 1].directive == m.directive {
|
||||
first -= 1;
|
||||
}
|
||||
for d in &self.vardefs[first..] {
|
||||
if d.directive != m.directive {
|
||||
break;
|
||||
}
|
||||
try!(writeln!(f, "Define {}={}", d.varname, d.value));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Emit trailing lines.
|
||||
for line in self.text[nextln..].lines() {
|
||||
try!(writeln!(f, "> {}", line));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Recorder for Explainer<'a> {
|
||||
fn directive(&mut self, dct: usize) {
|
||||
self.directive = dct;
|
||||
}
|
||||
|
||||
fn matched_check(&mut self, regex: &str, matched: MatchRange) {
|
||||
self.matches.push(Match {
|
||||
directive: self.directive,
|
||||
is_match: true,
|
||||
is_not: false,
|
||||
regex: regex.to_owned(),
|
||||
range: matched,
|
||||
});
|
||||
}
|
||||
|
||||
fn matched_not(&mut self, regex: &str, matched: MatchRange) {
|
||||
self.matches.push(Match {
|
||||
directive: self.directive,
|
||||
is_match: true,
|
||||
is_not: true,
|
||||
regex: regex.to_owned(),
|
||||
range: matched,
|
||||
});
|
||||
}
|
||||
|
||||
fn missed_check(&mut self, regex: &str, searched: MatchRange) {
|
||||
self.matches.push(Match {
|
||||
directive: self.directive,
|
||||
is_match: false,
|
||||
is_not: false,
|
||||
regex: regex.to_owned(),
|
||||
range: searched,
|
||||
});
|
||||
}
|
||||
|
||||
fn missed_not(&mut self, regex: &str, searched: MatchRange) {
|
||||
self.matches.push(Match {
|
||||
directive: self.directive,
|
||||
is_match: false,
|
||||
is_not: true,
|
||||
regex: regex.to_owned(),
|
||||
range: searched,
|
||||
});
|
||||
}
|
||||
|
||||
fn defined_var(&mut self, varname: &str, value: &str) {
|
||||
self.vardefs.push(VarDef {
|
||||
directive: self.directive,
|
||||
varname: varname.to_owned(),
|
||||
value: value.to_owned(),
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,250 +0,0 @@
|
||||
//! This crate provides a text pattern matching library with functionality similar to the LLVM
|
||||
//! project's [FileCheck command](http://llvm.org/docs/CommandGuide/FileCheck.html).
|
||||
//!
|
||||
//! A list of directives is typically extracted from a file containing a test case. The test case
|
||||
//! is then run through the program under test, and its output matched against the directives.
|
||||
//!
|
||||
//! See the [CheckerBuilder](struct.CheckerBuilder.html) and [Checker](struct.Checker.html) types
|
||||
//! for the main library API.
|
||||
//!
|
||||
//! # Directives
|
||||
//!
|
||||
//! These are the directives recognized by *filecheck*:
|
||||
//! <pre class="rust">
|
||||
//! <a href="#the-check-directive">check: <i><pattern></i></a>
|
||||
//! <a href="#the-sameln-directive">sameln: <i><pattern></i></a>
|
||||
//! <a href="#the-nextln-directive">nextln: <i><pattern></i></a>
|
||||
//! <a href="#the-unordered-directive">unordered: <i><pattern></i></a>
|
||||
//! <a href="#the-not-directive">not: <i><pattern></i></a>
|
||||
//! <a href="#the-regex-directive">regex: <i><variable></i>=<i><regex></i></a>
|
||||
//! </pre>
|
||||
//! Each directive is described in more detail below.
|
||||
//!
|
||||
//! ## Example
|
||||
//!
|
||||
//! The Rust program below prints the primes less than 100. It has *filecheck* directives embedded
|
||||
//! in comments:
|
||||
//!
|
||||
//! ```rust
|
||||
//! fn is_prime(x: u32) -> bool {
|
||||
//! (2..x).all(|d| x % d != 0)
|
||||
//! }
|
||||
//!
|
||||
//! // Check that we get the primes and nothing else:
|
||||
//! // regex: NUM=\d+
|
||||
//! // not: $NUM
|
||||
//! // check: 2
|
||||
//! // nextln: 3
|
||||
//! // check: 89
|
||||
//! // nextln: 97
|
||||
//! // not: $NUM
|
||||
//! fn main() {
|
||||
//! for p in (2..10).filter(|&x| is_prime(x)) {
|
||||
//! println!("{}", p);
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! A test driver compiles and runs the program, then pipes the output through *filecheck*:
|
||||
//!
|
||||
//! ```sh
|
||||
//! $ rustc primes.rs
|
||||
//! $ ./primes | cton-util filecheck -v
|
||||
//! #0 regex: NUM=\d+
|
||||
//! #1 not: $NUM
|
||||
//! #2 check: 2
|
||||
//! #3 nextln: 3
|
||||
//! #4 check: 89
|
||||
//! #5 nextln: 97
|
||||
//! #6 not: $NUM
|
||||
//! no match #1: \d+
|
||||
//! > 2
|
||||
//! ~
|
||||
//! match #2: \b2\b
|
||||
//! > 3
|
||||
//! ~
|
||||
//! match #3: \b3\b
|
||||
//! > 5
|
||||
//! > 7
|
||||
//! ...
|
||||
//! > 79
|
||||
//! > 83
|
||||
//! > 89
|
||||
//! ~~
|
||||
//! match #4: \b89\b
|
||||
//! > 97
|
||||
//! ~~
|
||||
//! match #5: \b97\b
|
||||
//! no match #6: \d+
|
||||
//! OK
|
||||
//! ```
|
||||
//!
|
||||
//! ## The `check:` directive
|
||||
//!
|
||||
//! Match patterns non-overlapping and in order:
|
||||
//!
|
||||
//! ```sh
|
||||
//! #0 check: one
|
||||
//! #1 check: two
|
||||
//! ```
|
||||
//!
|
||||
//! These directives will match the string `"one two"`, but not `"two one"`. The second directive
|
||||
//! must match after the first one, and it can't overlap.
|
||||
//!
|
||||
//! ## The `sameln:` directive
|
||||
//!
|
||||
//! Match a pattern in the same line as the previous match.
|
||||
//!
|
||||
//! ```sh
|
||||
//! #0 check: one
|
||||
//! #1 sameln: two
|
||||
//! ```
|
||||
//!
|
||||
//! These directives will match the string `"one two"`, but not `"one\ntwo"`. The second match must
|
||||
//! be in the same line as the first. Like the `check:` directive, the match must also follow the
|
||||
//! first match, so `"two one"` would not be matched.
|
||||
//!
|
||||
//! If there is no previous match, `sameln:` matches on the first line of the input.
|
||||
//!
|
||||
//! ## The `nextln:` directive
|
||||
//!
|
||||
//! Match a pattern in the next line after the previous match.
|
||||
//!
|
||||
//! ```sh
|
||||
//! #0 check: one
|
||||
//! #1 nextln: two
|
||||
//! ```
|
||||
//!
|
||||
//! These directives will match the string `"one\ntwo"`, but not `"one two"` or `"one\n\ntwo"`.
|
||||
//!
|
||||
//! If there is no previous match, `nextln:` matches on the second line of the input as if there
|
||||
//! were a previous match on the first line.
|
||||
//!
|
||||
//! ## The `unordered:` directive
|
||||
//!
|
||||
//! Match patterns in any order, and possibly overlapping each other.
|
||||
//!
|
||||
//! ```sh
|
||||
//! #0 unordered: one
|
||||
//! #1 unordered: two
|
||||
//! ```
|
||||
//!
|
||||
//! These directives will match the string `"one two"` *and* the string `"two one"`.
|
||||
//!
|
||||
//! When a normal ordered match is inserted into a sequence of `unordered:` directives, it acts as
|
||||
//! a barrier:
|
||||
//!
|
||||
//! ```sh
|
||||
//! #0 unordered: one
|
||||
//! #1 unordered: two
|
||||
//! #2 check: three
|
||||
//! #3 unordered: four
|
||||
//! #4 unordered: five
|
||||
//! ```
|
||||
//!
|
||||
//! These directives will match `"two one three four five"`, but not `"two three one four five"`.
|
||||
//! The `unordered:` matches are not allowed to cross the ordered `check:` directive.
|
||||
//!
|
||||
//! When `unordered:` matches define and use variables, a topological order is enforced. This means
|
||||
//! that a match referencing a variable must follow the match where the variable was defined:
|
||||
//!
|
||||
//! ```sh
|
||||
//! #0 regex: V=\bv\d+\b
|
||||
//! #1 unordered: $(va=$V) = load
|
||||
//! #2 unordered: $(vb=$V) = iadd $va
|
||||
//! #3 unordered: $(vc=$V) = load
|
||||
//! #4 unordered: iadd $va, $vc
|
||||
//! ```
|
||||
//!
|
||||
//! In the above directives, #2 must match after #1, and #4 must match after both #1 and #3, but
|
||||
//! otherwise they can match in any order.
|
||||
//!
|
||||
//! ## The `not:` directive
|
||||
//!
|
||||
//! Check that a pattern *does not* appear between matches.
|
||||
//!
|
||||
//! ```sh
|
||||
//! #0 check: one
|
||||
//! #1 not: two
|
||||
//! #2 check: three
|
||||
//! ```
|
||||
//!
|
||||
//! The directives above will match `"one five three"`, but not `"one two three"`.
|
||||
//!
|
||||
//! The pattern in a `not:` directive can't define any variables. Since it never matches anything,
|
||||
//! the variables would not get a value.
|
||||
//!
|
||||
//! ## The `regex:` directive
|
||||
//!
|
||||
//! Define a shorthand name for a regular expression.
|
||||
//!
|
||||
//! ```sh
|
||||
//! #0 regex: ID=\b[_a-zA-Z][_0-9a-zA-Z]*\b
|
||||
//! #1 check: $ID + $ID
|
||||
//! ```
|
||||
//!
|
||||
//! The `regex:` directive gives a name to a regular expression which can then be used as part of a
|
||||
//! pattern to match. Patterns are otherwise just plain text strings to match, so this is not
|
||||
//! simple macro expansion.
|
||||
//!
|
||||
//! See [the Rust regex crate](../regex/index.html#syntax) for the regular expression syntax.
|
||||
//!
|
||||
//! # Patterns and variables
|
||||
//!
|
||||
//! Patterns are plain text strings to be matched in the input file. The dollar sign is used as an
|
||||
//! escape character to expand variables. The following escape sequences are recognized:
|
||||
//!
|
||||
//! <pre>
|
||||
//! $$ Match single dollar sign.
|
||||
//! $() Match the empty string.
|
||||
//! $(=<i><regex></i>) Match regular expression <i><regex></i>.
|
||||
//! $<i><var></i> Match contents of variable <i><var></i>.
|
||||
//! $(<i><var></i>) Match contents of variable <i><var></i>.
|
||||
//! $(<i><var></i>=<i><regex></i>) Match <i><regex></i>, then
|
||||
//! define <i><var></i> as the matched text.
|
||||
//! $(<i><var></i>=$<i><rxvar></i>) Match regex in <i><rxvar></i>, then
|
||||
//! define <i><var></i> as the matched text.
|
||||
//! </pre>
|
||||
//!
|
||||
//! Variables can contain either plain text or regular expressions. Plain text variables are
|
||||
//! defined with the `$(var=...)` syntax in a previous directive. They match the same text again.
|
||||
//! Backreferences within the same pattern are not allowed. When a variable is defined in a
|
||||
//! pattern, it can't be referenced again in the same pattern.
|
||||
//!
|
||||
//! Regular expression variables are defined with the `regex:` directive. They match the regular
|
||||
//! expression each time they are used, so the matches don't need to be identical.
|
||||
//!
|
||||
//! ## Word boundaries
|
||||
//!
|
||||
//! If a pattern begins or ends with a (plain text) letter or number, it will only match on a word
|
||||
//! boundary. Use the `$()` empty string match to prevent this:
|
||||
//!
|
||||
//! ```sh
|
||||
//! check: one$()
|
||||
//! ```
|
||||
//!
|
||||
//! This will match `"one"` and `"onetwo"`, but not `"zeroone"`.
|
||||
//!
|
||||
//! The empty match syntax can also be used to require leading or trailing whitespace:
|
||||
//!
|
||||
//! ```sh
|
||||
//! check: one, $()
|
||||
//! ```
|
||||
//!
|
||||
//! This will match `"one, two"` , but not `"one,two"`. Without the `$()`, trailing whitespace
|
||||
//! would be trimmed from the pattern.
|
||||
|
||||
pub use error::{Error, Result};
|
||||
pub use variable::{VariableMap, Value, NO_VARIABLES};
|
||||
pub use checker::{Checker, CheckerBuilder};
|
||||
|
||||
extern crate regex;
|
||||
|
||||
mod error;
|
||||
mod variable;
|
||||
mod pattern;
|
||||
mod checker;
|
||||
mod explain;
|
||||
|
||||
/// The range of a match in the input text.
///
/// A `(start, end)` pair delimiting the matched span; presumably byte offsets
/// into the input text — TODO confirm against the `Checker` implementation.
pub type MatchRange = (usize, usize);
|
||||
@@ -1,518 +0,0 @@
|
||||
//! Pattern matching for a single directive.
|
||||
|
||||
use error::{Error, Result};
|
||||
use variable::{varname_prefix, VariableMap, Value};
|
||||
use std::str::FromStr;
|
||||
use std::fmt::{self, Display, Formatter, Write};
|
||||
use regex::{Regex, RegexBuilder, quote};
|
||||
|
||||
/// A pattern to match as specified in a directive.
///
/// Each pattern is broken into a sequence of parts that must match in order. The kinds of parts
/// are:
///
/// 1. Plain text match.
/// 2. Variable match, `$FOO` or `$(FOO)`. The variable `FOO` may expand to plain text or a regex.
/// 3. Variable definition from literal regex, `$(foo=.*)`. Match the regex and assign matching text
///    to variable `foo`.
/// 4. Variable definition from regex variable, `$(foo=$RX)`. Lookup variable `RX` which should
///    expand to a regex, match the regex, and assign matching text to variable `foo`.
///
pub struct Pattern {
    // The sequence of parts; all must match, in order, for the pattern to match.
    parts: Vec<Part>,
    // Variables defined by this pattern.
    // `Part::DefLit` and `Part::DefVar` refer to entries here by index (`def`).
    defs: Vec<String>,
}
|
||||
|
||||
/// One atomic part of a pattern.
#[derive(Debug, PartialEq, Eq)]
pub enum Part {
    /// Match a plain string.
    Text(String),
    /// Match a regular expression. The regex has already been wrapped in a non-capturing group if
    /// necessary, so it is safe to concatenate.
    Regex(String),
    /// Match the contents of a variable, which can be plain text or regex.
    Var(String),
    /// Match literal regex, then assign match to variable.
    /// The regex has already been wrapped in a named capture group.
    /// `def` is an index into the owning `Pattern`'s `defs` list.
    DefLit { def: usize, regex: String },
    /// Lookup variable `var`, match resulting regex, assign matching text to variable `defs[def]`.
    DefVar { def: usize, var: String },
}
|
||||
|
||||
impl Part {
|
||||
/// Get the variabled referenced by this part, if any.
|
||||
pub fn ref_var(&self) -> Option<&str> {
|
||||
match *self {
|
||||
Part::Var(ref var) => Some(var),
|
||||
Part::DefVar { ref var, .. } => Some(var),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Pattern {
    /// Create a new blank pattern. Use the `FromStr` trait to generate Patterns with content.
    fn new() -> Pattern {
        Pattern {
            parts: Vec::new(),
            defs: Vec::new(),
        }
    }

    /// Check if the variable `v` is defined by this pattern.
    pub fn defines_var(&self, v: &str) -> bool {
        self.defs.iter().any(|d| d == v)
    }

    /// Add a definition of a new variable.
    /// Return the allocated def number.
    ///
    /// Fails with `Error::DuplicateDef` if `v` is already defined by this pattern.
    fn add_def(&mut self, v: &str) -> Result<usize> {
        if self.defines_var(v) {
            Err(Error::DuplicateDef(format!("duplicate definition of ${} in same pattern", v)))
        } else {
            let idx = self.defs.len();
            self.defs.push(v.to_string());
            Ok(idx)
        }
    }

    /// Parse a `Part` from a prefix of `s`.
    /// Return the part and the number of bytes consumed from `s`.
    /// Adds defined variables to `self.defs`.
    fn parse_part(&mut self, s: &str) -> Result<(Part, usize)> {
        let dollar = s.find('$');
        if dollar != Some(0) {
            // String doesn't begin with a dollar sign, so match plain text up to the dollar sign.
            // With no dollar sign at all, the whole string is one text part.
            let end = dollar.unwrap_or(s.len());
            return Ok((Part::Text(s[0..end].to_string()), end));
        }

        // String starts with a dollar sign. Look for these possibilities:
        //
        // 1. `$$`.
        // 2. `$var`.
        // 3. `$(var)`.
        // 4. `$(var=regex)`. Where `regex` is a regular expression possibly containing matching
        //    braces.
        // 5. `$(var=$VAR)`.

        // A doubled dollar sign matches a single dollar sign.
        if s.starts_with("$$") {
            return Ok((Part::Text("$".to_string()), 2));
        }

        // Look for `$var`.
        // `varname_end` is the byte index one past the last varname character.
        let varname_end = 1 + varname_prefix(&s[1..]);
        if varname_end != 1 {
            return Ok((Part::Var(s[1..varname_end].to_string()), varname_end));
        }

        // All remaining possibilities start with `$(`.
        if s.len() < 2 || !s.starts_with("$(") {
            return Err(Error::Syntax("pattern syntax error, use $$ to match a single $"
                .to_string()));
        }

        // Match the variable name, allowing for an empty varname in `$()`, or `$(=...)`.
        let varname_end = 2 + varname_prefix(&s[2..]);
        let varname = s[2..varname_end].to_string();

        // Dispatch on the character that follows the (possibly empty) variable name.
        match s[varname_end..].chars().next() {
            None => {
                return Err(Error::Syntax(format!("unterminated $({}...", varname)));
            }
            Some(')') => {
                let part = if varname.is_empty() {
                    // Match `$()`, turn it into an empty text match.
                    Part::Text(varname)
                } else {
                    // Match `$(var)`.
                    Part::Var(varname)
                };
                return Ok((part, varname_end + 1));
            }
            Some('=') => {
                // Variable definition. Fall through.
            }
            Some(ch) => {
                return Err(Error::Syntax(format!("syntax error in $({}... '{}'", varname, ch)));
            }
        }

        // This is a variable definition of the form `$(var=...`.

        // Allocate a definition index.
        // `None` means the anonymous form `$(=...)`, which defines nothing.
        let def = if varname.is_empty() {
            None
        } else {
            Some(try!(self.add_def(&varname)))
        };

        // Match `$(var=$PAT)`.
        if s[varname_end + 1..].starts_with('$') {
            let refname_begin = varname_end + 2;
            let refname_end = refname_begin + varname_prefix(&s[refname_begin..]);
            if refname_begin == refname_end {
                return Err(Error::Syntax(format!("expected variable name in $({}=$...", varname)));
            }
            if !s[refname_end..].starts_with(')') {
                return Err(Error::Syntax(format!("expected ')' after $({}=${}...",
                                                 varname,
                                                 &s[refname_begin..refname_end])));
            }
            let refname = s[refname_begin..refname_end].to_string();
            return if let Some(defidx) = def {
                Ok((Part::DefVar {
                    def: defidx,
                    var: refname,
                },
                    refname_end + 1))
            } else {
                // `$(=$RX)` is rejected: a reference with nothing to define.
                Err(Error::Syntax(format!("expected variable name in $(=${})", refname)))
            };
        }

        // Last case: `$(var=...)` where `...` is a regular expression, possibly containing matched
        // parentheses.
        let rx_begin = varname_end + 1;
        let rx_end = rx_begin + regex_prefix(&s[rx_begin..]);
        if s[rx_end..].starts_with(')') {
            let part = if let Some(defidx) = def {
                // Wrap the regex in a named capture group.
                Part::DefLit {
                    def: defidx,
                    regex: format!("(?P<{}>{})", varname, &s[rx_begin..rx_end]),
                }
            } else {
                // When the varname is empty just match the regex, don't capture any variables.
                // This is `$(=[a-z])`.
                // Wrap the regex in a non-capturing group to make it concatenation-safe.
                Part::Regex(format!("(?:{})", &s[rx_begin..rx_end]))
            };
            Ok((part, rx_end + 1))
        } else {
            Err(Error::Syntax(format!("missing ')' after regex in $({}={}",
                                      varname,
                                      &s[rx_begin..rx_end])))
        }
    }
}
|
||||
|
||||
/// Compute the length of a regular expression terminated by `)` or `}`.
/// Handle nested and escaped parentheses in the rx, but don't actually parse it.
/// Return the position of the terminating brace or the length of the string.
///
/// A `)` inside a charset (`[...]`), inside curly braces (`{...}`), escaped
/// with a backslash, or balancing an earlier `(` does not terminate the regex.
fn regex_prefix(s: &str) -> usize {
    // The previous char was a backslash.
    let mut escape = false;
    // State around parsing charsets.
    enum State {
        Normal, // Outside any charset.
        Curly, // Inside curly braces.
        CSFirst, // Immediately after opening `[`.
        CSNeg, // Immediately after `[^`.
        CSBody, // Inside `[...`.
    }
    let mut state = State::Normal;

    // Current nesting level of parens.
    let mut nest = 0usize;

    for (idx, ch) in s.char_indices() {
        // A backslash hides whatever character follows it.
        if escape {
            escape = false;
            continue;
        } else if ch == '\\' {
            escape = true;
            continue;
        }
        match state {
            State::Normal => {
                match ch {
                    '[' => state = State::CSFirst,
                    '{' => state = State::Curly,
                    '(' => nest += 1,
                    ')' if nest > 0 => nest -= 1,
                    // An unbalanced `)` or a stray `}` terminates the regex.
                    ')' | '}' => return idx,
                    _ => {}
                }
            }
            State::Curly => {
                if ch == '}' {
                    state = State::Normal;
                }
            }
            // `]` directly after `[` or `[^` is a literal, so the charset body
            // only starts one character later.
            State::CSFirst => {
                state = match ch {
                    '^' => State::CSNeg,
                    _ => State::CSBody,
                }
            }
            State::CSNeg => state = State::CSBody,
            State::CSBody => {
                if ch == ']' {
                    state = State::Normal;
                }
            }
        }
    }
    // No terminator found; the whole string is the regex.
    s.len()
}
|
||||
|
||||
impl FromStr for Pattern {
    type Err = Error;

    /// Parse a whole directive pattern into a sequence of `Part`s by repeatedly
    /// consuming a prefix with `parse_part`.
    fn from_str(s: &str) -> Result<Pattern> {
        // Always remove leading and trailing whitespace.
        // Use `$()` to actually include that in a match.
        let s = s.trim();
        let mut pat = Pattern::new();
        let mut pos = 0;
        while pos < s.len() {
            let (part, len) = try!(pat.parse_part(&s[pos..]));
            // A reference to a variable defined earlier in this same pattern
            // would be a back-reference, which the regex-based matcher can't
            // support.
            if let Some(v) = part.ref_var() {
                if pat.defines_var(v) {
                    return Err(Error::Backref(format!("unsupported back-reference to '${}' \
                                                      defined in same pattern",
                                                      v)));
                }
            }
            pat.parts.push(part);
            pos += len;
        }
        Ok(pat)
    }
}
|
||||
|
||||
impl Pattern {
|
||||
/// Get a list of parts in this pattern.
|
||||
pub fn parts(&self) -> &[Part] {
|
||||
&self.parts
|
||||
}
|
||||
|
||||
/// Get a list of variable names defined when this pattern matches.
|
||||
pub fn defs(&self) -> &[String] {
|
||||
&self.defs
|
||||
}
|
||||
|
||||
/// Resolve all variable references in this pattern, turning it into a regular expression.
|
||||
pub fn resolve(&self, vmap: &VariableMap) -> Result<Regex> {
|
||||
let mut out = String::new();
|
||||
|
||||
// Add a word boundary check `\b` to the beginning of the regex, but only if the first part
|
||||
// is a plain text match that starts with a word character.
|
||||
//
|
||||
// This behavior can be disabled by starting the pattern with `$()`.
|
||||
if let Some(&Part::Text(ref s)) = self.parts.first() {
|
||||
if s.starts_with(char::is_alphanumeric) {
|
||||
out.push_str(r"\b");
|
||||
}
|
||||
}
|
||||
|
||||
for part in &self.parts {
|
||||
match *part {
|
||||
Part::Text(ref s) => {
|
||||
out.push_str("e(s));
|
||||
}
|
||||
Part::Regex(ref rx) => out.push_str(rx),
|
||||
Part::Var(ref var) => {
|
||||
// Resolve the variable. We can handle a plain text expansion.
|
||||
match vmap.lookup(var) {
|
||||
None => {
|
||||
return Err(Error::UndefVariable(format!("undefined variable ${}", var)))
|
||||
}
|
||||
Some(Value::Text(s)) => out.push_str("e(&s)),
|
||||
// Wrap regex in non-capturing group for safe concatenation.
|
||||
Some(Value::Regex(rx)) => write!(out, "(?:{})", rx).unwrap(),
|
||||
}
|
||||
}
|
||||
Part::DefLit { ref regex, .. } => out.push_str(regex),
|
||||
Part::DefVar { def, ref var } => {
|
||||
// Wrap regex in a named capture group.
|
||||
write!(out, "(?P<{}>", self.defs[def]).unwrap();
|
||||
match vmap.lookup(var) {
|
||||
None => {
|
||||
return Err(Error::UndefVariable(format!("undefined variable ${}", var)))
|
||||
}
|
||||
Some(Value::Text(s)) => write!(out, "{})", quote(&s[..])).unwrap(),
|
||||
Some(Value::Regex(rx)) => write!(out, "{})", rx).unwrap(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Add a word boundary check `\b` to the end of the regex, but only if the final part
|
||||
// is a plain text match that ends with a word character.
|
||||
//
|
||||
// This behavior can be disabled by ending the pattern with `$()`.
|
||||
if let Some(&Part::Text(ref s)) = self.parts.last() {
|
||||
if s.ends_with(char::is_alphanumeric) {
|
||||
out.push_str(r"\b");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(try!(RegexBuilder::new(&out).multi_line(true).compile()))
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Pattern {
    /// Format the pattern back in the directive syntax it was parsed from.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        for part in &self.parts {
            use self::Part::*;
            try!(match *part {
                // An empty text part came from `$()`; a lone `$` from `$$`.
                Text(ref txt) if txt == "" => write!(f, "$()"),
                Text(ref txt) if txt == "$" => write!(f, "$$"),
                Text(ref txt) => write!(f, "{}", txt),
                Regex(ref rx) => write!(f, "$(={})", rx),
                Var(ref var) => write!(f, "$({})", var),
                DefLit { def, ref regex } => {
                    let defvar = &self.defs[def];
                    // The stored regex is `(?P<defvar>...)`: skip the 4-char
                    // `(?P<` prefix plus the name and `>`, and drop the final `)`
                    // to recover the original literal regex.
                    let litrx = &regex[5 + defvar.len()..regex.len() - 1];
                    write!(f, "$({}={})", defvar, litrx)
                }
                DefVar { def, ref var } => write!(f, "$({}=${})", self.defs[def], var),
            });
        }
        Ok(())
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    // `regex_prefix` must return the index of the terminating `)`/`}`,
    // honoring nesting, escapes, and charsets.
    #[test]
    fn regex() {
        use super::regex_prefix;

        assert_eq!(regex_prefix(""), 0);
        assert_eq!(regex_prefix(")"), 0);
        assert_eq!(regex_prefix(")c"), 0);
        assert_eq!(regex_prefix("x"), 1);
        assert_eq!(regex_prefix("x)x"), 1);

        assert_eq!(regex_prefix("x(c))x"), 4);
        assert_eq!(regex_prefix("()x(c))x"), 6);
        assert_eq!(regex_prefix("()x(c)"), 6);

        assert_eq!(regex_prefix("x([)]))x"), 6);
        assert_eq!(regex_prefix("x[)])x"), 4);
        assert_eq!(regex_prefix("x[^)])x"), 5);
        assert_eq!(regex_prefix("x[^])x"), 6);
    }

    // Exercise `Pattern::parse_part` on every `$...` escape form and the
    // associated syntax errors.
    #[test]
    fn part() {
        use super::{Pattern, Part};
        let mut pat = Pattern::new();

        // This is dubious, should we panic instead?
        assert_eq!(pat.parse_part("").unwrap(), (Part::Text("".to_string()), 0));

        assert_eq!(pat.parse_part("x").unwrap(),
                   (Part::Text("x".to_string()), 1));
        assert_eq!(pat.parse_part("x2").unwrap(),
                   (Part::Text("x2".to_string()), 2));
        assert_eq!(pat.parse_part("x$").unwrap(),
                   (Part::Text("x".to_string()), 1));
        assert_eq!(pat.parse_part("x$$").unwrap(),
                   (Part::Text("x".to_string()), 1));

        assert_eq!(pat.parse_part("$").unwrap_err().to_string(),
                   "pattern syntax error, use $$ to match a single $");

        assert_eq!(pat.parse_part("$$").unwrap(),
                   (Part::Text("$".to_string()), 2));
        assert_eq!(pat.parse_part("$$ ").unwrap(),
                   (Part::Text("$".to_string()), 2));

        assert_eq!(pat.parse_part("$0").unwrap(),
                   (Part::Var("0".to_string()), 2));
        assert_eq!(pat.parse_part("$xx=").unwrap(),
                   (Part::Var("xx".to_string()), 3));
        assert_eq!(pat.parse_part("$xx$").unwrap(),
                   (Part::Var("xx".to_string()), 3));

        assert_eq!(pat.parse_part("$(0)").unwrap(),
                   (Part::Var("0".to_string()), 4));
        assert_eq!(pat.parse_part("$()").unwrap(),
                   (Part::Text("".to_string()), 3));

        assert_eq!(pat.parse_part("$(0").unwrap_err().to_string(),
                   ("unterminated $(0..."));
        assert_eq!(pat.parse_part("$(foo:").unwrap_err().to_string(),
                   ("syntax error in $(foo... ':'"));
        assert_eq!(pat.parse_part("$(foo =").unwrap_err().to_string(),
                   ("syntax error in $(foo... ' '"));
        assert_eq!(pat.parse_part("$(eo0=$bar").unwrap_err().to_string(),
                   ("expected ')' after $(eo0=$bar..."));
        assert_eq!(pat.parse_part("$(eo1=$bar}").unwrap_err().to_string(),
                   ("expected ')' after $(eo1=$bar..."));
        assert_eq!(pat.parse_part("$(eo2=$)").unwrap_err().to_string(),
                   ("expected variable name in $(eo2=$..."));
        assert_eq!(pat.parse_part("$(eo3=$-)").unwrap_err().to_string(),
                   ("expected variable name in $(eo3=$..."));
    }

    // Variable-defining parts must allocate sequential def indices and reject
    // duplicate definitions within one pattern.
    #[test]
    fn partdefs() {
        use super::{Pattern, Part};
        let mut pat = Pattern::new();

        assert_eq!(pat.parse_part("$(foo=$bar)").unwrap(),
                   (Part::DefVar {
                       def: 0,
                       var: "bar".to_string(),
                   },
                    11));
        assert_eq!(pat.parse_part("$(foo=$bar)").unwrap_err().to_string(),
                   "duplicate definition of $foo in same pattern");

        assert_eq!(pat.parse_part("$(fxo=$bar)x").unwrap(),
                   (Part::DefVar {
                       def: 1,
                       var: "bar".to_string(),
                   },
                    11));

        assert_eq!(pat.parse_part("$(fo2=[a-z])").unwrap(),
                   (Part::DefLit {
                       def: 2,
                       regex: "(?P<fo2>[a-z])".to_string(),
                   },
                    12));
        assert_eq!(pat.parse_part("$(fo3=[a-)])").unwrap(),
                   (Part::DefLit {
                       def: 3,
                       regex: "(?P<fo3>[a-)])".to_string(),
                   },
                    12));
        assert_eq!(pat.parse_part("$(fo4=)").unwrap(),
                   (Part::DefLit {
                       def: 4,
                       regex: "(?P<fo4>)".to_string(),
                   },
                    7));

        assert_eq!(pat.parse_part("$(=.*)").unwrap(),
                   (Part::Regex("(?:.*)".to_string()), 6));

        assert_eq!(pat.parse_part("$(=)").unwrap(),
                   (Part::Regex("(?:)".to_string()), 4));
        assert_eq!(pat.parse_part("$()").unwrap(),
                   (Part::Text("".to_string()), 3));
    }

    // Whole-pattern parsing via `FromStr`: whitespace is trimmed and parts
    // are produced in order.
    #[test]
    fn pattern() {
        use super::Pattern;

        let p: Pattern = " Hello world! ".parse().unwrap();
        assert_eq!(format!("{:?}", p.parts), "[Text(\"Hello world!\")]");

        let p: Pattern = " $foo=$(bar) ".parse().unwrap();
        assert_eq!(format!("{:?}", p.parts),
                   "[Var(\"foo\"), Text(\"=\"), Var(\"bar\")]");
    }
}
|
||||
@@ -1,313 +0,0 @@
|
||||
extern crate filecheck;
|
||||
|
||||
use filecheck::{CheckerBuilder, NO_VARIABLES, Error as FcError};
|
||||
|
||||
/// Convert a filecheck error into a `String` so test results can be compared
/// with `assert_eq!` (the error type itself doesn't implement `PartialEq`
/// here — presumably; confirm against the `Error` definition).
fn e2s(e: FcError) -> String {
    e.to_string()
}
|
||||
|
||||
// A builder with no text produces an empty checker that accepts any input.
#[test]
fn empty() {
    let c = CheckerBuilder::new().finish();
    assert!(c.is_empty());

    // An empty checker matches anything.
    assert_eq!(c.check("", NO_VARIABLES).map_err(e2s), Ok(true));
    assert_eq!(c.check("hello", NO_VARIABLES).map_err(e2s), Ok(true));
}
|
||||
|
||||
// Text containing no directives still yields an empty checker.
#[test]
fn no_directives() {
    let c = CheckerBuilder::new().text("nothing here").unwrap().finish();
    assert!(c.is_empty());

    // An empty checker matches anything.
    assert_eq!(c.check("", NO_VARIABLES).map_err(e2s), Ok(true));
    assert_eq!(c.check("hello", NO_VARIABLES).map_err(e2s), Ok(true));
}
|
||||
|
||||
// A lone `regex:` definition makes the checker non-empty, but with no match
// directives every input still passes.
#[test]
fn no_matches() {
    let c = CheckerBuilder::new().text("regex: FOO=bar").unwrap().finish();
    assert!(!c.is_empty());

    // A checker with no match directives accepts anything.
    assert_eq!(c.check("", NO_VARIABLES).map_err(e2s), Ok(true));
    assert_eq!(c.check("hello", NO_VARIABLES).map_err(e2s), Ok(true));
}
|
||||
|
||||
// `check:` directives must match in order and non-overlapping.
#[test]
fn simple() {
    let c = CheckerBuilder::new()
        .text("
check: one
check: two
")
        .unwrap()
        .finish();

    // "one" before "two": passes.
    let t = "
zero
one
and a half
two
three
";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(true));

    // "two" before "one": the second directive can't match after the first.
    let t = "
zero
and a half
two
one
three
";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));
}
|
||||
|
||||
// `sameln:` must match on the same line as the previous match.
#[test]
fn sameln() {
    let c = CheckerBuilder::new()
        .text("
check: one
sameln: two
")
        .unwrap()
        .finish();

    // "two" several lines later: fails.
    let t = "
zero
one
and a half
two
three
";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));

    // "two" on the very next line still fails.
    let t = "
zero
one
two
three
";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));

    // Only "one two" on one line passes.
    let t = "
zero
one two
three
";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(true));
}
|
||||
|
||||
// `nextln:` must match on the line immediately after the previous match.
#[test]
fn nextln() {
    let c = CheckerBuilder::new()
        .text("
check: one
nextln: two
")
        .unwrap()
        .finish();

    // A line in between: fails.
    let t = "
zero
one
and a half
two
three
";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));

    // Immediately following line: passes.
    let t = "
zero
one
two
three
";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(true));

    // Same line doesn't count as the next line.
    let t = "
zero
one two
three
";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));

    // A final line without a trailing newline still matches.
    let t = "
zero
one
two";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(true));
}
|
||||
|
||||
#[test]
fn leading_nextln() {
    // A leading nextln directive should match from line 2.
    // This is somewhat arbitrary, but consistent with a preceding 'check: $()' directive.
    let c = CheckerBuilder::new()
        .text("
nextln: one
nextln: two
")
        .unwrap()
        .finish();

    // "one" on line 2: passes.
    let t = "zero
one
two
three
";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(true));

    // "one" on line 1: fails, the implied previous match is on line 1.
    let t = "one
two
three
";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));
}
|
||||
|
||||
#[test]
fn leading_sameln() {
    // A leading sameln directive should match from line 1.
    let c = CheckerBuilder::new()
        .text("
sameln: one
sameln: two
")
        .unwrap()
        .finish();

    // "one two" on line 2, not line 1: fails.
    let t = "zero
one two three
";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));

    // Both on line 1: passes.
    let t = "zero one two three";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(true));

    // "two" dropping to line 2: fails.
    let t = "zero one
two three";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));
}
|
||||
|
||||
// `not:` rejects the input if its pattern appears between the surrounding
// matches. `$()` disables the implicit word-boundary checks here.
#[test]
fn not() {
    let c = CheckerBuilder::new()
        .text("
check: one$()
not: $()eat$()
check: $()two
")
        .unwrap()
        .finish();

    let t = "onetwo";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(true));

    let t = "one eat two";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));

    let t = "oneeattwo";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));

    // "eat" never occurs, only "ea…" split across: passes.
    let t = "oneatwo";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(true));
}
|
||||
|
||||
// Multiple `not:` directives between the same pair of matches: all must be
// absent from the gap.
#[test]
fn notnot() {
    let c = CheckerBuilder::new()
        .text("
check: one$()
not: $()eat$()
not: half
check: $()two
")
        .unwrap()
        .finish();

    let t = "onetwo";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(true));

    let t = "one eat two";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));

    let t = "one half two";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));

    let t = "oneeattwo";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));

    // The `not: half` pattern only matches whole words, but the bracketing matches are considered
    // word boundaries, so it does match in this case.
    let t = "onehalftwo";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(false));

    let t = "oneatwo";
    assert_eq!(c.check(t, NO_VARIABLES).map_err(e2s), Ok(true));
}
|
||||
|
||||
// `unordered:` directives may match in any order, but not across the
// surrounding ordered `check:` directives.
#[test]
fn unordered() {
    let c = CheckerBuilder::new()
        .text("
check: one
unordered: two
unordered: three
check: four
")
        .unwrap()
        .finish();

    assert_eq!(c.check("one two three four", NO_VARIABLES).map_err(e2s), Ok(true));
    assert_eq!(c.check("one three two four", NO_VARIABLES).map_err(e2s), Ok(true));

    assert_eq!(c.check("one two four three four", NO_VARIABLES).map_err(e2s), Ok(true));
    assert_eq!(c.check("one three four two four", NO_VARIABLES).map_err(e2s), Ok(true));

    // The unordered matches can't fall after the final ordered "four".
    assert_eq!(c.check("one two four three", NO_VARIABLES).map_err(e2s), Ok(false));
    assert_eq!(c.check("one three four two", NO_VARIABLES).map_err(e2s), Ok(false));
}
|
||||
|
||||
// Leading `unordered:` directives behave like `unordered` bounded only by the
// following ordered directive.
#[test]
fn leading_unordered() {
    let c = CheckerBuilder::new()
        .text("
unordered: two
unordered: three
check: four
")
        .unwrap()
        .finish();

    assert_eq!(c.check("one two three four", NO_VARIABLES).map_err(e2s), Ok(true));
    assert_eq!(c.check("one three two four", NO_VARIABLES).map_err(e2s), Ok(true));

    assert_eq!(c.check("one two four three four", NO_VARIABLES).map_err(e2s), Ok(true));
    assert_eq!(c.check("one three four two four", NO_VARIABLES).map_err(e2s), Ok(true));

    // Both unordered matches must still precede the ordered "four".
    assert_eq!(c.check("one two four three", NO_VARIABLES).map_err(e2s), Ok(false));
    assert_eq!(c.check("one three four two", NO_VARIABLES).map_err(e2s), Ok(false));
}
|
||||
|
||||
// Trailing `unordered:` directives have no closing barrier, so they may match
// anywhere after the preceding ordered match.
#[test]
fn trailing_unordered() {
    let c = CheckerBuilder::new()
        .text("
check: one
unordered: two
unordered: three
")
        .unwrap()
        .finish();

    assert_eq!(c.check("one two three four", NO_VARIABLES).map_err(e2s), Ok(true));
    assert_eq!(c.check("one three two four", NO_VARIABLES).map_err(e2s), Ok(true));

    assert_eq!(c.check("one two four three four", NO_VARIABLES).map_err(e2s), Ok(true));
    assert_eq!(c.check("one three four two four", NO_VARIABLES).map_err(e2s), Ok(true));

    // With no trailing barrier, order relative to "four" doesn't matter.
    assert_eq!(c.check("one two four three", NO_VARIABLES).map_err(e2s), Ok(true));
    assert_eq!(c.check("one three four two", NO_VARIABLES).map_err(e2s), Ok(true));
}
|
||||
@@ -1,60 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
|
||||
/// A variable name is one or more ASCII alphanumerical characters, including underscore.
|
||||
/// Note that numerical variable names like `$45` are allowed too.
|
||||
///
|
||||
/// Try to parse a variable name from the begining of `s`.
|
||||
/// Return the index of the character following the varname.
|
||||
/// This returns 0 if `s` doesn't have a prefix that is a variable name.
|
||||
pub fn varname_prefix(s: &str) -> usize {
|
||||
for (idx, ch) in s.char_indices() {
|
||||
match ch {
|
||||
'a'...'z' | 'A'...'Z' | '0'...'9' | '_' => {}
|
||||
_ => return idx,
|
||||
}
|
||||
}
|
||||
s.len()
|
||||
}
|
||||
|
||||
/// A variable can contain either a regular expression or plain text.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Value<'a> {
|
||||
Text(Cow<'a, str>),
|
||||
Regex(Cow<'a, str>),
|
||||
}
|
||||
|
||||
/// Resolve variables by name.
|
||||
pub trait VariableMap {
|
||||
/// Get the value of the variable `varname`, or return `None` for an unknown variable name.
|
||||
fn lookup(&self, varname: &str) -> Option<Value>;
|
||||
}
|
||||
|
||||
impl VariableMap for () {
|
||||
fn lookup(&self, _: &str) -> Option<Value> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// An empty variable map.
|
||||
pub const NO_VARIABLES: &'static VariableMap = &();
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
#[test]
|
||||
fn varname() {
|
||||
use super::varname_prefix;
|
||||
|
||||
assert_eq!(varname_prefix(""), 0);
|
||||
assert_eq!(varname_prefix("\0"), 0);
|
||||
assert_eq!(varname_prefix("_"), 1);
|
||||
assert_eq!(varname_prefix("0"), 1);
|
||||
assert_eq!(varname_prefix("01"), 2);
|
||||
assert_eq!(varname_prefix("b"), 1);
|
||||
assert_eq!(varname_prefix("C"), 1);
|
||||
assert_eq!(varname_prefix("."), 0);
|
||||
assert_eq!(varname_prefix(".s"), 0);
|
||||
assert_eq!(varname_prefix("0."), 1);
|
||||
assert_eq!(varname_prefix("01="), 2);
|
||||
assert_eq!(varname_prefix("0a)"), 2);
|
||||
}
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
[package]
|
||||
authors = ["The Cretonne Project Developers"]
|
||||
name = "cretonne-reader"
|
||||
version = "0.0.0"
|
||||
publish = false
|
||||
|
||||
[lib]
|
||||
name = "cton_reader"
|
||||
path = "lib.rs"
|
||||
|
||||
[dependencies]
|
||||
cretonne = { path = "../libcretonne" }
|
||||
@@ -1,44 +0,0 @@
|
||||
//! Define the `Location`, `Error`, and `Result` types.
|
||||
|
||||
#![macro_use]
|
||||
|
||||
use std::fmt;
|
||||
use std::result;
|
||||
|
||||
/// The location of a `Token` or `Error`.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||
pub struct Location {
|
||||
pub line_number: usize,
|
||||
}
|
||||
|
||||
/// A parse error is returned when the parse failed.
|
||||
#[derive(Debug)]
|
||||
pub struct Error {
|
||||
pub location: Location,
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}: {}", self.location.line_number, self.message)
|
||||
}
|
||||
}
|
||||
|
||||
pub type Result<T> = result::Result<T, Error>;
|
||||
|
||||
// Create an `Err` variant of `Result<X>` from a location and `format!` args.
|
||||
macro_rules! err {
|
||||
( $loc:expr, $msg:expr ) => {
|
||||
Err($crate::Error {
|
||||
location: $loc.clone(),
|
||||
message: String::from($msg),
|
||||
})
|
||||
};
|
||||
|
||||
( $loc:expr, $fmt:expr, $( $arg:expr ),+ ) => {
|
||||
Err($crate::Error {
|
||||
location: $loc.clone(),
|
||||
message: format!( $fmt, $( $arg ),+ ),
|
||||
})
|
||||
};
|
||||
}
|
||||
@@ -1,49 +0,0 @@
|
||||
//! Parsed representation of `set` and `isa` commands.
|
||||
//!
|
||||
//! A test case file can contain `set` commands that set ISA-independent settings, and it can
|
||||
//! contain `isa` commands that select an ISA and applies ISA-specific settings.
|
||||
//!
|
||||
//! If a test case file contains `isa` commands, the tests will only be run against the specified
|
||||
//! ISAs. If the file contains no `isa` commands, the tests will be run against all supported ISAs.
|
||||
|
||||
use cretonne::settings::{Flags, Configurable, Error as SetError};
|
||||
use cretonne::isa::TargetIsa;
|
||||
use error::{Result, Location};
|
||||
use testcommand::TestOption;
|
||||
|
||||
/// The ISA specifications in a `.cton` file.
|
||||
pub enum IsaSpec {
|
||||
/// The parsed file does not contain any `isa` commands, but it may contain `set` commands
|
||||
/// which are reflected in the finished `Flags` object.
|
||||
None(Flags),
|
||||
|
||||
/// The parsed file does contains `isa` commands.
|
||||
/// Each `isa` command is used to configure a `TargetIsa` trait object.
|
||||
Some(Vec<Box<TargetIsa>>),
|
||||
}
|
||||
|
||||
/// Parse an iterator of command line options and apply them to `config`.
|
||||
pub fn parse_options<'a, I>(iter: I, config: &mut Configurable, loc: &Location) -> Result<()>
|
||||
where I: Iterator<Item = &'a str>
|
||||
{
|
||||
for opt in iter.map(TestOption::new) {
|
||||
match opt {
|
||||
TestOption::Flag(name) => {
|
||||
match config.set_bool(name, true) {
|
||||
Ok(_) => {}
|
||||
Err(SetError::BadName) => return err!(loc, "unknown flag '{}'", opt),
|
||||
Err(_) => return err!(loc, "not a boolean flag: '{}'", opt),
|
||||
}
|
||||
}
|
||||
TestOption::Value(name, value) => {
|
||||
match config.set(name, value) {
|
||||
Ok(_) => {}
|
||||
Err(SetError::BadName) => return err!(loc, "unknown setting '{}'", opt),
|
||||
Err(SetError::BadType) => return err!(loc, "invalid setting type: '{}'", opt),
|
||||
Err(SetError::BadValue) => return err!(loc, "invalid setting value: '{}'", opt),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,489 +0,0 @@
|
||||
|
||||
// ====--------------------------------------------------------------------------------------====//
|
||||
//
|
||||
// Lexical analysis for .cton files.
|
||||
//
|
||||
// ====--------------------------------------------------------------------------------------====//
|
||||
|
||||
use std::str::CharIndices;
|
||||
use std::u16;
|
||||
use cretonne::ir::types;
|
||||
use cretonne::ir::{Value, Ebb};
|
||||
use error::Location;
|
||||
|
||||
/// A Token returned from the `Lexer`.
|
||||
///
|
||||
/// Some variants may contains references to the original source text, so the `Token` has the same
|
||||
/// lifetime as the source.
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||
pub enum Token<'a> {
|
||||
Comment(&'a str),
|
||||
LPar, // '('
|
||||
RPar, // ')'
|
||||
LBrace, // '{'
|
||||
RBrace, // '}'
|
||||
Comma, // ','
|
||||
Dot, // '.'
|
||||
Colon, // ':'
|
||||
Equal, // '='
|
||||
Arrow, // '->'
|
||||
Float(&'a str), // Floating point immediate
|
||||
Integer(&'a str), // Integer immediate
|
||||
Type(types::Type), // i32, f32, b32x4, ...
|
||||
Value(Value), // v12, vx7
|
||||
Ebb(Ebb), // ebb3
|
||||
StackSlot(u32), // ss3
|
||||
JumpTable(u32), // jt2
|
||||
FuncRef(u32), // fn2
|
||||
SigRef(u32), // sig2
|
||||
Identifier(&'a str), // Unrecognized identifier (opcode, enumerator, ...)
|
||||
}
|
||||
|
||||
/// A `Token` with an associated location.
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub struct LocatedToken<'a> {
|
||||
pub token: Token<'a>,
|
||||
pub location: Location,
|
||||
}
|
||||
|
||||
/// Wrap up a `Token` with the given location.
|
||||
fn token<'a>(token: Token<'a>, loc: Location) -> Result<LocatedToken<'a>, LocatedError> {
|
||||
Ok(LocatedToken {
|
||||
token: token,
|
||||
location: loc,
|
||||
})
|
||||
}
|
||||
|
||||
/// An error from the lexical analysis.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Error {
|
||||
InvalidChar,
|
||||
}
|
||||
|
||||
/// An `Error` with an associated Location.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct LocatedError {
|
||||
pub error: Error,
|
||||
pub location: Location,
|
||||
}
|
||||
|
||||
/// Wrap up an `Error` with the given location.
|
||||
fn error<'a>(error: Error, loc: Location) -> Result<LocatedToken<'a>, LocatedError> {
|
||||
Err(LocatedError {
|
||||
error: error,
|
||||
location: loc,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the number of decimal digits at the end of `s`.
|
||||
fn trailing_digits(s: &str) -> usize {
|
||||
// It's faster to iterate backwards over bytes, and we're only counting ASCII digits.
|
||||
s.as_bytes().iter().rev().cloned().take_while(|&b| b'0' <= b && b <= b'9').count()
|
||||
}
|
||||
|
||||
/// Pre-parse a supposed entity name by splitting it into two parts: A head of lowercase ASCII
|
||||
/// letters and numeric tail.
|
||||
pub fn split_entity_name(name: &str) -> Option<(&str, u32)> {
|
||||
let (head, tail) = name.split_at(name.len() - trailing_digits(name));
|
||||
if tail.len() > 1 && tail.starts_with('0') {
|
||||
None
|
||||
} else {
|
||||
tail.parse().ok().map(|n| (head, n))
|
||||
}
|
||||
}
|
||||
|
||||
/// Lexical analysis.
|
||||
///
|
||||
/// A `Lexer` reads text from a `&str` and provides a sequence of tokens.
|
||||
///
|
||||
/// Also keep track of a line number for error reporting.
|
||||
///
|
||||
pub struct Lexer<'a> {
|
||||
// Complete source being processed.
|
||||
source: &'a str,
|
||||
|
||||
// Iterator into `source`.
|
||||
chars: CharIndices<'a>,
|
||||
|
||||
// Next character to be processed, or `None` at the end.
|
||||
lookahead: Option<char>,
|
||||
|
||||
// Index into `source` of lookahead character.
|
||||
pos: usize,
|
||||
|
||||
// Current line number.
|
||||
line_number: usize,
|
||||
}
|
||||
|
||||
impl<'a> Lexer<'a> {
|
||||
pub fn new(s: &'a str) -> Lexer {
|
||||
let mut lex = Lexer {
|
||||
source: s,
|
||||
chars: s.char_indices(),
|
||||
lookahead: None,
|
||||
pos: 0,
|
||||
line_number: 1,
|
||||
};
|
||||
// Advance to the first char.
|
||||
lex.next_ch();
|
||||
lex
|
||||
}
|
||||
|
||||
// Advance to the next character.
|
||||
// Return the next lookahead character, or None when the end is encountered.
|
||||
// Always update cur_ch to reflect
|
||||
fn next_ch(&mut self) -> Option<char> {
|
||||
if self.lookahead == Some('\n') {
|
||||
self.line_number += 1;
|
||||
}
|
||||
match self.chars.next() {
|
||||
Some((idx, ch)) => {
|
||||
self.pos = idx;
|
||||
self.lookahead = Some(ch);
|
||||
}
|
||||
None => {
|
||||
self.pos = self.source.len();
|
||||
self.lookahead = None;
|
||||
}
|
||||
}
|
||||
self.lookahead
|
||||
}
|
||||
|
||||
// Get the location corresponding to `lookahead`.
|
||||
fn loc(&self) -> Location {
|
||||
Location { line_number: self.line_number }
|
||||
}
|
||||
|
||||
// Starting from `lookahead`, are we looking at `prefix`?
|
||||
fn looking_at(&self, prefix: &str) -> bool {
|
||||
self.source[self.pos..].starts_with(prefix)
|
||||
}
|
||||
|
||||
// Scan a single-char token.
|
||||
fn scan_char(&mut self, tok: Token<'a>) -> Result<LocatedToken<'a>, LocatedError> {
|
||||
assert!(self.lookahead != None);
|
||||
let loc = self.loc();
|
||||
self.next_ch();
|
||||
token(tok, loc)
|
||||
}
|
||||
|
||||
// Scan a multi-char token.
|
||||
fn scan_chars(&mut self,
|
||||
count: usize,
|
||||
tok: Token<'a>)
|
||||
-> Result<LocatedToken<'a>, LocatedError> {
|
||||
let loc = self.loc();
|
||||
for _ in 0..count {
|
||||
assert!(self.lookahead != None);
|
||||
self.next_ch();
|
||||
}
|
||||
token(tok, loc)
|
||||
}
|
||||
|
||||
/// Get the rest of the current line.
|
||||
/// The next token returned by `next()` will be from the following lines.
|
||||
pub fn rest_of_line(&mut self) -> &'a str {
|
||||
let begin = self.pos;
|
||||
loop {
|
||||
match self.next_ch() {
|
||||
None | Some('\n') => return &self.source[begin..self.pos],
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Scan a comment extending to the end of the current line.
|
||||
fn scan_comment(&mut self) -> Result<LocatedToken<'a>, LocatedError> {
|
||||
let loc = self.loc();
|
||||
let text = self.rest_of_line();
|
||||
return token(Token::Comment(text), loc);
|
||||
}
|
||||
|
||||
// Scan a number token which can represent either an integer or floating point number.
|
||||
//
|
||||
// Accept the following forms:
|
||||
//
|
||||
// - `10`: Integer
|
||||
// - `-10`: Integer
|
||||
// - `0xff_00`: Integer
|
||||
// - `0.0`: Float
|
||||
// - `0x1.f`: Float
|
||||
// - `-0x2.4`: Float
|
||||
// - `0x0.4p-34`: Float
|
||||
//
|
||||
// This function does not filter out all invalid numbers. It depends in the context-sensitive
|
||||
// decoding of the text for that. For example, the number of allowed digits an an Ieee32` and
|
||||
// an `Ieee64` constant are different.
|
||||
fn scan_number(&mut self) -> Result<LocatedToken<'a>, LocatedError> {
|
||||
let begin = self.pos;
|
||||
let loc = self.loc();
|
||||
let mut is_float = false;
|
||||
|
||||
// Skip a leading sign.
|
||||
if self.lookahead == Some('-') {
|
||||
self.next_ch();
|
||||
}
|
||||
|
||||
// Check for NaNs with payloads.
|
||||
if self.looking_at("NaN:") || self.looking_at("sNaN:") {
|
||||
// Skip the `NaN:` prefix, the loop below won't accept it.
|
||||
// We expect a hexadecimal number to follow the colon.
|
||||
while self.next_ch() != Some(':') {}
|
||||
is_float = true;
|
||||
} else if self.looking_at("NaN") || self.looking_at("Inf") {
|
||||
// This is Inf or a default quiet NaN.
|
||||
is_float = true;
|
||||
}
|
||||
|
||||
// Look for the end of this number. Detect the radix point if there is one.
|
||||
loop {
|
||||
match self.next_ch() {
|
||||
Some('-') | Some('_') => {}
|
||||
Some('.') => is_float = true,
|
||||
Some(ch) if ch.is_alphanumeric() => {}
|
||||
_ => break,
|
||||
}
|
||||
}
|
||||
let text = &self.source[begin..self.pos];
|
||||
if is_float {
|
||||
token(Token::Float(text), loc)
|
||||
} else {
|
||||
token(Token::Integer(text), loc)
|
||||
}
|
||||
}
|
||||
|
||||
// Scan a 'word', which is an identifier-like sequence of characters beginning with '_' or an
|
||||
// alphabetic char, followed by zero or more alphanumeric or '_' characters.
|
||||
fn scan_word(&mut self) -> Result<LocatedToken<'a>, LocatedError> {
|
||||
let begin = self.pos;
|
||||
let loc = self.loc();
|
||||
|
||||
assert!(self.lookahead == Some('_') || self.lookahead.unwrap().is_alphabetic());
|
||||
loop {
|
||||
match self.next_ch() {
|
||||
Some('_') => {}
|
||||
Some(ch) if ch.is_alphanumeric() => {}
|
||||
_ => break,
|
||||
}
|
||||
}
|
||||
let text = &self.source[begin..self.pos];
|
||||
|
||||
// Look for numbered well-known entities like ebb15, v45, ...
|
||||
token(split_entity_name(text)
|
||||
.and_then(|(prefix, number)| {
|
||||
Self::numbered_entity(prefix, number)
|
||||
.or_else(|| Self::value_type(text, prefix, number))
|
||||
})
|
||||
.unwrap_or(Token::Identifier(text)),
|
||||
loc)
|
||||
}
|
||||
|
||||
// If prefix is a well-known entity prefix and suffix is a valid entity number, return the
|
||||
// decoded token.
|
||||
fn numbered_entity(prefix: &str, number: u32) -> Option<Token<'a>> {
|
||||
match prefix {
|
||||
"v" => Value::direct_with_number(number).map(|v| Token::Value(v)),
|
||||
"vx" => Value::table_with_number(number).map(|v| Token::Value(v)),
|
||||
"ebb" => Ebb::with_number(number).map(|ebb| Token::Ebb(ebb)),
|
||||
"ss" => Some(Token::StackSlot(number)),
|
||||
"jt" => Some(Token::JumpTable(number)),
|
||||
"fn" => Some(Token::FuncRef(number)),
|
||||
"sig" => Some(Token::SigRef(number)),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
// Recognize a scalar or vector type.
|
||||
fn value_type(text: &str, prefix: &str, number: u32) -> Option<Token<'a>> {
|
||||
let is_vector = prefix.ends_with('x');
|
||||
let scalar = if is_vector {
|
||||
&prefix[0..prefix.len() - 1]
|
||||
} else {
|
||||
text
|
||||
};
|
||||
let base_type = match scalar {
|
||||
"i8" => types::I8,
|
||||
"i16" => types::I16,
|
||||
"i32" => types::I32,
|
||||
"i64" => types::I64,
|
||||
"f32" => types::F32,
|
||||
"f64" => types::F64,
|
||||
"b1" => types::B1,
|
||||
"b8" => types::B8,
|
||||
"b16" => types::B16,
|
||||
"b32" => types::B32,
|
||||
"b64" => types::B64,
|
||||
_ => return None,
|
||||
};
|
||||
if is_vector {
|
||||
if number <= u16::MAX as u32 {
|
||||
base_type.by(number as u16).map(|t| Token::Type(t))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
Some(Token::Type(base_type))
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the next token or a lexical error.
|
||||
///
|
||||
/// Return None when the end of the source is encountered.
|
||||
pub fn next(&mut self) -> Option<Result<LocatedToken<'a>, LocatedError>> {
|
||||
loop {
|
||||
let loc = self.loc();
|
||||
return match self.lookahead {
|
||||
None => None,
|
||||
Some(';') => Some(self.scan_comment()),
|
||||
Some('(') => Some(self.scan_char(Token::LPar)),
|
||||
Some(')') => Some(self.scan_char(Token::RPar)),
|
||||
Some('{') => Some(self.scan_char(Token::LBrace)),
|
||||
Some('}') => Some(self.scan_char(Token::RBrace)),
|
||||
Some(',') => Some(self.scan_char(Token::Comma)),
|
||||
Some('.') => Some(self.scan_char(Token::Dot)),
|
||||
Some(':') => Some(self.scan_char(Token::Colon)),
|
||||
Some('=') => Some(self.scan_char(Token::Equal)),
|
||||
Some('-') => {
|
||||
if self.looking_at("->") {
|
||||
Some(self.scan_chars(2, Token::Arrow))
|
||||
} else {
|
||||
Some(self.scan_number())
|
||||
}
|
||||
}
|
||||
Some(ch) if ch.is_digit(10) => Some(self.scan_number()),
|
||||
Some(ch) if ch.is_alphabetic() => Some(self.scan_word()),
|
||||
Some(ch) if ch.is_whitespace() => {
|
||||
self.next_ch();
|
||||
continue;
|
||||
}
|
||||
_ => {
|
||||
// Skip invalid char, return error.
|
||||
self.next_ch();
|
||||
Some(error(Error::InvalidChar, loc))
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::trailing_digits;
|
||||
use super::*;
|
||||
use cretonne::ir::types;
|
||||
use cretonne::ir::{Value, Ebb};
|
||||
use error::Location;
|
||||
|
||||
#[test]
|
||||
fn digits() {
|
||||
assert_eq!(trailing_digits(""), 0);
|
||||
assert_eq!(trailing_digits("x"), 0);
|
||||
assert_eq!(trailing_digits("0x"), 0);
|
||||
assert_eq!(trailing_digits("x1"), 1);
|
||||
assert_eq!(trailing_digits("1x1"), 1);
|
||||
assert_eq!(trailing_digits("1x01"), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn entity_name() {
|
||||
assert_eq!(split_entity_name(""), None);
|
||||
assert_eq!(split_entity_name("x"), None);
|
||||
assert_eq!(split_entity_name("x+"), None);
|
||||
assert_eq!(split_entity_name("x+1"), Some(("x+", 1)));
|
||||
assert_eq!(split_entity_name("x-1"), Some(("x-", 1)));
|
||||
assert_eq!(split_entity_name("1"), Some(("", 1)));
|
||||
assert_eq!(split_entity_name("x1"), Some(("x", 1)));
|
||||
assert_eq!(split_entity_name("xy0"), Some(("xy", 0)));
|
||||
// Reject this non-canonical form.
|
||||
assert_eq!(split_entity_name("inst01"), None);
|
||||
}
|
||||
|
||||
fn token<'a>(token: Token<'a>, line: usize) -> Option<Result<LocatedToken<'a>, LocatedError>> {
|
||||
Some(super::token(token, Location { line_number: line }))
|
||||
}
|
||||
|
||||
fn error<'a>(error: Error, line: usize) -> Option<Result<LocatedToken<'a>, LocatedError>> {
|
||||
Some(super::error(error, Location { line_number: line }))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn make_lexer() {
|
||||
let mut l1 = Lexer::new("");
|
||||
let mut l2 = Lexer::new(" ");
|
||||
let mut l3 = Lexer::new("\n ");
|
||||
|
||||
assert_eq!(l1.next(), None);
|
||||
assert_eq!(l2.next(), None);
|
||||
assert_eq!(l3.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lex_comment() {
|
||||
let mut lex = Lexer::new("; hello");
|
||||
assert_eq!(lex.next(), token(Token::Comment("; hello"), 1));
|
||||
assert_eq!(lex.next(), None);
|
||||
|
||||
lex = Lexer::new("\n ;hello\n;foo");
|
||||
assert_eq!(lex.next(), token(Token::Comment(";hello"), 2));
|
||||
assert_eq!(lex.next(), token(Token::Comment(";foo"), 3));
|
||||
assert_eq!(lex.next(), None);
|
||||
|
||||
// Scan a comment after an invalid char.
|
||||
let mut lex = Lexer::new("#; hello");
|
||||
assert_eq!(lex.next(), error(Error::InvalidChar, 1));
|
||||
assert_eq!(lex.next(), token(Token::Comment("; hello"), 1));
|
||||
assert_eq!(lex.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lex_chars() {
|
||||
let mut lex = Lexer::new("(); hello\n = :{, }.");
|
||||
assert_eq!(lex.next(), token(Token::LPar, 1));
|
||||
assert_eq!(lex.next(), token(Token::RPar, 1));
|
||||
assert_eq!(lex.next(), token(Token::Comment("; hello"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Equal, 2));
|
||||
assert_eq!(lex.next(), token(Token::Colon, 2));
|
||||
assert_eq!(lex.next(), token(Token::LBrace, 2));
|
||||
assert_eq!(lex.next(), token(Token::Comma, 2));
|
||||
assert_eq!(lex.next(), token(Token::RBrace, 2));
|
||||
assert_eq!(lex.next(), token(Token::Dot, 2));
|
||||
assert_eq!(lex.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lex_numbers() {
|
||||
let mut lex = Lexer::new(" 0 2_000 -1,0xf -0x0 0.0 0x0.4p-34");
|
||||
assert_eq!(lex.next(), token(Token::Integer("0"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Integer("2_000"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Integer("-1"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Comma, 1));
|
||||
assert_eq!(lex.next(), token(Token::Integer("0xf"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Integer("-0x0"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Float("0.0"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Float("0x0.4p-34"), 1));
|
||||
assert_eq!(lex.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lex_identifiers() {
|
||||
let mut lex = Lexer::new("v0 v00 vx01 ebb1234567890 ebb5234567890 v1x vx1 vxvx4 \
|
||||
function0 function b1 i32x4 f32x5");
|
||||
assert_eq!(lex.next(),
|
||||
token(Token::Value(Value::direct_with_number(0).unwrap()), 1));
|
||||
assert_eq!(lex.next(), token(Token::Identifier("v00"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Identifier("vx01"), 1));
|
||||
assert_eq!(lex.next(),
|
||||
token(Token::Ebb(Ebb::with_number(1234567890).unwrap()), 1));
|
||||
assert_eq!(lex.next(), token(Token::Identifier("ebb5234567890"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Identifier("v1x"), 1));
|
||||
assert_eq!(lex.next(),
|
||||
token(Token::Value(Value::table_with_number(1).unwrap()), 1));
|
||||
assert_eq!(lex.next(), token(Token::Identifier("vxvx4"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Identifier("function0"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Identifier("function"), 1));
|
||||
assert_eq!(lex.next(), token(Token::Type(types::B1), 1));
|
||||
assert_eq!(lex.next(), token(Token::Type(types::I32.by(4).unwrap()), 1));
|
||||
assert_eq!(lex.next(), token(Token::Identifier("f32x5"), 1));
|
||||
assert_eq!(lex.next(), None);
|
||||
}
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
//! Cretonne file reader library.
|
||||
//!
|
||||
//! The cton_reader library supports reading .cton files. This functionality is needed for testing
|
||||
//! Cretonne, but is not essential for a JIT compiler.
|
||||
|
||||
extern crate cretonne;
|
||||
|
||||
pub use error::{Location, Result, Error};
|
||||
pub use parser::{parse_functions, parse_test};
|
||||
pub use testcommand::{TestCommand, TestOption};
|
||||
pub use testfile::{TestFile, Details};
|
||||
pub use isaspec::IsaSpec;
|
||||
pub use sourcemap::SourceMap;
|
||||
|
||||
mod error;
|
||||
mod lexer;
|
||||
mod parser;
|
||||
mod testcommand;
|
||||
mod isaspec;
|
||||
mod testfile;
|
||||
mod sourcemap;
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,220 +0,0 @@
|
||||
//! Source map for translating source entity names to parsed entities.
|
||||
//!
|
||||
//! When the parser reads in a source file, entities like instructions, EBBs, and values get new
|
||||
//! entity numbers. The parser maintains a mapping from the entity names in the source to the final
|
||||
//! entity references.
|
||||
//!
|
||||
//! The `SourceMap` struct defined in this module makes the same mapping available to parser
|
||||
//! clients.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use cretonne::ir::{StackSlot, JumpTable, Ebb, Value, Inst};
|
||||
use cretonne::ir::entities::AnyEntity;
|
||||
use error::{Result, Location};
|
||||
use lexer::split_entity_name;
|
||||
|
||||
/// Mapping from source entity names to entity references that are valid in the parsed function.
|
||||
#[derive(Debug)]
|
||||
pub struct SourceMap {
|
||||
values: HashMap<Value, Value>, // vNN, vxNN
|
||||
ebbs: HashMap<Ebb, Ebb>, // ebbNN
|
||||
stack_slots: HashMap<u32, StackSlot>, // ssNN
|
||||
jump_tables: HashMap<u32, JumpTable>, // jtNN
|
||||
|
||||
// Store locations for entities, including instructions.
|
||||
locations: HashMap<AnyEntity, Location>,
|
||||
}
|
||||
|
||||
/// Read-only interface which is exposed outside the parser crate.
|
||||
impl SourceMap {
|
||||
/// Look up a value entity by its source number.
|
||||
pub fn get_value(&self, src: Value) -> Option<Value> {
|
||||
self.values.get(&src).cloned()
|
||||
}
|
||||
|
||||
/// Look up a EBB entity by its source number.
|
||||
pub fn get_ebb(&self, src: Ebb) -> Option<Ebb> {
|
||||
self.ebbs.get(&src).cloned()
|
||||
}
|
||||
|
||||
/// Look up a stack slot entity by its source number.
|
||||
pub fn get_ss(&self, src_num: u32) -> Option<StackSlot> {
|
||||
self.stack_slots.get(&src_num).cloned()
|
||||
}
|
||||
|
||||
/// Look up a jump table entity by its source number.
|
||||
pub fn get_jt(&self, src_num: u32) -> Option<JumpTable> {
|
||||
self.jump_tables.get(&src_num).cloned()
|
||||
}
|
||||
|
||||
/// Look up an entity by source name.
|
||||
/// Returns the entity reference corresponding to `name`, if it exists.
|
||||
pub fn lookup_str(&self, name: &str) -> Option<AnyEntity> {
|
||||
split_entity_name(name).and_then(|(ent, num)| {
|
||||
match ent {
|
||||
"v" => {
|
||||
Value::direct_with_number(num)
|
||||
.and_then(|v| self.get_value(v))
|
||||
.map(AnyEntity::Value)
|
||||
}
|
||||
"vx" => {
|
||||
Value::table_with_number(num)
|
||||
.and_then(|v| self.get_value(v))
|
||||
.map(AnyEntity::Value)
|
||||
}
|
||||
"ebb" => Ebb::with_number(num).and_then(|e| self.get_ebb(e)).map(AnyEntity::Ebb),
|
||||
"ss" => self.get_ss(num).map(AnyEntity::StackSlot),
|
||||
"jt" => self.get_jt(num).map(AnyEntity::JumpTable),
|
||||
_ => None,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the source location where an entity was defined.
|
||||
/// This looks up entities in the parsed function, not the source entity numbers.
|
||||
pub fn location(&self, entity: AnyEntity) -> Option<Location> {
|
||||
self.locations.get(&entity).cloned()
|
||||
}
|
||||
|
||||
/// Rewrite an Ebb reference.
|
||||
pub fn rewrite_ebb(&self, ebb: &mut Ebb, loc: AnyEntity) -> Result<()> {
|
||||
match self.get_ebb(*ebb) {
|
||||
Some(new) => {
|
||||
*ebb = new;
|
||||
Ok(())
|
||||
}
|
||||
None => {
|
||||
err!(self.location(loc).unwrap_or_default(),
|
||||
"undefined reference: {}",
|
||||
ebb)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Rewrite a value reference.
|
||||
pub fn rewrite_value(&self, val: &mut Value, loc: AnyEntity) -> Result<()> {
|
||||
match self.get_value(*val) {
|
||||
Some(new) => {
|
||||
*val = new;
|
||||
Ok(())
|
||||
}
|
||||
None => {
|
||||
err!(self.location(loc).unwrap_or_default(),
|
||||
"undefined reference: {}",
|
||||
val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Rewrite a slice of value references.
|
||||
pub fn rewrite_values(&self, vals: &mut [Value], loc: AnyEntity) -> Result<()> {
|
||||
for val in vals {
|
||||
try!(self.rewrite_value(val, loc));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Interface for mutating a source map.
|
||||
///
|
||||
/// This interface is provided for the parser itself, it is not made available outside the crate.
|
||||
pub trait MutableSourceMap {
|
||||
fn new() -> Self;
|
||||
|
||||
/// Define a value mapping from the source name `src` to the final `entity`.
|
||||
fn def_value(&mut self, src: Value, entity: Value, loc: &Location) -> Result<()>;
|
||||
fn def_ebb(&mut self, src: Ebb, entity: Ebb, loc: &Location) -> Result<()>;
|
||||
fn def_ss(&mut self, src_num: u32, entity: StackSlot, loc: &Location) -> Result<()>;
|
||||
fn def_jt(&mut self, src_num: u32, entity: JumpTable, loc: &Location) -> Result<()>;
|
||||
|
||||
/// Define an instruction. Since instruction numbers never appear in source, only the location
|
||||
/// is recorded.
|
||||
fn def_inst(&mut self, entity: Inst, loc: &Location) -> Result<()>;
|
||||
}
|
||||
|
||||
impl MutableSourceMap for SourceMap {
|
||||
fn new() -> SourceMap {
|
||||
SourceMap {
|
||||
values: HashMap::new(),
|
||||
ebbs: HashMap::new(),
|
||||
stack_slots: HashMap::new(),
|
||||
jump_tables: HashMap::new(),
|
||||
locations: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn def_value(&mut self, src: Value, entity: Value, loc: &Location) -> Result<()> {
|
||||
if self.values.insert(src, entity).is_some() {
|
||||
err!(loc, "duplicate value: {}", src)
|
||||
} else if self.locations.insert(entity.into(), loc.clone()).is_some() {
|
||||
err!(loc, "duplicate entity: {}", entity)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn def_ebb(&mut self, src: Ebb, entity: Ebb, loc: &Location) -> Result<()> {
|
||||
if self.ebbs.insert(src, entity).is_some() {
|
||||
err!(loc, "duplicate EBB: {}", src)
|
||||
} else if self.locations.insert(entity.into(), loc.clone()).is_some() {
|
||||
err!(loc, "duplicate entity: {}", entity)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn def_ss(&mut self, src_num: u32, entity: StackSlot, loc: &Location) -> Result<()> {
|
||||
if self.stack_slots.insert(src_num, entity).is_some() {
|
||||
err!(loc, "duplicate stack slot: ss{}", src_num)
|
||||
} else if self.locations.insert(entity.into(), loc.clone()).is_some() {
|
||||
err!(loc, "duplicate entity: {}", entity)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn def_jt(&mut self, src_num: u32, entity: JumpTable, loc: &Location) -> Result<()> {
|
||||
if self.jump_tables.insert(src_num, entity).is_some() {
|
||||
err!(loc, "duplicate jump table: jt{}", src_num)
|
||||
} else if self.locations.insert(entity.into(), loc.clone()).is_some() {
|
||||
err!(loc, "duplicate entity: {}", entity)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn def_inst(&mut self, entity: Inst, loc: &Location) -> Result<()> {
|
||||
if self.locations.insert(entity.into(), loc.clone()).is_some() {
|
||||
err!(loc, "duplicate entity: {}", entity)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use parse_test;
|
||||
|
||||
#[test]
|
||||
fn details() {
|
||||
let tf = parse_test("function detail() {
|
||||
ss10 = stack_slot 13
|
||||
jt10 = jump_table ebb0
|
||||
ebb0(v4: i32, vx7: i32):
|
||||
v10 = iadd v4, vx7
|
||||
}")
|
||||
.unwrap();
|
||||
let map = &tf.functions[0].1.map;
|
||||
|
||||
assert_eq!(map.lookup_str("v0"), None);
|
||||
assert_eq!(map.lookup_str("ss1"), None);
|
||||
assert_eq!(map.lookup_str("ss10").unwrap().to_string(), "ss0");
|
||||
assert_eq!(map.lookup_str("jt10").unwrap().to_string(), "jt0");
|
||||
assert_eq!(map.lookup_str("ebb0").unwrap().to_string(), "ebb0");
|
||||
assert_eq!(map.lookup_str("v4").unwrap().to_string(), "vx0");
|
||||
assert_eq!(map.lookup_str("vx7").unwrap().to_string(), "vx1");
|
||||
assert_eq!(map.lookup_str("v10").unwrap().to_string(), "v0");
|
||||
}
|
||||
}
|
||||
@@ -1,88 +0,0 @@
|
||||
//! Test commands.
|
||||
//!
|
||||
//! A `.cton` file can begin with one or more *test commands* which specify what is to be tested.
|
||||
//! The general syntax is:
|
||||
//!
|
||||
//! <pre>
|
||||
//! test <i><command></i> </i>[options]</i>...
|
||||
//! </pre>
|
||||
//!
|
||||
//! The options are either a single identifier flag, or setting values like `identifier=value`.
|
||||
//!
|
||||
//! The parser does not understand the test commands or which options are alid. It simply parses
|
||||
//! the general format into a `TestCommand` data structure.
|
||||
|
||||
use std::fmt::{self, Display, Formatter};
|
||||
|
||||
/// A single parsed test command: the command name and its options.
///
/// The parser does not interpret the command or validate the options; it only
/// splits them into this structure.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TestCommand<'a> {
    /// The command name — the first whitespace-separated token on the line.
    pub command: &'a str,
    /// The remaining tokens, parsed as options, in order of appearance.
    pub options: Vec<TestOption<'a>>,
}
|
||||
|
||||
/// A single option on a test command line.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum TestOption<'a> {
    /// A bare identifier flag with no value, e.g. `verbose`.
    Flag(&'a str),
    /// A `key=value` setting, split at the first `=` character.
    Value(&'a str, &'a str),
}
|
||||
|
||||
impl<'a> TestCommand<'a> {
|
||||
pub fn new(s: &'a str) -> TestCommand<'a> {
|
||||
let mut parts = s.split_whitespace();
|
||||
let cmd = parts.next().unwrap_or("");
|
||||
TestCommand {
|
||||
command: cmd,
|
||||
options: parts.filter(|s| !s.is_empty()).map(TestOption::new).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Display for TestCommand<'a> {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
try!(write!(f, "{}", self.command));
|
||||
for opt in &self.options {
|
||||
try!(write!(f, " {}", opt));
|
||||
}
|
||||
writeln!(f, "")
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> TestOption<'a> {
|
||||
pub fn new(s: &'a str) -> TestOption<'a> {
|
||||
match s.find('=') {
|
||||
None => TestOption::Flag(s),
|
||||
Some(p) => TestOption::Value(&s[0..p], &s[p + 1..]),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Display for TestOption<'a> {
|
||||
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
|
||||
match *self {
|
||||
TestOption::Flag(s) => write!(f, "{}", s),
|
||||
TestOption::Value(s, v) => write!(f, "{}={}", s, v),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // `TestOption::new` splits at the first `=`; tokens without one become
    // flags, including the empty string.
    #[test]
    fn parse_option() {
        assert_eq!(TestOption::new(""), TestOption::Flag(""));
        assert_eq!(TestOption::new("foo"), TestOption::Flag("foo"));
        assert_eq!(TestOption::new("foo=bar"), TestOption::Value("foo", "bar"));
    }

    // Round trip: a parsed command displays as a normalized, newline-terminated
    // line with single spaces between tokens (extra whitespace is collapsed).
    #[test]
    fn parse_command() {
        assert_eq!(&TestCommand::new("").to_string(), "\n");
        assert_eq!(&TestCommand::new("cat").to_string(), "cat\n");
        assert_eq!(&TestCommand::new("cat ").to_string(), "cat\n");
        assert_eq!(&TestCommand::new("cat 1 ").to_string(), "cat 1\n");
        assert_eq!(&TestCommand::new("cat one=4 two t").to_string(),
                   "cat one=4 two t\n");
    }
}
|
||||
@@ -1,47 +0,0 @@
|
||||
//! Data structures representing a parsed test file.
|
||||
//!
|
||||
//! A test file is a `.cton` file which contains test commands and settings for running a
|
||||
//! file-based test case.
|
||||
//!
|
||||
|
||||
use cretonne::ir::Function;
|
||||
use cretonne::ir::entities::AnyEntity;
|
||||
use testcommand::TestCommand;
|
||||
use isaspec::IsaSpec;
|
||||
use sourcemap::SourceMap;
|
||||
use error::Location;
|
||||
|
||||
/// A parsed test case.
///
/// This is the result of parsing a `.cton` file which contains a number of test commands and ISA
/// specs followed by the functions that should be tested.
pub struct TestFile<'a> {
    /// `test foo ...` lines.
    pub commands: Vec<TestCommand<'a>>,
    /// `isa bar ...` lines.
    pub isa_spec: IsaSpec,
    /// The parsed functions, each paired with auxiliary parsing details.
    pub functions: Vec<(Function, Details<'a>)>,
}
|
||||
|
||||
/// Additional details about a function parsed from a text string.
/// These are useful for detecting test commands embedded in comments etc.
/// The details do not affect the semantics of the function.
#[derive(Debug)]
pub struct Details<'a> {
    /// Location of the function in the source text.
    pub location: Location,
    /// Comments collected while parsing the function.
    pub comments: Vec<Comment<'a>>,
    /// Mapping from entity names used in the source to the renumbered
    /// entities assigned by the parser.
    pub map: SourceMap,
}
|
||||
|
||||
/// A comment in a parsed function.
///
/// The comment belongs to the immediately preceding entity, whether that is an EBB header, an
/// instruction, or one of the preamble declarations.
///
/// Comments appearing inside the function but before the preamble, as well as comments appearing
/// after the function, are tagged as `AnyEntity::Function`.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Comment<'a> {
    /// The entity the comment is attached to.
    pub entity: AnyEntity,
    /// The comment text, borrowed from the parsed source string.
    pub text: &'a str,
}
|
||||
@@ -10,9 +10,9 @@ name = "cton-util"
|
||||
path = "main.rs"
|
||||
|
||||
[dependencies]
|
||||
cretonne = { path = "../libcretonne" }
|
||||
cretonne-reader = { path = "../libreader" }
|
||||
filecheck = { path = "../libfilecheck" }
|
||||
cretonne = { path = "../../lib/cretonne" }
|
||||
cretonne-reader = { path = "../../lib/reader" }
|
||||
filecheck = { path = "../../lib/filecheck" }
|
||||
docopt = "0.6.80"
|
||||
rustc-serialize = "0.3.19"
|
||||
num_cpus = "1.1.0"
|
||||
|
||||
Reference in New Issue
Block a user