Update to rustfmt-preview (#348)
* Update to rustfmt-preview.
* Run "cargo fmt --all" with rustfmt 0.4.1. rustfmt 0.4.1 is the latest release of rustfmt-preview available on the stable channel.
* Fix a long line that rustfmt 0.4.1 can't handle.
* Remove unneeded commas left behind by rustfmt.
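In short, CI now gets rustfmt from the rustfmt-preview rustup component on the stable toolchain instead of a pinned "cargo install rustfmt", and formatting is driven through "cargo +stable fmt --all". Below is a minimal sketch of reproducing that setup locally, assuming rustup and a stable toolchain are already installed; the commands are taken from the .travis.yml and script changes in this commit, not from any new tooling.

    # Remove any old cargo-installed rustfmt so the rustup component wins on PATH.
    cargo uninstall rustfmt || true

    # Install the preview component on the stable toolchain.
    rustup component add --toolchain=stable rustfmt-preview

    # Confirm which rustfmt the stable toolchain resolves to.
    rustfmt +stable --version

    # Reformat the whole workspace the same way CI does.
    cargo +stable fmt --all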
@@ -16,7 +16,16 @@ addons:
install:
- pip3 install --user --upgrade mypy flake8
- mypy --version
- travis_wait ./check-rustfmt.sh --install
before_script:
- cargo uninstall rustfmt || true
- cargo install --list
- rustup toolchain install stable
- rustup component add --toolchain=stable rustfmt-preview
- rustup component list --toolchain=stable
- rustup show
- rustfmt +stable --version || echo fail
- rustup update
- rustfmt +stable --version
script: ./test-all.sh
cache:
cargo: true
@@ -1,40 +0,0 @@
#!/bin/bash
set -euo pipefail

# Usage: check-rustfmt.sh [--install]
#
# Check that the desired version of rustfmt is installed.
#
# Rustfmt is still immature enough that its formatting decisions can change
# between versions. This makes it difficult to enforce a certain style in a
# test script since not all developers will upgrade rustfmt at the same time.
# To work around this, we only verify formatting when a specific version of
# rustfmt is installed.
#
# Exits 0 if the right version of rustfmt is installed, 1 otherwise.
#
# With the --install option, also tries to install the right version.

# This version should always be bumped to the newest version available that
# works with stable Rust.
# ... but not 0.10.0, since it's the same as 0.9.0 except for a deprecation
# error (and it requires --force to disable the error and enable normal
# operation, however that doesn't appear to be possible through "cargo fmt").
VERS="0.9.0"

if cargo install --list | tee /dev/null | grep -q "^rustfmt v$VERS"; then
exit 0
fi

if [[ ${1:-""} != "--install" ]]; then
echo "********************************************************************"
echo "* Please install rustfmt v$VERS to verify formatting. *"
echo "* If a newer version of rustfmt is available, update this script. *"
echo "********************************************************************"
echo "$0 --install"
sleep 1
exit 1
fi

echo "Installing rustfmt v$VERS."
cargo install --force --vers="$VERS" rustfmt
@@ -8,4 +8,4 @@ cd $(dirname "$0")
# Make sure we can find rustfmt.
export PATH="$PATH:$HOME/.cargo/bin"

exec cargo fmt --all -- "$@"
exec cargo +stable fmt --all -- "$@"
@@ -18,12 +18,8 @@ pub fn run(files: &[String]) -> CommandResult {
}

fn cat_one(filename: &str) -> CommandResult {
let buffer = read_to_string(&filename).map_err(
|e| format!("{}: {}", filename, e),
)?;
let items = parse_functions(&buffer).map_err(
|e| format!("{}: {}", filename, e),
)?;
let buffer = read_to_string(&filename).map_err(|e| format!("{}: {}", filename, e))?;
let items = parse_functions(&buffer).map_err(|e| format!("{}: {}", filename, e))?;

for (idx, func) in items.into_iter().enumerate() {
if idx != 0 {
@@ -1,10 +1,10 @@
//! CLI tool to read Cretonne IR files and compile them into native code.

use capstone::prelude::*;
use cretonne_codegen::Context;
use cretonne_codegen::isa::TargetIsa;
use cretonne_codegen::print_errors::pretty_error;
use cretonne_codegen::settings::FlagsOrIsa;
use cretonne_codegen::Context;
use cretonne_codegen::{binemit, ir};
use cretonne_reader::parse_test;
use std::path::Path;
@@ -80,9 +80,7 @@ fn handle_module(
name: &str,
fisa: FlagsOrIsa,
) -> Result<(), String> {
let buffer = read_to_string(&path).map_err(
|e| format!("{}: {}", name, e),
)?;
let buffer = read_to_string(&path).map_err(|e| format!("{}: {}", name, e))?;
let test_file = parse_test(&buffer).map_err(|e| format!("{}: {}", name, e))?;

// If we have an isa from the command-line, use that. Otherwise if the
@@ -154,12 +152,10 @@ fn get_disassembler(isa: &TargetIsa) -> Result<Capstone, String> {
}
}
"arm32" => Capstone::new().arm().mode(arch::arm::ArchMode::Arm).build(),
"arm64" => {
Capstone::new()
"arm64" => Capstone::new()
.arm64()
.mode(arch::arm64::ArchMode::Arm)
.build()
}
.build(),
_ => return Err(String::from("Unknown ISA")),
};
@@ -1,14 +1,8 @@
#![deny(trivial_numeric_casts)]
#![warn(unused_import_braces, unstable_features, unused_extern_crates)]
#![cfg_attr(feature="cargo-clippy", warn(
float_arithmetic,
mut_mut,
nonminimal_bool,
option_map_unwrap_or,
option_map_unwrap_or_else,
unicode_not_nfc,
use_self,
))]
#![cfg_attr(feature = "cargo-clippy",
warn(float_arithmetic, mut_mut, nonminimal_bool, option_map_unwrap_or,
option_map_unwrap_or_else, unicode_not_nfc, use_self))]

#[macro_use]
extern crate cfg_if;
@@ -134,9 +128,7 @@ fn cton_util() -> CommandResult {
);

#[cfg(not(feature = "wasm"))]
let result = Err(
"Error: cton-util was compiled without wasm support.".to_owned(),
);
let result = Err("Error: cton-util was compiled without wasm support.".to_owned());

result
} else {
@@ -19,12 +19,8 @@ pub fn run(files: &[String]) -> CommandResult {
}

fn print_cfg(filename: &str) -> CommandResult {
let buffer = read_to_string(filename).map_err(
|e| format!("{}: {}", filename, e),
)?;
let items = parse_functions(&buffer).map_err(
|e| format!("{}: {}", filename, e),
)?;
let buffer = read_to_string(filename).map_err(|e| format!("{}: {}", filename, e))?;
let items = parse_functions(&buffer).map_err(|e| format!("{}: {}", filename, e))?;

for (idx, func) in items.into_iter().enumerate() {
if idx != 0 {
@@ -22,14 +22,14 @@ pub fn run(files: &[String], verbose: bool) -> CommandResult {
}

let mut buffer = String::new();
io::stdin().read_to_string(&mut buffer).map_err(|e| {
format!("stdin: {}", e)
})?;
io::stdin()
.read_to_string(&mut buffer)
.map_err(|e| format!("stdin: {}", e))?;

if verbose {
let (success, explain) = checker.explain(&buffer, NO_VARIABLES).map_err(
|e| e.to_string(),
)?;
let (success, explain) = checker
.explain(&buffer, NO_VARIABLES)
.map_err(|e| e.to_string())?;
print!("{}", explain);
if success {
println!("OK");
@@ -37,27 +37,25 @@ pub fn run(files: &[String], verbose: bool) -> CommandResult {
} else {
Err("Check failed".to_string())
}
} else if checker.check(&buffer, NO_VARIABLES).map_err(
|e| e.to_string(),
)?
} else if checker
.check(&buffer, NO_VARIABLES)
.map_err(|e| e.to_string())?
{
Ok(())
} else {
let (_, explain) = checker.explain(&buffer, NO_VARIABLES).map_err(
|e| e.to_string(),
)?;
let (_, explain) = checker
.explain(&buffer, NO_VARIABLES)
.map_err(|e| e.to_string())?;
print!("{}", explain);
Err("Check failed".to_string())
}
}

fn read_checkfile(filename: &str) -> Result<Checker, String> {
let buffer = read_to_string(&filename).map_err(
|e| format!("{}: {}", filename, e),
)?;
let buffer = read_to_string(&filename).map_err(|e| format!("{}: {}", filename, e))?;
let mut builder = CheckerBuilder::new();
builder.text(&buffer).map_err(
|e| format!("{}: {}", filename, e),
)?;
builder
.text(&buffer)
.map_err(|e| format!("{}: {}", filename, e))?;
Ok(builder.finish())
}
@@ -23,9 +23,19 @@ function banner {
}

# Run rustfmt if we have it.
if $topdir/check-rustfmt.sh; then
banner "Rust formatting"
$topdir/format-all.sh --write-mode=diff
banner "Rust formatting"
if command -v rustfmt > /dev/null; then
# In newer versions of rustfmt, replace --write-mode=diff with --check.
if ! $topdir/format-all.sh --write-mode=diff ; then
echo "Formatting diffs detected! Run \"cargo fmt --all\" to correct."
exit 1
fi
else
echo "rustfmt not available; formatting not checked!"
echo
echo "If you are using rustup, rustfmt can be installed via"
echo "\"rustup component add --toolchain=stable rustfmt-preview\", or see"
echo "https://github.com/rust-lang-nursery/rustfmt for more information."
fi

# Check if any Python files have changed since we last checked them.
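With this change, test-all.sh only verifies formatting when a rustfmt binary is found on PATH, and it fails the run whenever the formatter would change any file. A rough sketch of performing the same check by hand from the repository root follows; the flag spelling comes from the script above, and the commented alternative is only what the added comment suggests for newer rustfmt releases.

    # Dry-run the formatter the way test-all.sh does; a non-zero exit means
    # some files are not rustfmt-clean.
    ./format-all.sh --write-mode=diff || echo "Formatting diffs detected! Run \"cargo fmt --all\" to correct."

    # With newer rustfmt releases, the equivalent would be:
    # cargo +stable fmt --all -- --check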
@@ -67,9 +67,7 @@ fn main() {
|
||||
.arg("--out-dir")
|
||||
.arg(out_dir)
|
||||
.status()
|
||||
.expect(
|
||||
"Failed to launch second-level build script; is python installed?",
|
||||
);
|
||||
.expect("Failed to launch second-level build script; is python installed?");
|
||||
if !status.success() {
|
||||
process::exit(status.code().unwrap());
|
||||
}
|
||||
@@ -132,16 +130,14 @@ impl Isa {
|
||||
/// Returns isa targets to configure conditional compilation.
|
||||
fn isa_targets(cretonne_targets: Option<&str>, target_triple: &str) -> Result<Vec<Isa>, String> {
|
||||
match cretonne_targets {
|
||||
Some("native") => {
|
||||
Isa::from_arch(target_triple.split('-').next().unwrap())
|
||||
Some("native") => Isa::from_arch(target_triple.split('-').next().unwrap())
|
||||
.map(|isa| vec![isa])
|
||||
.ok_or_else(|| {
|
||||
format!(
|
||||
"no supported isa found for target triple `{}`",
|
||||
target_triple
|
||||
)
|
||||
})
|
||||
}
|
||||
}),
|
||||
Some(targets) => {
|
||||
let unknown_isa_targets = targets
|
||||
.split(',')
|
||||
|
||||
@@ -62,16 +62,14 @@ impl ValueConversion {
|
||||
ValueConversion::IntSplit => ty.half_width().expect("Integer type too small to split"),
|
||||
ValueConversion::VectorSplit => ty.half_vector().expect("Not a vector"),
|
||||
ValueConversion::IntBits => Type::int(ty.bits()).expect("Bad integer size"),
|
||||
ValueConversion::Sext(nty) |
|
||||
ValueConversion::Uext(nty) => nty,
|
||||
ValueConversion::Sext(nty) | ValueConversion::Uext(nty) => nty,
|
||||
}
|
||||
}
|
||||
|
||||
/// Is this a split conversion that results in two arguments?
|
||||
pub fn is_split(self) -> bool {
|
||||
match self {
|
||||
ValueConversion::IntSplit |
|
||||
ValueConversion::VectorSplit => true,
|
||||
ValueConversion::IntSplit | ValueConversion::VectorSplit => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
use super::{Comparator, Forest, Node, NodeData, NodePool, Path, INNER_SIZE};
|
||||
use packed_option::PackedOption;
|
||||
use std::marker::PhantomData;
|
||||
#[cfg(test)]
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
#[cfg(test)]
|
||||
use std::string::String;
|
||||
|
||||
@@ -50,7 +50,9 @@ where
|
||||
{
|
||||
/// Create a new empty forest.
|
||||
pub fn new() -> Self {
|
||||
Self { nodes: NodePool::new() }
|
||||
Self {
|
||||
nodes: NodePool::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Clear all maps in the forest.
|
||||
@@ -101,9 +103,9 @@ where
|
||||
|
||||
/// Get the value stored for `key`.
|
||||
pub fn get(&self, key: K, forest: &MapForest<K, V, C>, comp: &C) -> Option<V> {
|
||||
self.root.expand().and_then(|root| {
|
||||
Path::default().find(key, root, &forest.nodes, comp)
|
||||
})
|
||||
self.root
|
||||
.expand()
|
||||
.and_then(|root| Path::default().find(key, root, &forest.nodes, comp))
|
||||
}
|
||||
|
||||
/// Look up the value stored for `key`.
|
||||
@@ -292,30 +294,30 @@ where
|
||||
///
|
||||
/// If the cursor is already pointing at the first entry, leave it there and return `None`.
|
||||
pub fn prev(&mut self) -> Option<(K, V)> {
|
||||
self.root.expand().and_then(
|
||||
|root| self.path.prev(root, self.pool),
|
||||
)
|
||||
self.root
|
||||
.expand()
|
||||
.and_then(|root| self.path.prev(root, self.pool))
|
||||
}
|
||||
|
||||
/// Get the current key, or `None` if the cursor is at the end.
|
||||
pub fn key(&self) -> Option<K> {
|
||||
self.path.leaf_pos().and_then(|(node, entry)| {
|
||||
self.pool[node].unwrap_leaf().0.get(entry).cloned()
|
||||
})
|
||||
self.path
|
||||
.leaf_pos()
|
||||
.and_then(|(node, entry)| self.pool[node].unwrap_leaf().0.get(entry).cloned())
|
||||
}
|
||||
|
||||
/// Get the current value, or `None` if the cursor is at the end.
|
||||
pub fn value(&self) -> Option<V> {
|
||||
self.path.leaf_pos().and_then(|(node, entry)| {
|
||||
self.pool[node].unwrap_leaf().1.get(entry).cloned()
|
||||
})
|
||||
self.path
|
||||
.leaf_pos()
|
||||
.and_then(|(node, entry)| self.pool[node].unwrap_leaf().1.get(entry).cloned())
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the current value, or `None` if the cursor is at the end.
|
||||
pub fn value_mut(&mut self) -> Option<&mut V> {
|
||||
self.path.leaf_pos().and_then(move |(node, entry)| {
|
||||
self.pool[node].unwrap_leaf_mut().1.get_mut(entry)
|
||||
})
|
||||
self.path
|
||||
.leaf_pos()
|
||||
.and_then(move |(node, entry)| self.pool[node].unwrap_leaf_mut().1.get_mut(entry))
|
||||
}
|
||||
|
||||
/// Move this cursor to `key`.
|
||||
|
||||
@@ -362,7 +362,8 @@ impl<F: Forest> NodeData<F> {
|
||||
/// right sibling node is returned.
|
||||
pub fn balance(&mut self, crit_key: F::Key, rhs: &mut Self) -> Option<F::Key> {
|
||||
match (self, rhs) {
|
||||
(&mut NodeData::Inner {
|
||||
(
|
||||
&mut NodeData::Inner {
|
||||
size: ref mut l_size,
|
||||
keys: ref mut l_keys,
|
||||
tree: ref mut l_tree,
|
||||
@@ -371,7 +372,8 @@ impl<F: Forest> NodeData<F> {
|
||||
size: ref mut r_size,
|
||||
keys: ref mut r_keys,
|
||||
tree: ref mut r_tree,
|
||||
}) => {
|
||||
},
|
||||
) => {
|
||||
let l_ents = usize::from(*l_size) + 1;
|
||||
let r_ents = usize::from(*r_size) + 1;
|
||||
let ents = l_ents + r_ents;
|
||||
@@ -408,7 +410,8 @@ impl<F: Forest> NodeData<F> {
|
||||
Some(new_crit)
|
||||
}
|
||||
}
|
||||
(&mut NodeData::Leaf {
|
||||
(
|
||||
&mut NodeData::Leaf {
|
||||
size: ref mut l_size,
|
||||
keys: ref mut l_keys,
|
||||
vals: ref mut l_vals,
|
||||
@@ -417,7 +420,8 @@ impl<F: Forest> NodeData<F> {
|
||||
size: ref mut r_size,
|
||||
keys: ref mut r_keys,
|
||||
vals: ref mut r_vals,
|
||||
}) => {
|
||||
},
|
||||
) => {
|
||||
let l_ents = usize::from(*l_size);
|
||||
let l_keys = l_keys.borrow_mut();
|
||||
let l_vals = l_vals.borrow_mut();
|
||||
@@ -677,11 +681,7 @@ mod test {
|
||||
assert!(leaf.try_leaf_insert(2, 'c', SetValue()));
|
||||
assert_eq!(leaf.to_string(), "[ a b c d ]");
|
||||
for i in 4..15 {
|
||||
assert!(leaf.try_leaf_insert(
|
||||
usize::from(i),
|
||||
('a' as u8 + i) as char,
|
||||
SetValue(),
|
||||
));
|
||||
assert!(leaf.try_leaf_insert(usize::from(i), ('a' as u8 + i) as char, SetValue()));
|
||||
}
|
||||
assert_eq!(leaf.to_string(), "[ a b c d e f g h i j k l m n o ]");
|
||||
|
||||
@@ -779,21 +779,13 @@ mod test {
|
||||
fn leaf_balance() {
|
||||
let mut lhs = NodeData::<TF>::leaf('a', SetValue());
|
||||
for i in 1..6 {
|
||||
assert!(lhs.try_leaf_insert(
|
||||
usize::from(i),
|
||||
('a' as u8 + i) as char,
|
||||
SetValue(),
|
||||
));
|
||||
assert!(lhs.try_leaf_insert(usize::from(i), ('a' as u8 + i) as char, SetValue()));
|
||||
}
|
||||
assert_eq!(lhs.to_string(), "[ a b c d e f ]");
|
||||
|
||||
let mut rhs = NodeData::<TF>::leaf('0', SetValue());
|
||||
for i in 1..8 {
|
||||
assert!(rhs.try_leaf_insert(
|
||||
usize::from(i),
|
||||
('0' as u8 + i) as char,
|
||||
SetValue(),
|
||||
));
|
||||
assert!(rhs.try_leaf_insert(usize::from(i), ('0' as u8 + i) as char, SetValue()));
|
||||
}
|
||||
assert_eq!(rhs.to_string(), "[ 0 1 2 3 4 5 6 7 ]");
|
||||
|
||||
|
||||
@@ -303,9 +303,9 @@ impl<F: Forest> Path<F> {
|
||||
// When inserting into an inner node (`ins_node.is_some()`), we must point to a valid
|
||||
// entry in the current node since the new entry is inserted *after* the insert
|
||||
// location.
|
||||
if entry > split.lhs_entries ||
|
||||
(entry == split.lhs_entries &&
|
||||
(split.lhs_entries > split.rhs_entries || ins_node.is_some()))
|
||||
if entry > split.lhs_entries
|
||||
|| (entry == split.lhs_entries
|
||||
&& (split.lhs_entries > split.rhs_entries || ins_node.is_some()))
|
||||
{
|
||||
node = rhs_node;
|
||||
entry -= split.lhs_entries;
|
||||
@@ -406,7 +406,9 @@ impl<F: Forest> Path<F> {
|
||||
let crit_node = self.node[crit_level];
|
||||
|
||||
match pool[crit_node] {
|
||||
NodeData::Inner { size, ref mut keys, .. } => {
|
||||
NodeData::Inner {
|
||||
size, ref mut keys, ..
|
||||
} => {
|
||||
debug_assert!(crit_kidx < size);
|
||||
keys[usize::from(crit_kidx)] = crit_key;
|
||||
}
|
||||
@@ -436,7 +438,10 @@ impl<F: Forest> Path<F> {
|
||||
|
||||
// Discard the root node if it has shrunk to a single sub-tree.
|
||||
let mut ns = 0;
|
||||
while let NodeData::Inner { size: 0, ref tree, .. } = pool[self.node[ns]] {
|
||||
while let NodeData::Inner {
|
||||
size: 0, ref tree, ..
|
||||
} = pool[self.node[ns]]
|
||||
{
|
||||
ns += 1;
|
||||
self.node[ns] = tree[0];
|
||||
}
|
||||
@@ -616,9 +621,8 @@ impl<F: Forest> Path<F> {
|
||||
|
||||
/// Update the critical key for the right sibling node at `level`.
|
||||
fn update_right_crit_key(&self, level: usize, crit_key: F::Key, pool: &mut NodePool<F>) {
|
||||
let bl = self.right_sibling_branch_level(level, pool).expect(
|
||||
"No right sibling exists",
|
||||
);
|
||||
let bl = self.right_sibling_branch_level(level, pool)
|
||||
.expect("No right sibling exists");
|
||||
match pool[self.node[bl]] {
|
||||
NodeData::Inner { ref mut keys, .. } => {
|
||||
keys[usize::from(self.entry[bl])] = crit_key;
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
use super::{Forest, Node, NodeData};
|
||||
use entity::PrimaryMap;
|
||||
use std::ops::{Index, IndexMut};
|
||||
#[cfg(test)]
|
||||
use std::fmt;
|
||||
use std::ops::{Index, IndexMut};
|
||||
|
||||
/// A pool of nodes, including a free list.
|
||||
pub(super) struct NodePool<F: Forest> {
|
||||
@@ -51,7 +51,9 @@ impl<F: Forest> NodePool<F> {
|
||||
pub fn free_node(&mut self, node: Node) {
|
||||
// Quick check for a double free.
|
||||
debug_assert!(!self.nodes[node].is_free(), "{} is already free", node);
|
||||
self.nodes[node] = NodeData::Free { next: self.freelist };
|
||||
self.nodes[node] = NodeData::Free {
|
||||
next: self.freelist,
|
||||
};
|
||||
self.freelist = Some(node);
|
||||
}
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
use super::{Comparator, Forest, Node, NodeData, NodePool, Path, SetValue, INNER_SIZE};
|
||||
use packed_option::PackedOption;
|
||||
use std::marker::PhantomData;
|
||||
#[cfg(test)]
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
#[cfg(test)]
|
||||
use std::string::String;
|
||||
|
||||
@@ -47,7 +47,9 @@ where
|
||||
{
|
||||
/// Create a new empty forest.
|
||||
pub fn new() -> Self {
|
||||
Self { nodes: NodePool::new() }
|
||||
Self {
|
||||
nodes: NodePool::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Clear all sets in the forest.
|
||||
@@ -232,16 +234,16 @@ where
|
||||
///
|
||||
/// If the cursor is already pointing at the first element, leave it there and return `None`.
|
||||
pub fn prev(&mut self) -> Option<K> {
|
||||
self.root.expand().and_then(|root| {
|
||||
self.path.prev(root, self.pool).map(|(k, _)| k)
|
||||
})
|
||||
self.root
|
||||
.expand()
|
||||
.and_then(|root| self.path.prev(root, self.pool).map(|(k, _)| k))
|
||||
}
|
||||
|
||||
/// Get the current element, or `None` if the cursor is at the end.
|
||||
pub fn elem(&self) -> Option<K> {
|
||||
self.path.leaf_pos().and_then(|(node, entry)| {
|
||||
self.pool[node].unwrap_leaf().0.get(entry).cloned()
|
||||
})
|
||||
self.path
|
||||
.leaf_pos()
|
||||
.and_then(|(node, entry)| self.pool[node].unwrap_leaf().0.get(entry).cloned())
|
||||
}
|
||||
|
||||
/// Move this cursor to `elem`.
|
||||
|
||||
@@ -7,7 +7,7 @@ mod memorysink;
|
||||
mod relaxation;
|
||||
mod shrink;
|
||||
|
||||
pub use self::memorysink::{MemoryCodeSink, RelocSink, TrapSink, NullTrapSink};
|
||||
pub use self::memorysink::{MemoryCodeSink, NullTrapSink, RelocSink, TrapSink};
|
||||
pub use self::relaxation::relax_branches;
|
||||
pub use self::shrink::shrink_instructions;
|
||||
pub use regalloc::RegDiversions;
|
||||
|
||||
@@ -78,8 +78,8 @@ pub fn relax_branches(func: &mut Function, isa: &TargetIsa) -> Result<CodeOffset
|
||||
let dest_offset = cur.func.offsets[dest];
|
||||
// This could be an out-of-range branch.
|
||||
// Relax it unless the destination offset has not been computed yet.
|
||||
if !range.contains(offset, dest_offset) &&
|
||||
(dest_offset != 0 || Some(dest) == cur.func.layout.entry_block())
|
||||
if !range.contains(offset, dest_offset)
|
||||
&& (dest_offset != 0 || Some(dest) == cur.func.layout.entry_block())
|
||||
{
|
||||
offset += relax_branch(&mut cur, offset, dest_offset, &encinfo, isa);
|
||||
continue;
|
||||
@@ -148,14 +148,14 @@ fn relax_branch(
|
||||
// Pick the first encoding that can handle the branch range.
|
||||
let dfg = &cur.func.dfg;
|
||||
let ctrl_type = dfg.ctrl_typevar(inst);
|
||||
if let Some(enc) = isa.legal_encodings(cur.func, &dfg[inst], ctrl_type).find(
|
||||
|&enc| {
|
||||
if let Some(enc) = isa.legal_encodings(cur.func, &dfg[inst], ctrl_type)
|
||||
.find(|&enc| {
|
||||
let range = encinfo.branch_range(enc).expect("Branch with no range");
|
||||
if !range.contains(offset, dest_offset) {
|
||||
dbg!(" trying [{}]: out of range", encinfo.display(enc));
|
||||
false
|
||||
} else if encinfo.operand_constraints(enc) !=
|
||||
encinfo.operand_constraints(cur.func.encodings[inst])
|
||||
} else if encinfo.operand_constraints(enc)
|
||||
!= encinfo.operand_constraints(cur.func.encodings[inst])
|
||||
{
|
||||
// Conservatively give up if the encoding has different constraints
|
||||
// than the original, so that we don't risk picking a new encoding
|
||||
@@ -168,9 +168,7 @@ fn relax_branch(
|
||||
dbg!(" trying [{}]: OK", encinfo.display(enc));
|
||||
true
|
||||
}
|
||||
},
|
||||
)
|
||||
{
|
||||
}) {
|
||||
cur.func.encodings[inst] = enc;
|
||||
return encinfo.bytes(enc);
|
||||
}
|
||||
|
||||
@@ -23,9 +23,7 @@ pub fn shrink_instructions(func: &mut Function, isa: &TargetIsa) {
|
||||
|
||||
// Pick the last encoding with constraints that are satisfied.
|
||||
let best_enc = isa.legal_encodings(func, &func.dfg[inst], ctrl_type)
|
||||
.filter(|e| {
|
||||
encinfo.constraints[e.recipe()].satisfied(inst, &divert, &func)
|
||||
})
|
||||
.filter(|e| encinfo.constraints[e.recipe()].satisfied(inst, &divert, &func))
|
||||
.min_by_key(|e| encinfo.bytes(*e))
|
||||
.unwrap();
|
||||
|
||||
@@ -41,7 +39,6 @@ pub fn shrink_instructions(func: &mut Function, isa: &TargetIsa) {
|
||||
encinfo.bytes(best_enc)
|
||||
);
|
||||
}
|
||||
|
||||
}
|
||||
divert.apply(&func.dfg[inst]);
|
||||
}
|
||||
|
||||
@@ -107,8 +107,9 @@ mod tests {
|
||||
|
||||
let s4 = BitSet::<u16>(4 | 8 | 256 | 1024);
|
||||
assert!(
|
||||
!s4.contains(0) && !s4.contains(1) && !s4.contains(4) && !s4.contains(5) &&
|
||||
!s4.contains(6) && !s4.contains(7) && !s4.contains(9) && !s4.contains(11)
|
||||
!s4.contains(0) && !s4.contains(1) && !s4.contains(4) && !s4.contains(5)
|
||||
&& !s4.contains(6) && !s4.contains(7) && !s4.contains(9)
|
||||
&& !s4.contains(11)
|
||||
);
|
||||
assert!(s4.contains(2) && s4.contains(3) && s4.contains(8) && s4.contains(10));
|
||||
}
|
||||
|
||||
@@ -24,8 +24,8 @@ use preopt::do_preopt;
|
||||
use regalloc;
|
||||
use result::{CtonError, CtonResult};
|
||||
use settings::{FlagsOrIsa, OptLevel};
|
||||
use std::vec::Vec;
|
||||
use simple_gvn::do_simple_gvn;
|
||||
use std::vec::Vec;
|
||||
use timing;
|
||||
use unreachable_code::eliminate_unreachable_code;
|
||||
use verifier;
|
||||
@@ -251,11 +251,8 @@ impl Context {
|
||||
|
||||
/// Compute the loop analysis.
|
||||
pub fn compute_loop_analysis(&mut self) {
|
||||
self.loop_analysis.compute(
|
||||
&self.func,
|
||||
&self.cfg,
|
||||
&self.domtree,
|
||||
)
|
||||
self.loop_analysis
|
||||
.compute(&self.func, &self.cfg, &self.domtree)
|
||||
}
|
||||
|
||||
/// Compute the control flow graph and dominator tree.
|
||||
@@ -292,12 +289,8 @@ impl Context {
|
||||
|
||||
/// Run the register allocator.
|
||||
pub fn regalloc(&mut self, isa: &TargetIsa) -> CtonResult {
|
||||
self.regalloc.run(
|
||||
isa,
|
||||
&mut self.func,
|
||||
&self.cfg,
|
||||
&mut self.domtree,
|
||||
)
|
||||
self.regalloc
|
||||
.run(isa, &mut self.func, &self.cfg, &mut self.domtree)
|
||||
}
|
||||
|
||||
/// Insert prologue and epilogues after computing the stack frame layout.
|
||||
|
||||
@@ -246,9 +246,11 @@ pub trait Cursor {
|
||||
let new_pos = if let Some(next) = self.layout().next_inst(inst) {
|
||||
CursorPosition::At(next)
|
||||
} else {
|
||||
CursorPosition::After(self.layout().inst_ebb(inst).expect(
|
||||
"current instruction removed?",
|
||||
))
|
||||
CursorPosition::After(
|
||||
self.layout()
|
||||
.inst_ebb(inst)
|
||||
.expect("current instruction removed?"),
|
||||
)
|
||||
};
|
||||
self.set_position(new_pos);
|
||||
}
|
||||
@@ -413,9 +415,11 @@ pub trait Cursor {
|
||||
self.set_position(At(next));
|
||||
Some(next)
|
||||
} else {
|
||||
let pos = After(self.layout().inst_ebb(inst).expect(
|
||||
"current instruction removed?",
|
||||
));
|
||||
let pos = After(
|
||||
self.layout()
|
||||
.inst_ebb(inst)
|
||||
.expect("current instruction removed?"),
|
||||
);
|
||||
self.set_position(pos);
|
||||
None
|
||||
}
|
||||
@@ -465,9 +469,11 @@ pub trait Cursor {
|
||||
self.set_position(At(prev));
|
||||
Some(prev)
|
||||
} else {
|
||||
let pos = Before(self.layout().inst_ebb(inst).expect(
|
||||
"current instruction removed?",
|
||||
));
|
||||
let pos = Before(
|
||||
self.layout()
|
||||
.inst_ebb(inst)
|
||||
.expect("current instruction removed?"),
|
||||
);
|
||||
self.set_position(pos);
|
||||
None
|
||||
}
|
||||
@@ -746,11 +752,9 @@ impl<'c, 'f> ir::InstInserterBase<'c> for &'c mut EncCursor<'f> {
|
||||
// Assign an encoding.
|
||||
// XXX Is there a way to describe this error to the user?
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(match_wild_err_arm))]
|
||||
match self.isa.encode(
|
||||
&self.func,
|
||||
&self.func.dfg[inst],
|
||||
ctrl_typevar,
|
||||
) {
|
||||
match self.isa
|
||||
.encode(&self.func, &self.func.dfg[inst], ctrl_typevar)
|
||||
{
|
||||
Ok(e) => self.func.encodings[inst] = e,
|
||||
Err(_) => panic!("can't encode {}", self.display_inst(inst)),
|
||||
}
|
||||
|
||||
@@ -126,7 +126,7 @@ macro_rules! dbg {
|
||||
#[cfg(not(feature = "std"))]
|
||||
#[macro_export]
|
||||
macro_rules! dbg {
|
||||
($($arg:tt)+) => {}
|
||||
($($arg:tt)+) => {};
|
||||
}
|
||||
|
||||
/// Helper for printing lists.
|
||||
|
||||
@@ -13,9 +13,8 @@ use timing;
|
||||
|
||||
/// Test whether the given opcode is unsafe to even consider for DCE.
|
||||
fn trivially_unsafe_for_dce(opcode: Opcode) -> bool {
|
||||
opcode.is_call() || opcode.is_branch() || opcode.is_terminator() ||
|
||||
opcode.is_return() || opcode.can_trap() || opcode.other_side_effects() ||
|
||||
opcode.can_store()
|
||||
opcode.is_call() || opcode.is_branch() || opcode.is_terminator() || opcode.is_return()
|
||||
|| opcode.can_trap() || opcode.other_side_effects() || opcode.can_store()
|
||||
}
|
||||
|
||||
/// Preserve instructions with used result values.
|
||||
@@ -51,9 +50,8 @@ pub fn do_dce(func: &mut Function, domtree: &mut DominatorTree) {
|
||||
{
|
||||
let data = &pos.func.dfg[inst];
|
||||
let opcode = data.opcode();
|
||||
if trivially_unsafe_for_dce(opcode) ||
|
||||
is_load_with_defined_trapping(opcode, &data) ||
|
||||
any_inst_results_used(inst, &live, &pos.func.dfg)
|
||||
if trivially_unsafe_for_dce(opcode) || is_load_with_defined_trapping(opcode, &data)
|
||||
|| any_inst_results_used(inst, &live, &pos.func.dfg)
|
||||
{
|
||||
for arg in pos.func.dfg.inst_args(inst) {
|
||||
let v = pos.func.dfg.resolve_aliases(*arg);
|
||||
|
||||
@@ -101,9 +101,8 @@ impl DominatorTree {
|
||||
{
|
||||
let a = a.into();
|
||||
let b = b.into();
|
||||
self.rpo_cmp_ebb(layout.pp_ebb(a), layout.pp_ebb(b)).then(
|
||||
layout.cmp(a, b),
|
||||
)
|
||||
self.rpo_cmp_ebb(layout.pp_ebb(a), layout.pp_ebb(b))
|
||||
.then(layout.cmp(a, b))
|
||||
}
|
||||
|
||||
/// Returns `true` if `a` dominates `b`.
|
||||
@@ -145,9 +144,7 @@ impl DominatorTree {
|
||||
let (mut ebb_b, mut inst_b) = match b.into() {
|
||||
ExpandedProgramPoint::Ebb(ebb) => (ebb, None),
|
||||
ExpandedProgramPoint::Inst(inst) => (
|
||||
layout.inst_ebb(inst).expect(
|
||||
"Instruction not in layout.",
|
||||
),
|
||||
layout.inst_ebb(inst).expect("Instruction not in layout."),
|
||||
Some(inst),
|
||||
),
|
||||
};
|
||||
@@ -163,7 +160,11 @@ impl DominatorTree {
|
||||
ebb_b = layout.inst_ebb(idom).expect("Dominator got removed.");
|
||||
inst_b = Some(idom);
|
||||
}
|
||||
if a == ebb_b { inst_b } else { None }
|
||||
if a == ebb_b {
|
||||
inst_b
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the common dominator of two basic blocks.
|
||||
@@ -418,14 +419,13 @@ impl DominatorTree {
|
||||
// Get an iterator with just the reachable, already visited predecessors to `ebb`.
|
||||
// Note that during the first pass, `rpo_number` is 1 for reachable blocks that haven't
|
||||
// been visited yet, 0 for unreachable blocks.
|
||||
let mut reachable_preds = cfg.pred_iter(ebb).filter(|&(pred, _)| {
|
||||
self.nodes[pred].rpo_number > 1
|
||||
});
|
||||
let mut reachable_preds = cfg.pred_iter(ebb)
|
||||
.filter(|&(pred, _)| self.nodes[pred].rpo_number > 1);
|
||||
|
||||
// The RPO must visit at least one predecessor before this node.
|
||||
let mut idom = reachable_preds.next().expect(
|
||||
"EBB node must have one reachable predecessor",
|
||||
);
|
||||
let mut idom = reachable_preds
|
||||
.next()
|
||||
.expect("EBB node must have one reachable predecessor");
|
||||
|
||||
for pred in reachable_preds {
|
||||
idom = self.common_dominator(idom, pred, layout);
|
||||
@@ -450,8 +450,7 @@ impl DominatorTree {
|
||||
}
|
||||
// We use the RPO comparison on the postorder list so we invert the operands of the
|
||||
// comparison
|
||||
let old_ebb_postorder_index =
|
||||
self.postorder
|
||||
let old_ebb_postorder_index = self.postorder
|
||||
.as_slice()
|
||||
.binary_search_by(|probe| self.rpo_cmp_ebb(old_ebb, *probe))
|
||||
.expect("the old ebb is not declared to the dominator tree");
|
||||
@@ -471,11 +470,10 @@ impl DominatorTree {
|
||||
// If there is no gaps in RPo numbers to insert this new number, we iterate
|
||||
// forward in RPO numbers and backwards in the postorder list of EBBs, renumbering the Ebbs
|
||||
// until we find a gap
|
||||
for (&current_ebb, current_rpo) in
|
||||
self.postorder[0..ebb_postorder_index].iter().rev().zip(
|
||||
inserted_rpo_number +
|
||||
1..,
|
||||
)
|
||||
for (&current_ebb, current_rpo) in self.postorder[0..ebb_postorder_index]
|
||||
.iter()
|
||||
.rev()
|
||||
.zip(inserted_rpo_number + 1..)
|
||||
{
|
||||
if self.nodes[current_ebb].rpo_number < current_rpo {
|
||||
// There is no gap, we renumber
|
||||
@@ -644,9 +642,8 @@ impl DominatorTreePreorder {
|
||||
{
|
||||
let a = a.into();
|
||||
let b = b.into();
|
||||
self.pre_cmp_ebb(layout.pp_ebb(a), layout.pp_ebb(b)).then(
|
||||
layout.cmp(a, b),
|
||||
)
|
||||
self.pre_cmp_ebb(layout.pp_ebb(a), layout.pp_ebb(b))
|
||||
.then(layout.cmp(a, b))
|
||||
}
|
||||
|
||||
/// Compare two value defs according to the dominator tree pre-order.
|
||||
@@ -658,9 +655,8 @@ impl DominatorTreePreorder {
|
||||
pub fn pre_cmp_def(&self, a: Value, b: Value, func: &Function) -> Ordering {
|
||||
let da = func.dfg.value_def(a);
|
||||
let db = func.dfg.value_def(b);
|
||||
self.pre_cmp(da, db, &func.layout).then_with(
|
||||
|| da.num().cmp(&db.num()),
|
||||
)
|
||||
self.pre_cmp(da, db, &func.layout)
|
||||
.then_with(|| da.num().cmp(&db.num()))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -128,10 +128,9 @@ impl ControlFlowGraph {
|
||||
// our iteration over successors.
|
||||
let mut successors = mem::replace(&mut self.data[ebb].successors, Default::default());
|
||||
for succ in successors.iter(&self.succ_forest) {
|
||||
self.data[succ].predecessors.retain(
|
||||
&mut self.pred_forest,
|
||||
|_, &mut e| e != ebb,
|
||||
);
|
||||
self.data[succ]
|
||||
.predecessors
|
||||
.retain(&mut self.pred_forest, |_, &mut e| e != ebb);
|
||||
}
|
||||
successors.clear(&mut self.succ_forest);
|
||||
}
|
||||
@@ -149,17 +148,12 @@ impl ControlFlowGraph {
|
||||
}
|
||||
|
||||
fn add_edge(&mut self, from: BasicBlock, to: Ebb) {
|
||||
self.data[from.0].successors.insert(
|
||||
to,
|
||||
&mut self.succ_forest,
|
||||
&(),
|
||||
);
|
||||
self.data[to].predecessors.insert(
|
||||
from.1,
|
||||
from.0,
|
||||
&mut self.pred_forest,
|
||||
&(),
|
||||
);
|
||||
self.data[from.0]
|
||||
.successors
|
||||
.insert(to, &mut self.succ_forest, &());
|
||||
self.data[to]
|
||||
.predecessors
|
||||
.insert(from.1, from.0, &mut self.pred_forest, &());
|
||||
}
|
||||
|
||||
/// Get an iterator over the CFG predecessors to `ebb`.
|
||||
|
||||
@@ -166,9 +166,9 @@ impl DataFlowGraph {
|
||||
/// Get the type of a value.
|
||||
pub fn value_type(&self, v: Value) -> Type {
|
||||
match self.values[v] {
|
||||
ValueData::Inst { ty, .. } |
|
||||
ValueData::Param { ty, .. } |
|
||||
ValueData::Alias { ty, .. } => ty,
|
||||
ValueData::Inst { ty, .. }
|
||||
| ValueData::Param { ty, .. }
|
||||
| ValueData::Alias { ty, .. } => ty,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -235,11 +235,9 @@ impl DataFlowGraph {
|
||||
// This also avoids the creation of loops.
|
||||
let original = self.resolve_aliases(src);
|
||||
debug_assert_ne!(
|
||||
dest,
|
||||
original,
|
||||
dest, original,
|
||||
"Aliasing {} to {} would create a loop",
|
||||
dest,
|
||||
src
|
||||
dest, src
|
||||
);
|
||||
let ty = self.value_type(original);
|
||||
debug_assert_eq!(
|
||||
@@ -267,8 +265,7 @@ impl DataFlowGraph {
|
||||
///
|
||||
pub fn replace_with_aliases(&mut self, dest_inst: Inst, src_inst: Inst) {
|
||||
debug_assert_ne!(
|
||||
dest_inst,
|
||||
src_inst,
|
||||
dest_inst, src_inst,
|
||||
"Replacing {} with itself would create a loop",
|
||||
dest_inst
|
||||
);
|
||||
@@ -342,8 +339,7 @@ impl ValueDef {
|
||||
/// this value.
|
||||
pub fn num(self) -> usize {
|
||||
match self {
|
||||
ValueDef::Result(_, n) |
|
||||
ValueDef::Param(_, n) => n,
|
||||
ValueDef::Result(_, n) | ValueDef::Param(_, n) => n,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -574,9 +570,9 @@ impl DataFlowGraph {
|
||||
///
|
||||
/// Panics if the instruction doesn't support arguments.
|
||||
pub fn append_inst_arg(&mut self, inst: Inst, new_arg: Value) {
|
||||
let mut branch_values = self.insts[inst].take_value_list().expect(
|
||||
"the instruction doesn't have value arguments",
|
||||
);
|
||||
let mut branch_values = self.insts[inst]
|
||||
.take_value_list()
|
||||
.expect("the instruction doesn't have value arguments");
|
||||
branch_values.push(new_arg, &mut self.value_lists);
|
||||
self.insts[inst].put_value_list(branch_values)
|
||||
}
|
||||
@@ -585,9 +581,9 @@ impl DataFlowGraph {
|
||||
///
|
||||
/// This function panics if the instruction doesn't have any result.
|
||||
pub fn first_result(&self, inst: Inst) -> Value {
|
||||
self.results[inst].first(&self.value_lists).expect(
|
||||
"Instruction has no results",
|
||||
)
|
||||
self.results[inst]
|
||||
.first(&self.value_lists)
|
||||
.expect("Instruction has no results")
|
||||
}
|
||||
|
||||
/// Test if `inst` has any result values currently.
|
||||
@@ -653,9 +649,11 @@ impl DataFlowGraph {
|
||||
} else if constraints.requires_typevar_operand() {
|
||||
// Not all instruction formats have a designated operand, but in that case
|
||||
// `requires_typevar_operand()` should never be true.
|
||||
self.value_type(self[inst].typevar_operand(&self.value_lists).expect(
|
||||
"Instruction format doesn't have a designated operand, bad opcode.",
|
||||
))
|
||||
self.value_type(
|
||||
self[inst]
|
||||
.typevar_operand(&self.value_lists)
|
||||
.expect("Instruction format doesn't have a designated operand, bad opcode."),
|
||||
)
|
||||
} else {
|
||||
self.value_type(self.first_result(inst))
|
||||
}
|
||||
@@ -721,13 +719,16 @@ impl DataFlowGraph {
|
||||
} else {
|
||||
panic!("{} must be an EBB parameter", val);
|
||||
};
|
||||
self.ebbs[ebb].params.swap_remove(
|
||||
num as usize,
|
||||
&mut self.value_lists,
|
||||
);
|
||||
self.ebbs[ebb]
|
||||
.params
|
||||
.swap_remove(num as usize, &mut self.value_lists);
|
||||
if let Some(last_arg_val) = self.ebbs[ebb].params.get(num as usize, &self.value_lists) {
|
||||
// We update the position of the old last arg.
|
||||
if let ValueData::Param { num: ref mut old_num, .. } = self.values[last_arg_val] {
|
||||
if let ValueData::Param {
|
||||
num: ref mut old_num,
|
||||
..
|
||||
} = self.values[last_arg_val]
|
||||
{
|
||||
*old_num = num;
|
||||
} else {
|
||||
panic!("{} should be an Ebb parameter", last_arg_val);
|
||||
@@ -744,27 +745,25 @@ impl DataFlowGraph {
|
||||
} else {
|
||||
panic!("{} must be an EBB parameter", val);
|
||||
};
|
||||
self.ebbs[ebb].params.remove(
|
||||
num as usize,
|
||||
&mut self.value_lists,
|
||||
);
|
||||
self.ebbs[ebb]
|
||||
.params
|
||||
.remove(num as usize, &mut self.value_lists);
|
||||
for index in num..(self.num_ebb_params(ebb) as u16) {
|
||||
match self.values[self.ebbs[ebb]
|
||||
.params
|
||||
.get(index as usize, &self.value_lists)
|
||||
.unwrap()] {
|
||||
.unwrap()]
|
||||
{
|
||||
ValueData::Param { ref mut num, .. } => {
|
||||
*num -= 1;
|
||||
}
|
||||
_ => {
|
||||
panic!(
|
||||
_ => panic!(
|
||||
"{} must be an EBB parameter",
|
||||
self.ebbs[ebb]
|
||||
.params
|
||||
.get(index as usize, &self.value_lists)
|
||||
.unwrap()
|
||||
)
|
||||
}
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -835,7 +834,9 @@ struct EbbData {
|
||||
|
||||
impl EbbData {
|
||||
fn new() -> Self {
|
||||
Self { params: ValueList::new() }
|
||||
Self {
|
||||
params: ValueList::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -878,9 +879,9 @@ impl DataFlowGraph {
|
||||
"this function is only for assigning types to previously invalid values"
|
||||
);
|
||||
match self.values[v] {
|
||||
ValueData::Inst { ref mut ty, .. } |
|
||||
ValueData::Param { ref mut ty, .. } |
|
||||
ValueData::Alias { ref mut ty, .. } => *ty = t,
|
||||
ValueData::Inst { ref mut ty, .. }
|
||||
| ValueData::Param { ref mut ty, .. }
|
||||
| ValueData::Alias { ref mut ty, .. } => *ty = t,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -32,7 +32,11 @@ impl Ebb {
|
||||
///
|
||||
/// This method is for use by the parser.
|
||||
pub fn with_number(n: u32) -> Option<Self> {
|
||||
if n < u32::MAX { Some(Ebb(n)) } else { None }
|
||||
if n < u32::MAX {
|
||||
Some(Ebb(n))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -124,7 +128,11 @@ impl FuncRef {
|
||||
///
|
||||
/// This method is for use by the parser.
|
||||
pub fn with_number(n: u32) -> Option<Self> {
|
||||
if n < u32::MAX { Some(FuncRef(n)) } else { None }
|
||||
if n < u32::MAX {
|
||||
Some(FuncRef(n))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -138,7 +146,11 @@ impl SigRef {
|
||||
///
|
||||
/// This method is for use by the parser.
|
||||
pub fn with_number(n: u32) -> Option<Self> {
|
||||
if n < u32::MAX { Some(SigRef(n)) } else { None }
|
||||
if n < u32::MAX {
|
||||
Some(SigRef(n))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -152,7 +164,11 @@ impl Heap {
|
||||
///
|
||||
/// This method is for use by the parser.
|
||||
pub fn with_number(n: u32) -> Option<Self> {
|
||||
if n < u32::MAX { Some(Heap(n)) } else { None }
|
||||
if n < u32::MAX {
|
||||
Some(Heap(n))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -384,8 +384,7 @@ mod tests {
|
||||
CallConv::SystemV,
|
||||
CallConv::WindowsFastcall,
|
||||
CallConv::Baldrdash,
|
||||
]
|
||||
{
|
||||
] {
|
||||
assert_eq!(Ok(cc), cc.to_string().parse())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ use ir::{DataFlowGraph, ExternalName, Layout, Signature};
|
||||
use ir::{Ebb, ExtFuncData, FuncRef, GlobalVar, GlobalVarData, Heap, HeapData, JumpTable,
|
||||
JumpTableData, SigRef, StackSlot, StackSlotData};
|
||||
use ir::{EbbOffsets, InstEncodings, JumpTables, SourceLocs, StackSlots, ValueLocations};
|
||||
use isa::{EncInfo, Legalize, TargetIsa, Encoding};
|
||||
use isa::{EncInfo, Encoding, Legalize, TargetIsa};
|
||||
use settings::CallConv;
|
||||
use std::fmt;
|
||||
use write::write_function;
|
||||
@@ -151,9 +151,9 @@ impl Function {
|
||||
/// Returns the value of the last `purpose` parameter, or `None` if no such parameter exists.
|
||||
pub fn special_param(&self, purpose: ir::ArgumentPurpose) -> Option<ir::Value> {
|
||||
let entry = self.layout.entry_block().expect("Function is empty");
|
||||
self.signature.special_param_index(purpose).map(|i| {
|
||||
self.dfg.ebb_params(entry)[i]
|
||||
})
|
||||
self.signature
|
||||
.special_param_index(purpose)
|
||||
.map(|i| self.dfg.ebb_params(entry)[i])
|
||||
}
|
||||
|
||||
/// Get an iterator over the instructions in `ebb`, including offsets and encoded instruction
|
||||
|
||||
@@ -192,10 +192,12 @@ impl FromStr for Uimm32 {
|
||||
|
||||
// Parse a decimal or hexadecimal `Uimm32`, formatted as above.
|
||||
fn from_str(s: &str) -> Result<Self, &'static str> {
|
||||
parse_i64(s).and_then(|x| if 0 <= x && x <= i64::from(u32::MAX) {
|
||||
parse_i64(s).and_then(|x| {
|
||||
if 0 <= x && x <= i64::from(u32::MAX) {
|
||||
Ok(Uimm32(x as u32))
|
||||
} else {
|
||||
Err("Uimm32 out of range")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -259,12 +261,12 @@ impl FromStr for Offset32 {
|
||||
if !(s.starts_with('-') || s.starts_with('+')) {
|
||||
return Err("Offset must begin with sign");
|
||||
}
|
||||
parse_i64(s).and_then(|x| if i64::from(i32::MIN) <= x &&
|
||||
x <= i64::from(i32::MAX)
|
||||
{
|
||||
parse_i64(s).and_then(|x| {
|
||||
if i64::from(i32::MIN) <= x && x <= i64::from(i32::MAX) {
|
||||
Ok(Self::new(x as i32))
|
||||
} else {
|
||||
Err("Offset out of range")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -447,8 +449,7 @@ fn parse_float(s: &str, w: u8, t: u8) -> Result<u64, &'static str> {
|
||||
Err(_) => return Err("Bad exponent"),
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
match ch.to_digit(16) {
|
||||
_ => match ch.to_digit(16) {
|
||||
Some(digit) => {
|
||||
digits += 1;
|
||||
if digits > 16 {
|
||||
@@ -457,8 +458,7 @@ fn parse_float(s: &str, w: u8, t: u8) -> Result<u64, &'static str> {
|
||||
significand = (significand << 4) | u64::from(digit);
|
||||
}
|
||||
None => return Err("Invalid character"),
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -546,9 +546,7 @@ impl Ieee32 {
|
||||
let n = n.into();
|
||||
debug_assert!(n < 32);
|
||||
debug_assert!(23 + 1 - n < 32);
|
||||
Self::with_bits(
|
||||
(1u32 << (32 - 1)) | Self::pow2(n - 1).0 | (1u32 << (23 + 1 - n)),
|
||||
)
|
||||
Self::with_bits((1u32 << (32 - 1)) | Self::pow2(n - 1).0 | (1u32 << (23 + 1 - n)))
|
||||
}
|
||||
|
||||
/// Return self negated.
|
||||
@@ -609,9 +607,7 @@ impl Ieee64 {
|
||||
let n = n.into();
|
||||
debug_assert!(n < 64);
|
||||
debug_assert!(52 + 1 - n < 64);
|
||||
Self::with_bits(
|
||||
(1u64 << (64 - 1)) | Self::pow2(n - 1).0 | (1u64 << (52 + 1 - n)),
|
||||
)
|
||||
Self::with_bits((1u64 << (64 - 1)) | Self::pow2(n - 1).0 | (1u64 << (52 + 1 - n)))
|
||||
}
|
||||
|
||||
/// Return self negated.
|
||||
|
||||
@@ -178,13 +178,13 @@ impl InstructionData {
|
||||
destination,
|
||||
ref args,
|
||||
..
|
||||
} |
|
||||
InstructionData::BranchFloat {
|
||||
}
|
||||
| InstructionData::BranchFloat {
|
||||
destination,
|
||||
ref args,
|
||||
..
|
||||
} |
|
||||
InstructionData::Branch {
|
||||
}
|
||||
| InstructionData::Branch {
|
||||
destination,
|
||||
ref args,
|
||||
..
|
||||
@@ -208,11 +208,11 @@ impl InstructionData {
|
||||
/// Multi-destination branches like `br_table` return `None`.
|
||||
pub fn branch_destination(&self) -> Option<Ebb> {
|
||||
match *self {
|
||||
InstructionData::Jump { destination, .. } |
|
||||
InstructionData::Branch { destination, .. } |
|
||||
InstructionData::BranchInt { destination, .. } |
|
||||
InstructionData::BranchFloat { destination, .. } |
|
||||
InstructionData::BranchIcmp { destination, .. } => Some(destination),
|
||||
InstructionData::Jump { destination, .. }
|
||||
| InstructionData::Branch { destination, .. }
|
||||
| InstructionData::BranchInt { destination, .. }
|
||||
| InstructionData::BranchFloat { destination, .. }
|
||||
| InstructionData::BranchIcmp { destination, .. } => Some(destination),
|
||||
InstructionData::BranchTable { .. } => None,
|
||||
_ => {
|
||||
debug_assert!(!self.opcode().is_branch());
|
||||
@@ -227,11 +227,26 @@ impl InstructionData {
|
||||
/// Multi-destination branches like `br_table` return `None`.
|
||||
pub fn branch_destination_mut(&mut self) -> Option<&mut Ebb> {
|
||||
match *self {
|
||||
InstructionData::Jump { ref mut destination, .. } |
|
||||
InstructionData::Branch { ref mut destination, .. } |
|
||||
InstructionData::BranchInt { ref mut destination, .. } |
|
||||
InstructionData::BranchFloat { ref mut destination, .. } |
|
||||
InstructionData::BranchIcmp { ref mut destination, .. } => Some(destination),
|
||||
InstructionData::Jump {
|
||||
ref mut destination,
|
||||
..
|
||||
}
|
||||
| InstructionData::Branch {
|
||||
ref mut destination,
|
||||
..
|
||||
}
|
||||
| InstructionData::BranchInt {
|
||||
ref mut destination,
|
||||
..
|
||||
}
|
||||
| InstructionData::BranchFloat {
|
||||
ref mut destination,
|
||||
..
|
||||
}
|
||||
| InstructionData::BranchIcmp {
|
||||
ref mut destination,
|
||||
..
|
||||
} => Some(destination),
|
||||
InstructionData::BranchTable { .. } => None,
|
||||
_ => {
|
||||
debug_assert!(!self.opcode().is_branch());
|
||||
@@ -245,12 +260,12 @@ impl InstructionData {
|
||||
/// Any instruction that can call another function reveals its call signature here.
|
||||
pub fn analyze_call<'a>(&'a self, pool: &'a ValueListPool) -> CallInfo<'a> {
|
||||
match *self {
|
||||
InstructionData::Call { func_ref, ref args, .. } => {
|
||||
CallInfo::Direct(func_ref, args.as_slice(pool))
|
||||
}
|
||||
InstructionData::CallIndirect { sig_ref, ref args, .. } => {
|
||||
CallInfo::Indirect(sig_ref, &args.as_slice(pool)[1..])
|
||||
}
|
||||
InstructionData::Call {
|
||||
func_ref, ref args, ..
|
||||
} => CallInfo::Direct(func_ref, args.as_slice(pool)),
|
||||
InstructionData::CallIndirect {
|
||||
sig_ref, ref args, ..
|
||||
} => CallInfo::Indirect(sig_ref, &args.as_slice(pool)[1..]),
|
||||
_ => {
|
||||
debug_assert!(!self.opcode().is_call());
|
||||
CallInfo::NotACall
|
||||
@@ -512,12 +527,16 @@ impl OperandConstraint {
|
||||
LaneOf => Bound(ctrl_type.lane_type()),
|
||||
AsBool => Bound(ctrl_type.as_bool()),
|
||||
HalfWidth => Bound(ctrl_type.half_width().expect("invalid type for half_width")),
|
||||
DoubleWidth => Bound(ctrl_type.double_width().expect(
|
||||
"invalid type for double_width",
|
||||
)),
|
||||
HalfVector => Bound(ctrl_type.half_vector().expect(
|
||||
"invalid type for half_vector",
|
||||
)),
|
||||
DoubleWidth => Bound(
|
||||
ctrl_type
|
||||
.double_width()
|
||||
.expect("invalid type for double_width"),
|
||||
),
|
||||
HalfVector => Bound(
|
||||
ctrl_type
|
||||
.half_vector()
|
||||
.expect("invalid type for half_vector"),
|
||||
),
|
||||
DoubleVector => Bound(ctrl_type.by(2).expect("invalid type for double_vector")),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -90,9 +90,9 @@ impl JumpTableData {
|
||||
|
||||
/// Checks if any of the entries branch to `ebb`.
|
||||
pub fn branches_to(&self, ebb: Ebb) -> bool {
|
||||
self.table.iter().any(|target_ebb| {
|
||||
target_ebb.expand() == Some(ebb)
|
||||
})
|
||||
self.table
|
||||
.iter()
|
||||
.any(|target_ebb| target_ebb.expand() == Some(ebb))
|
||||
}
|
||||
|
||||
/// Access the whole table as a mutable slice.
|
||||
|
||||
@@ -90,7 +90,11 @@ fn midpoint(a: SequenceNumber, b: SequenceNumber) -> Option<SequenceNumber> {
|
||||
debug_assert!(a < b);
|
||||
// Avoid integer overflow.
|
||||
let m = a + (b - a) / 2;
|
||||
if m > a { Some(m) } else { None }
|
||||
if m > a {
|
||||
Some(m)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -178,9 +182,8 @@ impl Layout {
|
||||
/// Assign a valid sequence number to `inst` such that the numbers are still monotonic. This may
|
||||
/// require renumbering.
|
||||
fn assign_inst_seq(&mut self, inst: Inst) {
|
||||
let ebb = self.inst_ebb(inst).expect(
|
||||
"inst must be inserted before assigning an seq",
|
||||
);
|
||||
let ebb = self.inst_ebb(inst)
|
||||
.expect("inst must be inserted before assigning an seq");
|
||||
|
||||
// Get the sequence number immediately before `inst`.
|
||||
let prev_seq = match self.insts[inst].prev.expand() {
|
||||
@@ -566,9 +569,8 @@ impl Layout {
|
||||
/// Insert `inst` before the instruction `before` in the same EBB.
|
||||
pub fn insert_inst(&mut self, inst: Inst, before: Inst) {
|
||||
debug_assert_eq!(self.inst_ebb(inst), None);
|
||||
let ebb = self.inst_ebb(before).expect(
|
||||
"Instruction before insertion point not in the layout",
|
||||
);
|
||||
let ebb = self.inst_ebb(before)
|
||||
.expect("Instruction before insertion point not in the layout");
|
||||
let after = self.insts[before].prev;
|
||||
{
|
||||
let inst_node = &mut self.insts[inst];
|
||||
@@ -641,9 +643,8 @@ impl Layout {
|
||||
/// i4
|
||||
/// ```
|
||||
pub fn split_ebb(&mut self, new_ebb: Ebb, before: Inst) {
|
||||
let old_ebb = self.inst_ebb(before).expect(
|
||||
"The `before` instruction must be in the layout",
|
||||
);
|
||||
let old_ebb = self.inst_ebb(before)
|
||||
.expect("The `before` instruction must be in the layout");
|
||||
debug_assert!(!self.is_ebb_inserted(new_ebb));
|
||||
|
||||
// Insert new_ebb after old_ebb.
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
//! Naming well-known routines in the runtime library.
|
||||
|
||||
use ir::{types, Opcode, Type, Inst, Function, FuncRef, ExternalName, Signature, AbiParam,
|
||||
ExtFuncData, ArgumentPurpose};
|
||||
use ir::{types, AbiParam, ArgumentPurpose, ExtFuncData, ExternalName, FuncRef, Function, Inst,
|
||||
Opcode, Signature, Type};
|
||||
use isa::{RegUnit, TargetIsa};
|
||||
use settings::CallConv;
|
||||
use isa::{TargetIsa, RegUnit};
|
||||
use std::fmt;
|
||||
use std::str::FromStr;
|
||||
|
||||
@@ -82,24 +82,20 @@ impl LibCall {
|
||||
/// Returns `None` if no well-known library routine name exists for that instruction.
|
||||
pub fn for_inst(opcode: Opcode, ctrl_type: Type) -> Option<Self> {
|
||||
Some(match ctrl_type {
|
||||
types::F32 => {
|
||||
match opcode {
|
||||
types::F32 => match opcode {
|
||||
Opcode::Ceil => LibCall::CeilF32,
|
||||
Opcode::Floor => LibCall::FloorF32,
|
||||
Opcode::Trunc => LibCall::TruncF32,
|
||||
Opcode::Nearest => LibCall::NearestF32,
|
||||
_ => return None,
|
||||
}
|
||||
}
|
||||
types::F64 => {
|
||||
match opcode {
|
||||
},
|
||||
types::F64 => match opcode {
|
||||
Opcode::Ceil => LibCall::CeilF64,
|
||||
Opcode::Floor => LibCall::FloorF64,
|
||||
Opcode::Trunc => LibCall::TruncF64,
|
||||
Opcode::Nearest => LibCall::NearestF64,
|
||||
_ => return None,
|
||||
}
|
||||
}
|
||||
},
|
||||
_ => return None,
|
||||
})
|
||||
}
|
||||
@@ -127,9 +123,8 @@ pub fn get_probestack_funcref(
|
||||
arg_reg: RegUnit,
|
||||
isa: &TargetIsa,
|
||||
) -> FuncRef {
|
||||
find_funcref(LibCall::Probestack, func).unwrap_or_else(|| {
|
||||
make_funcref_for_probestack(func, reg_type, arg_reg, isa)
|
||||
})
|
||||
find_funcref(LibCall::Probestack, func)
|
||||
.unwrap_or_else(|| make_funcref_for_probestack(func, reg_type, arg_reg, isa))
|
||||
}
|
||||
|
||||
/// Get the existing function reference for `libcall` in `func` if it exists.
|
||||
|
||||
@@ -33,7 +33,7 @@ pub use ir::heap::{HeapBase, HeapData, HeapStyle};
|
||||
pub use ir::instructions::{InstructionData, Opcode, ValueList, ValueListPool, VariableArgs};
|
||||
pub use ir::jumptable::JumpTableData;
|
||||
pub use ir::layout::Layout;
|
||||
pub use ir::libcall::{LibCall, get_libcall_funcref, get_probestack_funcref};
|
||||
pub use ir::libcall::{get_libcall_funcref, get_probestack_funcref, LibCall};
|
||||
pub use ir::memflags::MemFlags;
|
||||
pub use ir::progpoint::{ExpandedProgramPoint, ProgramOrder, ProgramPoint};
|
||||
pub use ir::sourceloc::SourceLoc;
|
||||
|
||||
@@ -66,12 +66,10 @@ impl<'a> fmt::Display for DisplayValueLoc<'a> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self.0 {
|
||||
ValueLoc::Unassigned => write!(f, "-"),
|
||||
ValueLoc::Reg(ru) => {
|
||||
match self.1 {
|
||||
ValueLoc::Reg(ru) => match self.1 {
|
||||
Some(regs) => write!(f, "{}", regs.display_regunit(ru)),
|
||||
None => write!(f, "%{}", ru),
|
||||
}
|
||||
}
|
||||
},
|
||||
ValueLoc::Stack(ss) => write!(f, "{}", ss),
|
||||
}
|
||||
}
|
||||
@@ -153,12 +151,10 @@ impl<'a> fmt::Display for DisplayArgumentLoc<'a> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self.0 {
|
||||
ArgumentLoc::Unassigned => write!(f, "-"),
|
||||
ArgumentLoc::Reg(ru) => {
|
||||
match self.1 {
|
||||
ArgumentLoc::Reg(ru) => match self.1 {
|
||||
Some(regs) => write!(f, "{}", regs.display_regunit(ru)),
|
||||
None => write!(f, "%{}", ru),
|
||||
}
|
||||
}
|
||||
},
|
||||
ArgumentLoc::Stack(offset) => write!(f, "{}", offset),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,7 +17,11 @@ pub fn legalize_signature(
|
||||
|
||||
/// Get register class for a type appearing in a legalized signature.
|
||||
pub fn regclass_for_abi_type(ty: ir::Type) -> RegClass {
|
||||
if ty.is_int() { GPR } else { FPR }
|
||||
if ty.is_int() {
|
||||
GPR
|
||||
} else {
|
||||
FPR
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the set of allocatable registers for `func`.
|
||||
|
||||
@@ -30,16 +30,14 @@ impl OperandConstraint {
|
||||
/// counterpart operand has the same value location.
|
||||
pub fn satisfied(&self, loc: ValueLoc) -> bool {
|
||||
match self.kind {
|
||||
ConstraintKind::Reg |
|
||||
ConstraintKind::Tied(_) => {
|
||||
ConstraintKind::Reg | ConstraintKind::Tied(_) => {
|
||||
if let ValueLoc::Reg(reg) = loc {
|
||||
self.regclass.contains(reg)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
ConstraintKind::FixedReg(reg) |
|
||||
ConstraintKind::FixedTied(reg) => {
|
||||
ConstraintKind::FixedReg(reg) | ConstraintKind::FixedTied(reg) => {
|
||||
loc == ValueLoc::Reg(reg) && self.regclass.contains(reg)
|
||||
}
|
||||
ConstraintKind::Stack => {
|
||||
|
||||
@@ -122,10 +122,9 @@ impl EncInfo {
|
||||
///
|
||||
/// Returns 0 for illegal encodings.
|
||||
pub fn bytes(&self, enc: Encoding) -> CodeOffset {
|
||||
self.sizing.get(enc.recipe()).map_or(
|
||||
0,
|
||||
|s| CodeOffset::from(s.bytes),
|
||||
)
|
||||
self.sizing
|
||||
.get(enc.recipe())
|
||||
.map_or(0, |s| CodeOffset::from(s.bytes))
|
||||
}
|
||||
|
||||
/// Get the branch range that is supported by `enc`, if any.
|
||||
|
||||
@@ -142,11 +142,8 @@ impl settings::Configurable for Builder {
/// legalize it?
///
/// The `Encodings` iterator returns a legalization function to call.
pub type Legalize = fn(ir::Inst,
&mut ir::Function,
&mut flowgraph::ControlFlowGraph,
&TargetIsa)
-> bool;
pub type Legalize =
fn(ir::Inst, &mut ir::Function, &mut flowgraph::ControlFlowGraph, &TargetIsa) -> bool;

/// Methods that are specialized to a target ISA. Implies a Display trait that shows the
/// shared flags, as well as any isa-specific flags.

@@ -89,10 +89,12 @@ impl RegBank {
None
}
}
}.and_then(|offset| if offset < self.units {
}.and_then(|offset| {
if offset < self.units {
Some(offset + self.first_unit)
} else {
None
}
})
}

@@ -117,7 +117,11 @@ pub fn legalize_signature(

/// Get register class for a type appearing in a legalized signature.
pub fn regclass_for_abi_type(ty: Type) -> RegClass {
if ty.is_float() { FPR } else { GPR }
if ty.is_float() {
FPR
} else {
GPR
}
}

pub fn allocatable_registers(_func: &ir::Function, isa_flags: &settings::Flags) -> RegisterSet {

@@ -35,9 +35,9 @@ impl StackRef {

/// Get a reference to `ss` using the stack pointer as a base.
pub fn sp(ss: StackSlot, frame: &StackSlots) -> Self {
let size = frame.frame_size.expect(
"Stack layout must be computed before referencing stack slots",
);
let size = frame
.frame_size
.expect("Stack layout must be computed before referencing stack slots");
let slot = &frame[ss];
let offset = if slot.kind == StackSlotKind::OutgoingArg {
// Outgoing argument slots have offsets relative to our stack pointer.
@@ -6,8 +6,8 @@ use cursor::{Cursor, CursorPosition, EncCursor};
use ir;
use ir::immediates::Imm64;
use ir::stackslot::{StackOffset, StackSize};
use ir::{AbiParam, ArgumentExtension, ArgumentLoc, ArgumentPurpose, InstBuilder, ValueLoc,
get_probestack_funcref};
use ir::{get_probestack_funcref, AbiParam, ArgumentExtension, ArgumentLoc, ArgumentPurpose,
InstBuilder, ValueLoc};
use isa::{RegClass, RegUnit, TargetIsa};
use regalloc::RegisterSet;
use result;
@@ -97,7 +97,8 @@ impl ArgAssigner for Args {
RU::r14
} else {
RU::rsi
} as RegUnit).into()
} as RegUnit)
.into()
}
// This is SpiderMonkey's `WasmTableCallSigReg`.
ArgumentPurpose::SignatureId => return ArgumentLoc::Reg(RU::rbx as RegUnit).into(),
@@ -235,8 +236,8 @@ fn callee_saved_gprs_used(flags: &shared_settings::Flags, func: &ir::Function) -
for ebb in &func.layout {
for inst in func.layout.ebb_insts(ebb) {
match func.dfg[inst] {
ir::instructions::InstructionData::RegMove { dst, .. } |
ir::instructions::InstructionData::RegFill { dst, .. } => {
ir::instructions::InstructionData::RegMove { dst, .. }
| ir::instructions::InstructionData::RegFill { dst, .. } => {
if !used.is_avail(GPR, dst) {
used.free(GPR, dst);
}
@@ -431,10 +432,8 @@ fn insert_common_prologue(
pos.func.locations[fp] = ir::ValueLoc::Reg(RU::rbp as RegUnit);

pos.ins().x86_push(fp);
pos.ins().copy_special(
RU::rsp as RegUnit,
RU::rbp as RegUnit,
);
pos.ins()
.copy_special(RU::rsp as RegUnit, RU::rbp as RegUnit);

for reg in csrs.iter(GPR) {
// Append param to entry EBB
@@ -449,8 +448,8 @@ fn insert_common_prologue(

// Allocate stack frame storage.
if stack_size > 0 {
if isa.flags().probestack_enabled() &&
stack_size > (1 << isa.flags().probestack_size_log2())
if isa.flags().probestack_enabled()
&& stack_size > (1 << isa.flags().probestack_size_log2())
{
// Emit a stack probe.
let rax = RU::rax as RegUnit;
@@ -464,8 +463,8 @@ fn insert_common_prologue(
let callee = get_probestack_funcref(pos.func, reg_type, rax, isa);

// Make the call.
let call = if !isa.flags().is_pic() && isa.flags().is_64bit() &&
!pos.func.dfg.ext_funcs[callee].colocated
let call = if !isa.flags().is_pic() && isa.flags().is_64bit()
&& !pos.func.dfg.ext_funcs[callee].colocated
{
// 64-bit non-PIC non-colocated calls need to be legalized to call_indirect.
// Use r11 as it may be clobbered under all supported calling conventions.
@@ -84,11 +84,8 @@ fn expand_sdivrem(
// Explicitly check for overflow: Trap when x == INT_MIN.
debug_assert!(avoid_div_traps, "Native trapping divide handled above");
let f = pos.ins().ifcmp_imm(x, -1 << (ty.lane_bits() - 1));
pos.ins().trapif(
IntCC::Equal,
f,
ir::TrapCode::IntegerOverflow,
);
pos.ins()
.trapif(IntCC::Equal, f, ir::TrapCode::IntegerOverflow);
// x / -1 = -x.
pos.ins().irsub_imm(x, 0)
};
@@ -348,11 +345,8 @@ fn expand_fcvt_to_sint(
let mut pos = FuncCursor::new(func).after_inst(inst);
pos.use_srcloc(inst);

let is_done = pos.ins().icmp_imm(
IntCC::NotEqual,
result,
1 << (ty.lane_bits() - 1),
);
let is_done = pos.ins()
.icmp_imm(IntCC::NotEqual, result, 1 << (ty.lane_bits() - 1));
pos.ins().brnz(is_done, done, &[]);

// We now have the following possibilities:
@@ -364,10 +358,8 @@ fn expand_fcvt_to_sint(

// Check for NaN.
let is_nan = pos.ins().fcmp(FloatCC::Unordered, x, x);
pos.ins().trapnz(
is_nan,
ir::TrapCode::BadConversionToInteger,
);
pos.ins()
.trapnz(is_nan, ir::TrapCode::BadConversionToInteger);

// Check for case 1: INT_MIN is the correct result.
// Determine the smallest floating point number that would convert to INT_MIN.
@@ -376,14 +368,12 @@ fn expand_fcvt_to_sint(
let flimit = match xty {
// An f32 can represent `i16::min_value() - 1` exactly with precision to spare, so
// there are values less than -2^(N-1) that convert correctly to INT_MIN.
ir::types::F32 => {
pos.ins().f32const(if output_bits < 32 {
ir::types::F32 => pos.ins().f32const(if output_bits < 32 {
overflow_cc = FloatCC::LessThanOrEqual;
Ieee32::fcvt_to_sint_negative_overflow(output_bits)
} else {
Ieee32::pow2(output_bits - 1).neg()
})
}
}),
ir::types::F64 => {
// An f64 can represent `i32::min_value() - 1` exactly with precision to spare, so
// there are values less than -2^(N-1) that convert correctly to INT_MIN.
@@ -458,12 +448,8 @@ fn expand_fcvt_to_uint(
_ => panic!("Can't convert {}", xty),
};
let is_large = pos.ins().ffcmp(x, pow2nm1);
pos.ins().brff(
FloatCC::GreaterThanOrEqual,
is_large,
large,
&[],
);
pos.ins()
.brff(FloatCC::GreaterThanOrEqual, is_large, large, &[]);

// We need to generate a specific trap code when `x` is NaN, so reuse the flags from the
// previous comparison.
@@ -476,12 +462,8 @@ fn expand_fcvt_to_uint(
// Now we know that x < 2^(N-1) and not NaN.
let sres = pos.ins().x86_cvtt2si(ty, x);
let is_neg = pos.ins().ifcmp_imm(sres, 0);
pos.ins().brif(
IntCC::SignedGreaterThanOrEqual,
is_neg,
done,
&[sres],
);
pos.ins()
.brif(IntCC::SignedGreaterThanOrEqual, is_neg, done, &[sres]);
pos.ins().trap(ir::TrapCode::IntegerOverflow);

// Handle the case where x >= 2^(N-1) and not NaN.
@@ -489,11 +471,8 @@ fn expand_fcvt_to_uint(
let adjx = pos.ins().fsub(x, pow2nm1);
let lres = pos.ins().x86_cvtt2si(ty, adjx);
let is_neg = pos.ins().ifcmp_imm(lres, 0);
pos.ins().trapif(
IntCC::SignedLessThan,
is_neg,
ir::TrapCode::IntegerOverflow,
);
pos.ins()
.trapif(IntCC::SignedLessThan, is_neg, ir::TrapCode::IntegerOverflow);
let lfinal = pos.ins().iadd_imm(lres, 1 << (ty.lane_bits() - 1));

// Recycle the original instruction as a jump.
@@ -87,8 +87,7 @@ mod tests {
vec![]
);
assert_eq!(
[]
.iter()
[].iter()
.cloned()
.adjacent_pairs()
.collect::<Vec<(i32, i32)>>(),
@@ -133,8 +133,7 @@ fn legalize_entry_params(func: &mut Function, entry: Ebb) {
}
// The callee-save parameters should not appear until after register allocation is
// done.
ArgumentPurpose::FramePointer |
ArgumentPurpose::CalleeSaved => {
ArgumentPurpose::FramePointer | ArgumentPurpose::CalleeSaved => {
panic!("Premature callee-saved arg {}", arg);
}
// These can be meaningfully added by `legalize_signature()`.
@@ -174,9 +173,8 @@ fn legalize_inst_results<ResType>(pos: &mut FuncCursor, mut get_abi_type: ResTyp
where
ResType: FnMut(&Function, usize) -> AbiParam,
{
let call = pos.current_inst().expect(
"Cursor must point to a call instruction",
);
let call = pos.current_inst()
.expect("Cursor must point to a call instruction");

// We theoretically allow for call instructions that return a number of fixed results before
// the call return values. In practice, it doesn't happen.
@@ -377,8 +375,8 @@ fn check_call_signature(dfg: &DataFlowGraph, inst: Inst) -> Result<(), SigRef> {
};
let sig = &dfg.signatures[sig_ref];

if check_arg_types(dfg, args, &sig.params[..]) &&
check_arg_types(dfg, dfg.inst_results(inst), &sig.returns[..])
if check_arg_types(dfg, args, &sig.params[..])
&& check_arg_types(dfg, dfg.inst_results(inst), &sig.returns[..])
{
// All types check out.
Ok(())
@@ -407,14 +405,13 @@ fn legalize_inst_arguments<ArgType>(
) where
ArgType: FnMut(&Function, usize) -> AbiParam,
{
let inst = pos.current_inst().expect(
"Cursor must point to a call instruction",
);
let inst = pos.current_inst()
.expect("Cursor must point to a call instruction");

// Lift the value list out of the call instruction so we modify it.
let mut vlist = pos.func.dfg[inst].take_value_list().expect(
"Call must have a value list",
);
let mut vlist = pos.func.dfg[inst]
.take_value_list()
.expect("Call must have a value list");

// The value list contains all arguments to the instruction, including the callee on an
// indirect call which isn't part of the call arguments that must match the ABI signature.
@@ -544,8 +541,8 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph
.iter()
.rev()
.take_while(|&rt| {
rt.purpose == ArgumentPurpose::Link || rt.purpose == ArgumentPurpose::StructReturn ||
rt.purpose == ArgumentPurpose::VMContext
rt.purpose == ArgumentPurpose::Link || rt.purpose == ArgumentPurpose::StructReturn
|| rt.purpose == ArgumentPurpose::VMContext
})
.count();
let abi_args = func.signature.returns.len() - special_args;
@@ -570,9 +567,9 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph
let mut vlist = pos.func.dfg[inst].take_value_list().unwrap();
for arg in &pos.func.signature.returns[abi_args..] {
match arg.purpose {
ArgumentPurpose::Link |
ArgumentPurpose::StructReturn |
ArgumentPurpose::VMContext => {}
ArgumentPurpose::Link
| ArgumentPurpose::StructReturn
| ArgumentPurpose::VMContext => {}
ArgumentPurpose::Normal => panic!("unexpected return value {}", arg),
_ => panic!("Unsupported special purpose return value {}", arg),
}
@@ -587,10 +584,9 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph
.expect("No matching special purpose argument.");
// Get the corresponding entry block value and add it to the return instruction's
// arguments.
let val = pos.func.dfg.ebb_params(
pos.func.layout.entry_block().unwrap(),
)
[idx];
let val = pos.func
.dfg
.ebb_params(pos.func.layout.entry_block().unwrap())[idx];
debug_assert_eq!(pos.func.dfg.value_type(val), arg.value_type);
vlist.push(val, &mut pos.func.dfg.value_lists);
}
@@ -630,12 +626,12 @@ fn spill_entry_params(func: &mut Function, entry: Ebb) {
/// or calls between writing the stack slots and the call instruction. Writing the slots earlier
/// could help reduce register pressure before the call.
fn spill_call_arguments(pos: &mut FuncCursor) -> bool {
let inst = pos.current_inst().expect(
"Cursor must point to a call instruction",
);
let sig_ref = pos.func.dfg.call_signature(inst).expect(
"Call instruction expected.",
);
let inst = pos.current_inst()
.expect("Cursor must point to a call instruction");
let sig_ref = pos.func
.dfg
.call_signature(inst)
.expect("Call instruction expected.");

// Start by building a list of stack slots and arguments to be replaced.
// This requires borrowing `pos.func.dfg`, so we can't change anything.
@@ -51,10 +51,7 @@ pub fn expand_call(
);
}

func.dfg.replace(inst).CallIndirect(
ir::Opcode::CallIndirect,
ptr_ty,
sig,
new_args,
);
func.dfg
.replace(inst)
.CallIndirect(ir::Opcode::CallIndirect, ptr_ty, sig, new_args);
}

@@ -34,9 +34,8 @@ pub fn expand_global_addr(
/// Expand a `global_addr` instruction for a vmctx global.
fn vmctx_addr(inst: ir::Inst, func: &mut ir::Function, offset: i64) {
// Get the value representing the `vmctx` argument.
let vmctx = func.special_param(ir::ArgumentPurpose::VMContext).expect(
"Missing vmctx parameter",
);
let vmctx = func.special_param(ir::ArgumentPurpose::VMContext)
.expect("Missing vmctx parameter");

// Simply replace the `global_addr` instruction with an `iadd_imm`, reusing the result value.
func.dfg.replace(inst).iadd_imm(vmctx, offset);

@@ -67,30 +67,21 @@ fn dynamic_addr(
let oob;
if size == 1 {
// `offset > bound - 1` is the same as `offset >= bound`.
oob = pos.ins().icmp(
IntCC::UnsignedGreaterThanOrEqual,
offset,
bound,
);
oob = pos.ins()
.icmp(IntCC::UnsignedGreaterThanOrEqual, offset, bound);
} else if size <= min_size {
// We know that bound >= min_size, so here we can compare `offset > bound - size` without
// wrapping.
let adj_bound = pos.ins().iadd_imm(bound, -size);
oob = pos.ins().icmp(
IntCC::UnsignedGreaterThan,
offset,
adj_bound,
);
oob = pos.ins()
.icmp(IntCC::UnsignedGreaterThan, offset, adj_bound);
} else {
// We need an overflow check for the adjusted offset.
let size_val = pos.ins().iconst(offset_ty, size);
let (adj_offset, overflow) = pos.ins().iadd_cout(offset, size_val);
pos.ins().trapnz(overflow, ir::TrapCode::HeapOutOfBounds);
oob = pos.ins().icmp(
IntCC::UnsignedGreaterThan,
adj_offset,
bound,
);
oob = pos.ins()
.icmp(IntCC::UnsignedGreaterThan, adj_offset, bound);
}
pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);

@@ -137,17 +128,11 @@ fn static_addr(
let oob = if limit & 1 == 1 {
// Prefer testing `offset >= limit - 1` when limit is odd because an even number is
// likely to be a convenient constant on ARM and other RISC architectures.
pos.ins().icmp_imm(
IntCC::UnsignedGreaterThanOrEqual,
offset,
limit - 1,
)
pos.ins()
.icmp_imm(IntCC::UnsignedGreaterThanOrEqual, offset, limit - 1)
} else {
pos.ins().icmp_imm(
IntCC::UnsignedGreaterThan,
offset,
limit,
)
pos.ins()
.icmp_imm(IntCC::UnsignedGreaterThan, offset, limit)
};
pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
}
@@ -1,15 +1,15 @@
//! Expanding instructions as runtime library calls.

use ir;
use ir::{InstBuilder, get_libcall_funcref};
use std::vec::Vec;
use ir::{get_libcall_funcref, InstBuilder};
use isa::TargetIsa;
use std::vec::Vec;

/// Try to expand `inst` as a library call, returning true is successful.
pub fn expand_as_libcall(inst: ir::Inst, func: &mut ir::Function, isa: &TargetIsa) -> bool {
// Does the opcode/ctrl_type combo even have a well-known runtime library name.
let libcall =
match ir::LibCall::for_inst(func.dfg[inst].opcode(), func.dfg.ctrl_typevar(inst)) {
let libcall = match ir::LibCall::for_inst(func.dfg[inst].opcode(), func.dfg.ctrl_typevar(inst))
{
Some(lc) => lc,
None => return false,
};

@@ -27,9 +27,9 @@ mod heap;
mod libcall;
mod split;

use self::call::expand_call;
use self::globalvar::expand_global_addr;
use self::heap::expand_heap_addr;
use self::call::expand_call;
use self::libcall::expand_as_libcall;

/// Legalize `inst` for `isa`. Return true if any changes to the code were

@@ -134,9 +134,9 @@ fn split_any(
pos.func.dfg.display_inst(inst, None)
);
let fixed_args = branch_opc.constraints().fixed_value_arguments();
let mut args = pos.func.dfg[inst].take_value_list().expect(
"Branches must have value lists.",
);
let mut args = pos.func.dfg[inst]
.take_value_list()
.expect("Branches must have value lists.");
let num_args = args.len(&pos.func.dfg.value_lists);
// Get the old value passed to the EBB argument we're repairing.
let old_arg = args.get(fixed_args + repair.num, &pos.func.dfg.value_lists)
@@ -236,12 +236,9 @@ fn split_value(
// Note that it is safe to move `pos` here since `reuse` was set above, so we don't
// need to insert a split instruction before returning.
pos.goto_first_inst(ebb);
pos.ins().with_result(value).Binary(
concat,
split_type,
lo,
hi,
);
pos.ins()
.with_result(value)
.Binary(concat, split_type, lo, hi);

// Finally, splitting the EBB parameter is not enough. We also have to repair all
// of the predecessor instructions that branch here.
@@ -31,17 +31,9 @@
redundant_field_names,
useless_let_if_seq,
len_without_is_empty))]
#![cfg_attr(feature="cargo-clippy", warn(
float_arithmetic,
mut_mut,
nonminimal_bool,
option_map_unwrap_or,
option_map_unwrap_or_else,
print_stdout,
unicode_not_nfc,
use_self,
))]

#![cfg_attr(feature = "cargo-clippy",
warn(float_arithmetic, mut_mut, nonminimal_bool, option_map_unwrap_or,
option_map_unwrap_or_else, print_stdout, unicode_not_nfc, use_self))]
// Turns on no_std and alloc features if std is not available.
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(feature = "std"), feature(alloc))]
@@ -90,7 +82,6 @@ pub use entity::packed_option;

mod abi;
mod bitset;
mod nan_canonicalization;
mod constant_hash;
mod context;
mod dce;
@@ -99,6 +90,7 @@ mod fx;
mod iterators;
mod legalizer;
mod licm;
mod nan_canonicalization;
mod partition_slice;
mod postopt;
mod predicates;
@@ -115,11 +107,11 @@ mod write;
/// This replaces `std` in builds with `core`.
#[cfg(not(feature = "std"))]
mod std {
pub use alloc::{boxed, string, vec};
pub use core::*;
pub use alloc::{boxed, vec, string};
pub mod collections {
pub use hashmap_core::{HashMap, HashSet};
pub use hashmap_core::map as hash_map;
pub use alloc::BTreeSet;
pub use hashmap_core::map as hash_map;
pub use hashmap_core::{HashMap, HashSet};
}
}

@@ -132,9 +132,9 @@ fn change_branch_jump_destination(inst: Inst, new_ebb: Ebb, func: &mut Function)

/// Test whether the given opcode is unsafe to even consider for LICM.
fn trivially_unsafe_for_licm(opcode: Opcode) -> bool {
opcode.can_load() || opcode.can_store() || opcode.is_call() || opcode.is_branch() ||
opcode.is_terminator() || opcode.is_return() ||
opcode.can_trap() || opcode.other_side_effects() || opcode.writes_cpu_flags()
opcode.can_load() || opcode.can_store() || opcode.is_call() || opcode.is_branch()
|| opcode.is_terminator() || opcode.is_return() || opcode.can_trap()
|| opcode.other_side_effects() || opcode.writes_cpu_flags()
}

/// Test whether the given instruction is loop-invariant.
@@ -3,11 +3,11 @@
//! that will replace nondeterministic NaN's with a single canonical NaN value.

use cursor::{Cursor, FuncCursor};
use ir::{Function, Inst, InstBuilder, InstructionData, Opcode, Value};
use ir::condcodes::FloatCC;
use ir::immediates::{Ieee32, Ieee64};
use ir::types;
use ir::types::Type;
use ir::{Function, Inst, InstBuilder, InstructionData, Opcode, Value};
use timing;

// Canonical 32-bit and 64-bit NaN values.
@@ -33,13 +33,13 @@ pub fn do_nan_canonicalization(func: &mut Function) {
fn is_fp_arith(pos: &mut FuncCursor, inst: Inst) -> bool {
match pos.func.dfg[inst] {
InstructionData::Unary { opcode, .. } => {
opcode == Opcode::Ceil || opcode == Opcode::Floor || opcode == Opcode::Nearest ||
opcode == Opcode::Sqrt || opcode == Opcode::Trunc
opcode == Opcode::Ceil || opcode == Opcode::Floor || opcode == Opcode::Nearest
|| opcode == Opcode::Sqrt || opcode == Opcode::Trunc
}
InstructionData::Binary { opcode, .. } => {
opcode == Opcode::Fadd || opcode == Opcode::Fdiv || opcode == Opcode::Fmax ||
opcode == Opcode::Fmin || opcode == Opcode::Fmul ||
opcode == Opcode::Fsub
opcode == Opcode::Fadd || opcode == Opcode::Fdiv || opcode == Opcode::Fmax
|| opcode == Opcode::Fmin || opcode == Opcode::Fmul
|| opcode == Opcode::Fsub
}
InstructionData::Ternary { opcode, .. } => opcode == Opcode::Fma,
_ => false,
@@ -59,11 +59,9 @@ fn add_nan_canon_seq(pos: &mut FuncCursor, inst: Inst) {
// the canonical NaN value if `val` is NaN, assign the result to `inst`.
let is_nan = pos.ins().fcmp(FloatCC::NotEqual, new_res, new_res);
let canon_nan = insert_nan_const(pos, val_type);
pos.ins().with_result(val).select(
is_nan,
canon_nan,
new_res,
);
pos.ins()
.with_result(val)
.select(is_nan, canon_nan, new_res);

pos.prev_inst(); // Step backwards so the pass does not skip instructions.
}
@@ -7,7 +7,7 @@ use ir::condcodes::{CondCode, FloatCC, IntCC};
|
||||
use ir::dfg::ValueDef;
|
||||
use ir::immediates::{Imm64, Offset32};
|
||||
use ir::instructions::{Opcode, ValueList};
|
||||
use ir::{Ebb, Function, Inst, InstBuilder, InstructionData, Value, Type, MemFlags};
|
||||
use ir::{Ebb, Function, Inst, InstBuilder, InstructionData, MemFlags, Type, Value};
|
||||
use isa::TargetIsa;
|
||||
use timing;
|
||||
|
||||
@@ -135,12 +135,10 @@ fn optimize_cpu_flags(
|
||||
if info.invert_branch_cond {
|
||||
cond = cond.inverse();
|
||||
}
|
||||
pos.func.dfg.replace(info.br_inst).brif(
|
||||
cond,
|
||||
flags,
|
||||
info.destination,
|
||||
&args,
|
||||
);
|
||||
pos.func
|
||||
.dfg
|
||||
.replace(info.br_inst)
|
||||
.brif(cond, flags, info.destination, &args);
|
||||
}
|
||||
CmpBrKind::IcmpImm { mut cond, imm } => {
|
||||
let flags = pos.ins().ifcmp_imm(info.cmp_arg, imm);
|
||||
@@ -148,12 +146,10 @@ fn optimize_cpu_flags(
|
||||
if info.invert_branch_cond {
|
||||
cond = cond.inverse();
|
||||
}
|
||||
pos.func.dfg.replace(info.br_inst).brif(
|
||||
cond,
|
||||
flags,
|
||||
info.destination,
|
||||
&args,
|
||||
);
|
||||
pos.func
|
||||
.dfg
|
||||
.replace(info.br_inst)
|
||||
.brif(cond, flags, info.destination, &args);
|
||||
}
|
||||
CmpBrKind::Fcmp { mut cond, arg } => {
|
||||
let flags = pos.ins().ffcmp(info.cmp_arg, arg);
|
||||
@@ -161,12 +157,10 @@ fn optimize_cpu_flags(
|
||||
if info.invert_branch_cond {
|
||||
cond = cond.inverse();
|
||||
}
|
||||
pos.func.dfg.replace(info.br_inst).brff(
|
||||
cond,
|
||||
flags,
|
||||
info.destination,
|
||||
&args,
|
||||
);
|
||||
pos.func
|
||||
.dfg
|
||||
.replace(info.br_inst)
|
||||
.brff(cond, flags, info.destination, &args);
|
||||
}
|
||||
}
|
||||
let ok = pos.func.update_encoding(info.cmp_inst, isa).is_ok();
|
||||
@@ -175,7 +169,6 @@ fn optimize_cpu_flags(
|
||||
debug_assert!(ok);
|
||||
}
|
||||
|
||||
|
||||
struct MemOpInfo {
|
||||
opcode: Opcode,
|
||||
inst: Inst,
|
||||
@@ -326,8 +319,6 @@ fn optimize_complex_addresses(pos: &mut EncCursor, inst: Inst, isa: &TargetIsa)
|
||||
debug_assert!(ok);
|
||||
}
|
||||
|
||||
|
||||
|
||||
//----------------------------------------------------------------------
|
||||
//
|
||||
// The main post-opt pass.
|
||||
@@ -343,10 +334,8 @@ pub fn do_postopt(func: &mut Function, isa: &TargetIsa) {
|
||||
optimize_cpu_flags(&mut pos, inst, last_flags_clobber, isa);
|
||||
|
||||
// Track the most recent seen instruction that clobbers the flags.
|
||||
if let Some(constraints) =
|
||||
isa.encoding_info().operand_constraints(
|
||||
pos.func.encodings[inst],
|
||||
)
|
||||
if let Some(constraints) = isa.encoding_info()
|
||||
.operand_constraints(pos.func.encodings[inst])
|
||||
{
|
||||
if constraints.clobbers_flags {
|
||||
last_flags_clobber = Some(inst)
|
||||
|
||||
@@ -137,27 +137,25 @@ fn get_div_info(inst: Inst, dfg: &DataFlowGraph) -> Option<DivRemByConstInfo> {
|
||||
/// cannot do any transformation, in which case `inst` is left unchanged.
|
||||
fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCursor, inst: Inst) {
|
||||
let isRem = match *divrem_info {
|
||||
DivRemByConstInfo::DivU32(_, _) |
|
||||
DivRemByConstInfo::DivU64(_, _) |
|
||||
DivRemByConstInfo::DivS32(_, _) |
|
||||
DivRemByConstInfo::DivS64(_, _) => false,
|
||||
DivRemByConstInfo::RemU32(_, _) |
|
||||
DivRemByConstInfo::RemU64(_, _) |
|
||||
DivRemByConstInfo::RemS32(_, _) |
|
||||
DivRemByConstInfo::RemS64(_, _) => true,
|
||||
DivRemByConstInfo::DivU32(_, _)
|
||||
| DivRemByConstInfo::DivU64(_, _)
|
||||
| DivRemByConstInfo::DivS32(_, _)
|
||||
| DivRemByConstInfo::DivS64(_, _) => false,
|
||||
DivRemByConstInfo::RemU32(_, _)
|
||||
| DivRemByConstInfo::RemU64(_, _)
|
||||
| DivRemByConstInfo::RemS32(_, _)
|
||||
| DivRemByConstInfo::RemS64(_, _) => true,
|
||||
};
|
||||
|
||||
match *divrem_info {
|
||||
// -------------------- U32 --------------------
|
||||
|
||||
// U32 div, rem by zero: ignore
|
||||
DivRemByConstInfo::DivU32(_n1, 0) |
|
||||
DivRemByConstInfo::RemU32(_n1, 0) => {}
|
||||
DivRemByConstInfo::DivU32(_n1, 0) | DivRemByConstInfo::RemU32(_n1, 0) => {}
|
||||
|
||||
// U32 div by 1: identity
|
||||
// U32 rem by 1: zero
|
||||
DivRemByConstInfo::DivU32(n1, 1) |
|
||||
DivRemByConstInfo::RemU32(n1, 1) => {
|
||||
DivRemByConstInfo::DivU32(n1, 1) | DivRemByConstInfo::RemU32(n1, 1) => {
|
||||
if isRem {
|
||||
pos.func.dfg.replace(inst).iconst(I32, 0);
|
||||
} else {
|
||||
@@ -166,8 +164,9 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
}
|
||||
|
||||
// U32 div, rem by a power-of-2
|
||||
DivRemByConstInfo::DivU32(n1, d) |
|
||||
DivRemByConstInfo::RemU32(n1, d) if d.is_power_of_two() => {
|
||||
DivRemByConstInfo::DivU32(n1, d) | DivRemByConstInfo::RemU32(n1, d)
|
||||
if d.is_power_of_two() =>
|
||||
{
|
||||
debug_assert!(d >= 2);
|
||||
// compute k where d == 2^k
|
||||
let k = d.trailing_zeros();
|
||||
@@ -181,8 +180,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
}
|
||||
|
||||
// U32 div, rem by non-power-of-2
|
||||
DivRemByConstInfo::DivU32(n1, d) |
|
||||
DivRemByConstInfo::RemU32(n1, d) => {
|
||||
DivRemByConstInfo::DivU32(n1, d) | DivRemByConstInfo::RemU32(n1, d) => {
|
||||
debug_assert!(d >= 3);
|
||||
let MU32 {
|
||||
mulBy,
|
||||
@@ -223,13 +221,11 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
// -------------------- U64 --------------------
|
||||
|
||||
// U64 div, rem by zero: ignore
|
||||
DivRemByConstInfo::DivU64(_n1, 0) |
|
||||
DivRemByConstInfo::RemU64(_n1, 0) => {}
|
||||
DivRemByConstInfo::DivU64(_n1, 0) | DivRemByConstInfo::RemU64(_n1, 0) => {}
|
||||
|
||||
// U64 div by 1: identity
|
||||
// U64 rem by 1: zero
|
||||
DivRemByConstInfo::DivU64(n1, 1) |
|
||||
DivRemByConstInfo::RemU64(n1, 1) => {
|
||||
DivRemByConstInfo::DivU64(n1, 1) | DivRemByConstInfo::RemU64(n1, 1) => {
|
||||
if isRem {
|
||||
pos.func.dfg.replace(inst).iconst(I64, 0);
|
||||
} else {
|
||||
@@ -238,8 +234,9 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
}
|
||||
|
||||
// U64 div, rem by a power-of-2
|
||||
DivRemByConstInfo::DivU64(n1, d) |
|
||||
DivRemByConstInfo::RemU64(n1, d) if d.is_power_of_two() => {
|
||||
DivRemByConstInfo::DivU64(n1, d) | DivRemByConstInfo::RemU64(n1, d)
|
||||
if d.is_power_of_two() =>
|
||||
{
|
||||
debug_assert!(d >= 2);
|
||||
// compute k where d == 2^k
|
||||
let k = d.trailing_zeros();
|
||||
@@ -253,8 +250,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
}
|
||||
|
||||
// U64 div, rem by non-power-of-2
|
||||
DivRemByConstInfo::DivU64(n1, d) |
|
||||
DivRemByConstInfo::RemU64(n1, d) => {
|
||||
DivRemByConstInfo::DivU64(n1, d) | DivRemByConstInfo::RemU64(n1, d) => {
|
||||
debug_assert!(d >= 3);
|
||||
let MU64 {
|
||||
mulBy,
|
||||
@@ -295,15 +291,14 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
// -------------------- S32 --------------------
|
||||
|
||||
// S32 div, rem by zero or -1: ignore
|
||||
DivRemByConstInfo::DivS32(_n1, -1) |
|
||||
DivRemByConstInfo::RemS32(_n1, -1) |
|
||||
DivRemByConstInfo::DivS32(_n1, 0) |
|
||||
DivRemByConstInfo::RemS32(_n1, 0) => {}
|
||||
DivRemByConstInfo::DivS32(_n1, -1)
|
||||
| DivRemByConstInfo::RemS32(_n1, -1)
|
||||
| DivRemByConstInfo::DivS32(_n1, 0)
|
||||
| DivRemByConstInfo::RemS32(_n1, 0) => {}
|
||||
|
||||
// S32 div by 1: identity
|
||||
// S32 rem by 1: zero
|
||||
DivRemByConstInfo::DivS32(n1, 1) |
|
||||
DivRemByConstInfo::RemS32(n1, 1) => {
|
||||
DivRemByConstInfo::DivS32(n1, 1) | DivRemByConstInfo::RemS32(n1, 1) => {
|
||||
if isRem {
|
||||
pos.func.dfg.replace(inst).iconst(I32, 0);
|
||||
} else {
|
||||
@@ -311,8 +306,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
}
|
||||
}
|
||||
|
||||
DivRemByConstInfo::DivS32(n1, d) |
|
||||
DivRemByConstInfo::RemS32(n1, d) => {
|
||||
DivRemByConstInfo::DivS32(n1, d) | DivRemByConstInfo::RemS32(n1, d) => {
|
||||
if let Some((isNeg, k)) = isPowerOf2_S32(d) {
|
||||
// k can be 31 only in the case that d is -2^31.
|
||||
debug_assert!(k >= 1 && k <= 31);
|
||||
@@ -372,15 +366,14 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
// -------------------- S64 --------------------
|
||||
|
||||
// S64 div, rem by zero or -1: ignore
|
||||
DivRemByConstInfo::DivS64(_n1, -1) |
|
||||
DivRemByConstInfo::RemS64(_n1, -1) |
|
||||
DivRemByConstInfo::DivS64(_n1, 0) |
|
||||
DivRemByConstInfo::RemS64(_n1, 0) => {}
|
||||
DivRemByConstInfo::DivS64(_n1, -1)
|
||||
| DivRemByConstInfo::RemS64(_n1, -1)
|
||||
| DivRemByConstInfo::DivS64(_n1, 0)
|
||||
| DivRemByConstInfo::RemS64(_n1, 0) => {}
|
||||
|
||||
// S64 div by 1: identity
|
||||
// S64 rem by 1: zero
|
||||
DivRemByConstInfo::DivS64(n1, 1) |
|
||||
DivRemByConstInfo::RemS64(n1, 1) => {
|
||||
DivRemByConstInfo::DivS64(n1, 1) | DivRemByConstInfo::RemS64(n1, 1) => {
|
||||
if isRem {
|
||||
pos.func.dfg.replace(inst).iconst(I64, 0);
|
||||
} else {
|
||||
@@ -388,8 +381,7 @@ fn do_divrem_transformation(divrem_info: &DivRemByConstInfo, pos: &mut FuncCurso
|
||||
}
|
||||
}
|
||||
|
||||
DivRemByConstInfo::DivS64(n1, d) |
|
||||
DivRemByConstInfo::RemS64(n1, d) => {
|
||||
DivRemByConstInfo::DivS64(n1, d) | DivRemByConstInfo::RemS64(n1, d) => {
|
||||
if let Some((isNeg, k)) = isPowerOf2_S64(d) {
|
||||
// k can be 63 only in the case that d is -2^63.
|
||||
debug_assert!(k >= 1 && k <= 63);
|
||||
@@ -483,12 +475,10 @@ fn simplify(pos: &mut FuncCursor, inst: Inst) {
|
||||
_ => return,
|
||||
};
|
||||
let ty = pos.func.dfg.ctrl_typevar(inst);
|
||||
pos.func.dfg.replace(inst).BinaryImm(
|
||||
new_opcode,
|
||||
ty,
|
||||
imm,
|
||||
args[0],
|
||||
);
|
||||
pos.func
|
||||
.dfg
|
||||
.replace(inst)
|
||||
.BinaryImm(new_opcode, ty, imm, args[0]);
|
||||
}
|
||||
} else if let ValueDef::Result(iconst_inst, _) = pos.func.dfg.value_def(args[0]) {
|
||||
if let InstructionData::UnaryImm {
|
||||
@@ -501,12 +491,10 @@ fn simplify(pos: &mut FuncCursor, inst: Inst) {
|
||||
_ => return,
|
||||
};
|
||||
let ty = pos.func.dfg.ctrl_typevar(inst);
|
||||
pos.func.dfg.replace(inst).BinaryImm(
|
||||
new_opcode,
|
||||
ty,
|
||||
imm,
|
||||
args[1],
|
||||
);
|
||||
pos.func
|
||||
.dfg
|
||||
.replace(inst)
|
||||
.BinaryImm(new_opcode, ty, imm, args[1]);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -522,9 +510,12 @@ fn simplify(pos: &mut FuncCursor, inst: Inst) {
|
||||
}
|
||||
}
|
||||
}
|
||||
InstructionData::CondTrap { .. } |
|
||||
InstructionData::Branch { .. } |
|
||||
InstructionData::Ternary { opcode: Opcode::Select, .. } => {
|
||||
InstructionData::CondTrap { .. }
|
||||
| InstructionData::Branch { .. }
|
||||
| InstructionData::Ternary {
|
||||
opcode: Opcode::Select,
|
||||
..
|
||||
} => {
|
||||
// Fold away a redundant `bint`.
|
||||
let maybe = {
|
||||
let args = pos.func.dfg.inst_args(inst);
|
||||
|
||||
@@ -90,8 +90,7 @@ impl Affinity {
|
||||
Affinity::Reg(rc) => {
|
||||
// If the preferred register class is a subclass of the constraint, there's no need
|
||||
// to change anything.
|
||||
if constraint.kind != ConstraintKind::Stack &&
|
||||
!constraint.regclass.has_subclass(rc)
|
||||
if constraint.kind != ConstraintKind::Stack && !constraint.regclass.has_subclass(rc)
|
||||
{
|
||||
// If the register classes don't overlap, `intersect` returns `Unassigned`, and
|
||||
// we just keep our previous affinity.
|
||||
@@ -120,12 +119,10 @@ impl<'a> fmt::Display for DisplayAffinity<'a> {
|
||||
match self.0 {
|
||||
Affinity::Unassigned => write!(f, "unassigned"),
|
||||
Affinity::Stack => write!(f, "stack"),
|
||||
Affinity::Reg(rci) => {
|
||||
match self.1 {
|
||||
Affinity::Reg(rci) => match self.1 {
|
||||
Some(regs) => write!(f, "{}", regs.rc(rci)),
|
||||
None => write!(f, "{}", rci),
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -196,8 +196,7 @@ impl<'a> Context<'a> {
|
||||
pred_inst,
|
||||
pred_ebb,
|
||||
self.liveness.context(&self.func.layout),
|
||||
)
|
||||
{
|
||||
) {
|
||||
self.isolate_param(ebb, param);
|
||||
}
|
||||
}
|
||||
@@ -219,8 +218,8 @@ impl<'a> Context<'a> {
|
||||
// pre-spilled, and the rest of the virtual register would be forced to spill to the
|
||||
// `incoming_arg` stack slot too.
|
||||
if let ir::ValueDef::Param(def_ebb, def_num) = self.func.dfg.value_def(arg) {
|
||||
if Some(def_ebb) == self.func.layout.entry_block() &&
|
||||
self.func.signature.params[def_num].location.is_stack()
|
||||
if Some(def_ebb) == self.func.layout.entry_block()
|
||||
&& self.func.signature.params[def_num].location.is_stack()
|
||||
{
|
||||
dbg!("-> isolating function stack parameter {}", arg);
|
||||
let new_arg = self.isolate_arg(pred_ebb, pred_inst, argnum, arg);
|
||||
@@ -303,16 +302,11 @@ impl<'a> Context<'a> {
|
||||
&self.encinfo
|
||||
.operand_constraints(pos.func.encodings[inst])
|
||||
.expect("Bad copy encoding")
|
||||
.outs
|
||||
[0],
|
||||
.outs[0],
|
||||
);
|
||||
self.liveness.create_dead(new_val, ebb, affinity);
|
||||
self.liveness.extend_locally(
|
||||
new_val,
|
||||
ebb,
|
||||
inst,
|
||||
&pos.func.layout,
|
||||
);
|
||||
self.liveness
|
||||
.extend_locally(new_val, ebb, inst, &pos.func.layout);
|
||||
|
||||
new_val
|
||||
}
|
||||
@@ -353,16 +347,11 @@ impl<'a> Context<'a> {
|
||||
&self.encinfo
|
||||
.operand_constraints(pos.func.encodings[inst])
|
||||
.expect("Bad copy encoding")
|
||||
.outs
|
||||
[0],
|
||||
.outs[0],
|
||||
);
|
||||
self.liveness.create_dead(copy, inst, affinity);
|
||||
self.liveness.extend_locally(
|
||||
copy,
|
||||
pred_ebb,
|
||||
pred_inst,
|
||||
&pos.func.layout,
|
||||
);
|
||||
self.liveness
|
||||
.extend_locally(copy, pred_ebb, pred_inst, &pos.func.layout);
|
||||
|
||||
pos.func.dfg.inst_variable_args_mut(pred_inst)[argnum] = copy;
|
||||
|
||||
@@ -422,12 +411,9 @@ impl<'a> Context<'a> {
|
||||
let node = Node::value(value, 0, self.func);
|
||||
|
||||
// Push this value and get the nearest dominating def back.
|
||||
let parent = match self.forest.push_node(
|
||||
node,
|
||||
self.func,
|
||||
self.domtree,
|
||||
self.preorder,
|
||||
) {
|
||||
let parent = match self.forest
|
||||
.push_node(node, self.func, self.domtree, self.preorder)
|
||||
{
|
||||
None => continue,
|
||||
Some(n) => n,
|
||||
};
|
||||
@@ -525,12 +511,8 @@ impl<'a> Context<'a> {
|
||||
// Can't merge because of interference. Insert a copy instead.
|
||||
let pred_ebb = self.func.layout.pp_ebb(pred_inst);
|
||||
let new_arg = self.isolate_arg(pred_ebb, pred_inst, argnum, arg);
|
||||
self.virtregs.insert_single(
|
||||
param,
|
||||
new_arg,
|
||||
self.func,
|
||||
self.preorder,
|
||||
);
|
||||
self.virtregs
|
||||
.insert_single(param, new_arg, self.func, self.preorder);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -564,12 +546,8 @@ impl<'a> Context<'a> {
|
||||
|
||||
// Restrict the virtual copy nodes we look at and key the `set_id` and `value` properties
|
||||
// of the nodes. Set_id 0 will be `param` and set_id 1 will be `arg`.
|
||||
self.vcopies.set_filter(
|
||||
[param, arg],
|
||||
func,
|
||||
self.virtregs,
|
||||
preorder,
|
||||
);
|
||||
self.vcopies
|
||||
.set_filter([param, arg], func, self.virtregs, preorder);
|
||||
|
||||
// Now create an ordered sequence of dom-forest nodes from three sources: The two virtual
|
||||
// registers and the filtered virtual copies.
|
||||
@@ -625,8 +603,8 @@ impl<'a> Context<'a> {
|
||||
|
||||
// Check if the parent value interferes with the virtual copy.
|
||||
let inst = node.def.unwrap_inst();
|
||||
if node.set_id != parent.set_id &&
|
||||
self.liveness[parent.value].reaches_use(inst, node.ebb, ctx)
|
||||
if node.set_id != parent.set_id
|
||||
&& self.liveness[parent.value].reaches_use(inst, node.ebb, ctx)
|
||||
{
|
||||
dbg!(
|
||||
" - interference: {} overlaps vcopy at {}:{}",
|
||||
@@ -649,8 +627,8 @@ impl<'a> Context<'a> {
|
||||
|
||||
// Both node and parent are values, so check for interference.
|
||||
debug_assert!(node.is_value() && parent.is_value());
|
||||
if node.set_id != parent.set_id &&
|
||||
self.liveness[parent.value].overlaps_def(node.def, node.ebb, ctx)
|
||||
if node.set_id != parent.set_id
|
||||
&& self.liveness[parent.value].overlaps_def(node.def, node.ebb, ctx)
|
||||
{
|
||||
// The two values are interfering.
|
||||
dbg!(" - interference: {} overlaps def of {}", parent, node.value);
|
||||
@@ -945,9 +923,8 @@ impl VirtualCopies {
|
||||
}
|
||||
|
||||
// Reorder the predecessor branches as required by the dominator forest.
|
||||
self.branches.sort_unstable_by(|&(a, _), &(b, _)| {
|
||||
preorder.pre_cmp(a, b, &func.layout)
|
||||
});
|
||||
self.branches
|
||||
.sort_unstable_by(|&(a, _), &(b, _)| preorder.pre_cmp(a, b, &func.layout));
|
||||
}
|
||||
|
||||
/// Get the next unmerged parameter value.
|
||||
@@ -1097,9 +1074,9 @@ where
|
||||
let ord = match (self.a.peek(), self.b.peek()) {
|
||||
(Some(a), Some(b)) => {
|
||||
let layout = self.layout;
|
||||
self.preorder.pre_cmp_ebb(a.ebb, b.ebb).then_with(|| {
|
||||
layout.cmp(a.def, b.def)
|
||||
})
|
||||
self.preorder
|
||||
.pre_cmp_ebb(a.ebb, b.ebb)
|
||||
.then_with(|| layout.cmp(a.def, b.def))
|
||||
}
|
||||
(Some(_), None) => cmp::Ordering::Less,
|
||||
(None, Some(_)) => cmp::Ordering::Greater,
|
||||
|
||||
@@ -51,10 +51,10 @@ use isa::{regs_overlap, RegClass, RegInfo, RegUnit};
|
||||
use packed_option::PackedOption;
|
||||
use regalloc::RegDiversions;
|
||||
use regalloc::affinity::Affinity;
|
||||
use regalloc::register_set::RegisterSet;
|
||||
use regalloc::live_value_tracker::{LiveValue, LiveValueTracker};
|
||||
use regalloc::liveness::Liveness;
|
||||
use regalloc::liverange::{LiveRange, LiveRangeContext};
|
||||
use regalloc::register_set::RegisterSet;
|
||||
use regalloc::solver::{Solver, SolverError};
|
||||
use std::mem;
|
||||
use timing;
|
||||
@@ -142,9 +142,10 @@ impl Coloring {
|
||||
impl<'a> Context<'a> {
|
||||
/// Run the coloring algorithm.
|
||||
fn run(&mut self, tracker: &mut LiveValueTracker) {
|
||||
self.cur.func.locations.resize(
|
||||
self.cur.func.dfg.num_values(),
|
||||
);
|
||||
self.cur
|
||||
.func
|
||||
.locations
|
||||
.resize(self.cur.func.dfg.num_values());
|
||||
|
||||
// Visit blocks in reverse post-order. We need to ensure that at least one predecessor has
|
||||
// been visited before each EBB. That guarantees that the EBB arguments have been colored.
|
||||
@@ -372,10 +373,8 @@ impl<'a> Context<'a> {
|
||||
|
||||
// Update the global register set which has no diversions.
|
||||
if !lv.is_local {
|
||||
regs.global.free(
|
||||
rc,
|
||||
self.cur.func.locations[lv.value].unwrap_reg(),
|
||||
);
|
||||
regs.global
|
||||
.free(rc, self.cur.func.locations[lv.value].unwrap_reg());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -500,20 +499,14 @@ impl<'a> Context<'a> {
|
||||
// already in a register.
|
||||
let cur_reg = self.divert.reg(value, &self.cur.func.locations);
|
||||
match op.kind {
|
||||
ConstraintKind::FixedReg(regunit) |
|
||||
ConstraintKind::FixedTied(regunit) => {
|
||||
ConstraintKind::FixedReg(regunit) | ConstraintKind::FixedTied(regunit) => {
|
||||
// Add the fixed constraint even if `cur_reg == regunit`.
|
||||
// It is possible that we will want to convert the value to a variable later,
|
||||
// and this identity assignment prevents that from happening.
|
||||
self.solver.reassign_in(
|
||||
value,
|
||||
op.regclass,
|
||||
cur_reg,
|
||||
regunit,
|
||||
);
|
||||
self.solver
|
||||
.reassign_in(value, op.regclass, cur_reg, regunit);
|
||||
}
|
||||
ConstraintKind::Reg |
|
||||
ConstraintKind::Tied(_) => {
|
||||
ConstraintKind::Reg | ConstraintKind::Tied(_) => {
|
||||
if !op.regclass.contains(cur_reg) {
|
||||
self.solver.add_var(value, op.regclass, cur_reg);
|
||||
}
|
||||
@@ -541,8 +534,7 @@ impl<'a> Context<'a> {
|
||||
|
||||
for (op, &value) in constraints.iter().zip(self.cur.func.dfg.inst_args(inst)) {
|
||||
match op.kind {
|
||||
ConstraintKind::Reg |
|
||||
ConstraintKind::Tied(_) => {
|
||||
ConstraintKind::Reg | ConstraintKind::Tied(_) => {
|
||||
let cur_reg = self.divert.reg(value, &self.cur.func.locations);
|
||||
// This is the opposite condition of `program_input_constraints()`.
|
||||
if op.regclass.contains(cur_reg) {
|
||||
@@ -556,9 +548,9 @@ impl<'a> Context<'a> {
|
||||
}
|
||||
}
|
||||
}
|
||||
ConstraintKind::FixedReg(_) |
|
||||
ConstraintKind::FixedTied(_) |
|
||||
ConstraintKind::Stack => {}
|
||||
ConstraintKind::FixedReg(_)
|
||||
| ConstraintKind::FixedTied(_)
|
||||
| ConstraintKind::Stack => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -651,9 +643,9 @@ impl<'a> Context<'a> {
|
||||
Pred: FnMut(&LiveRange, LiveRangeContext<Layout>) -> bool,
|
||||
{
|
||||
for rdiv in self.divert.all() {
|
||||
let lr = self.liveness.get(rdiv.value).expect(
|
||||
"Missing live range for diverted register",
|
||||
);
|
||||
let lr = self.liveness
|
||||
.get(rdiv.value)
|
||||
.expect("Missing live range for diverted register");
|
||||
if pred(lr, self.liveness.context(&self.cur.func.layout)) {
|
||||
if let Affinity::Reg(rci) = lr.affinity {
|
||||
let rc = self.reginfo.rc(rci);
|
||||
@@ -703,8 +695,7 @@ impl<'a> Context<'a> {
|
||||
) {
|
||||
for (op, lv) in constraints.iter().zip(defs) {
|
||||
match op.kind {
|
||||
ConstraintKind::FixedReg(reg) |
|
||||
ConstraintKind::FixedTied(reg) => {
|
||||
ConstraintKind::FixedReg(reg) | ConstraintKind::FixedTied(reg) => {
|
||||
self.add_fixed_output(lv.value, op.regclass, reg, throughs);
|
||||
if !lv.is_local && !global_regs.is_avail(op.regclass, reg) {
|
||||
dbg!(
|
||||
@@ -716,9 +707,7 @@ impl<'a> Context<'a> {
|
||||
*replace_global_defines = true;
|
||||
}
|
||||
}
|
||||
ConstraintKind::Reg |
|
||||
ConstraintKind::Tied(_) |
|
||||
ConstraintKind::Stack => {}
|
||||
ConstraintKind::Reg | ConstraintKind::Tied(_) | ConstraintKind::Stack => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -801,9 +790,9 @@ impl<'a> Context<'a> {
|
||||
) {
|
||||
for (op, lv) in constraints.iter().zip(defs) {
|
||||
match op.kind {
|
||||
ConstraintKind::FixedReg(_) |
|
||||
ConstraintKind::FixedTied(_) |
|
||||
ConstraintKind::Stack => continue,
|
||||
ConstraintKind::FixedReg(_)
|
||||
| ConstraintKind::FixedTied(_)
|
||||
| ConstraintKind::Stack => continue,
|
||||
ConstraintKind::Reg => {
|
||||
self.solver.add_def(lv.value, op.regclass, !lv.is_local);
|
||||
}
|
||||
@@ -816,8 +805,7 @@ impl<'a> Context<'a> {
|
||||
op.regclass,
|
||||
self.divert.reg(arg, &self.cur.func.locations),
|
||||
!lv.is_local,
|
||||
)
|
||||
{
|
||||
) {
|
||||
// The value we're tied to has been assigned to a fixed register.
|
||||
// We need to make sure that fixed output register is compatible with the
|
||||
// global register set.
|
||||
@@ -881,8 +869,8 @@ impl<'a> Context<'a> {
|
||||
// not actually constrained by the instruction. We just want it out of the way.
|
||||
let toprc2 = self.reginfo.toprc(rci);
|
||||
let reg2 = self.divert.reg(lv.value, &self.cur.func.locations);
|
||||
if rc.contains(reg2) && self.solver.can_add_var(lv.value, toprc2, reg2) &&
|
||||
!self.is_live_on_outgoing_edge(lv.value)
|
||||
if rc.contains(reg2) && self.solver.can_add_var(lv.value, toprc2, reg2)
|
||||
&& !self.is_live_on_outgoing_edge(lv.value)
|
||||
{
|
||||
self.solver.add_through_var(lv.value, toprc2, reg2);
|
||||
return true;
|
||||
@@ -911,10 +899,10 @@ impl<'a> Context<'a> {
|
||||
}
|
||||
Table(jt) => {
|
||||
let lr = &self.liveness[value];
|
||||
!lr.is_local() &&
|
||||
self.cur.func.jump_tables[jt].entries().any(|(_, ebb)| {
|
||||
lr.is_livein(ebb, ctx)
|
||||
})
|
||||
!lr.is_local()
|
||||
&& self.cur.func.jump_tables[jt]
|
||||
.entries()
|
||||
.any(|(_, ebb)| lr.is_livein(ebb, ctx))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -940,7 +928,9 @@ impl<'a> Context<'a> {
|
||||
|
||||
for m in self.solver.moves() {
|
||||
match *m {
|
||||
Reg { value, from, to, .. } => {
|
||||
Reg {
|
||||
value, from, to, ..
|
||||
} => {
|
||||
self.divert.regmove(value, from, to);
|
||||
self.cur.ins().regmove(value, from, to);
|
||||
}
|
||||
@@ -951,10 +941,10 @@ impl<'a> Context<'a> {
|
||||
..
|
||||
} => {
|
||||
debug_assert_eq!(slot[to_slot].expand(), None, "Overwriting slot in use");
|
||||
let ss = self.cur.func.stack_slots.get_emergency_slot(
|
||||
self.cur.func.dfg.value_type(value),
|
||||
&slot[0..spills],
|
||||
);
|
||||
let ss = self.cur
|
||||
.func
|
||||
.stack_slots
|
||||
.get_emergency_slot(self.cur.func.dfg.value_type(value), &slot[0..spills]);
|
||||
slot[to_slot] = ss.into();
|
||||
self.divert.regspill(value, from, ss);
|
||||
self.cur.ins().regspill(value, from, ss);
|
||||
@@ -1013,8 +1003,7 @@ impl<'a> Context<'a> {
|
||||
if match self.cur.func.dfg.value_def(lv.value) {
|
||||
ValueDef::Result(i, _) => i != inst,
|
||||
_ => true,
|
||||
}
|
||||
{
|
||||
} {
|
||||
break;
|
||||
}
|
||||
if lv.is_local || !lv.affinity.is_reg() {
|
||||
@@ -1072,10 +1061,8 @@ impl<'a> Context<'a> {
|
||||
};
|
||||
regs.input.free(rc, loc.unwrap_reg());
|
||||
if !lv.is_local {
|
||||
regs.global.free(
|
||||
rc,
|
||||
self.cur.func.locations[lv.value].unwrap_reg(),
|
||||
);
|
||||
regs.global
|
||||
.free(rc, self.cur.func.locations[lv.value].unwrap_reg());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1096,8 +1083,7 @@ fn program_input_abi(
|
||||
) {
|
||||
for (abi, &value) in abi_types.iter().zip(func.dfg.inst_variable_args(inst)) {
|
||||
if let ArgumentLoc::Reg(reg) = abi.location {
|
||||
if let Affinity::Reg(rci) =
|
||||
liveness
|
||||
if let Affinity::Reg(rci) = liveness
|
||||
.get(value)
|
||||
.expect("ABI register must have live range")
|
||||
.affinity
|
||||
|
||||
@@ -140,13 +140,8 @@ impl Context {
|
||||
}
|
||||
|
||||
// Pass: Coloring.
|
||||
self.coloring.run(
|
||||
isa,
|
||||
func,
|
||||
domtree,
|
||||
&mut self.liveness,
|
||||
&mut self.tracker,
|
||||
);
|
||||
self.coloring
|
||||
.run(isa, func, domtree, &mut self.liveness, &mut self.tracker);
|
||||
|
||||
if isa.flags().enable_verifier() {
|
||||
verify_context(func, cfg, domtree, isa)?;
|
||||
|
||||
@@ -46,7 +46,9 @@ pub struct RegDiversions {
|
||||
impl RegDiversions {
|
||||
/// Create a new empty diversion tracker.
|
||||
pub fn new() -> Self {
|
||||
Self { current: Vec::new() }
|
||||
Self {
|
||||
current: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Clear the tracker, preparing for a new EBB.
|
||||
@@ -152,11 +154,10 @@ impl RegDiversions {
|
||||
///
|
||||
/// Returns the `to` location of the removed diversion.
|
||||
pub fn remove(&mut self, value: Value) -> Option<ValueLoc> {
|
||||
self.current.iter().position(|d| d.value == value).map(
|
||||
|i| {
|
||||
self.current.swap_remove(i).to
|
||||
},
|
||||
)
|
||||
self.current
|
||||
.iter()
|
||||
.position(|d| d.value == value)
|
||||
.map(|i| self.current.swap_remove(i).to)
|
||||
}
|
||||
|
||||
/// Return an object that can display the diversions.
|
||||
|
||||
@@ -187,15 +187,15 @@ impl LiveValueTracker {
|
||||
// If the immediate dominator exits, we must have a stored list for it. This is a
|
||||
// requirement to the order EBBs are visited: All dominators must have been processed
|
||||
// before the current EBB.
|
||||
let idom_live_list = self.idom_sets.get(&idom).expect(
|
||||
"No stored live set for dominator",
|
||||
);
|
||||
let idom_live_list = self.idom_sets
|
||||
.get(&idom)
|
||||
.expect("No stored live set for dominator");
|
||||
let ctx = liveness.context(layout);
|
||||
// Get just the values that are live-in to `ebb`.
|
||||
for &value in idom_live_list.as_slice(&self.idom_pool) {
|
||||
let lr = liveness.get(value).expect(
|
||||
"Immediate dominator value has no live range",
|
||||
);
|
||||
let lr = liveness
|
||||
.get(value)
|
||||
.expect("Immediate dominator value has no live range");
|
||||
|
||||
// Check if this value is live-in here.
|
||||
if let Some(endpoint) = lr.livein_local_end(ebb, ctx) {
|
||||
@@ -217,17 +217,13 @@ impl LiveValueTracker {
|
||||
// This is a dead EBB parameter which is not even live into the first
|
||||
// instruction in the EBB.
|
||||
debug_assert_eq!(
|
||||
local_ebb,
|
||||
ebb,
|
||||
local_ebb, ebb,
|
||||
"EBB parameter live range ends at wrong EBB header"
|
||||
);
|
||||
// Give this value a fake endpoint that is the first instruction in the EBB.
|
||||
// We expect it to be removed by calling `drop_dead_args()`.
|
||||
self.live.push(
|
||||
value,
|
||||
layout.first_inst(ebb).expect("Empty EBB"),
|
||||
lr,
|
||||
);
|
||||
self.live
|
||||
.push(value, layout.first_inst(ebb).expect("Empty EBB"), lr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -179,7 +179,7 @@ use entity::SparseMap;
|
||||
use flowgraph::ControlFlowGraph;
|
||||
use ir::dfg::ValueDef;
|
||||
use ir::{Ebb, Function, Inst, Layout, ProgramPoint, Value};
|
||||
use isa::{EncInfo, TargetIsa, OperandConstraint};
|
||||
use isa::{EncInfo, OperandConstraint, TargetIsa};
|
||||
use regalloc::affinity::Affinity;
|
||||
use regalloc::liverange::{LiveRange, LiveRangeContext, LiveRangeForest};
|
||||
use std::mem;
|
||||
@@ -217,9 +217,9 @@ fn get_or_create<'a>(
|
||||
.map(Affinity::new)
|
||||
.or_else(|| {
|
||||
// If this is a call, get the return value affinity.
|
||||
func.dfg.call_signature(inst).map(|sig| {
|
||||
Affinity::abi(&func.dfg.signatures[sig].returns[rnum], isa)
|
||||
})
|
||||
func.dfg
|
||||
.call_signature(inst)
|
||||
.map(|sig| Affinity::abi(&func.dfg.signatures[sig].returns[rnum], isa))
|
||||
})
|
||||
.unwrap_or_default();
|
||||
}
|
||||
@@ -336,9 +336,8 @@ impl Liveness {
|
||||
where
|
||||
PP: Into<ProgramPoint>,
|
||||
{
|
||||
let old = self.ranges.insert(
|
||||
LiveRange::new(value, def.into(), affinity),
|
||||
);
|
||||
let old = self.ranges
|
||||
.insert(LiveRange::new(value, def.into(), affinity));
|
||||
debug_assert!(old.is_none(), "{} already has a live range", value);
|
||||
}
|
||||
|
||||
|
||||
@@ -249,13 +249,12 @@ impl<PO: ProgramOrder> GenLiveRange<PO> {
|
||||
//
|
||||
// We're assuming here that `to` never precedes `def_begin` in the same EBB, but we can't
|
||||
// check it without a method for getting `to`'s EBB.
|
||||
if order.cmp(ebb, self.def_end) != Ordering::Greater &&
|
||||
order.cmp(to, self.def_begin) != Ordering::Less
if order.cmp(ebb, self.def_end) != Ordering::Greater
&& order.cmp(to, self.def_begin) != Ordering::Less
{
let to_pp = to.into();
debug_assert_ne!(
to_pp,
self.def_begin,
to_pp, self.def_begin,
"Can't use value in the defining instruction."
);
if order.cmp(to, self.def_end) == Ordering::Greater {
@@ -411,8 +410,8 @@ impl<PO: ProgramOrder> GenLiveRange<PO> {
}

// Check for an overlap with the local range.
if ctx.order.cmp(def, self.def_begin) != Ordering::Less &&
ctx.order.cmp(def, self.def_end) == Ordering::Less
if ctx.order.cmp(def, self.def_begin) != Ordering::Less
&& ctx.order.cmp(def, self.def_end) == Ordering::Less
{
return true;
}
@@ -427,8 +426,8 @@ impl<PO: ProgramOrder> GenLiveRange<PO> {
/// Check if this live range reaches a use at `user` in `ebb`.
pub fn reaches_use(&self, user: Inst, ebb: Ebb, ctx: LiveRangeContext<PO>) -> bool {
// Check for an overlap with the local range.
if ctx.order.cmp(user, self.def_begin) == Ordering::Greater &&
ctx.order.cmp(user, self.def_end) != Ordering::Greater
if ctx.order.cmp(user, self.def_begin) == Ordering::Greater
&& ctx.order.cmp(user, self.def_end) != Ordering::Greater
{
return true;
}
@@ -535,8 +534,8 @@ mod tests {
}

assert!(
self.cmp(lr.def_end, begin) == Ordering::Less ||
self.cmp(lr.def_begin, end) == Ordering::Greater,
self.cmp(lr.def_end, begin) == Ordering::Less
|| self.cmp(lr.def_begin, end) == Ordering::Greater,
"Interval can't overlap the def EBB"
);


@@ -2,11 +2,11 @@
//!
//! This module contains data structures and algorithms used for register allocation.

pub mod register_set;
pub mod coloring;
pub mod live_value_tracker;
pub mod liveness;
pub mod liverange;
pub mod register_set;
pub mod virtregs;

mod affinity;
@@ -18,6 +18,6 @@ mod reload;
mod solver;
mod spilling;

pub use self::register_set::RegisterSet;
pub use self::context::Context;
pub use self::diversion::RegDiversions;
pub use self::register_set::RegisterSet;

@@ -114,9 +114,10 @@ impl Pressure {
}

// Compute per-class limits from `usable`.
for (toprc, rc) in p.toprc.iter_mut().take_while(|t| t.num_toprcs > 0).zip(
reginfo.classes,
)
for (toprc, rc) in p.toprc
.iter_mut()
.take_while(|t| t.num_toprcs > 0)
.zip(reginfo.classes)
{
toprc.limit = usable.iter(rc).len() as u32;
toprc.width = rc.width;
@@ -203,16 +204,16 @@ impl Pressure {
///
/// This does not check if there are enough registers available.
pub fn take(&mut self, rc: RegClass) {
self.toprc.get_mut(rc.toprc as usize).map(
|t| t.base_count += 1,
);
self.toprc
.get_mut(rc.toprc as usize)
.map(|t| t.base_count += 1);
}

/// Free a register in `rc`.
pub fn free(&mut self, rc: RegClass) {
self.toprc.get_mut(rc.toprc as usize).map(
|t| t.base_count -= 1,
);
self.toprc
.get_mut(rc.toprc as usize)
.map(|t| t.base_count -= 1);
}

/// Reset all counts to 0, both base and transient.
@@ -229,9 +230,9 @@ impl Pressure {
pub fn take_transient(&mut self, rc: RegClass) -> Result<(), RegClassMask> {
let mask = self.check_avail(rc);
if mask == 0 {
self.toprc.get_mut(rc.toprc as usize).map(|t| {
t.transient_count += 1
});
self.toprc
.get_mut(rc.toprc as usize)
.map(|t| t.transient_count += 1);
Ok(())
} else {
Err(mask)

@@ -104,9 +104,10 @@ impl RegisterSet {
///
/// This assumes that unused bits are 1.
pub fn interferes_with(&self, other: &Self) -> bool {
self.avail.iter().zip(&other.avail).any(
|(&x, &y)| (x | y) != !0,
)
self.avail
.iter()
.zip(&other.avail)
.any(|(&x, &y)| (x | y) != !0)
}

/// Intersect this set of registers with `other`. This has the effect of removing any register
@@ -203,9 +204,10 @@ impl<'a> fmt::Display for DisplayRegisterSet<'a> {
bank.names
.get(offset as usize)
.and_then(|name| name.chars().nth(1))
.unwrap_or_else(
|| char::from_digit(u32::from(offset % 10), 10).unwrap(),
)
.unwrap_or_else(|| char::from_digit(
u32::from(offset % 10),
10
).unwrap())
)?;
}
}

@@ -166,10 +166,10 @@ impl<'a> Context<'a> {
if arg.affinity.is_stack() {
// An incoming register parameter was spilled. Replace the parameter value
// with a temporary register value that is immediately spilled.
let reg = self.cur.func.dfg.replace_ebb_param(
arg.value,
abi.value_type,
);
let reg = self.cur
.func
.dfg
.replace_ebb_param(arg.value, abi.value_type);
let affinity = Affinity::abi(&abi, self.cur.isa);
self.liveness.create_dead(reg, ebb, affinity);
self.insert_spill(ebb, arg.value, reg);
@@ -199,9 +199,9 @@ impl<'a> Context<'a> {
self.cur.use_srcloc(inst);

// Get the operand constraints for `inst` that we are trying to satisfy.
let constraints = self.encinfo.operand_constraints(encoding).expect(
"Missing instruction encoding",
);
let constraints = self.encinfo
.operand_constraints(encoding)
.expect("Missing instruction encoding");

// Identify reload candidates.
debug_assert!(self.candidates.is_empty());
@@ -226,12 +226,8 @@ impl<'a> Context<'a> {
// Create a live range for the new reload.
let affinity = Affinity::Reg(cand.regclass.into());
self.liveness.create_dead(reg, fill, affinity);
self.liveness.extend_locally(
reg,
ebb,
inst,
&self.cur.func.layout,
);
self.liveness
.extend_locally(reg, ebb, inst, &self.cur.func.layout);
}

// Rewrite instruction arguments.
@@ -280,19 +276,18 @@ impl<'a> Context<'a> {
// Same thing for spilled call return values.
let retvals = &defs[constraints.outs.len()..];
if !retvals.is_empty() {
let sig = self.cur.func.dfg.call_signature(inst).expect(
"Extra results on non-call instruction",
);
let sig = self.cur
.func
.dfg
.call_signature(inst)
.expect("Extra results on non-call instruction");
for (i, lv) in retvals.iter().enumerate() {
let abi = self.cur.func.dfg.signatures[sig].returns[i];
debug_assert!(abi.location.is_reg());
if lv.affinity.is_stack() {
let reg = self.cur.func.dfg.replace_result(lv.value, abi.value_type);
self.liveness.create_dead(
reg,
inst,
Affinity::abi(&abi, self.cur.isa),
);
self.liveness
.create_dead(reg, inst, Affinity::abi(&abi, self.cur.isa));
self.insert_spill(ebb, lv.value, reg);
}
}
@@ -355,12 +350,8 @@ impl<'a> Context<'a> {

// Update live ranges.
self.liveness.move_def_locally(stack, inst);
self.liveness.extend_locally(
reg,
ebb,
inst,
&self.cur.func.layout,
);
self.liveness
.extend_locally(reg, ebb, inst, &self.cur.func.layout);
}
}


@@ -297,8 +297,7 @@ impl Move {
#[cfg_attr(feature = "cargo-clippy", allow(wrong_self_convention))]
fn from_reg(&self) -> Option<(RegClass, RegUnit)> {
match *self {
Move::Reg { rc, from, .. } |
Move::Spill { rc, from, .. } => Some((rc, from)),
Move::Reg { rc, from, .. } | Move::Spill { rc, from, .. } => Some((rc, from)),
Move::Fill { .. } => None,
}
}
@@ -306,8 +305,7 @@ impl Move {
/// Get the "to" register and register class, if possible.
fn to_reg(&self) -> Option<(RegClass, RegUnit)> {
match *self {
Move::Reg { rc, to, .. } |
Move::Fill { rc, to, .. } => Some((rc, to)),
Move::Reg { rc, to, .. } | Move::Fill { rc, to, .. } => Some((rc, to)),
Move::Spill { .. } => None,
}
}
@@ -316,8 +314,7 @@ impl Move {
fn replace_to_reg(&mut self, new: RegUnit) -> RegUnit {
mem::replace(
match *self {
Move::Reg { ref mut to, .. } |
Move::Fill { ref mut to, .. } => to,
Move::Reg { ref mut to, .. } | Move::Fill { ref mut to, .. } => to,
Move::Spill { .. } => panic!("No to register in a spill {}", self),
},
new,
@@ -348,18 +345,14 @@ impl Move {
/// Get the value being moved.
fn value(&self) -> Value {
match *self {
Move::Reg { value, .. } |
Move::Fill { value, .. } |
Move::Spill { value, .. } => value,
Move::Reg { value, .. } | Move::Fill { value, .. } | Move::Spill { value, .. } => value,
}
}

/// Get the associated register class.
fn rc(&self) -> RegClass {
match *self {
Move::Reg { rc, .. } |
Move::Fill { rc, .. } |
Move::Spill { rc, .. } => rc,
Move::Reg { rc, .. } | Move::Fill { rc, .. } | Move::Spill { rc, .. } => rc,
}
}
}
@@ -372,46 +365,40 @@ impl fmt::Display for Move {
from,
to,
rc,
} => {
write!(
} => write!(
f,
"{}:{}({} -> {})",
value,
rc,
rc.info.display_regunit(from),
rc.info.display_regunit(to)
)
}
),
Move::Spill {
value,
from,
to_slot,
rc,
} => {
write!(
} => write!(
f,
"{}:{}({} -> slot {})",
value,
rc,
rc.info.display_regunit(from),
to_slot
)
}
),
Move::Fill {
value,
from_slot,
to,
rc,
} => {
write!(
} => write!(
f,
"{}:{}(slot {} -> {})",
value,
rc,
from_slot,
rc.info.display_regunit(to)
)
}
),
}
}
}
@@ -824,9 +811,8 @@ impl Solver {
/// This is similar to `add_var`, except the value doesn't have a prior register assignment.
pub fn add_def(&mut self, value: Value, constraint: RegClass, is_global: bool) {
debug_assert!(self.inputs_done);
self.vars.push(
Variable::new_def(value, constraint, is_global),
);
self.vars
.push(Variable::new_def(value, constraint, is_global));
}

/// Clear the `is_global` flag on all solver variables.
@@ -992,9 +978,8 @@ impl Solver {

// Convert all of the fixed register assignments into moves, but omit the ones that are
// already in the right register.
self.moves.extend(self.assignments.values().filter_map(
Move::with_assignment,
));
self.moves
.extend(self.assignments.values().filter_map(Move::with_assignment));

if !(self.moves.is_empty()) {
dbg!("collect_moves: {}", DisplayList(&self.moves));
@@ -1029,8 +1014,7 @@ impl Solver {
if let Some(j) = self.moves[i..].iter().position(|m| match m.to_reg() {
Some((rc, reg)) => avail.is_avail(rc, reg),
None => true,
})
{
}) {
// This move can be executed now.
self.moves.swap(i, i + j);
let m = &self.moves[i];
@@ -1164,9 +1148,11 @@ mod tests {

// Get a register class by name.
fn rc_by_name(reginfo: &RegInfo, name: &str) -> RegClass {
reginfo.classes.iter().find(|rc| rc.name == name).expect(
"Can't find named register class.",
)
reginfo
.classes
.iter()
.find(|rc| rc.name == name)
.expect("Can't find named register class.")
}

// Construct a register move.

@@ -125,10 +125,8 @@ impl<'a> Context<'a> {
self.process_spills(tracker);

while let Some(inst) = self.cur.next_inst() {
if let Some(constraints) =
self.encinfo.operand_constraints(
self.cur.func.encodings[inst],
)
if let Some(constraints) = self.encinfo
.operand_constraints(self.cur.func.encodings[inst])
{
self.visit_inst(inst, ebb, constraints, tracker);
} else {
@@ -283,13 +281,11 @@ impl<'a> Context<'a> {
dbg!("Need {} reg from {} throughs", op.regclass, throughs.len());
match self.spill_candidate(mask, throughs) {
Some(cand) => self.spill_reg(cand),
None => {
panic!(
None => panic!(
"Ran out of {} registers for {}",
op.regclass,
self.cur.display_inst(inst)
)
}
),
}
}
}
@@ -349,8 +345,7 @@ impl<'a> Context<'a> {
.constraints()
.fixed_value_arguments();
let args = self.cur.func.dfg.inst_variable_args(inst);
for (idx, (abi, &arg)) in
self.cur.func.dfg.signatures[sig]
for (idx, (abi, &arg)) in self.cur.func.dfg.signatures[sig]
.params
.iter()
.zip(args)
@@ -393,9 +388,9 @@ impl<'a> Context<'a> {
} else if ru.fixed {
// This is a fixed register use which doesn't necessarily require a copy.
// Make a copy only if this is not the first use of the value.
self.reg_uses.get(i.wrapping_sub(1)).map_or(false, |ru2| {
ru2.value == ru.value
})
self.reg_uses
.get(i.wrapping_sub(1))
.map_or(false, |ru2| ru2.value == ru.value)
} else {
false
};
@@ -430,13 +425,11 @@ impl<'a> Context<'a> {
)
} {
Some(cand) => self.spill_reg(cand),
None => {
panic!(
None => panic!(
"Ran out of {} registers when inserting copy before {}",
rc,
self.cur.display_inst(inst)
)
}
),
}
}
}
@@ -501,9 +494,10 @@ impl<'a> Context<'a> {
}

// Assign a spill slot for the whole virtual register.
let ss = self.cur.func.stack_slots.make_spill_slot(
self.cur.func.dfg.value_type(value),
);
let ss = self.cur
.func
.stack_slots
.make_spill_slot(self.cur.func.dfg.value_type(value));
for &v in self.virtregs.congruence_class(&value) {
self.liveness.spill(v);
self.cur.func.locations[v] = ValueLoc::Stack(ss);

@@ -101,10 +101,8 @@ impl VirtRegs {
where
'a: 'b,
{
self.get(*value).map_or_else(
|| ref_slice(value),
|vr| self.values(vr),
)
self.get(*value)
.map_or_else(|| ref_slice(value), |vr| self.values(vr))
}

/// Check if `a` and `b` belong to the same congruence class.
@@ -153,9 +151,9 @@ impl VirtRegs {
});

// Determine the insertion position for `single`.
let index = match self.values(vreg).binary_search_by(
|&v| preorder.pre_cmp_def(v, single, func),
) {
let index = match self.values(vreg)
.binary_search_by(|&v| preorder.pre_cmp_def(v, single, func))
{
Ok(_) => panic!("{} already in {}", single, vreg),
Err(i) => i,
};
@@ -181,9 +179,9 @@ impl VirtRegs {

/// Allocate a new empty virtual register.
fn alloc(&mut self) -> VirtReg {
self.unused_vregs.pop().unwrap_or_else(|| {
self.vregs.push(Default::default())
})
self.unused_vregs
.pop()
.unwrap_or_else(|| self.vregs.push(Default::default()))
}

/// Unify `values` into a single virtual register.

@@ -12,10 +12,7 @@ pub enum CtonError {
/// This always represents a bug, either in the code that generated IR for Cretonne, or a bug
/// in Cretonne itself.
#[fail(display = "Verifier error: {}", _0)]
Verifier(
#[cause]
verifier::Error
),
Verifier(#[cause] verifier::Error),

/// An implementation limit was exceeded.
///

@@ -22,9 +22,9 @@

use constant_hash::{probe, simple_hash};
use isa::TargetIsa;
use std::boxed::Box;
use std::fmt;
use std::result;
use std::boxed::Box;
use std::str;

/// A string-based configurator for settings groups.

@@ -9,9 +9,9 @@ use timing;

/// Test whether the given opcode is unsafe to even consider for GVN.
fn trivially_unsafe_for_gvn(opcode: Opcode) -> bool {
opcode.is_call() || opcode.is_branch() || opcode.is_terminator() ||
opcode.is_return() || opcode.can_trap() || opcode.other_side_effects() ||
opcode.can_store() || opcode.can_load() || opcode.writes_cpu_flags()
opcode.is_call() || opcode.is_branch() || opcode.is_terminator() || opcode.is_return()
|| opcode.can_trap() || opcode.other_side_effects() || opcode.can_store()
|| opcode.can_load() || opcode.writes_cpu_flags()
}

/// Perform simple GVN on `func`.

@@ -55,9 +55,9 @@ pub fn layout_stack(frame: &mut StackSlots, alignment: StackSize) -> Result<Stac
.ok_or(CtonError::ImplLimitExceeded)?;
outgoing_max = max(outgoing_max, offset);
}
StackSlotKind::SpillSlot |
StackSlotKind::ExplicitSlot |
StackSlotKind::EmergencySlot => {
StackSlotKind::SpillSlot
| StackSlotKind::ExplicitSlot
| StackSlotKind::EmergencySlot => {
// Determine the smallest alignment of any explicit or spill slot.
min_align = slot.alignment(min_align);
}
@@ -73,20 +73,19 @@ pub fn layout_stack(frame: &mut StackSlots, alignment: StackSize) -> Result<Stac
for slot in frame.values_mut() {
// Pick out explicit and spill slots with exact alignment `min_align`.
match slot.kind {
StackSlotKind::SpillSlot |
StackSlotKind::ExplicitSlot |
StackSlotKind::EmergencySlot => {
StackSlotKind::SpillSlot
| StackSlotKind::ExplicitSlot
| StackSlotKind::EmergencySlot => {
if slot.alignment(alignment) != min_align {
continue;
}
}
StackSlotKind::IncomingArg |
StackSlotKind::OutgoingArg => continue,
StackSlotKind::IncomingArg | StackSlotKind::OutgoingArg => continue,
}

offset = offset.checked_sub(slot.size as StackOffset).ok_or(
CtonError::ImplLimitExceeded,
)?;
offset = offset
.checked_sub(slot.size as StackOffset)
.ok_or(CtonError::ImplLimitExceeded)?;

// Aligning the negative offset can never cause overflow. We're only clearing bits.
offset &= -(min_align as StackOffset);
@@ -98,9 +97,9 @@ pub fn layout_stack(frame: &mut StackSlots, alignment: StackSize) -> Result<Stac
}

// Finally, make room for the outgoing arguments.
offset = offset.checked_sub(outgoing_max).ok_or(
CtonError::ImplLimitExceeded,
)?;
offset = offset
.checked_sub(outgoing_max)
.ok_or(CtonError::ImplLimitExceeded)?;
offset &= -(alignment as StackOffset);

let frame_size = (offset as StackSize).wrapping_neg();

@@ -209,12 +209,11 @@ mod details {

/// Add `timings` to the accumulated timings for the current thread.
pub fn add_to_current(times: &PassTimes) {
PASS_TIME.with(|rc| for (a, b) in rc.borrow_mut().pass.iter_mut().zip(
&times.pass,
)
{
PASS_TIME.with(|rc| {
for (a, b) in rc.borrow_mut().pass.iter_mut().zip(&times.pass) {
a.total += b.total;
a.child += b.child;
}
})
}
}

@@ -92,8 +92,8 @@ impl<'a> CssaVerifier<'a> {
}

// Enforce topological ordering of defs in the virtual register.
if self.preorder.dominates(def_ebb, prev_ebb) &&
self.domtree.dominates(def, prev_def, &self.func.layout)
if self.preorder.dominates(def_ebb, prev_ebb)
&& self.domtree.dominates(def, prev_def, &self.func.layout)
{
return err!(
val,
@@ -112,8 +112,8 @@ impl<'a> CssaVerifier<'a> {
let prev_def: ExpandedProgramPoint = self.func.dfg.value_def(prev_val).into();
let prev_ebb = self.func.layout.pp_ebb(prev_def);

if self.preorder.dominates(prev_ebb, def_ebb) &&
self.domtree.dominates(prev_def, def, &self.func.layout)
if self.preorder.dominates(prev_ebb, def_ebb)
&& self.domtree.dominates(prev_def, def, &self.func.layout)
{
let ctx = self.liveness.context(&self.func.layout);
if self.liveness[prev_val].overlaps_def(def, def_ebb, ctx) {

@@ -127,8 +127,8 @@ impl<'a> LivenessVerifier<'a> {
let ctx = self.liveness.context(&self.func.layout);

// Check if `inst` is in the def range, not including the def itself.
if ctx.order.cmp(lr.def(), inst) == Ordering::Less &&
ctx.order.cmp(inst, lr.def_local_end()) != Ordering::Greater
if ctx.order.cmp(lr.def(), inst) == Ordering::Less
&& ctx.order.cmp(inst, lr.def_local_end()) != Ordering::Greater
{
return true;
}

@@ -89,9 +89,9 @@ impl<'a> LocationVerifier<'a> {
enc: isa::Encoding,
divert: &RegDiversions,
) -> Result {
let constraints = self.encinfo.operand_constraints(enc).expect(
"check_enc_constraints requires a legal encoding",
);
let constraints = self.encinfo
.operand_constraints(enc)
.expect("check_enc_constraints requires a legal encoding");

if constraints.satisfied(inst, divert, self.func) {
return Ok(());
@@ -235,8 +235,8 @@ impl<'a> LocationVerifier<'a> {
/// Update diversions to reflect the current instruction and check their consistency.
fn update_diversions(&self, inst: ir::Inst, divert: &mut RegDiversions) -> Result {
let (arg, src) = match self.func.dfg[inst] {
ir::InstructionData::RegMove { arg, src, .. } |
ir::InstructionData::RegSpill { arg, src, .. } => (arg, ir::ValueLoc::Reg(src)),
ir::InstructionData::RegMove { arg, src, .. }
| ir::InstructionData::RegSpill { arg, src, .. } => (arg, ir::ValueLoc::Reg(src)),
ir::InstructionData::RegFill { arg, src, .. } => (arg, ir::ValueLoc::Stack(src)),
_ => return Ok(()),
};
@@ -275,12 +275,10 @@ impl<'a> LocationVerifier<'a> {
let dfg = &self.func.dfg;

match dfg.analyze_branch(inst) {
NotABranch => {
panic!(
NotABranch => panic!(
"No branch information for {}",
dfg.display_inst(inst, self.isa)
)
}
),
SingleDest(ebb, _) => {
for d in divert.all() {
let lr = &liveness[d.value];

@@ -237,9 +237,8 @@ impl<'a> Verifier<'a> {

let fixed_results = inst_data.opcode().constraints().fixed_results();
// var_results is 0 if we aren't a call instruction
let var_results = dfg.call_signature(inst).map_or(0, |sig| {
dfg.signatures[sig].returns.len()
});
let var_results = dfg.call_signature(inst)
.map_or(0, |sig| dfg.signatures[sig].returns.len());
let total_results = fixed_results + var_results;

// All result values for multi-valued instructions are created
@@ -281,23 +280,23 @@ impl<'a> Verifier<'a> {
destination,
ref args,
..
} |
Branch {
}
| Branch {
destination,
ref args,
..
} |
BranchInt {
}
| BranchInt {
destination,
ref args,
..
} |
BranchFloat {
}
| BranchFloat {
destination,
ref args,
..
} |
BranchIcmp {
}
| BranchIcmp {
destination,
ref args,
..
@@ -308,19 +307,22 @@ impl<'a> Verifier<'a> {
BranchTable { table, .. } => {
self.verify_jump_table(inst, table)?;
}
Call { func_ref, ref args, .. } => {
Call {
func_ref, ref args, ..
} => {
self.verify_func_ref(inst, func_ref)?;
self.verify_value_list(inst, args)?;
}
CallIndirect { sig_ref, ref args, .. } => {
CallIndirect {
sig_ref, ref args, ..
} => {
self.verify_sig_ref(inst, sig_ref)?;
self.verify_value_list(inst, args)?;
}
FuncAddr { func_ref, .. } => {
self.verify_func_ref(inst, func_ref)?;
}
StackLoad { stack_slot, .. } |
StackStore { stack_slot, .. } => {
StackLoad { stack_slot, .. } | StackStore { stack_slot, .. } => {
self.verify_stack_slot(inst, stack_slot)?;
}
UnaryGlobalVar { global_var, .. } => {
@@ -343,31 +345,31 @@ impl<'a> Verifier<'a> {
}

// Exhaustive list so we can't forget to add new formats
Unary { .. } |
UnaryImm { .. } |
UnaryIeee32 { .. } |
UnaryIeee64 { .. } |
UnaryBool { .. } |
Binary { .. } |
BinaryImm { .. } |
Ternary { .. } |
InsertLane { .. } |
ExtractLane { .. } |
IntCompare { .. } |
IntCompareImm { .. } |
IntCond { .. } |
FloatCompare { .. } |
FloatCond { .. } |
IntSelect { .. } |
Load { .. } |
Store { .. } |
RegMove { .. } |
CopySpecial { .. } |
Trap { .. } |
CondTrap { .. } |
IntCondTrap { .. } |
FloatCondTrap { .. } |
NullAry { .. } => {}
Unary { .. }
| UnaryImm { .. }
| UnaryIeee32 { .. }
| UnaryIeee64 { .. }
| UnaryBool { .. }
| Binary { .. }
| BinaryImm { .. }
| Ternary { .. }
| InsertLane { .. }
| ExtractLane { .. }
| IntCompare { .. }
| IntCompareImm { .. }
| IntCond { .. }
| FloatCompare { .. }
| FloatCond { .. }
| IntSelect { .. }
| Load { .. }
| Store { .. }
| RegMove { .. }
| CopySpecial { .. }
| Trap { .. }
| CondTrap { .. }
| IntCondTrap { .. }
| FloatCondTrap { .. }
| NullAry { .. } => {}
}

Ok(())
@@ -480,11 +482,8 @@ impl<'a> Verifier<'a> {
}
// Defining instruction dominates the instruction that uses the value.
if is_reachable {
if !self.expected_domtree.dominates(
def_inst,
loc_inst,
&self.func.layout,
)
if !self.expected_domtree
.dominates(def_inst, loc_inst, &self.func.layout)
{
return err!(loc_inst, "uses value from non-dominating {}", def_inst);
}
@@ -513,12 +512,9 @@ impl<'a> Verifier<'a> {
);
}
// The defining EBB dominates the instruction using this value.
if is_reachable &&
!self.expected_domtree.dominates(
ebb,
loc_inst,
&self.func.layout,
)
if is_reachable
&& !self.expected_domtree
.dominates(ebb, loc_inst, &self.func.layout)
{
return err!(loc_inst, "uses value arg from non-dominating {}", ebb);
}
@@ -542,13 +538,11 @@ impl<'a> Verifier<'a> {
Ok(())
}
}
ValueDef::Param(_, _) => {
err!(
ValueDef::Param(_, _) => err!(
loc_inst,
"instruction result {} is not defined by the instruction",
v
)
}
),
}
}

@@ -576,8 +570,7 @@ impl<'a> Verifier<'a> {
"incorrect number of Ebbs in postorder traversal"
);
}
for (index, (&test_ebb, &true_ebb)) in
domtree
for (index, (&test_ebb, &true_ebb)) in domtree
.cfg_postorder()
.iter()
.zip(self.expected_domtree.cfg_postorder().iter())
@@ -595,11 +588,8 @@ impl<'a> Verifier<'a> {
}
// We verify rpo_cmp on pairs of adjacent ebbs in the postorder
for (&prev_ebb, &next_ebb) in domtree.cfg_postorder().iter().adjacent_pairs() {
if self.expected_domtree.rpo_cmp(
prev_ebb,
next_ebb,
&self.func.layout,
) != Ordering::Greater
if self.expected_domtree
.rpo_cmp(prev_ebb, next_ebb, &self.func.layout) != Ordering::Greater
{
return err!(
next_ebb,
@@ -737,9 +727,11 @@ impl<'a> Verifier<'a> {
fn typecheck_variable_args(&self, inst: Inst) -> Result {
match self.func.dfg.analyze_branch(inst) {
BranchInfo::SingleDest(ebb, _) => {
let iter = self.func.dfg.ebb_params(ebb).iter().map(|&v| {
self.func.dfg.value_type(v)
});
let iter = self.func
.dfg
.ebb_params(ebb)
.iter()
.map(|&v| self.func.dfg.value_type(v));
self.typecheck_variable_args_iterator(inst, iter)?;
}
BranchInfo::Table(table) => {
@@ -761,16 +753,18 @@ impl<'a> Verifier<'a> {
match self.func.dfg[inst].analyze_call(&self.func.dfg.value_lists) {
CallInfo::Direct(func_ref, _) => {
let sig_ref = self.func.dfg.ext_funcs[func_ref].signature;
let arg_types = self.func.dfg.signatures[sig_ref].params.iter().map(|a| {
a.value_type
});
let arg_types = self.func.dfg.signatures[sig_ref]
.params
.iter()
.map(|a| a.value_type);
self.typecheck_variable_args_iterator(inst, arg_types)?;
self.check_outgoing_args(inst, sig_ref)?;
}
CallInfo::Indirect(sig_ref, _) => {
let arg_types = self.func.dfg.signatures[sig_ref].params.iter().map(|a| {
a.value_type
});
let arg_types = self.func.dfg.signatures[sig_ref]
.params
.iter()
.map(|a| a.value_type);
self.typecheck_variable_args_iterator(inst, arg_types)?;
self.check_outgoing_args(inst, sig_ref)?;
}
@@ -1047,8 +1041,7 @@ impl<'a> Verifier<'a> {
&self.func,
&self.func.dfg[inst],
self.func.dfg.ctrl_typevar(inst),
)
{
) {
if !possible_encodings.is_empty() {
possible_encodings.push_str(", ");
multiple_encodings = true;
@@ -1119,8 +1112,7 @@ impl<'a> Verifier<'a> {
fn verify_return_at_end(&self) -> Result {
for ebb in self.func.layout.ebbs() {
let inst = self.func.layout.last_inst(ebb).unwrap();
if self.func.dfg[inst].opcode().is_return() &&
Some(ebb) != self.func.layout.last_ebb()
if self.func.dfg[inst].opcode().is_return() && Some(ebb) != self.func.layout.last_ebb()
{
return err!(inst, "Internal return not allowed with return_at_end=1");
}
@@ -1155,8 +1147,8 @@ impl<'a> Verifier<'a> {
mod tests {
use super::{Error, Verifier};
use entity::EntityList;
use ir::instructions::{InstructionData, Opcode};
use ir::Function;
use ir::instructions::{InstructionData, Opcode};
use settings;

macro_rules! assert_err_with_msg {

@@ -346,10 +346,12 @@ pub fn write_operands(
write_ebb_args(w, &args[2..])
}
BranchTable { arg, table, .. } => write!(w, " {}, {}", arg, table),
Call { func_ref, ref args, .. } => {
write!(w, " {}({})", func_ref, DisplayValues(args.as_slice(pool)))
}
CallIndirect { sig_ref, ref args, .. } => {
Call {
func_ref, ref args, ..
} => write!(w, " {}({})", func_ref, DisplayValues(args.as_slice(pool))),
CallIndirect {
sig_ref, ref args, ..
} => {
let args = args.as_slice(pool);
write!(
w,
@@ -360,7 +362,9 @@ pub fn write_operands(
)
}
FuncAddr { func_ref, .. } => write!(w, " {}", func_ref),
StackLoad { stack_slot, offset, .. } => write!(w, " {}{}", stack_slot, offset),
StackLoad {
stack_slot, offset, ..
} => write!(w, " {}{}", stack_slot, offset),
StackStore {
arg,
stack_slot,
@@ -368,7 +372,9 @@ pub fn write_operands(
..
} => write!(w, " {}, {}{}", arg, stack_slot, offset),
HeapAddr { heap, arg, imm, .. } => write!(w, " {}, {}, {}", heap, arg, imm),
Load { flags, arg, offset, .. } => write!(w, "{} {}{}", flags, arg, offset),
Load {
flags, arg, offset, ..
} => write!(w, "{} {}{}", flags, arg, offset),
LoadComplex {
flags,
ref args,
@@ -383,7 +389,6 @@ pub fn write_operands(
DisplayValuesWithDelimiter(&args, '+'),
offset
)

}
Store {
flags,
@@ -452,8 +457,12 @@ pub fn write_operands(
}
Trap { code, .. } => write!(w, " {}", code),
CondTrap { arg, code, .. } => write!(w, " {}, {}", arg, code),
IntCondTrap { cond, arg, code, .. } => write!(w, " {} {}, {}", cond, arg, code),
FloatCondTrap { cond, arg, code, .. } => write!(w, " {} {}, {}", cond, arg, code),
IntCondTrap {
cond, arg, code, ..
} => write!(w, " {} {}, {}", cond, arg, code),
FloatCondTrap {
cond, arg, code, ..
} => write!(w, " {} {}, {}", cond, arg, code),
}
}


@@ -33,19 +33,10 @@
#![warn(unused_import_braces)]
#![cfg_attr(feature = "std", warn(unstable_features))]
#![cfg_attr(feature = "clippy", plugin(clippy(conf_file = "../../clippy.toml")))]
#![cfg_attr(feature = "cargo-clippy", allow(new_without_default, new_without_default_derive))]
#![cfg_attr(feature = "cargo-clippy",
allow(new_without_default, new_without_default_derive))]
#![cfg_attr(feature="cargo-clippy", warn(
float_arithmetic,
mut_mut,
nonminimal_bool,
option_map_unwrap_or,
option_map_unwrap_or_else,
print_stdout,
unicode_not_nfc,
use_self,
))]

warn(float_arithmetic, mut_mut, nonminimal_bool, option_map_unwrap_or,
option_map_unwrap_or_else, print_stdout, unicode_not_nfc, use_self))]
// Turns on no_std and alloc features if std is not available.
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(feature = "std"), feature(alloc))]

@@ -1,10 +1,10 @@
//! Densely numbered entity references as mapping keys.

use {EntityRef, Iter, IterMut, Keys};
use std::marker::PhantomData;
use std::ops::{Index, IndexMut};
use std::slice;
use std::vec::Vec;
use {EntityRef, Iter, IterMut, Keys};

/// A mapping `K -> V` for densely indexed entity references.
///

@@ -33,7 +33,11 @@ impl<T: ReservedValue> PackedOption<T> {

/// Expand the packed option into a normal `Option`.
pub fn expand(self) -> Option<T> {
if self.is_none() { None } else { Some(self.0) }
if self.is_none() {
None
} else {
Some(self.0)
}
}

/// Maps a `PackedOption<T>` to `Option<U>` by applying a function to a contained value.

@@ -1,9 +1,9 @@
//! Densely numbered entity references as mapping keys.
use {EntityRef, Iter, IterMut, Keys};
use std::marker::PhantomData;
use std::ops::{Index, IndexMut};
use std::slice;
use std::vec::Vec;
use {EntityRef, Iter, IterMut, Keys};

/// A primary mapping `K -> V` allocating dense entity references.
///

@@ -1,8 +1,8 @@
//! Densely numbered entity references as set keys.

use {EntityRef, Keys};
use std::marker::PhantomData;
use std::vec::Vec;
use {EntityRef, Keys};

/// A set of `K` for densely indexed entity references.
///

@@ -7,11 +7,11 @@
//! > Briggs, Torczon, *An efficient representation for sparse sets*,
//! ACM Letters on Programming Languages and Systems, Volume 2, Issue 1-4, March-Dec. 1993.

use {EntityMap, EntityRef};
use std::mem;
use std::slice;
use std::u32;
use std::vec::Vec;
use {EntityMap, EntityRef};

/// Trait for extracting keys from values stored in a `SparseMap`.
///

@@ -1,13 +1,13 @@
//! Defines `FaerieBackend`.

use container;
use cretonne_codegen::binemit::{Addend, CodeOffset, Reloc, RelocSink, NullTrapSink};
use cretonne_codegen::binemit::{Addend, CodeOffset, NullTrapSink, Reloc, RelocSink};
use cretonne_codegen::isa::TargetIsa;
use cretonne_codegen::{self, binemit, ir};
use cretonne_module::{Backend, DataContext, Linkage, ModuleNamespace, Init, DataDescription,
ModuleError};
use failure::Error;
use cretonne_module::{Backend, DataContext, DataDescription, Init, Linkage, ModuleError,
ModuleNamespace};
use faerie;
use failure::Error;
use std::fs::File;
use target;
use traps::{FaerieTrapManifest, FaerieTrapSink};
@@ -33,7 +33,6 @@ pub struct FaerieBuilder {
libcall_names: Box<Fn(ir::LibCall) -> String>,
}


impl FaerieBuilder {
/// Create a new `FaerieBuilder` using the given Cretonne target, that
/// can be passed to
@@ -89,7 +88,6 @@ impl FaerieBuilder {
}
}


/// A `FaerieBackend` implements `Backend` and emits ".o" files using the `faerie` library.
pub struct FaerieBackend {
isa: Box<TargetIsa>,
@@ -192,9 +190,9 @@ impl Backend for FaerieBackend {
}
}

self.artifact.define(name, code).expect(
"inconsistent declaration",
);
self.artifact
.define(name, code)
.expect("inconsistent declaration");
Ok(FaerieCompiledFunction {})
}

@@ -239,8 +237,7 @@ impl Backend for FaerieBackend {
}
for &(offset, id, addend) in data_relocs {
debug_assert_eq!(
addend,
0,
addend, 0,
"faerie doesn't support addends in data section relocations yet"
);
let to = &namespace.get_data_decl(&data_decls[id]).name;
@@ -253,9 +250,9 @@ impl Backend for FaerieBackend {
.map_err(|e| ModuleError::Backend(format!("{}", e)))?;
}

self.artifact.define(name, bytes).expect(
"inconsistent declaration",
);
self.artifact
.define(name, bytes)
.expect("inconsistent declaration");
Ok(FaerieCompiledData {})
}

@@ -346,25 +343,20 @@ fn translate_function_linkage(linkage: Linkage) -> faerie::Decl {
fn translate_data_linkage(linkage: Linkage, writable: bool) -> faerie::Decl {
match linkage {
Linkage::Import => faerie::Decl::DataImport,
Linkage::Local => {
faerie::Decl::Data {
Linkage::Local => faerie::Decl::Data {
global: false,
writeable: writable,
}
}
Linkage::Export => {
faerie::Decl::Data {
},
Linkage::Export => faerie::Decl::Data {
global: true,
writeable: writable,
}
}
},
Linkage::Preemptible => {
unimplemented!("faerie doesn't support preemptible globals yet");
}
}
}


struct FaerieRelocSink<'a> {
format: container::Format,
artifact: &'a mut faerie::Artifact,

@@ -5,18 +5,10 @@
#![deny(missing_docs, trivial_numeric_casts, unused_extern_crates)]
#![warn(unused_import_braces, unstable_features)]
#![cfg_attr(feature = "clippy", plugin(clippy(conf_file = "../../clippy.toml")))]
#![cfg_attr(feature = "cargo-clippy", allow(new_without_default, new_without_default_derive))]
#![cfg_attr(feature = "cargo-clippy",
allow(new_without_default, new_without_default_derive))]
#![cfg_attr(feature="cargo-clippy", warn(
float_arithmetic,
mut_mut,
nonminimal_bool,
option_map_unwrap_or,
option_map_unwrap_or_else,
print_stdout,
unicode_not_nfc,
use_self,
))]
warn(float_arithmetic, mut_mut, nonminimal_bool, option_map_unwrap_or,
option_map_unwrap_or_else, print_stdout, unicode_not_nfc, use_self))]

extern crate cretonne_codegen;
extern crate cretonne_module;
@@ -29,5 +21,5 @@ mod container;
mod target;
pub mod traps;

pub use backend::{FaerieBuilder, FaerieBackend, FaerieProduct, FaerieTrapCollection};
pub use backend::{FaerieBackend, FaerieBuilder, FaerieProduct, FaerieTrapCollection};
pub use container::Format;

@@ -13,8 +13,9 @@ pub fn translate(isa: &isa::TargetIsa) -> Result<Target, ModuleError> {
}),
"arm32" => Ok(Target::ARMv7),
"arm64" => Ok(Target::ARM64),
_ => Err(ModuleError::Backend(
format!("unsupported faerie isa: {}", name),
)),
_ => Err(ModuleError::Backend(format!(
"unsupported faerie isa: {}",
name
))),
}
}

@@ -1,7 +1,7 @@
//! Faerie trap manifests record every `TrapCode` that cretonne outputs during code generation,
//! for every function in the module. This data may be useful at runtime.

use cretonne_codegen::{ir, binemit};
use cretonne_codegen::{binemit, ir};

/// Record of the arguments cretonne passes to `TrapSink::trap`
pub struct FaerieTrapSite {
@@ -23,7 +23,6 @@ pub struct FaerieTrapSink {
pub sites: Vec<FaerieTrapSite>,
}


impl FaerieTrapSink {
/// Create an empty `FaerieTrapSink`
pub fn new(name: &str, code_size: u32) -> Self {

@@ -47,9 +47,7 @@ impl ConcurrentRunner {
heartbeat_thread(reply_tx.clone());

let handles = (0..num_cpus::get())
.map(|num| {
worker_thread(num, request_mutex.clone(), reply_tx.clone())
})
.map(|num| worker_thread(num, request_mutex.clone(), reply_tx.clone()))
.collect();

Self {
@@ -101,8 +99,10 @@ impl ConcurrentRunner {
fn heartbeat_thread(replies: Sender<Reply>) -> thread::JoinHandle<()> {
thread::Builder::new()
.name("heartbeat".to_string())
.spawn(move || while replies.send(Reply::Tick).is_ok() {
.spawn(move || {
while replies.send(Reply::Tick).is_ok() {
thread::sleep(Duration::from_secs(1));
}
})
.unwrap()
}

@@ -9,14 +9,9 @@
type_complexity,
// Rustfmt 0.9.0 is at odds with this lint:
block_in_if_condition_stmt))]
#![cfg_attr(feature="cargo-clippy", warn(
mut_mut,
nonminimal_bool,
option_map_unwrap_or,
option_map_unwrap_or_else,
unicode_not_nfc,
use_self,
))]
#![cfg_attr(feature = "cargo-clippy",
warn(mut_mut, nonminimal_bool, option_map_unwrap_or, option_map_unwrap_or_else,
unicode_not_nfc, use_self))]

#[macro_use(dbg)]
extern crate cretonne_codegen;

@@ -40,15 +40,13 @@ impl Display for QueueEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let p = self.path.to_string_lossy();
match self.state {
State::Done(Ok(dur)) => {
write!(
State::Done(Ok(dur)) => write!(
f,
"{}.{:03} {}",
dur.as_secs(),
dur.subsec_nanos() / 1_000_000,
p
)
}
),
State::Done(Err(ref e)) => write!(f, "FAIL {}: {}", p, e),
_ => write!(f, "{}", p),
}
@@ -180,7 +178,11 @@ impl TestRunner {
/// Report on the next in-order job, if it's done.
fn report_job(&self) -> bool {
let jobid = self.reported_tests;
if let Some(&QueueEntry { state: State::Done(ref result), .. }) = self.tests.get(jobid) {
if let Some(&QueueEntry {
state: State::Done(ref result),
..
}) = self.tests.get(jobid)
{
if self.verbose || result.is_err() {
println!("{}", self.tests[jobid]);
}
@@ -283,7 +285,10 @@ impl TestRunner {
let mut times = self.tests
.iter()
.filter_map(|entry| match *entry {
QueueEntry { state: State::Done(Ok(dur)), .. } => Some(dur),
QueueEntry {
state: State::Done(Ok(dur)),
..
} => Some(dur),
_ => None,
})
.collect::<Vec<_>>();
@@ -312,10 +317,12 @@ impl TestRunner {
}

for t in self.tests.iter().filter(|entry| match **entry {
QueueEntry { state: State::Done(Ok(dur)), .. } => dur > cut,
QueueEntry {
state: State::Done(Ok(dur)),
..
} => dur > cut,
_ => false,
})
{
}) {
println!("slow: {}", t)
}
}

@@ -129,15 +129,11 @@ fn run_one_test<'a>(

// Should we run the verifier before this test?
if !context.verified && test.needs_verifier() {
verify_function(&func, context.flags_or_isa()).map_err(
|e| {
pretty_verifier_error(&func, isa, &e)
},
)?;
verify_function(&func, context.flags_or_isa())
.map_err(|e| pretty_verifier_error(&func, isa, &e))?;
context.verified = true;
}

test.run(func, context).map_err(
|e| format!("{}: {}", name, e),
)
test.run(func, context)
.map_err(|e| format!("{}: {}", name, e))
}

@@ -70,16 +70,16 @@ pub trait SubTest {
/// Run filecheck on `text`, using directives extracted from `context`.
pub fn run_filecheck(text: &str, context: &Context) -> Result<()> {
let checker = build_filechecker(context)?;
if checker.check(text, NO_VARIABLES).map_err(|e| {
format!("filecheck: {}", e)
})?
if checker
.check(text, NO_VARIABLES)
.map_err(|e| format!("filecheck: {}", e))?
{
Ok(())
} else {
// Filecheck mismatch. Emit an explanation as output.
let (_, explain) = checker.explain(text, NO_VARIABLES).map_err(|e| {
format!("explain: {}", e)
})?;
let (_, explain) = checker
.explain(text, NO_VARIABLES)
.map_err(|e| format!("explain: {}", e))?;
Err(format!("filecheck failed:\n{}{}", checker, explain))
}
}
@@ -89,14 +89,14 @@ pub fn build_filechecker(context: &Context) -> Result<Checker> {
let mut builder = CheckerBuilder::new();
// Preamble comments apply to all functions.
for comment in context.preamble_comments {
builder.directive(comment.text).map_err(|e| {
format!("filecheck: {}", e)
})?;
builder
.directive(comment.text)
.map_err(|e| format!("filecheck: {}", e))?;
}
for comment in &context.details.comments {
builder.directive(comment.text).map_err(|e| {
format!("filecheck: {}", e)
})?;
builder
.directive(comment.text)
.map_err(|e| format!("filecheck: {}", e))?;
}
Ok(builder.finish())
}

@@ -149,8 +149,7 @@ impl SubTest for TestBinEmit {
// If not optimizing, just use the first encoding.
legal_encodings.next()
}
}
{
} {
func.encodings[inst] = enc;
}
}
@@ -159,9 +158,8 @@ impl SubTest for TestBinEmit {
}

// Relax branches and compute EBB offsets based on the encodings.
let code_size = binemit::relax_branches(&mut func, isa).map_err(|e| {
pretty_error(&func, context.isa, e)
})?;
let code_size = binemit::relax_branches(&mut func, isa)
.map_err(|e| pretty_error(&func, context.isa, e))?;

// Collect all of the 'bin:' directives on instructions.
let mut bins = HashMap::new();
@@ -181,8 +179,7 @@ impl SubTest for TestBinEmit {
_ => {
return Err(format!(
"'bin:' directive on non-inst {}: {}",
comment.entity,
comment.text
comment.entity, comment.text
))
}
}
@@ -198,8 +195,7 @@ impl SubTest for TestBinEmit {
divert.clear();
// Correct header offsets should have been computed by `relax_branches()`.
assert_eq!(
sink.offset,
func.offsets[ebb],
sink.offset, func.offsets[ebb],
"Inconsistent {} header offset",
ebb
);
@@ -211,9 +207,10 @@ impl SubTest for TestBinEmit {
// Send legal encodings into the emitter.
if enc.is_legal() {
// Generate a better error message if output locations are not specified.
if let Some(&v) = func.dfg.inst_results(inst).iter().find(|&&v| {
!func.locations[v].is_assigned()
})
if let Some(&v) = func.dfg
.inst_results(inst)
.iter()
.find(|&&v| !func.locations[v].is_assigned())
{
return Err(format!(
"Missing register/stack slot for {} in {}",
@@ -239,9 +236,10 @@ impl SubTest for TestBinEmit {
if !enc.is_legal() {
// A possible cause of an unencoded instruction is a missing location for
// one of the input operands.
if let Some(&v) = func.dfg.inst_args(inst).iter().find(|&&v| {
!func.locations[v].is_assigned()
})
if let Some(&v) = func.dfg
.inst_args(inst)
.iter()
.find(|&&v| !func.locations[v].is_assigned())
{
return Err(format!(
"Missing register/stack slot for {} in {}",
@@ -287,8 +285,7 @@ impl SubTest for TestBinEmit {
if sink.offset != code_size {
return Err(format!(
"Expected code size {}, got {}",
code_size,
sink.offset
code_size, sink.offset
));
}