Move the filetest harness into its own crate.

This allows us to run the tests via a library call rather than only
as a command execution. It's also a step toward a broader goal:
keeping the code in the top-level src directory minimal, with
important functionality exposed as crates.
Author: Dan Gohman
Date:   2018-03-15 13:00:29 -07:00
Parent: 00af7a28f3
Commit: 965b93bd2a
31 changed files with 161 additions and 163 deletions

@@ -0,0 +1,154 @@
//! Run tests concurrently.
//!
//! This module provides the `ConcurrentRunner` struct which uses a pool of threads to run tests
//! concurrently.
use cretonne::timing;
use std::panic::catch_unwind;
use std::path::{Path, PathBuf};
use std::sync::mpsc::{channel, Sender, Receiver};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
use num_cpus;
use {TestResult, runone};
// A request sent to the worker threads, containing the job id and the test path.
struct Request(usize, PathBuf);
/// Reply from a worker thread.
pub enum Reply {
Starting { jobid: usize, thread_num: usize },
Done { jobid: usize, result: TestResult },
Tick,
}
/// Manage threads that run test jobs concurrently.
pub struct ConcurrentRunner {
// Channel for sending requests to the worker threads.
// The workers are sharing the receiver with an `Arc<Mutex<Receiver>>`.
// This is `None` when shutting down.
request_tx: Option<Sender<Request>>,
// Channel for receiving replies from the workers.
// Workers have their own `Sender`.
reply_rx: Receiver<Reply>,
handles: Vec<thread::JoinHandle<timing::PassTimes>>,
}
impl ConcurrentRunner {
/// Create a new `ConcurrentRunner` with threads spun up.
pub fn new() -> Self {
let (request_tx, request_rx) = channel();
let request_mutex = Arc::new(Mutex::new(request_rx));
let (reply_tx, reply_rx) = channel();
heartbeat_thread(reply_tx.clone());
let handles = (0..num_cpus::get())
.map(|num| {
worker_thread(num, request_mutex.clone(), reply_tx.clone())
})
.collect();
Self {
request_tx: Some(request_tx),
reply_rx,
handles,
}
}
/// Shut down the worker threads in an orderly fashion. They will finish any queued jobs first.
pub fn shutdown(&mut self) {
self.request_tx = None;
}
/// Join all the worker threads.
/// Transfer pass timings from the worker threads to the current thread.
pub fn join(&mut self) {
assert!(self.request_tx.is_none(), "must shutdown before join");
for h in self.handles.drain(..) {
match h.join() {
Ok(t) => timing::add_to_current(t),
Err(e) => println!("worker panicked: {:?}", e),
}
}
}
/// Add a new job to the queues.
pub fn put(&mut self, jobid: usize, path: &Path) {
self.request_tx
.as_ref()
.expect("cannot push after shutdown")
.send(Request(jobid, path.to_owned()))
.expect("all the worker threads are gone");
}
/// Get a job reply without blocking.
pub fn try_get(&mut self) -> Option<Reply> {
self.reply_rx.try_recv().ok()
}
/// Get a job reply, blocking until one is available.
pub fn get(&mut self) -> Option<Reply> {
self.reply_rx.recv().ok()
}
}
/// Spawn a heartbeat thread which sends ticks down the reply channel every second.
/// This lets us implement timeouts without the not-yet-stable `recv_timeout`.
fn heartbeat_thread(replies: Sender<Reply>) -> thread::JoinHandle<()> {
thread::Builder::new()
.name("heartbeat".to_string())
.spawn(move || while replies.send(Reply::Tick).is_ok() {
thread::sleep(Duration::from_secs(1));
})
.unwrap()
}
/// Spawn a worker thread running tests.
fn worker_thread(
thread_num: usize,
requests: Arc<Mutex<Receiver<Request>>>,
replies: Sender<Reply>,
) -> thread::JoinHandle<timing::PassTimes> {
thread::Builder::new()
.name(format!("worker #{}", thread_num))
.spawn(move || {
loop {
// Lock the mutex only long enough to extract a request.
let Request(jobid, path) = match requests.lock().unwrap().recv() {
Err(..) => break, // TX end shut down. exit thread.
Ok(req) => req,
};
// Tell them we're starting this job.
// The receiver should always be present for this as long as we have jobs.
replies.send(Reply::Starting { jobid, thread_num }).unwrap();
let result = catch_unwind(|| runone::run(path.as_path())).unwrap_or_else(|e| {
// The test panicked, leaving us a `Box<Any>`.
// Panics are usually strings.
if let Some(msg) = e.downcast_ref::<String>() {
Err(format!("panicked in worker #{}: {}", thread_num, msg))
} else if let Some(msg) = e.downcast_ref::<&'static str>() {
Err(format!("panicked in worker #{}: {}", thread_num, msg))
} else {
Err(format!("panicked in worker #{}", thread_num))
}
});
if let Err(ref msg) = result {
dbg!("FAIL: {}", msg);
}
replies.send(Reply::Done { jobid, result }).unwrap();
}
// Timing is accumulated independently per thread.
// Timings from this worker thread will be aggregated by `ConcurrentRunner::join()`.
timing::take_current()
})
.unwrap()
}
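
A minimal sketch of a driver loop built on this API, to make the flow concrete. The
`run_all` helper and its `paths` argument are hypothetical (the real caller is
`TestRunner` in runner.rs below, which also tracks per-job state and timeouts), and the
sketch assumes it lives inside this crate so it can use the private `concurrent` module.

use std::path::Path;

use concurrent::{ConcurrentRunner, Reply};

fn run_all(paths: &[&str]) {
    let mut runner = ConcurrentRunner::new();
    for (jobid, path) in paths.iter().enumerate() {
        runner.put(jobid, Path::new(path));
    }
    // No more jobs; the workers drain the queue and then exit.
    runner.shutdown();
    let mut done = 0;
    while done < paths.len() {
        match runner.get() {
            Some(Reply::Done { jobid, result }) => {
                done += 1;
                println!("job {}: {:?}", jobid, result);
            }
            // Ignore start notifications and heartbeat ticks in this sketch.
            Some(Reply::Starting { .. }) | Some(Reply::Tick) => {}
            // All workers are gone.
            None => break,
        }
    }
    // Join the workers and fold their pass timings into this thread.
    runner.join();
}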

lib/filetests/src/lib.rs
@@ -0,0 +1,81 @@
//! File tests.
//!
//! This crate contains the main test driver as well as implementations of the
//! available filetest commands.
#[macro_use(dbg)]
extern crate cretonne;
extern crate cton_reader;
extern crate filecheck;
extern crate num_cpus;
use std::path::Path;
use std::time;
use cton_reader::TestCommand;
use runner::TestRunner;
mod concurrent;
mod runner;
mod runone;
mod subtest;
mod match_directive;
mod test_binemit;
mod test_cat;
mod test_compile;
mod test_domtree;
mod test_legalizer;
mod test_licm;
mod test_preopt;
mod test_print_cfg;
mod test_regalloc;
mod test_simple_gvn;
mod test_verifier;
/// The result of running the test in a file.
type TestResult = Result<time::Duration, String>;
/// Main entry point for `cton-util test`.
///
/// Take a list of filenames which can be either `.cton` files or directories.
///
/// Files are interpreted as test cases and executed immediately.
///
/// Directories are scanned recursively for test cases ending in `.cton`. These test cases are
/// executed on background threads.
///
pub fn run(verbose: bool, files: Vec<String>) -> TestResult {
let mut runner = TestRunner::new(verbose);
for path in files.iter().map(Path::new) {
if path.is_file() {
runner.push_test(path);
} else {
runner.push_dir(path);
}
}
runner.start_threads();
runner.run()
}
/// Create a new subcommand trait object to match `parsed.command`.
///
/// This function knows how to create all of the possible `test <foo>` commands that can appear in
/// a `.cton` test file.
fn new_subtest(parsed: &TestCommand) -> subtest::Result<Box<subtest::SubTest>> {
match parsed.command {
"binemit" => test_binemit::subtest(parsed),
"cat" => test_cat::subtest(parsed),
"compile" => test_compile::subtest(parsed),
"domtree" => test_domtree::subtest(parsed),
"legalizer" => test_legalizer::subtest(parsed),
"licm" => test_licm::subtest(parsed),
"preopt" => test_preopt::subtest(parsed),
"print-cfg" => test_print_cfg::subtest(parsed),
"regalloc" => test_regalloc::subtest(parsed),
"simple-gvn" => test_simple_gvn::subtest(parsed),
"verifier" => test_verifier::subtest(parsed),
_ => Err(format!("unknown test command '{}'", parsed.command)),
}
}
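
The commit message's "run the tests via a library call" boils down to this `run` entry
point. A hedged sketch of an external caller (the crate name `cton_filetests` and the
`filetests` directory are assumptions, not confirmed by this diff):

extern crate cton_filetests; // assumed crate name

fn main() {
    // Run everything under a hypothetical test directory, non-verbosely.
    let files = vec!["filetests".to_string()];
    match cton_filetests::run(false, files) {
        Ok(duration) => println!("all tests passed in {:?}", duration),
        Err(msg) => eprintln!("{}", msg),
    }
}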

@@ -0,0 +1,27 @@
/// Look for a directive in a comment string.
/// The directive is of the form "foo:" and should follow the leading `;` in the comment:
///
/// ; dominates: ebb3 ebb4
///
/// Return the comment text following the directive.
pub fn match_directive<'a>(comment: &'a str, directive: &str) -> Option<&'a str> {
assert!(
directive.ends_with(':'),
"Directive must include trailing colon"
);
let text = comment.trim_left_matches(';').trim_left();
if text.starts_with(directive) {
Some(text[directive.len()..].trim())
} else {
None
}
}
#[test]
fn test_match_directive() {
assert_eq!(match_directive("; foo: bar ", "foo:"), Some("bar"));
assert_eq!(match_directive(" foo:bar", "foo:"), Some("bar"));
assert_eq!(match_directive("foo:bar", "foo:"), Some("bar"));
assert_eq!(match_directive(";x foo: bar", "foo:"), None);
assert_eq!(match_directive(";;; foo: bar", "foo:"), Some("bar"));
}
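
The subtests below consume this helper by scanning the comments attached to each
function. A brief sketch of that pattern, as used by e.g. test_domtree.rs (`context` is
the `Context` defined in subtest.rs):

for comment in &context.details.comments {
    if let Some(tail) = match_directive(comment.text, "dominates:") {
        // For the comment "; dominates: ebb3 ebb4", `tail` is "ebb3 ebb4".
        println!("directive tail: {}", tail);
    }
}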

lib/filetests/src/runner.rs
@@ -0,0 +1,339 @@
//! Test runner.
//!
//! This module implements the `TestRunner` struct which manages executing tests as well as
//! scanning directories for tests.
use std::error::Error;
use std::fmt::{self, Display};
use std::ffi::OsStr;
use std::path::{Path, PathBuf};
use std::time;
use {TestResult, runone};
use concurrent::{ConcurrentRunner, Reply};
// Timeout in seconds when we're not making progress.
const TIMEOUT_PANIC: usize = 10;
// Timeout for reporting slow tests without panicking.
const TIMEOUT_SLOW: usize = 3;
struct QueueEntry {
path: PathBuf,
state: State,
}
#[derive(PartialEq, Eq, Debug)]
enum State {
New,
Queued,
Running,
Done(TestResult),
}
impl QueueEntry {
pub fn path(&self) -> &Path {
self.path.as_path()
}
}
impl Display for QueueEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let p = self.path.to_string_lossy();
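// E.g. a test that took 123 ms renders as "0.123 <path>" when done,
// or as "FAIL <path>: <message>" when it failed.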
match self.state {
State::Done(Ok(dur)) => {
write!(
f,
"{}.{:03} {}",
dur.as_secs(),
dur.subsec_nanos() / 1000000,
p
)
}
State::Done(Err(ref e)) => write!(f, "FAIL {}: {}", p, e),
_ => write!(f, "{}", p),
}
}
}
pub struct TestRunner {
verbose: bool,
// Directories that have not yet been scanned.
dir_stack: Vec<PathBuf>,
// Filenames of tests to run.
tests: Vec<QueueEntry>,
// Pointer into `tests` where the `New` entries begin.
new_tests: usize,
// Number of contiguous reported tests at the front of `tests`.
reported_tests: usize,
// Number of errors seen so far.
errors: usize,
// Number of ticks received since we saw any progress.
ticks_since_progress: usize,
threads: Option<ConcurrentRunner>,
}
impl TestRunner {
/// Create a new blank `TestRunner`.
pub fn new(verbose: bool) -> Self {
Self {
verbose,
dir_stack: Vec::new(),
tests: Vec::new(),
new_tests: 0,
reported_tests: 0,
errors: 0,
ticks_since_progress: 0,
threads: None,
}
}
/// Add a directory path to be scanned later.
///
/// If `dir` turns out to be a regular file, it is silently ignored.
/// Otherwise, any problems reading the directory are reported.
pub fn push_dir<P: Into<PathBuf>>(&mut self, dir: P) {
self.dir_stack.push(dir.into());
}
/// Add a test to be executed later.
///
/// Any problems reading `file` as a test case file will be reported as a test failure.
pub fn push_test<P: Into<PathBuf>>(&mut self, file: P) {
self.tests.push(QueueEntry {
path: file.into(),
state: State::New,
});
}
/// Begin running tests concurrently.
pub fn start_threads(&mut self) {
assert!(self.threads.is_none());
self.threads = Some(ConcurrentRunner::new());
}
/// Scan any directories pushed so far.
/// Push any potential test cases found.
pub fn scan_dirs(&mut self) {
// This recursive search tries to minimize statting in a directory hierarchy containing
// mostly test cases.
//
// - Directory entries with a "cton" extension are presumed to be test case files.
// - Directory entries with no extension are presumed to be subdirectories.
// - Anything else is ignored.
//
while let Some(dir) = self.dir_stack.pop() {
match dir.read_dir() {
Err(err) => {
// Fail silently if `dir` was actually a regular file.
// This lets us skip spurious extensionless files without statting everything
// needlessly.
if !dir.is_file() {
self.path_error(dir, err);
}
}
Ok(entries) => {
// Read all directory entries. Avoid statting.
for entry_result in entries {
match entry_result {
Err(err) => {
// Not sure why this would happen. `read_dir` succeeds, but there's
// a problem with an entry. I/O error during a getdirentries
// syscall seems to be the reason. The implementation in
// libstd/sys/unix/fs.rs seems to suggest that breaking now would
// be a good idea, or the iterator could keep returning the same
// error forever.
self.path_error(dir, err);
break;
}
Ok(entry) => {
let path = entry.path();
// Recognize directories and tests by extension.
// Yes, this means we ignore directories with '.' in their name.
match path.extension().and_then(OsStr::to_str) {
Some("cton") => self.push_test(path),
Some(_) => {}
None => self.push_dir(path),
}
}
}
}
}
}
// Get the new jobs running before moving on to the next directory.
self.schedule_jobs();
}
}
/// Report an error related to a path.
fn path_error<E: Error>(&mut self, path: PathBuf, err: E) {
self.errors += 1;
println!("{}: {}", path.to_string_lossy(), err);
}
/// Report on the next in-order job, if it's done.
fn report_job(&self) -> bool {
let jobid = self.reported_tests;
if let Some(&QueueEntry { state: State::Done(ref result), .. }) = self.tests.get(jobid) {
if self.verbose || result.is_err() {
println!("{}", self.tests[jobid]);
}
true
} else {
false
}
}
/// Schedule any new jobs to run.
fn schedule_jobs(&mut self) {
for jobid in self.new_tests..self.tests.len() {
assert_eq!(self.tests[jobid].state, State::New);
if let Some(ref mut conc) = self.threads {
// Queue test for concurrent execution.
self.tests[jobid].state = State::Queued;
conc.put(jobid, self.tests[jobid].path());
} else {
// Run test synchronously.
self.tests[jobid].state = State::Running;
let result = runone::run(self.tests[jobid].path());
self.finish_job(jobid, result);
}
self.new_tests = jobid + 1;
}
// Check for any asynchronous replies without blocking.
while let Some(reply) = self.threads.as_mut().and_then(ConcurrentRunner::try_get) {
self.handle_reply(reply);
}
}
/// Report the end of a job.
fn finish_job(&mut self, jobid: usize, result: TestResult) {
assert_eq!(self.tests[jobid].state, State::Running);
if result.is_err() {
self.errors += 1;
}
self.tests[jobid].state = State::Done(result);
// Report jobs in order.
while self.report_job() {
self.reported_tests += 1;
}
}
/// Handle a reply from the async threads.
fn handle_reply(&mut self, reply: Reply) {
match reply {
Reply::Starting { jobid, .. } => {
assert_eq!(self.tests[jobid].state, State::Queued);
self.tests[jobid].state = State::Running;
}
Reply::Done { jobid, result } => {
self.ticks_since_progress = 0;
self.finish_job(jobid, result)
}
Reply::Tick => {
self.ticks_since_progress += 1;
if self.ticks_since_progress == TIMEOUT_SLOW {
println!(
"STALLED for {} seconds with {}/{} tests finished",
self.ticks_since_progress,
self.reported_tests,
self.tests.len()
);
for jobid in self.reported_tests..self.tests.len() {
if self.tests[jobid].state == State::Running {
println!("slow: {}", self.tests[jobid]);
}
}
}
if self.ticks_since_progress >= TIMEOUT_PANIC {
panic!(
"worker threads stalled for {} seconds.",
self.ticks_since_progress
);
}
}
}
}
/// Drain the async jobs and shut down the threads.
fn drain_threads(&mut self) {
if let Some(mut conc) = self.threads.take() {
conc.shutdown();
while self.reported_tests < self.tests.len() {
match conc.get() {
Some(reply) => self.handle_reply(reply),
None => break,
}
}
conc.join();
}
}
/// Print out a report of slow tests.
fn report_slow_tests(&self) {
// Collect runtimes of succeeded tests.
let mut times = self.tests
.iter()
.filter_map(|entry| match *entry {
QueueEntry { state: State::Done(Ok(dur)), .. } => Some(dur),
_ => None,
})
.collect::<Vec<_>>();
// Get me some real data, kid.
let len = times.len();
if len < 4 {
return;
}
// Compute quartiles.
times.sort();
let qlen = len / 4;
let q1 = times[qlen];
let q3 = times[len - 1 - qlen];
// Inter-quartile range.
let iqr = q3 - q1;
// Cut-off for what we consider a 'slow' test: 3 IQR from the 75% quartile.
//
// Q3 + 1.5 IQR is the conventional cut-off for points that would be plotted as outliers
// on a box plot, but test times have a wider distribution, so double it to 3 IQR.
let cut = q3 + iqr * 3;
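// For example, with hypothetical sorted times [10ms, 12ms, 15ms, 20ms, 400ms]:
// len = 5, qlen = 1, q1 = 12ms, q3 = 20ms, iqr = 8ms, and cut = 20ms + 3*8ms = 44ms,
// so only the 400ms test is reported as slow.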
if cut > *times.last().unwrap() {
return;
}
for t in self.tests.iter().filter(|entry| match **entry {
QueueEntry { state: State::Done(Ok(dur)), .. } => dur > cut,
_ => false,
})
{
println!("slow: {}", t)
}
}
/// Scan pushed directories for tests and run them.
pub fn run(&mut self) -> TestResult {
let started = time::Instant::now();
self.scan_dirs();
self.schedule_jobs();
self.drain_threads();
self.report_slow_tests();
println!("{} tests", self.tests.len());
match self.errors {
0 => Ok(started.elapsed()),
1 => Err("1 failure".to_string()),
n => Err(format!("{} failures", n)),
}
}
}

lib/filetests/src/runone.rs
@@ -0,0 +1,144 @@
//! Run the tests in a single test file.
use std::borrow::Cow;
use std::path::Path;
use std::time;
use std::io::{self, Read};
use std::fs;
use cretonne::ir::Function;
use cretonne::isa::TargetIsa;
use cretonne::settings::Flags;
use cretonne::timing;
use cretonne::verify_function;
use cretonne::print_errors::pretty_verifier_error;
use cton_reader::parse_test;
use cton_reader::IsaSpec;
use {TestResult, new_subtest};
use subtest::{SubTest, Context, Result};
/// Read an entire file into a string.
fn read_to_string<P: AsRef<Path>>(path: P) -> io::Result<String> {
let mut file = fs::File::open(path)?;
let mut buffer = String::new();
file.read_to_string(&mut buffer)?;
Ok(buffer)
}
/// Load `path` and run the test in it.
///
/// If running this test causes a panic, it will propagate as normal.
pub fn run(path: &Path) -> TestResult {
let _tt = timing::process_file();
dbg!("---\nFile: {}", path.to_string_lossy());
let started = time::Instant::now();
let buffer = read_to_string(path).map_err(|e| e.to_string())?;
let testfile = parse_test(&buffer).map_err(|e| e.to_string())?;
if testfile.functions.is_empty() {
return Err("no functions found".to_string());
}
// Parse the test commands.
let mut tests = testfile
.commands
.iter()
.map(new_subtest)
.collect::<Result<Vec<_>>>()?;
// Flags to use for those tests that don't need an ISA.
// This is the cumulative effect of all the `set` commands in the file.
let flags = match testfile.isa_spec {
IsaSpec::None(ref f) => f,
IsaSpec::Some(ref v) => v.last().expect("Empty ISA list").flags(),
};
// Sort the tests so the mutators are at the end, and those that don't need the verifier are at
// the front.
tests.sort_by_key(|st| (st.is_mutating(), st.needs_verifier()));
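// `false` sorts before `true`, so a hypothetical [compile (mutating), cat (no
// verifier), domtree] ordering becomes [cat, domtree, compile].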
// Expand the tests into (test, flags, isa) tuples.
let mut tuples = test_tuples(&tests, &testfile.isa_spec, flags)?;
// Isolate the last test in the hope that this is the only mutating test.
// If so, we can completely avoid cloning functions.
let last_tuple = match tuples.pop() {
None => return Err("no test commands found".to_string()),
Some(t) => t,
};
for (func, details) in testfile.functions {
let mut context = Context {
preamble_comments: &testfile.preamble_comments,
details,
verified: false,
flags,
isa: None,
};
for tuple in &tuples {
run_one_test(*tuple, Cow::Borrowed(&func), &mut context)?;
}
// Run the last test with an owned function which means it won't need to clone it before
// mutating.
run_one_test(last_tuple, Cow::Owned(func), &mut context)?;
}
Ok(started.elapsed())
}
// Given a slice of tests, generate a vector of (test, flags, isa) tuples.
fn test_tuples<'a>(
tests: &'a [Box<SubTest>],
isa_spec: &'a IsaSpec,
no_isa_flags: &'a Flags,
) -> Result<Vec<(&'a SubTest, &'a Flags, Option<&'a TargetIsa>)>> {
let mut out = Vec::new();
for test in tests {
if test.needs_isa() {
match *isa_spec {
IsaSpec::None(_) => {
// TODO: Generate a list of default ISAs.
return Err(format!("test {} requires an ISA", test.name()));
}
IsaSpec::Some(ref isas) => {
for isa in isas {
out.push((&**test, isa.flags(), Some(&**isa)));
}
}
}
} else {
// This test doesn't require an ISA, and we only want to run one instance of it.
// Still, give it an ISA ref if we happen to have a unique one.
// For example, `test cat` can use this to print encodings and register names.
out.push((&**test, no_isa_flags, isa_spec.unique_isa()));
}
}
Ok(out)
}
fn run_one_test<'a>(
tuple: (&'a SubTest, &'a Flags, Option<&'a TargetIsa>),
func: Cow<Function>,
context: &mut Context<'a>,
) -> Result<()> {
let (test, flags, isa) = tuple;
let name = format!("{}({})", test.name(), func.name);
dbg!("Test: {} {}", name, isa.map(TargetIsa::name).unwrap_or("-"));
context.flags = flags;
context.isa = isa;
// Should we run the verifier before this test?
if !context.verified && test.needs_verifier() {
verify_function(&func, context.flags_or_isa()).map_err(
|e| {
pretty_verifier_error(&func, isa, e)
},
)?;
context.verified = true;
}
test.run(func, context).map_err(
|e| format!("{}: {}", name, e),
)
}

@@ -0,0 +1,102 @@
//! SubTest trait.
use std::result;
use std::borrow::Cow;
use cretonne::ir::Function;
use cretonne::isa::TargetIsa;
use cretonne::settings::{Flags, FlagsOrIsa};
use cton_reader::{Details, Comment};
use filecheck::{CheckerBuilder, Checker, NO_VARIABLES};
pub type Result<T> = result::Result<T, String>;
/// Context for running a test on a single function.
pub struct Context<'a> {
/// Comments from the preamble of the test file. These apply to all functions.
pub preamble_comments: &'a [Comment<'a>],
/// Additional details about the function from the parser.
pub details: Details<'a>,
/// Was the function verified before running this test?
pub verified: bool,
/// ISA-independent flags for this test.
pub flags: &'a Flags,
/// Target ISA to test against. Only guaranteed to be present for sub-tests whose `needs_isa`
/// method returned `true`. For other sub-tests, this is set if the test file has a unique ISA.
pub isa: Option<&'a TargetIsa>,
}
impl<'a> Context<'a> {
/// Get a `FlagsOrIsa` object for passing to the verifier.
pub fn flags_or_isa(&self) -> FlagsOrIsa<'a> {
FlagsOrIsa {
flags: self.flags,
isa: self.isa,
}
}
}
/// Common interface for implementations of test commands.
///
/// Each `.cton` test file may contain multiple test commands, each represented by a `SubTest`
/// trait object.
pub trait SubTest {
/// Name identifying this subtest. Typically the same as the test command.
fn name(&self) -> Cow<str>;
/// Should the verifier be run on the function before running the test?
fn needs_verifier(&self) -> bool {
true
}
/// Does this test mutate the function when it runs?
/// This is used as a hint to avoid cloning the function needlessly.
fn is_mutating(&self) -> bool {
false
}
/// Does this test need a `TargetIsa` trait object?
fn needs_isa(&self) -> bool {
false
}
/// Run this test on `func`.
fn run(&self, func: Cow<Function>, context: &Context) -> Result<()>;
}
/// Run filecheck on `text`, using directives extracted from `context`.
pub fn run_filecheck(text: &str, context: &Context) -> Result<()> {
let checker = build_filechecker(context)?;
if checker.check(text, NO_VARIABLES).map_err(|e| {
format!("filecheck: {}", e)
})?
{
Ok(())
} else {
// Filecheck mismatch. Emit an explanation as output.
let (_, explain) = checker.explain(text, NO_VARIABLES).map_err(|e| {
format!("explain: {}", e)
})?;
Err(format!("filecheck failed:\n{}{}", checker, explain))
}
}
/// Build a filechecker using the directives in the file preamble and the function's comments.
pub fn build_filechecker(context: &Context) -> Result<Checker> {
let mut builder = CheckerBuilder::new();
// Preamble comments apply to all functions.
for comment in context.preamble_comments {
builder.directive(comment.text).map_err(|e| {
format!("filecheck: {}", e)
})?;
}
for comment in &context.details.comments {
builder.directive(comment.text).map_err(|e| {
format!("filecheck: {}", e)
})?;
}
Ok(builder.finish())
}
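
As an illustration of the trait, a hedged sketch of a hypothetical `test noop` command
that just prints the function and defers to filecheck, much like `test cat` below
(`TestNoop` does not exist in this commit, and its module placement is assumed):

use std::borrow::Cow;

use cretonne::ir::Function;
use subtest::{run_filecheck, Context, Result, SubTest};

struct TestNoop;

impl SubTest for TestNoop {
    fn name(&self) -> Cow<str> {
        Cow::from("noop")
    }

    // The trait defaults apply: the verifier runs first, the test is non-mutating,
    // and no ISA is required.
    fn run(&self, func: Cow<Function>, context: &Context) -> Result<()> {
        // Print the function and let the file's filecheck directives check it.
        run_filecheck(&func.display(context.isa).to_string(), context)
    }
}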

@@ -0,0 +1,306 @@
//! Test command for testing the binary machine code emission.
//!
//! The `binemit` test command generates binary machine code for every instruction in the input
//! functions and compares the results to the expected output.
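//!
//! The expected bytes are given by `bin:` annotations on individual instructions; the
//! text after the directive is compared against the space-separated hex emitted by
//! `TextSink`. A hypothetical, simplified example (a real test also pins down encodings
//! and value locations):
//!
//! ```cton
//! v1 = iadd v0, v2 ; bin: 48 01 d0
//! ```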
use std::borrow::Cow;
use std::collections::HashMap;
use std::fmt::Write;
use cretonne::binemit;
use cretonne::dbg::DisplayList;
use cretonne::ir;
use cretonne::ir::entities::AnyEntity;
use cretonne::binemit::RegDiversions;
use cretonne::print_errors::pretty_error;
use cton_reader::TestCommand;
use subtest::{SubTest, Context, Result};
use match_directive::match_directive;
struct TestBinEmit;
pub fn subtest(parsed: &TestCommand) -> Result<Box<SubTest>> {
assert_eq!(parsed.command, "binemit");
if !parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestBinEmit))
}
}
// Code sink that generates text.
struct TextSink {
offset: binemit::CodeOffset,
text: String,
}
impl TextSink {
/// Create a new empty TextSink.
pub fn new() -> Self {
Self {
offset: 0,
text: String::new(),
}
}
}
impl binemit::CodeSink for TextSink {
fn offset(&self) -> binemit::CodeOffset {
self.offset
}
fn put1(&mut self, x: u8) {
write!(self.text, "{:02x} ", x).unwrap();
self.offset += 1;
}
fn put2(&mut self, x: u16) {
write!(self.text, "{:04x} ", x).unwrap();
self.offset += 2;
}
fn put4(&mut self, x: u32) {
write!(self.text, "{:08x} ", x).unwrap();
self.offset += 4;
}
fn put8(&mut self, x: u64) {
write!(self.text, "{:016x} ", x).unwrap();
self.offset += 8;
}
fn reloc_ebb(&mut self, reloc: binemit::Reloc, ebb_offset: binemit::CodeOffset) {
write!(self.text, "{}({}) ", reloc, ebb_offset).unwrap();
}
fn reloc_external(
&mut self,
reloc: binemit::Reloc,
name: &ir::ExternalName,
addend: binemit::Addend,
) {
write!(
self.text,
"{}({}",
reloc,
name,
).unwrap();
if addend != 0 {
write!(
self.text,
"{:+}",
addend,
).unwrap();
}
write!(
self.text,
") ",
).unwrap();
}
fn reloc_jt(&mut self, reloc: binemit::Reloc, jt: ir::JumpTable) {
write!(self.text, "{}({}) ", reloc, jt).unwrap();
}
}
impl SubTest for TestBinEmit {
fn name(&self) -> Cow<str> {
Cow::from("binemit")
}
fn is_mutating(&self) -> bool {
true
}
fn needs_isa(&self) -> bool {
true
}
fn run(&self, func: Cow<ir::Function>, context: &Context) -> Result<()> {
let isa = context.isa.expect("binemit needs an ISA");
let encinfo = isa.encoding_info();
// TODO: Run a verifier pass over the code first to detect any bad encodings or missing/bad
// value locations. The current error reporting is just crashing...
let mut func = func.into_owned();
// Fix the stack frame layout so we can test spill/fill encodings.
let min_offset = func.stack_slots
.keys()
.map(|ss| func.stack_slots[ss].offset.unwrap())
.min();
func.stack_slots.frame_size = min_offset.map(|off| (-off) as u32);
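// E.g. with hypothetical stack slots at offsets -16 and -8, `min_offset` is
// Some(-16) and `frame_size` becomes Some(16).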
let is_compressed = isa.flags().is_compressed();
// Give an encoding to any instruction that doesn't already have one.
let mut divert = RegDiversions::new();
for ebb in func.layout.ebbs() {
divert.clear();
for inst in func.layout.ebb_insts(ebb) {
if !func.encodings[inst].is_legal() {
// Find an encoding that satisfies both immediate field and register
// constraints.
if let Some(enc) = {
let mut legal_encodings = isa.legal_encodings(
&func.dfg,
&func.dfg[inst],
func.dfg.ctrl_typevar(inst),
).filter(|e| {
let recipe_constraints = &encinfo.constraints[e.recipe()];
recipe_constraints.satisfied(inst, &divert, &func)
});
if is_compressed {
// Get the smallest legal encoding
legal_encodings.min_by_key(|&e| encinfo.bytes(e))
} else {
// If not using compressed, just use the first encoding.
legal_encodings.next()
}
}
{
func.encodings[inst] = enc;
}
}
divert.apply(&func.dfg[inst]);
}
}
// Relax branches and compute EBB offsets based on the encodings.
let code_size = binemit::relax_branches(&mut func, isa).map_err(|e| {
pretty_error(&func, context.isa, e)
})?;
// Collect all of the 'bin:' directives on instructions.
let mut bins = HashMap::new();
for comment in &context.details.comments {
if let Some(want) = match_directive(comment.text, "bin:") {
match comment.entity {
AnyEntity::Inst(inst) => {
if let Some(prev) = bins.insert(inst, want) {
return Err(format!(
"multiple 'bin:' directives on {}: '{}' and '{}'",
func.dfg.display_inst(inst, isa),
prev,
want
));
}
}
_ => {
return Err(format!(
"'bin:' directive on non-inst {}: {}",
comment.entity,
comment.text
))
}
}
}
}
if bins.is_empty() {
return Err("No 'bin:' directives found".to_string());
}
// Now emit all instructions.
let mut sink = TextSink::new();
for ebb in func.layout.ebbs() {
divert.clear();
// Correct header offsets should have been computed by `relax_branches()`.
assert_eq!(
sink.offset,
func.offsets[ebb],
"Inconsistent {} header offset",
ebb
);
for (offset, inst, enc_bytes) in func.inst_offsets(ebb, &encinfo) {
assert_eq!(sink.offset, offset);
sink.text.clear();
let enc = func.encodings[inst];
// Send legal encodings into the emitter.
if enc.is_legal() {
// Generate a better error message if output locations are not specified.
if let Some(&v) = func.dfg.inst_results(inst).iter().find(|&&v| {
!func.locations[v].is_assigned()
})
{
return Err(format!(
"Missing register/stack slot for {} in {}",
v,
func.dfg.display_inst(inst, isa)
));
}
let before = sink.offset;
isa.emit_inst(&func, inst, &mut divert, &mut sink);
let emitted = sink.offset - before;
// Verify the encoding recipe sizes against the ISA's emit_inst implementation.
assert_eq!(
emitted,
enc_bytes,
"Inconsistent size for [{}] {}",
encinfo.display(enc),
func.dfg.display_inst(inst, isa)
);
}
// Check against bin: directives.
if let Some(want) = bins.remove(&inst) {
if !enc.is_legal() {
// A possible cause of an unencoded instruction is a missing location for
// one of the input operands.
if let Some(&v) = func.dfg.inst_args(inst).iter().find(|&&v| {
!func.locations[v].is_assigned()
})
{
return Err(format!(
"Missing register/stack slot for {} in {}",
v,
func.dfg.display_inst(inst, isa)
));
}
// Do any encodings exist?
let encodings = isa.legal_encodings(
&func.dfg,
&func.dfg[inst],
func.dfg.ctrl_typevar(inst),
).map(|e| encinfo.display(e))
.collect::<Vec<_>>();
if encodings.is_empty() {
return Err(format!(
"No encodings found for: {}",
func.dfg.display_inst(inst, isa)
));
}
return Err(format!(
"No matching encodings for {} in {}",
func.dfg.display_inst(inst, isa),
DisplayList(&encodings),
));
}
let have = sink.text.trim();
if have != want {
return Err(format!(
"Bad machine code for {}: {}\nWant: {}\nGot: {}",
inst,
func.dfg.display_inst(inst, isa),
want,
have
));
}
}
}
}
if sink.offset != code_size {
return Err(format!(
"Expected code size {}, got {}",
code_size,
sink.offset
));
}
Ok(())
}
}

@@ -0,0 +1,37 @@
//! The `cat` subtest.
use std::borrow::Cow;
use cretonne::ir::Function;
use cton_reader::TestCommand;
use subtest::{self, SubTest, Context, Result as STResult};
/// Object implementing the `test cat` sub-test.
///
/// This command is used for testing the parser and function printer. It simply parses a function
/// and prints it out again.
///
/// The result is verified by filecheck.
struct TestCat;
pub fn subtest(parsed: &TestCommand) -> STResult<Box<SubTest>> {
assert_eq!(parsed.command, "cat");
if !parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestCat))
}
}
impl SubTest for TestCat {
fn name(&self) -> Cow<str> {
Cow::from("cat")
}
fn needs_verifier(&self) -> bool {
false
}
fn run(&self, func: Cow<Function>, context: &Context) -> STResult<()> {
subtest::run_filecheck(&func.display(context.isa).to_string(), context)
}
}

@@ -0,0 +1,114 @@
//! Test command for testing the code generator pipeline
//!
//! The `compile` test command runs each function through the full code generator pipeline
use cretonne::binemit;
use cretonne::ir;
use cretonne;
use cretonne::print_errors::pretty_error;
use cton_reader::TestCommand;
use subtest::{SubTest, Context, Result, run_filecheck};
use std::borrow::Cow;
use std::fmt::Write;
struct TestCompile;
pub fn subtest(parsed: &TestCommand) -> Result<Box<SubTest>> {
assert_eq!(parsed.command, "compile");
if !parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestCompile))
}
}
impl SubTest for TestCompile {
fn name(&self) -> Cow<str> {
Cow::from("compile")
}
fn is_mutating(&self) -> bool {
true
}
fn needs_isa(&self) -> bool {
true
}
fn run(&self, func: Cow<ir::Function>, context: &Context) -> Result<()> {
let isa = context.isa.expect("compile needs an ISA");
// Create a compilation context, and drop in the function.
let mut comp_ctx = cretonne::Context::new();
comp_ctx.func = func.into_owned();
let code_size = comp_ctx.compile(isa).map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, e)
})?;
dbg!(
"Generated {} bytes of code:\n{}",
code_size,
comp_ctx.func.display(isa)
);
// Verify that the returned code size matches the emitted bytes.
let mut sink = SizeSink { offset: 0 };
binemit::emit_function(
&comp_ctx.func,
|func, inst, div, sink| isa.emit_inst(func, inst, div, sink),
&mut sink,
);
if sink.offset != code_size {
return Err(format!(
"Expected code size {}, got {}",
code_size,
sink.offset
));
}
// Run final code through filecheck.
let mut text = String::new();
write!(&mut text, "{}", &comp_ctx.func.display(Some(isa)))
.map_err(|e| e.to_string())?;
run_filecheck(&text, context)
}
}
// Code sink that simply counts bytes.
struct SizeSink {
offset: binemit::CodeOffset,
}
impl binemit::CodeSink for SizeSink {
fn offset(&self) -> binemit::CodeOffset {
self.offset
}
fn put1(&mut self, _: u8) {
self.offset += 1;
}
fn put2(&mut self, _: u16) {
self.offset += 2;
}
fn put4(&mut self, _: u32) {
self.offset += 4;
}
fn put8(&mut self, _: u64) {
self.offset += 8;
}
fn reloc_ebb(&mut self, _reloc: binemit::Reloc, _ebb_offset: binemit::CodeOffset) {}
fn reloc_external(
&mut self,
_reloc: binemit::Reloc,
_name: &ir::ExternalName,
_addend: binemit::Addend,
) {
}
fn reloc_jt(&mut self, _reloc: binemit::Reloc, _jt: ir::JumpTable) {}
}

@@ -0,0 +1,150 @@
//! Test command for verifying dominator trees.
//!
//! The `test domtree` test command looks for annotations on instructions like this:
//!
//! ```cton
//! jump ebb3 ; dominates: ebb3
//! ```
//!
//! This annotation means that the jump instruction is expected to be the immediate dominator of
//! `ebb3`.
//!
//! We verify that the dominator tree annotations are complete and correct.
//!
use cretonne::dominator_tree::{DominatorTree, DominatorTreePreorder};
use cretonne::flowgraph::ControlFlowGraph;
use cretonne::ir::Function;
use cretonne::ir::entities::AnyEntity;
use cton_reader::TestCommand;
use subtest::{SubTest, Context, Result, run_filecheck};
use std::borrow::{Borrow, Cow};
use std::collections::HashMap;
use std::fmt::{self, Write};
use std::result;
use match_directive::match_directive;
struct TestDomtree;
pub fn subtest(parsed: &TestCommand) -> Result<Box<SubTest>> {
assert_eq!(parsed.command, "domtree");
if !parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestDomtree))
}
}
impl SubTest for TestDomtree {
fn name(&self) -> Cow<str> {
Cow::from("domtree")
}
// Extract our own dominator tree from the function and compare it against the source annotations.
fn run(&self, func: Cow<Function>, context: &Context) -> Result<()> {
let func = func.borrow();
let cfg = ControlFlowGraph::with_function(func);
let domtree = DominatorTree::with_function(func, &cfg);
// Build an expected domtree from the source annotations.
let mut expected = HashMap::new();
for comment in &context.details.comments {
if let Some(tail) = match_directive(comment.text, "dominates:") {
let inst = match comment.entity {
AnyEntity::Inst(inst) => inst,
_ => {
return Err(format!(
"annotation on non-inst {}: {}",
comment.entity,
comment.text
))
}
};
for src_ebb in tail.split_whitespace() {
let ebb = match context.details.map.lookup_str(src_ebb) {
Some(AnyEntity::Ebb(ebb)) => ebb,
_ => return Err(format!("expected defined EBB, got {}", src_ebb)),
};
// Annotations say that `inst` is the idom of `ebb`.
if expected.insert(ebb, inst).is_some() {
return Err(format!("multiple dominators for {}", src_ebb));
}
// Compare to computed domtree.
match domtree.idom(ebb) {
Some(got_inst) if got_inst != inst => {
return Err(format!(
"mismatching idoms for {}:\n\
want: {}, got: {}",
src_ebb,
inst,
got_inst
));
}
None => {
return Err(format!(
"mismatching idoms for {}:\n\
want: {}, got: unreachable",
src_ebb,
inst
));
}
_ => {}
}
}
}
}
// Now we know that everything in `expected` is consistent with `domtree`.
// All other EBBs should be either unreachable or the entry block.
for ebb in func.layout.ebbs().skip(1).filter(
|ebb| !expected.contains_key(ebb),
)
{
if let Some(got_inst) = domtree.idom(ebb) {
return Err(format!(
"mismatching idoms for renumbered {}:\n\
want: unreachable, got: {}",
ebb,
got_inst
));
}
}
let text = filecheck_text(func, &domtree).expect("formatting error");
run_filecheck(&text, context)
}
}
// Generate some output for filecheck testing
fn filecheck_text(func: &Function, domtree: &DominatorTree) -> result::Result<String, fmt::Error> {
let mut s = String::new();
write!(s, "cfg_postorder:")?;
for &ebb in domtree.cfg_postorder() {
write!(s, " {}", ebb)?;
}
writeln!(s, "")?;
// Compute and print out a pre-order of the dominator tree.
writeln!(s, "domtree_preorder {{")?;
let mut dtpo = DominatorTreePreorder::new();
dtpo.compute(domtree, &func.layout);
let mut stack = Vec::new();
stack.extend(func.layout.entry_block());
while let Some(ebb) = stack.pop() {
write!(s, " {}:", ebb)?;
let i = stack.len();
for ch in dtpo.children(ebb) {
write!(s, " {}", ch)?;
stack.push(ch);
}
writeln!(s, "")?;
// Reverse the children we just pushed so we'll pop them in order.
stack[i..].reverse();
}
writeln!(s, "}}")?;
Ok(s)
}
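
For reference, the text handed to filecheck by `filecheck_text` has roughly this shape
(the EBB numbers and tree here are hypothetical):

cfg_postorder: ebb1 ebb0
domtree_preorder {
 ebb0: ebb1
 ebb1:
}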

@@ -0,0 +1,53 @@
//! Test command for checking the IL legalizer.
//!
//! The `test legalizer` test command runs each function through `legalize_function()` and sends
//! the result to filecheck.
use std::borrow::Cow;
use cretonne;
use cretonne::ir::Function;
use cretonne::print_errors::pretty_error;
use cton_reader::TestCommand;
use subtest::{SubTest, Context, Result, run_filecheck};
use std::fmt::Write;
struct TestLegalizer;
pub fn subtest(parsed: &TestCommand) -> Result<Box<SubTest>> {
assert_eq!(parsed.command, "legalizer");
if !parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestLegalizer))
}
}
impl SubTest for TestLegalizer {
fn name(&self) -> Cow<str> {
Cow::from("legalizer")
}
fn is_mutating(&self) -> bool {
true
}
fn needs_isa(&self) -> bool {
true
}
fn run(&self, func: Cow<Function>, context: &Context) -> Result<()> {
let mut comp_ctx = cretonne::Context::new();
comp_ctx.func = func.into_owned();
let isa = context.isa.expect("legalizer needs an ISA");
comp_ctx.compute_cfg();
comp_ctx.legalize(isa).map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, e)
})?;
let mut text = String::new();
write!(&mut text, "{}", &comp_ctx.func.display(Some(isa)))
.map_err(|e| e.to_string())?;
run_filecheck(&text, context)
}
}

@@ -0,0 +1,53 @@
//! Test command for testing the LICM pass.
//!
//! The `licm` test command runs each function through the LICM pass after ensuring
//! that all instructions are legal for the target.
//!
//! The resulting function is sent to `filecheck`.
use cretonne::ir::Function;
use cretonne;
use cretonne::print_errors::pretty_error;
use cton_reader::TestCommand;
use subtest::{SubTest, Context, Result, run_filecheck};
use std::borrow::Cow;
use std::fmt::Write;
struct TestLICM;
pub fn subtest(parsed: &TestCommand) -> Result<Box<SubTest>> {
assert_eq!(parsed.command, "licm");
if !parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestLICM))
}
}
impl SubTest for TestLICM {
fn name(&self) -> Cow<str> {
Cow::from("licm")
}
fn is_mutating(&self) -> bool {
true
}
fn run(&self, func: Cow<Function>, context: &Context) -> Result<()> {
// Create a compilation context, and drop in the function.
let mut comp_ctx = cretonne::Context::new();
comp_ctx.func = func.into_owned();
comp_ctx.flowgraph();
comp_ctx.compute_loop_analysis();
comp_ctx.licm(context.flags_or_isa()).map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, Into::into(e))
})?;
let mut text = String::new();
write!(&mut text, "{}", &comp_ctx.func).map_err(
|e| e.to_string(),
)?;
run_filecheck(&text, context)
}
}

@@ -0,0 +1,50 @@
//! Test command for testing the preopt pass.
//!
//! The resulting function is sent to `filecheck`.
use cretonne::ir::Function;
use cretonne;
use cretonne::print_errors::pretty_error;
use cton_reader::TestCommand;
use subtest::{SubTest, Context, Result, run_filecheck};
use std::borrow::Cow;
use std::fmt::Write;
struct TestPreopt;
pub fn subtest(parsed: &TestCommand) -> Result<Box<SubTest>> {
assert_eq!(parsed.command, "preopt");
if !parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestPreopt))
}
}
impl SubTest for TestPreopt {
fn name(&self) -> Cow<str> {
Cow::from("preopt")
}
fn is_mutating(&self) -> bool {
true
}
fn run(&self, func: Cow<Function>, context: &Context) -> Result<()> {
// Create a compilation context, and drop in the function.
let mut comp_ctx = cretonne::Context::new();
comp_ctx.func = func.into_owned();
let isa = context.isa.expect("preopt needs an ISA");
comp_ctx.flowgraph();
comp_ctx.preopt(isa).map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, Into::into(e))
})?;
let mut text = String::new();
write!(&mut text, "{}", &comp_ctx.func).map_err(
|e| e.to_string(),
)?;
run_filecheck(&text, context)
}
}

@@ -0,0 +1,37 @@
//! The `print-cfg` sub-command.
//!
//! Read a series of Cretonne IL files and print their control flow graphs
//! in graphviz format.
use std::borrow::Cow;
use cretonne::ir::Function;
use cretonne::cfg_printer::CFGPrinter;
use cton_reader::TestCommand;
use subtest::{self, SubTest, Context, Result as STResult};
/// Object implementing the `test print-cfg` sub-test.
struct TestPrintCfg;
pub fn subtest(parsed: &TestCommand) -> STResult<Box<SubTest>> {
assert_eq!(parsed.command, "print-cfg");
if !parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestPrintCfg))
}
}
impl SubTest for TestPrintCfg {
fn name(&self) -> Cow<str> {
Cow::from("print-cfg")
}
fn needs_verifier(&self) -> bool {
false
}
fn run(&self, func: Cow<Function>, context: &Context) -> STResult<()> {
subtest::run_filecheck(&CFGPrinter::new(&func).to_string(), context)
}
}

@@ -0,0 +1,62 @@
//! Test command for testing the register allocator.
//!
//! The `regalloc` test command runs each function through the register allocator after ensuring
//! that all instructions are legal for the target.
//!
//! The resulting function is sent to `filecheck`.
use cretonne::ir::Function;
use cretonne;
use cretonne::print_errors::pretty_error;
use cton_reader::TestCommand;
use subtest::{SubTest, Context, Result, run_filecheck};
use std::borrow::Cow;
use std::fmt::Write;
struct TestRegalloc;
pub fn subtest(parsed: &TestCommand) -> Result<Box<SubTest>> {
assert_eq!(parsed.command, "regalloc");
if !parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestRegalloc))
}
}
impl SubTest for TestRegalloc {
fn name(&self) -> Cow<str> {
Cow::from("regalloc")
}
fn is_mutating(&self) -> bool {
true
}
fn needs_isa(&self) -> bool {
true
}
fn run(&self, func: Cow<Function>, context: &Context) -> Result<()> {
let isa = context.isa.expect("register allocator needs an ISA");
// Create a compilation context, and drop in the function.
let mut comp_ctx = cretonne::Context::new();
comp_ctx.func = func.into_owned();
comp_ctx.compute_cfg();
// TODO: Should we have an option to skip legalization?
comp_ctx.legalize(isa).map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, e)
})?;
comp_ctx.compute_domtree();
comp_ctx.regalloc(isa).map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, e)
})?;
let mut text = String::new();
write!(&mut text, "{}", &comp_ctx.func.display(Some(isa)))
.map_err(|e| e.to_string())?;
run_filecheck(&text, context)
}
}

@@ -0,0 +1,52 @@
//! Test command for testing the simple GVN pass.
//!
//! The `simple-gvn` test command runs each function through the simple GVN pass after ensuring
//! that all instructions are legal for the target.
//!
//! The resulting function is sent to `filecheck`.
use cretonne::ir::Function;
use cretonne;
use cretonne::print_errors::pretty_error;
use cton_reader::TestCommand;
use subtest::{SubTest, Context, Result, run_filecheck};
use std::borrow::Cow;
use std::fmt::Write;
struct TestSimpleGVN;
pub fn subtest(parsed: &TestCommand) -> Result<Box<SubTest>> {
assert_eq!(parsed.command, "simple-gvn");
if !parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestSimpleGVN))
}
}
impl SubTest for TestSimpleGVN {
fn name(&self) -> Cow<str> {
Cow::from("simple-gvn")
}
fn is_mutating(&self) -> bool {
true
}
fn run(&self, func: Cow<Function>, context: &Context) -> Result<()> {
// Create a compilation context, and drop in the function.
let mut comp_ctx = cretonne::Context::new();
comp_ctx.func = func.into_owned();
comp_ctx.flowgraph();
comp_ctx.simple_gvn(context.flags_or_isa()).map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, Into::into(e))
})?;
let mut text = String::new();
write!(&mut text, "{}", &comp_ctx.func).map_err(
|e| e.to_string(),
)?;
run_filecheck(&text, context)
}
}

@@ -0,0 +1,82 @@
//! Test command for checking the IL verifier.
//!
//! The `test verifier` test command looks for annotations on instructions like this:
//!
//! ```cton
//! jump ebb3 ; error: jump to non-existent EBB
//! ```
//!
//! This annotation means that the verifier is expected to give an error for the jump instruction
//! containing the substring "jump to non-existent EBB".
use std::borrow::{Borrow, Cow};
use cretonne::verify_function;
use cretonne::ir::Function;
use cton_reader::TestCommand;
use subtest::{SubTest, Context, Result};
use match_directive::match_directive;
struct TestVerifier;
pub fn subtest(parsed: &TestCommand) -> Result<Box<SubTest>> {
assert_eq!(parsed.command, "verifier");
if !parsed.options.is_empty() {
Err(format!("No options allowed on {}", parsed))
} else {
Ok(Box::new(TestVerifier))
}
}
impl SubTest for TestVerifier {
fn name(&self) -> Cow<str> {
Cow::from("verifier")
}
fn needs_verifier(&self) -> bool {
// Running the verifier before this test would defeat its purpose.
false
}
fn run(&self, func: Cow<Function>, context: &Context) -> Result<()> {
let func = func.borrow();
// Scan source annotations for "error:" directives.
let mut expected = None;
for comment in &context.details.comments {
if let Some(tail) = match_directive(comment.text, "error:") {
// Currently, the verifier can only report one problem at a time.
// Reject more than one `error:` directive.
if expected.is_some() {
return Err("cannot handle multiple error: directives".to_string());
}
expected = Some((comment.entity, tail));
}
}
match verify_function(func, context.flags_or_isa()) {
Ok(_) => {
match expected {
None => Ok(()),
Some((_, msg)) => Err(format!("passed, expected error: {}", msg)),
}
}
Err(got) => {
match expected {
None => Err(format!("verifier pass, got {}", got)),
Some((want_loc, want_msg)) if got.message.contains(want_msg) => {
if want_loc == got.location {
Ok(())
} else {
Err(format!(
"correct error reported on {}, but wanted {}",
got.location,
want_loc
))
}
}
Some(_) => Err(format!("mismatching error: {}", got)),
}
}
}
}
}