Update rustfmt to 0.9.0.

Dan Gohman
2017-08-31 10:44:59 -07:00
parent 46fb64cbb4
commit 2efdc0ed37
111 changed files with 4692 additions and 3379 deletions
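Most of the changes below are mechanical reformatting applied by rustfmt 0.9.0: multi-line calls and function signatures switch from visually aligned continuation lines to block indentation with one argument per line and a trailing comma, and `where` clauses move onto their own lines with one bound per line. The before/after sketch below is an illustrative, hand-written example of that style change; the function names and type parameter are made up and do not come from the changed files.

// rustfmt 0.8.4 style (illustrative): arguments and the return type are
// visually aligned under the opening parenthesis, and the `where` clause
// is indented as a single line.
fn example_v084<T>(first_value: T,
                   second_value: T)
                   -> T
    where T: std::ops::Add<Output = T>
{
    first_value + second_value
}

// rustfmt 0.9.0 style (illustrative): block indentation, one argument per
// line with a trailing comma, and each `where` bound on its own line.
fn example_v090<T>(
    first_value: T,
    second_value: T,
) -> T
where
    T: std::ops::Add<Output = T>,
{
    first_value + second_value
}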

View File

@@ -15,7 +15,7 @@
# With the --install option, also tries to install the right version.
# This version should always be bumped to the newest version available.
VERS="0.8.4"
VERS="0.9.0"
if cargo install --list | grep -q "^rustfmt v$VERS"; then
exit 0

View File

@@ -21,10 +21,12 @@ pub fn run(files: Vec<String>) -> CommandResult {
}
fn cat_one(filename: String) -> CommandResult {
let buffer = read_to_string(&filename)
.map_err(|e| format!("{}: {}", filename, e))?;
let items = parse_functions(&buffer)
.map_err(|e| format!("{}: {}", filename, e))?;
let buffer = read_to_string(&filename).map_err(
|e| format!("{}: {}", filename, e),
)?;
let items = parse_functions(&buffer).map_err(
|e| format!("{}: {}", filename, e),
)?;
for (idx, func) in items.into_iter().enumerate() {
if idx != 0 {

View File

@@ -64,10 +64,10 @@ fn cton_util() -> CommandResult {
// Parse command line arguments.
let args: Args = Docopt::new(USAGE)
.and_then(|d| {
d.help(true)
.version(Some(format!("Cretonne {}", VERSION)))
.deserialize()
})
.unwrap_or_else(|e| e.exit());
// Find the sub-command to execute.
@@ -80,10 +80,12 @@ fn cton_util() -> CommandResult {
} else if args.cmd_print_cfg {
print_cfg::run(args.arg_file)
} else if args.cmd_wasm {
wasm::run(args.arg_file,
args.flag_verbose,
args.flag_optimize,
args.flag_check)
wasm::run(
args.arg_file,
args.flag_verbose,
args.flag_optimize,
args.flag_check,
)
} else {
// Debugging / shouldn't happen with proper command line handling above.
Err(format!("Unhandled args: {:?}", args))

View File

@@ -108,9 +108,12 @@ impl SubTest for TestBinEmit {
for ebb in func.layout.ebbs() {
for inst in func.layout.ebb_insts(ebb) {
if !func.encodings[inst].is_legal() {
if let Ok(enc) = isa.encode(&func.dfg,
&func.dfg[inst],
func.dfg.ctrl_typevar(inst)) {
if let Ok(enc) = isa.encode(
&func.dfg,
&func.dfg[inst],
func.dfg.ctrl_typevar(inst),
)
{
func.encodings[inst] = enc;
}
}
@@ -118,8 +121,9 @@ impl SubTest for TestBinEmit {
}
// Relax branches and compute EBB offsets based on the encodings.
let code_size = binemit::relax_branches(&mut func, isa)
.map_err(|e| pretty_error(&func, context.isa, e))?;
let code_size = binemit::relax_branches(&mut func, isa).map_err(|e| {
pretty_error(&func, context.isa, e)
})?;
// Collect all of the 'bin:' directives on instructions.
let mut bins = HashMap::new();
@@ -128,16 +132,20 @@ impl SubTest for TestBinEmit {
match comment.entity {
AnyEntity::Inst(inst) => {
if let Some(prev) = bins.insert(inst, want) {
return Err(format!("multiple 'bin:' directives on {}: '{}' and '{}'",
func.dfg.display_inst(inst, isa),
prev,
want));
return Err(format!(
"multiple 'bin:' directives on {}: '{}' and '{}'",
func.dfg.display_inst(inst, isa),
prev,
want
));
}
}
_ => {
return Err(format!("'bin:' directive on non-inst {}: {}",
comment.entity,
comment.text))
return Err(format!(
"'bin:' directive on non-inst {}: {}",
comment.entity,
comment.text
))
}
}
}
@@ -152,10 +160,12 @@ impl SubTest for TestBinEmit {
for ebb in func.layout.ebbs() {
divert.clear();
// Correct header offsets should have been computed by `relax_branches()`.
assert_eq!(sink.offset,
func.offsets[ebb],
"Inconsistent {} header offset",
ebb);
assert_eq!(
sink.offset,
func.offsets[ebb],
"Inconsistent {} header offset",
ebb
);
for inst in func.layout.ebb_insts(ebb) {
sink.text.clear();
let enc = func.encodings[inst];
@@ -166,34 +176,44 @@ impl SubTest for TestBinEmit {
isa.emit_inst(&func, inst, &mut divert, &mut sink);
let emitted = sink.offset - before;
// Verify the encoding recipe sizes against the ISA's emit_inst implementation.
assert_eq!(emitted,
encinfo.bytes(enc),
"Inconsistent size for [{}] {}",
encinfo.display(enc),
func.dfg.display_inst(inst, isa));
assert_eq!(
emitted,
encinfo.bytes(enc),
"Inconsistent size for [{}] {}",
encinfo.display(enc),
func.dfg.display_inst(inst, isa)
);
}
// Check against bin: directives.
if let Some(want) = bins.remove(&inst) {
if !enc.is_legal() {
return Err(format!("{} can't be encoded: {}",
inst,
func.dfg.display_inst(inst, isa)));
return Err(format!(
"{} can't be encoded: {}",
inst,
func.dfg.display_inst(inst, isa)
));
}
let have = sink.text.trim();
if have != want {
return Err(format!("Bad machine code for {}: {}\nWant: {}\nGot: {}",
inst,
func.dfg.display_inst(inst, isa),
want,
have));
return Err(format!(
"Bad machine code for {}: {}\nWant: {}\nGot: {}",
inst,
func.dfg.display_inst(inst, isa),
want,
have
));
}
}
}
}
if sink.offset != code_size {
return Err(format!("Expected code size {}, got {}", code_size, sink.offset));
return Err(format!(
"Expected code size {}, got {}",
code_size,
sink.offset
));
}
Ok(())

View File

@@ -41,22 +41,30 @@ impl SubTest for TestCompile {
let mut comp_ctx = cretonne::Context::new();
comp_ctx.func = func.into_owned();
let code_size = comp_ctx
.compile(isa)
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
let code_size = comp_ctx.compile(isa).map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, e)
})?;
dbg!("Generated {} bytes of code:\n{}",
code_size,
comp_ctx.func.display(isa));
dbg!(
"Generated {} bytes of code:\n{}",
code_size,
comp_ctx.func.display(isa)
);
// Finally verify that the returned code size matches the emitted bytes.
let mut sink = SizeSink { offset: 0 };
binemit::emit_function(&comp_ctx.func,
|func, inst, div, sink| isa.emit_inst(func, inst, div, sink),
&mut sink);
binemit::emit_function(
&comp_ctx.func,
|func, inst, div, sink| isa.emit_inst(func, inst, div, sink),
&mut sink,
);
if sink.offset != code_size {
return Err(format!("Expected code size {}, got {}", code_size, sink.offset));
return Err(format!(
"Expected code size {}, got {}",
code_size,
sink.offset
));
}
Ok(())

View File

@@ -46,7 +46,9 @@ impl ConcurrentRunner {
heartbeat_thread(reply_tx.clone());
let handles = (0..num_cpus::get())
.map(|num| worker_thread(num, request_mutex.clone(), reply_tx.clone()))
.map(|num| {
worker_thread(num, request_mutex.clone(), reply_tx.clone())
})
.collect();
ConcurrentRunner {
@@ -97,16 +99,17 @@ fn heartbeat_thread(replies: Sender<Reply>) -> thread::JoinHandle<()> {
thread::Builder::new()
.name("heartbeat".to_string())
.spawn(move || while replies.send(Reply::Tick).is_ok() {
thread::sleep(Duration::from_secs(1));
})
.unwrap()
}
/// Spawn a worker thread running tests.
fn worker_thread(thread_num: usize,
requests: Arc<Mutex<Receiver<Request>>>,
replies: Sender<Reply>)
-> thread::JoinHandle<()> {
fn worker_thread(
thread_num: usize,
requests: Arc<Mutex<Receiver<Request>>>,
replies: Sender<Reply>,
) -> thread::JoinHandle<()> {
thread::Builder::new()
.name(format!("worker #{}", thread_num))
.spawn(move || {

View File

@@ -50,9 +50,11 @@ impl SubTest for TestDomtree {
let inst = match comment.entity {
AnyEntity::Inst(inst) => inst,
_ => {
return Err(format!("annotation on non-inst {}: {}",
comment.entity,
comment.text))
return Err(format!(
"annotation on non-inst {}: {}",
comment.entity,
comment.text
))
}
};
for src_ebb in tail.split_whitespace() {
@@ -69,17 +71,21 @@ impl SubTest for TestDomtree {
// Compare to computed domtree.
match domtree.idom(ebb) {
Some(got_inst) if got_inst != inst => {
return Err(format!("mismatching idoms for {}:\n\
return Err(format!(
"mismatching idoms for {}:\n\
want: {}, got: {}",
src_ebb,
inst,
got_inst));
src_ebb,
inst,
got_inst
));
}
None => {
return Err(format!("mismatching idoms for {}:\n\
return Err(format!(
"mismatching idoms for {}:\n\
want: {}, got: unreachable",
src_ebb,
inst));
src_ebb,
inst
));
}
_ => {}
}
@@ -89,15 +95,17 @@ impl SubTest for TestDomtree {
// Now we know that everything in `expected` is consistent with `domtree`.
// All other EBB's should be either unreachable or the entry block.
for ebb in func.layout
.ebbs()
.skip(1)
.filter(|ebb| !expected.contains_key(&ebb)) {
for ebb in func.layout.ebbs().skip(1).filter(
|ebb| !expected.contains_key(&ebb),
)
{
if let Some(got_inst) = domtree.idom(ebb) {
return Err(format!("mismatching idoms for renumbered {}:\n\
return Err(format!(
"mismatching idoms for renumbered {}:\n\
want: unreachable, got: {}",
ebb,
got_inst));
ebb,
got_inst
));
}
}

View File

@@ -41,9 +41,9 @@ impl SubTest for TestLegalizer {
let isa = context.isa.expect("legalizer needs an ISA");
comp_ctx.flowgraph();
comp_ctx
.legalize(isa)
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
comp_ctx.legalize(isa).map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, e)
})?;
let mut text = String::new();
write!(&mut text, "{}", &comp_ctx.func.display(Some(isa)))

View File

@@ -39,13 +39,14 @@ impl SubTest for TestLICM {
comp_ctx.func = func.into_owned();
comp_ctx.flowgraph();
comp_ctx
.licm()
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
comp_ctx.licm().map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, e)
})?;
let mut text = String::new();
write!(&mut text, "{}", &comp_ctx.func)
.map_err(|e| e.to_string())?;
write!(&mut text, "{}", &comp_ctx.func).map_err(
|e| e.to_string(),
)?;
run_filecheck(&text, context)
}
}

View File

@@ -46,12 +46,12 @@ impl SubTest for TestRegalloc {
comp_ctx.flowgraph();
// TODO: Should we have an option to skip legalization?
comp_ctx
.legalize(isa)
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
comp_ctx
.regalloc(isa)
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
comp_ctx.legalize(isa).map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, e)
})?;
comp_ctx.regalloc(isa).map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, e)
})?;
let mut text = String::new();
write!(&mut text, "{}", &comp_ctx.func.display(Some(isa)))

View File

@@ -41,11 +41,13 @@ impl Display for QueueEntry {
let p = self.path.to_string_lossy();
match self.state {
State::Done(Ok(dur)) => {
write!(f,
"{}.{:03} {}",
dur.as_secs(),
dur.subsec_nanos() / 1000000,
p)
write!(
f,
"{}.{:03} {}",
dur.as_secs(),
dur.subsec_nanos() / 1000000,
p
)
}
State::Done(Err(ref e)) => write!(f, "FAIL {}: {}", p, e),
_ => write!(f, "{}", p),
@@ -104,11 +106,10 @@ impl TestRunner {
///
/// Any problems reading `file` as a test case file will be reported as a test failure.
pub fn push_test<P: Into<PathBuf>>(&mut self, file: P) {
self.tests
.push(QueueEntry {
path: file.into(),
state: State::New,
});
self.tests.push(QueueEntry {
path: file.into(),
state: State::New,
});
}
/// Begin running tests concurrently.
@@ -240,10 +241,12 @@ impl TestRunner {
Reply::Tick => {
self.ticks_since_progress += 1;
if self.ticks_since_progress == TIMEOUT_SLOW {
println!("STALLED for {} seconds with {}/{} tests finished",
self.ticks_since_progress,
self.reported_tests,
self.tests.len());
println!(
"STALLED for {} seconds with {}/{} tests finished",
self.ticks_since_progress,
self.reported_tests,
self.tests.len()
);
for jobid in self.reported_tests..self.tests.len() {
if self.tests[jobid].state == State::Running {
println!("slow: {}", self.tests[jobid]);
@@ -251,8 +254,10 @@ impl TestRunner {
}
}
if self.ticks_since_progress >= TIMEOUT_PANIC {
panic!("worker threads stalled for {} seconds.",
self.ticks_since_progress);
panic!(
"worker threads stalled for {} seconds.",
self.ticks_since_progress
);
}
}
}
@@ -278,9 +283,9 @@ impl TestRunner {
let mut times = self.tests
.iter()
.filter_map(|entry| match *entry {
QueueEntry { state: State::Done(Ok(dur)), .. } => Some(dur),
_ => None,
})
.collect::<Vec<_>>();
// Get me some real data, kid.
@@ -304,12 +309,11 @@ impl TestRunner {
return;
}
for t in self.tests
.iter()
.filter(|entry| match **entry {
QueueEntry { state: State::Done(Ok(dur)), .. } => dur > cut,
_ => false,
}) {
for t in self.tests.iter().filter(|entry| match **entry {
QueueEntry { state: State::Done(Ok(dur)), .. } => dur > cut,
_ => false,
})
{
println!("slow: {}", t)
}

View File

@@ -76,10 +76,11 @@ pub fn run(path: &Path) -> TestResult {
}
// Given a slice of tests, generate a vector of (test, flags, isa) tuples.
fn test_tuples<'a>(tests: &'a [Box<SubTest>],
isa_spec: &'a IsaSpec,
no_isa_flags: &'a Flags)
-> Result<Vec<(&'a SubTest, &'a Flags, Option<&'a TargetIsa>)>> {
fn test_tuples<'a>(
tests: &'a [Box<SubTest>],
isa_spec: &'a IsaSpec,
no_isa_flags: &'a Flags,
) -> Result<Vec<(&'a SubTest, &'a Flags, Option<&'a TargetIsa>)>> {
let mut out = Vec::new();
for test in tests {
if test.needs_isa() {
@@ -104,10 +105,11 @@ fn test_tuples<'a>(tests: &'a [Box<SubTest>],
Ok(out)
}
fn run_one_test<'a>(tuple: (&'a SubTest, &'a Flags, Option<&'a TargetIsa>),
func: Cow<Function>,
context: &mut Context<'a>)
-> Result<()> {
fn run_one_test<'a>(
tuple: (&'a SubTest, &'a Flags, Option<&'a TargetIsa>),
func: Cow<Function>,
context: &mut Context<'a>,
) -> Result<()> {
let (test, flags, isa) = tuple;
let name = format!("{}({})", test.name(), func.name);
dbg!("Test: {} {}", name, isa.map(TargetIsa::name).unwrap_or("-"));
@@ -117,11 +119,13 @@ fn run_one_test<'a>(tuple: (&'a SubTest, &'a Flags, Option<&'a TargetIsa>),
// Should we run the verifier before this test?
if !context.verified && test.needs_verifier() {
verify_function(&func, isa)
.map_err(|e| pretty_verifier_error(&func, isa, e))?;
verify_function(&func, isa).map_err(|e| {
pretty_verifier_error(&func, isa, e)
})?;
context.verified = true;
}
test.run(func, context)
.map_err(|e| format!("{}: {}", name, e))
test.run(func, context).map_err(
|e| format!("{}: {}", name, e),
)
}

View File

@@ -39,13 +39,14 @@ impl SubTest for TestSimpleGVN {
comp_ctx.func = func.into_owned();
comp_ctx.flowgraph();
comp_ctx
.simple_gvn()
.map_err(|e| pretty_error(&comp_ctx.func, context.isa, e))?;
comp_ctx.simple_gvn().map_err(|e| {
pretty_error(&comp_ctx.func, context.isa, e)
})?;
let mut text = String::new();
write!(&mut text, "{}", &comp_ctx.func)
.map_err(|e| e.to_string())?;
write!(&mut text, "{}", &comp_ctx.func).map_err(
|e| e.to_string(),
)?;
run_filecheck(&text, context)
}
}

View File

@@ -66,25 +66,25 @@ pub trait SubTest {
/// match 'inst10'.
impl<'a> filecheck::VariableMap for Context<'a> {
fn lookup(&self, varname: &str) -> Option<FCValue> {
self.details
.map
.lookup_str(varname)
.map(|e| FCValue::Regex(format!(r"\b{}\b", e).into()))
self.details.map.lookup_str(varname).map(|e| {
FCValue::Regex(format!(r"\b{}\b", e).into())
})
}
}
/// Run filecheck on `text`, using directives extracted from `context`.
pub fn run_filecheck(text: &str, context: &Context) -> Result<()> {
let checker = build_filechecker(context)?;
if checker
.check(&text, context)
.map_err(|e| format!("filecheck: {}", e))? {
if checker.check(&text, context).map_err(
|e| format!("filecheck: {}", e),
)?
{
Ok(())
} else {
// Filecheck mismatch. Emit an explanation as output.
let (_, explain) = checker
.explain(&text, context)
.map_err(|e| format!("explain: {}", e))?;
let (_, explain) = checker.explain(&text, context).map_err(
|e| format!("explain: {}", e),
)?;
Err(format!("filecheck failed:\n{}{}", checker, explain))
}
}
@@ -94,14 +94,14 @@ pub fn build_filechecker(context: &Context) -> Result<Checker> {
let mut builder = CheckerBuilder::new();
// Preamble comments apply to all functions.
for comment in context.preamble_comments {
builder
.directive(comment.text)
.map_err(|e| format!("filecheck: {}", e))?;
builder.directive(comment.text).map_err(|e| {
format!("filecheck: {}", e)
})?;
}
for comment in &context.details.comments {
builder
.directive(comment.text)
.map_err(|e| format!("filecheck: {}", e))?;
builder.directive(comment.text).map_err(|e| {
format!("filecheck: {}", e)
})?;
}
let checker = builder.finish();
if checker.is_empty() {

View File

@@ -65,9 +65,11 @@ impl SubTest for TestVerifier {
if want_loc == got.location {
Ok(())
} else {
Err(format!("correct error reported on {}, but wanted {}",
got.location,
want_loc))
Err(format!(
"correct error reported on {}, but wanted {}",
got.location,
want_loc
))
}
}
Some(_) => Err(format!("mismatching error: {}", got)),

View File

@@ -91,10 +91,12 @@ impl<'a> Display for CFGPrinter<'a> {
}
fn print_cfg(filename: String) -> CommandResult {
let buffer = read_to_string(&filename)
.map_err(|e| format!("{}: {}", filename, e))?;
let items = parse_functions(&buffer)
.map_err(|e| format!("{}: {}", filename, e))?;
let buffer = read_to_string(&filename).map_err(
|e| format!("{}: {}", filename, e),
)?;
let items = parse_functions(&buffer).map_err(
|e| format!("{}: {}", filename, e),
)?;
for (idx, func) in items.into_iter().enumerate() {
if idx != 0 {

View File

@@ -18,14 +18,14 @@ pub fn run(files: Vec<String>, verbose: bool) -> CommandResult {
}
let mut buffer = String::new();
io::stdin()
.read_to_string(&mut buffer)
.map_err(|e| format!("stdin: {}", e))?;
io::stdin().read_to_string(&mut buffer).map_err(|e| {
format!("stdin: {}", e)
})?;
if verbose {
let (success, explain) = checker
.explain(&buffer, NO_VARIABLES)
.map_err(|e| e.to_string())?;
let (success, explain) = checker.explain(&buffer, NO_VARIABLES).map_err(
|e| e.to_string(),
)?;
print!("{}", explain);
if success {
println!("OK");
@@ -33,25 +33,27 @@ pub fn run(files: Vec<String>, verbose: bool) -> CommandResult {
} else {
Err("Check failed".to_string())
}
} else if checker
.check(&buffer, NO_VARIABLES)
.map_err(|e| e.to_string())? {
} else if checker.check(&buffer, NO_VARIABLES).map_err(
|e| e.to_string(),
)?
{
Ok(())
} else {
let (_, explain) = checker
.explain(&buffer, NO_VARIABLES)
.map_err(|e| e.to_string())?;
let (_, explain) = checker.explain(&buffer, NO_VARIABLES).map_err(
|e| e.to_string(),
)?;
print!("{}", explain);
Err("Check failed".to_string())
}
}
fn read_checkfile(filename: &str) -> Result<Checker, String> {
let buffer = read_to_string(&filename)
.map_err(|e| format!("{}: {}", filename, e))?;
let buffer = read_to_string(&filename).map_err(
|e| format!("{}: {}", filename, e),
)?;
let mut builder = CheckerBuilder::new();
builder
.text(&buffer)
.map_err(|e| format!("{}: {}", filename, e))?;
builder.text(&buffer).map_err(
|e| format!("{}: {}", filename, e),
)?;
Ok(builder.finish())
}

View File

@@ -24,8 +24,10 @@ pub fn read_to_string<P: AsRef<Path>>(path: P) -> Result<String> {
///
/// Return the comment text following the directive.
pub fn match_directive<'a>(comment: &'a str, directive: &str) -> Option<&'a str> {
assert!(directive.ends_with(':'),
"Directive must include trailing colon");
assert!(
directive.ends_with(':'),
"Directive must include trailing colon"
);
let text = comment.trim_left_matches(';').trim_left();
if text.starts_with(directive) {
Some(text[directive.len()..].trim())
@@ -35,10 +37,11 @@ pub fn match_directive<'a>(comment: &'a str, directive: &str) -> Option<&'a str>
}
/// Pretty-print a verifier error.
pub fn pretty_verifier_error(func: &ir::Function,
isa: Option<&TargetIsa>,
err: verifier::Error)
-> String {
pub fn pretty_verifier_error(
func: &ir::Function,
isa: Option<&TargetIsa>,
err: verifier::Error,
) -> String {
let mut msg = err.to_string();
match err.location {
AnyEntity::Inst(inst) => {

View File

@@ -51,19 +51,22 @@ fn read_wasm_file(path: PathBuf) -> Result<Vec<u8>, io::Error> {
}
pub fn run(files: Vec<String>,
flag_verbose: bool,
flag_optimize: bool,
flag_check: bool)
-> Result<(), String> {
pub fn run(
files: Vec<String>,
flag_verbose: bool,
flag_optimize: bool,
flag_check: bool,
) -> Result<(), String> {
for filename in files.iter() {
let path = Path::new(&filename);
let name = String::from(path.as_os_str().to_string_lossy());
match handle_module(flag_verbose,
flag_optimize,
flag_check,
path.to_path_buf(),
name) {
match handle_module(
flag_verbose,
flag_optimize,
flag_check,
path.to_path_buf(),
name,
) {
Ok(()) => {}
Err(message) => return Err(message),
}
@@ -71,12 +74,13 @@ pub fn run(files: Vec<String>,
Ok(())
}
fn handle_module(flag_verbose: bool,
flag_optimize: bool,
flag_check: bool,
path: PathBuf,
name: String)
-> Result<(), String> {
fn handle_module(
flag_verbose: bool,
flag_optimize: bool,
flag_check: bool,
path: PathBuf,
name: String,
) -> Result<(), String> {
let mut terminal = term::stdout().unwrap();
terminal.fg(term::color::YELLOW).unwrap();
vprint!(flag_verbose, "Handling: ");
@@ -109,10 +113,10 @@ fn handle_module(flag_verbose: bool,
.arg(file_path.to_str().unwrap())
.output()
.or_else(|e| if let io::ErrorKind::NotFound = e.kind() {
return Err(String::from("wast2wasm not found"));
} else {
return Err(String::from(e.description()));
})
return Err(String::from("wast2wasm not found"));
} else {
return Err(String::from(e.description()));
})
.unwrap();
match read_wasm_file(file_path) {
Ok(data) => data,
@@ -221,17 +225,20 @@ fn handle_module(flag_verbose: bool,
}
/// Pretty-print a verifier error.
pub fn pretty_verifier_error(func: &ir::Function,
isa: Option<&TargetIsa>,
err: verifier::Error)
-> String {
pub fn pretty_verifier_error(
func: &ir::Function,
isa: Option<&TargetIsa>,
err: verifier::Error,
) -> String {
let msg = err.to_string();
let str1 = match err.location {
AnyEntity::Inst(inst) => {
format!("{}\n{}: {}\n\n",
msg,
inst,
func.dfg.display_inst(inst, isa))
format!(
"{}\n{}: {}\n\n",
msg,
inst,
func.dfg.display_inst(inst, isa)
)
}
_ => String::from(format!("{}\n", msg)),
};

View File

@@ -26,7 +26,8 @@ fn test_reverse_postorder_traversal(function_source: &str, ebb_order: Vec<u32>)
#[test]
fn simple_traversal() {
test_reverse_postorder_traversal("
test_reverse_postorder_traversal(
"
function %test(i32) native {
ebb0(v0: i32):
brz v0, ebb1
@@ -50,12 +51,14 @@ fn simple_traversal() {
trap
}
",
vec![0, 1, 3, 2, 4, 5]);
vec![0, 1, 3, 2, 4, 5],
);
}
#[test]
fn loops_one() {
test_reverse_postorder_traversal("
test_reverse_postorder_traversal(
"
function %test(i32) native {
ebb0(v0: i32):
jump ebb1
@@ -68,12 +71,14 @@ fn loops_one() {
return
}
",
vec![0, 1, 3, 2]);
vec![0, 1, 3, 2],
);
}
#[test]
fn loops_two() {
test_reverse_postorder_traversal("
test_reverse_postorder_traversal(
"
function %test(i32) native {
ebb0(v0: i32):
brz v0, ebb1
@@ -93,12 +98,14 @@ fn loops_two() {
return
}
",
vec![0, 1, 2, 4, 3, 5]);
vec![0, 1, 2, 4, 3, 5],
);
}
#[test]
fn loops_three() {
test_reverse_postorder_traversal("
test_reverse_postorder_traversal(
"
function %test(i32) native {
ebb0(v0: i32):
brz v0, ebb1
@@ -123,12 +130,14 @@ fn loops_three() {
return
}
",
vec![0, 1, 2, 4, 3, 6, 7, 5]);
vec![0, 1, 2, 4, 3, 6, 7, 5],
);
}
#[test]
fn back_edge_one() {
test_reverse_postorder_traversal("
test_reverse_postorder_traversal(
"
function %test(i32) native {
ebb0(v0: i32):
brz v0, ebb1
@@ -146,5 +155,6 @@ fn back_edge_one() {
trap
}
",
vec![0, 1, 3, 2, 4]);
vec![0, 1, 3, 2, 4],
);
}

View File

@@ -49,8 +49,10 @@ fn main() {
// Make sure we rebuild if this build script changes.
// I guess that won't happen if you have non-UTF8 bytes in your path names.
// The `build.py` script prints out its own dependencies.
println!("cargo:rerun-if-changed={}",
crate_dir.join("build.rs").to_string_lossy());
println!(
"cargo:rerun-if-changed={}",
crate_dir.join("build.rs").to_string_lossy()
);
// Scripts are in `$crate_dir/meta`.
let meta_dir = crate_dir.join("meta");
@@ -130,9 +132,11 @@ fn isa_targets(cretonne_targets: Option<&str>, target_triple: &str) -> Result<Ve
Isa::from_arch(target_triple.split('-').next().unwrap())
.map(|isa| vec![isa])
.ok_or_else(|| {
format!("no supported isa found for target triple `{}`",
target_triple)
})
format!(
"no supported isa found for target triple `{}`",
target_triple
)
})
}
Some(targets) => {
let unknown_isa_targets = targets
@@ -143,7 +147,10 @@ fn isa_targets(cretonne_targets: Option<&str>, target_triple: &str) -> Result<Ve
match (unknown_isa_targets.is_empty(), isa_targets.is_empty()) {
(true, true) => Ok(Isa::all().to_vec()),
(true, _) => Ok(isa_targets),
(_, _) => Err(format!("unknown isa targets: `{}`", unknown_isa_targets.join(", "))),
(_, _) => Err(format!(
"unknown isa targets: `{}`",
unknown_isa_targets.join(", ")
)),
}
}
None => Ok(Isa::all().to_vec()),

View File

@@ -150,8 +150,10 @@ pub fn legalize_abi_value(have: Type, arg: &ArgumentType) -> ValueConversion {
match have_bits.cmp(&arg_bits) {
// We have fewer bits than the ABI argument.
Ordering::Less => {
assert!(have.is_int() && arg.value_type.is_int(),
"Can only extend integer values");
assert!(
have.is_int() && arg.value_type.is_int(),
"Can only extend integer values"
);
match arg.extension {
ArgumentExtension::Uext => ValueConversion::Uext(arg.value_type),
ArgumentExtension::Sext => ValueConversion::Sext(arg.value_type),
@@ -192,22 +194,34 @@ mod tests {
fn legalize() {
let mut arg = ArgumentType::new(types::I32);
assert_eq!(legalize_abi_value(types::I64X2, &arg),
ValueConversion::VectorSplit);
assert_eq!(legalize_abi_value(types::I64, &arg),
ValueConversion::IntSplit);
assert_eq!(
legalize_abi_value(types::I64X2, &arg),
ValueConversion::VectorSplit
);
assert_eq!(
legalize_abi_value(types::I64, &arg),
ValueConversion::IntSplit
);
// Vector of integers is broken down, then sign-extended.
arg.extension = ArgumentExtension::Sext;
assert_eq!(legalize_abi_value(types::I16X4, &arg),
ValueConversion::VectorSplit);
assert_eq!(legalize_abi_value(types::I16.by(2).unwrap(), &arg),
ValueConversion::VectorSplit);
assert_eq!(legalize_abi_value(types::I16, &arg),
ValueConversion::Sext(types::I32));
assert_eq!(
legalize_abi_value(types::I16X4, &arg),
ValueConversion::VectorSplit
);
assert_eq!(
legalize_abi_value(types::I16.by(2).unwrap(), &arg),
ValueConversion::VectorSplit
);
assert_eq!(
legalize_abi_value(types::I16, &arg),
ValueConversion::Sext(types::I32)
);
// 64-bit float is split as an integer.
assert_eq!(legalize_abi_value(types::F64, &arg),
ValueConversion::IntBits);
assert_eq!(
legalize_abi_value(types::F64, &arg),
ValueConversion::IntBits
);
}
}

View File

@@ -54,9 +54,11 @@ pub trait CodeSink {
/// Report a bad encoding error.
#[inline(never)]
pub fn bad_encoding(func: &Function, inst: Inst) -> ! {
panic!("Bad encoding {} for {}",
func.encodings[inst],
func.dfg.display_inst(inst, None));
panic!(
"Bad encoding {} for {}",
func.encodings[inst],
func.dfg.display_inst(inst, None)
);
}
/// Emit a function to `sink`, given an instruction emitter function.
@@ -64,8 +66,9 @@ pub fn bad_encoding(func: &Function, inst: Inst) -> ! {
/// This function is called from the `TargetIsa::emit_function()` implementations with the
/// appropriate instruction emitter.
pub fn emit_function<CS, EI>(func: &Function, emit_inst: EI, sink: &mut CS)
where CS: CodeSink,
EI: Fn(&Function, Inst, &mut RegDiversions, &mut CS)
where
CS: CodeSink,
EI: Fn(&Function, Inst, &mut RegDiversions, &mut CS),
{
let mut divert = RegDiversions::new();
for ebb in func.layout.ebbs() {

View File

@@ -60,8 +60,10 @@ pub fn relax_branches(func: &mut Function, isa: &TargetIsa) -> Result<CodeOffset
while let Some(ebb) = cur.next_ebb() {
// Record the offset for `ebb` and make sure we iterate until offsets are stable.
if cur.func.offsets[ebb] != offset {
assert!(cur.func.offsets[ebb] < offset,
"Code shrinking during relaxation");
assert!(
cur.func.offsets[ebb] < offset,
"Code shrinking during relaxation"
);
cur.func.offsets[ebb] = offset;
go_again = true;
}
@@ -99,10 +101,11 @@ fn fallthroughs(func: &mut Function) {
for (ebb, succ) in func.layout.ebbs().adjacent_pairs() {
let term = func.layout.last_inst(ebb).expect("EBB has no terminator.");
if let InstructionData::Jump {
ref mut opcode,
destination,
..
} = func.dfg[term] {
ref mut opcode,
destination,
..
} = func.dfg[term]
{
match *opcode {
Opcode::Fallthrough => {
// Somebody used a fall-through instruction before the branch relaxation pass.
@@ -126,16 +129,19 @@ fn fallthroughs(func: &mut Function) {
///
/// Return the size of the replacement instructions up to and including the location where `pos` is
/// left.
fn relax_branch(cur: &mut FuncCursor,
offset: CodeOffset,
dest_offset: CodeOffset,
encinfo: &EncInfo)
-> CodeOffset {
fn relax_branch(
cur: &mut FuncCursor,
offset: CodeOffset,
dest_offset: CodeOffset,
encinfo: &EncInfo,
) -> CodeOffset {
let inst = cur.current_inst().unwrap();
dbg!("Relaxing [{}] {} for {:#x}-{:#x} range",
encinfo.display(cur.func.encodings[inst]),
cur.func.dfg.display_inst(inst, None),
offset,
dest_offset);
dbg!(
"Relaxing [{}] {} for {:#x}-{:#x} range",
encinfo.display(cur.func.encodings[inst]),
cur.func.dfg.display_inst(inst, None),
offset,
dest_offset
);
unimplemented!();
}

View File

@@ -14,27 +14,34 @@ use std::convert::{Into, From};
pub struct BitSet<T>(pub T);
impl<T> BitSet<T>
where T: Into<u32> + From<u8> + BitOr<T, Output = T> + Shl<u8, Output = T> + Sub<T, Output=T> +
Add<T, Output=T> + PartialEq + Copy
where
T: Into<u32>
+ From<u8>
+ BitOr<T, Output = T>
+ Shl<u8, Output = T>
+ Sub<T, Output = T>
+ Add<T, Output = T>
+ PartialEq
+ Copy,
{
/// Maximum number of bits supported by this BitSet instance
pub fn bits() -> usize {
size_of::<T>() * 8
}
/// Maximum number of bits supported by any bitset instance atm.
pub fn max_bits() -> usize {
size_of::<u32>() * 8
}
/// Check if this BitSet contains the number num
pub fn contains(&self, num: u8) -> bool {
assert!((num as usize) < Self::bits());
assert!((num as usize) < Self::max_bits());
self.0.into() & (1 << num) != 0
}
/// Return the smallest number contained in the bitset or None if empty
pub fn min(&self) -> Option<u8> {
if self.0.into() == 0 {
None
@@ -43,7 +50,7 @@ impl<T> BitSet<T>
}
}
/// Return the largest number contained in the bitset or None if empty
pub fn max(&self) -> Option<u8> {
if self.0.into() == 0 {
None
@@ -53,17 +60,17 @@ impl<T> BitSet<T>
}
}
/// Construct a BitSet with the half-open range [lo,hi) filled in
pub fn from_range(lo: u8, hi: u8) -> BitSet<T> {
assert!(lo <= hi);
assert!((hi as usize) <= Self::bits());
let one : T = T::from(1);
// I can't just do (one << hi) - one here as the shift may overflow
let one: T = T::from(1);
// I can't just do (one << hi) - one here as the shift may overflow
let hi_rng = if hi >= 1 {
(one << (hi-1)) + ((one << (hi-1)) - one)
} else {
T::from(0)
};
(one << (hi - 1)) + ((one << (hi - 1)) - one)
} else {
T::from(0)
};
let lo_rng = (one << lo) - one;
@@ -94,14 +101,15 @@ mod tests {
assert!(!s2.contains(7));
let s3 = BitSet::<u8>(2 | 4 | 64);
assert!(!s3.contains(0) && !s3.contains(3) && !s3.contains(4) && !s3.contains(5) &&
!s3.contains(7));
assert!(!s3.contains(0) && !s3.contains(3) && !s3.contains(4));
assert!(!s3.contains(5) && !s3.contains(7));
assert!(s3.contains(1) && s3.contains(2) && s3.contains(6));
let s4 = BitSet::<u16>(4 | 8 | 256 | 1024);
assert!(!s4.contains(0) && !s4.contains(1) && !s4.contains(4) && !s4.contains(5) &&
!s4.contains(6) && !s4.contains(7) &&
!s4.contains(9) && !s4.contains(11));
assert!(
!s4.contains(0) && !s4.contains(1) && !s4.contains(4) && !s4.contains(5) &&
!s4.contains(6) && !s4.contains(7) && !s4.contains(9) && !s4.contains(11)
);
assert!(s4.contains(2) && s4.contains(3) && s4.contains(8) && s4.contains(10));
}

View File

@@ -63,7 +63,8 @@ impl<K: Default, V: Default> NodePool<K, V> {
}
impl<K: Default, V: Default> BTree<K, V> {
/// Search for `key` and return a `Cursor` that either points at `key` or the position where it would be inserted.
/// Search for `key` and return a `Cursor` that either points at `key` or the position
/// where it would be inserted.
pub fn search(&mut self, key: K) -> Cursor<K, V> {
unimplemented!()
}

View File

@@ -26,10 +26,11 @@ pub trait Table<K: Copy + Eq> {
///
/// Returns `Ok(idx)` with the table index containing the found entry, or `Err(idx)` with the empty
/// sentinel entry if no entry could be found.
pub fn probe<K: Copy + Eq, T: Table<K> + ?Sized>(table: &T,
key: K,
hash: usize)
-> Result<usize, usize> {
pub fn probe<K: Copy + Eq, T: Table<K> + ?Sized>(
table: &T,
key: K,
hash: usize,
) -> Result<usize, usize> {
debug_assert!(table.len().is_power_of_two());
let mask = table.len() - 1;

View File

@@ -133,18 +133,24 @@ impl Context {
/// Perform LICM on the function.
pub fn licm(&mut self) -> CtonResult {
self.ensure_domtree();
do_licm(&mut self.func,
&mut self.cfg,
&mut self.domtree,
&mut self.loop_analysis);
do_licm(
&mut self.func,
&mut self.cfg,
&mut self.domtree,
&mut self.loop_analysis,
);
self.verify(None).map_err(Into::into)
}
/// Run the register allocator.
pub fn regalloc(&mut self, isa: &TargetIsa) -> CtonResult {
self.ensure_domtree();
self.regalloc
.run(isa, &mut self.func, &self.cfg, &self.domtree)
self.regalloc.run(
isa,
&mut self.func,
&self.cfg,
&self.domtree,
)
}
/// Insert prologue and epilogues after computing the stack frame layout.

View File

@@ -146,17 +146,21 @@ impl<'c, 'f> ir::InstInserterBase<'c> for &'c mut EncCursor<'f> {
&mut self.func.dfg
}
fn insert_built_inst(self,
inst: ir::Inst,
ctrl_typevar: ir::Type)
-> &'c mut ir::DataFlowGraph {
fn insert_built_inst(
self,
inst: ir::Inst,
ctrl_typevar: ir::Type,
) -> &'c mut ir::DataFlowGraph {
// Insert the instruction and remember the reference.
self.insert_inst(inst);
self.built_inst = Some(inst);
// Assign an encoding.
match self.isa
.encode(&self.func.dfg, &self.func.dfg[inst], ctrl_typevar) {
match self.isa.encode(
&self.func.dfg,
&self.func.dfg[inst],
ctrl_typevar,
) {
Ok(e) => self.func.encodings[inst] = e,
Err(_) => panic!("can't encode {}", self.display_inst(inst)),
}

View File

@@ -69,18 +69,17 @@ pub fn writeln_with_format_args(args: fmt::Arguments) -> io::Result<()> {
/// Open the tracing file for the current thread.
fn open_file() -> io::BufWriter<File> {
let file = match thread::current().name() {
None => File::create("cretonne.dbg"),
Some(name) => {
let mut path = "cretonne.dbg.".to_owned();
for ch in name.chars() {
if ch.is_ascii() && ch.is_alphanumeric() {
path.push(ch);
}
File::create(path)
}
.expect("Can't open tracing file");
}.expect("Can't open tracing file");
io::BufWriter::new(file)
}
@@ -99,10 +98,13 @@ macro_rules! dbg {
}
/// Helper for printing lists.
pub struct DisplayList<'a, T>(pub &'a [T]) where T: 'a + fmt::Display;
pub struct DisplayList<'a, T>(pub &'a [T])
where
T: 'a + fmt::Display;
impl<'a, T> fmt::Display for DisplayList<'a, T>
where T: 'a + fmt::Display
where
T: 'a + fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.0.split_first() {

View File

@@ -85,13 +85,15 @@ impl DominatorTree {
///
/// If `a` and `b` belong to the same EBB, compare their relative position in the EBB.
pub fn rpo_cmp<A, B>(&self, a: A, b: B, layout: &Layout) -> Ordering
where A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>,
{
let a = a.into();
let b = b.into();
self.rpo_cmp_ebb(layout.pp_ebb(a), layout.pp_ebb(b))
.then(layout.cmp(a, b))
self.rpo_cmp_ebb(layout.pp_ebb(a), layout.pp_ebb(b)).then(
layout.cmp(a, b),
)
}
/// Returns `true` if `a` dominates `b`.
@@ -104,8 +106,9 @@ impl DominatorTree {
///
/// An instruction is considered to dominate itself.
pub fn dominates<A, B>(&self, a: A, b: B, layout: &Layout) -> bool
where A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>,
{
let a = a.into();
let b = b.into();
@@ -126,12 +129,16 @@ impl DominatorTree {
/// Find the last instruction in `a` that dominates `b`.
/// If no instructions in `a` dominate `b`, return `None`.
fn last_dominator<B>(&self, a: Ebb, b: B, layout: &Layout) -> Option<Inst>
where B: Into<ExpandedProgramPoint>
where
B: Into<ExpandedProgramPoint>,
{
let (mut ebb_b, mut inst_b) = match b.into() {
ExpandedProgramPoint::Ebb(ebb) => (ebb, None),
ExpandedProgramPoint::Inst(inst) => {
(layout.inst_ebb(inst).expect("Instruction not in layout."), Some(inst))
(
layout.inst_ebb(inst).expect("Instruction not in layout."),
Some(inst),
)
}
};
let rpo_a = self.nodes[a].rpo_number;
@@ -149,22 +156,29 @@ impl DominatorTree {
/// Compute the common dominator of two basic blocks.
///
/// Both basic blocks are assumed to be reachable.
pub fn common_dominator(&self,
mut a: BasicBlock,
mut b: BasicBlock,
layout: &Layout)
-> BasicBlock {
pub fn common_dominator(
&self,
mut a: BasicBlock,
mut b: BasicBlock,
layout: &Layout,
) -> BasicBlock {
loop {
match self.rpo_cmp_ebb(a.0, b.0) {
Ordering::Less => {
// `a` comes before `b` in the RPO. Move `b` up.
let idom = self.nodes[b.0].idom.expect("Unreachable basic block?");
b = (layout.inst_ebb(idom).expect("Dangling idom instruction"), idom);
b = (
layout.inst_ebb(idom).expect("Dangling idom instruction"),
idom,
);
}
Ordering::Greater => {
// `b` comes before `a` in the RPO. Move `a` up.
let idom = self.nodes[a.0].idom.expect("Unreachable basic block?");
a = (layout.inst_ebb(idom).expect("Dangling idom instruction"), idom);
a = (
layout.inst_ebb(idom).expect("Dangling idom instruction"),
idom,
);
}
Ordering::Equal => break,
}
@@ -327,15 +341,16 @@ impl DominatorTree {
// Get an iterator with just the reachable, already visited predecessors to `ebb`.
// Note that during the first pass, `rpo_number` is 1 for reachable blocks that haven't
// been visited yet, 0 for unreachable blocks.
let mut reachable_preds = cfg.get_predecessors(ebb)
.iter()
.cloned()
.filter(|&(pred, _)| self.nodes[pred].rpo_number > 1);
let mut reachable_preds = cfg.get_predecessors(ebb).iter().cloned().filter(
|&(pred, _)| {
self.nodes[pred].rpo_number > 1
},
);
// The RPO must visit at least one predecessor before this node.
let mut idom = reachable_preds
.next()
.expect("EBB node must have one reachable predecessor");
let mut idom = reachable_preds.next().expect(
"EBB node must have one reachable predecessor",
);
for pred in reachable_preds {
idom = self.common_dominator(idom, pred, layout);
@@ -383,10 +398,11 @@ impl DominatorTree {
// forward in RPO numbers and backwards in the postorder list of EBBs, renumbering the Ebbs
// until we find a gap
for (&current_ebb, current_rpo) in
self.postorder[0..ebb_postorder_index]
.iter()
.rev()
.zip(inserted_rpo_number + 1..) {
self.postorder[0..ebb_postorder_index].iter().rev().zip(
inserted_rpo_number +
1..,
)
{
if self.nodes[current_ebb].rpo_number < current_rpo {
// There is no gap, we renumber
self.nodes[current_ebb].rpo_number = current_rpo;
@@ -457,10 +473,14 @@ mod test {
assert_eq!(dt.rpo_cmp(ebb3, ebb3, &cur.func.layout), Ordering::Equal);
assert_eq!(dt.rpo_cmp(ebb3, ebb1, &cur.func.layout), Ordering::Less);
assert_eq!(dt.rpo_cmp(ebb3, jmp_ebb3_ebb1, &cur.func.layout),
Ordering::Less);
assert_eq!(dt.rpo_cmp(jmp_ebb3_ebb1, jmp_ebb1_ebb2, &cur.func.layout),
Ordering::Less);
assert_eq!(
dt.rpo_cmp(ebb3, jmp_ebb3_ebb1, &cur.func.layout),
Ordering::Less
);
assert_eq!(
dt.rpo_cmp(jmp_ebb3_ebb1, jmp_ebb1_ebb2, &cur.func.layout),
Ordering::Less
);
assert_eq!(dt.cfg_postorder(), &[ebb2, ebb0, ebb1, ebb3]);
}

View File

@@ -212,12 +212,13 @@ impl<T: EntityRef> ListPool<T> {
/// Reallocate a block to a different size class.
///
/// Copy `elems_to_copy` elements from the old to the new block.
fn realloc(&mut self,
block: usize,
from_sclass: SizeClass,
to_sclass: SizeClass,
elems_to_copy: usize)
-> usize {
fn realloc(
&mut self,
block: usize,
from_sclass: SizeClass,
to_sclass: SizeClass,
elems_to_copy: usize,
) -> usize {
assert!(elems_to_copy <= sclass_size(from_sclass));
assert!(elems_to_copy <= sclass_size(to_sclass));
let new_block = self.alloc(to_sclass);
@@ -384,7 +385,8 @@ impl<T: EntityRef> EntityList<T> {
/// Appends multiple elements to the back of the list.
pub fn extend<I>(&mut self, elements: I, pool: &mut ListPool<T>)
where I: IntoIterator<Item = T>
where
I: IntoIterator<Item = T>,
{
// TODO: use `size_hint()` to reduce reallocations.
for x in elements {
@@ -597,8 +599,10 @@ mod tests {
list.extend([i1, i1, i2, i2, i3, i3, i4, i4].iter().cloned(), pool);
assert_eq!(list.len(pool), 12);
assert_eq!(list.as_slice(pool),
&[i1, i2, i3, i4, i1, i1, i2, i2, i3, i3, i4, i4]);
assert_eq!(
list.as_slice(pool),
&[i1, i2, i3, i4, i1, i1, i2, i2, i3, i3, i4, i4]
);
}
#[test]

View File

@@ -14,8 +14,9 @@ use std::ops::{Index, IndexMut};
/// all keys have a default entry from the beginning.
#[derive(Debug, Clone)]
pub struct EntityMap<K, V>
where K: EntityRef,
V: Clone
where
K: EntityRef,
V: Clone,
{
elems: Vec<V>,
default: V,
@@ -24,12 +25,14 @@ pub struct EntityMap<K, V>
/// Shared `EntityMap` implementation for all value types.
impl<K, V> EntityMap<K, V>
where K: EntityRef,
V: Clone
where
K: EntityRef,
V: Clone,
{
/// Create a new empty map.
pub fn new() -> Self
where V: Default
where
V: Default,
{
EntityMap {
elems: Vec::new(),
@@ -68,8 +71,9 @@ impl<K, V> EntityMap<K, V>
///
/// All keys are permitted. Untouched entries have the default value.
impl<K, V> Index<K> for EntityMap<K, V>
where K: EntityRef,
V: Clone
where
K: EntityRef,
V: Clone,
{
type Output = V;
@@ -82,8 +86,9 @@ impl<K, V> Index<K> for EntityMap<K, V>
///
/// The map grows as needed to accommodate new keys.
impl<K, V> IndexMut<K> for EntityMap<K, V>
where K: EntityRef,
V: Clone
where
K: EntityRef,
V: Clone,
{
fn index_mut(&mut self, k: K) -> &mut V {
let i = k.index();

View File

@@ -14,14 +14,16 @@ use std::ops::{Index, IndexMut};
/// conflicting references will be created. Using unknown keys for indexing will cause a panic.
#[derive(Debug, Clone)]
pub struct PrimaryMap<K, V>
where K: EntityRef
where
K: EntityRef,
{
elems: Vec<V>,
unused: PhantomData<K>,
}
impl<K, V> PrimaryMap<K, V>
where K: EntityRef
where
K: EntityRef,
{
/// Create a new empty map.
pub fn new() -> Self {
@@ -77,7 +79,8 @@ impl<K, V> PrimaryMap<K, V>
/// Immutable indexing into an `PrimaryMap`.
/// The indexed value must be in the map.
impl<K, V> Index<K> for PrimaryMap<K, V>
where K: EntityRef
where
K: EntityRef,
{
type Output = V;
@@ -88,7 +91,8 @@ impl<K, V> Index<K> for PrimaryMap<K, V>
/// Mutable indexing into an `PrimaryMap`.
impl<K, V> IndexMut<K> for PrimaryMap<K, V>
where K: EntityRef
where
K: EntityRef,
{
fn index_mut(&mut self, k: K) -> &mut V {
&mut self.elems[k.index()]

View File

@@ -51,16 +51,18 @@ pub trait SparseMapValue<K> {
/// - `SparseMap` requires the values to implement `SparseMapValue<K>` which means that they must
/// contain their own key.
pub struct SparseMap<K, V>
where K: EntityRef,
V: SparseMapValue<K>
where
K: EntityRef,
V: SparseMapValue<K>,
{
sparse: EntityMap<K, u32>,
dense: Vec<V>,
}
impl<K, V> SparseMap<K, V>
where K: EntityRef,
V: SparseMapValue<K>
where
K: EntityRef,
V: SparseMapValue<K>,
{
/// Create a new empty mapping.
pub fn new() -> Self {
@@ -191,8 +193,9 @@ impl<K, V> SparseMap<K, V>
/// Iterating over the elements of a set.
impl<'a, K, V> IntoIterator for &'a SparseMap<K, V>
where K: EntityRef,
V: SparseMapValue<K>
where
K: EntityRef,
V: SparseMapValue<K>,
{
type Item = &'a V;
type IntoIter = slice::Iter<'a, V>;
@@ -204,7 +207,8 @@ impl<'a, K, V> IntoIterator for &'a SparseMap<K, V>
/// Any `EntityRef` can be used as a sparse map value representing itself.
impl<T> SparseMapValue<T> for T
where T: EntityRef
where
T: EntityRef,
{
fn key(&self) -> T {
*self
@@ -290,8 +294,10 @@ mod tests {
assert_eq!(map.insert(Obj(i0, "baz")), None);
// Iteration order = insertion order when nothing has been removed yet.
assert_eq!(map.values().map(|obj| obj.1).collect::<Vec<_>>(),
["foo", "bar", "baz"]);
assert_eq!(
map.values().map(|obj| obj.1).collect::<Vec<_>>(),
["foo", "bar", "baz"]
);
assert_eq!(map.len(), 3);
assert_eq!(map.get(i0), Some(&Obj(i0, "baz")));

View File

@@ -89,7 +89,8 @@ impl<'f, IIB: InstInserterBase<'f>> InsertBuilder<'f, IIB> {
///
/// The `reuse` argument is expected to be an array of `Option<Value>`.
pub fn with_results<Array>(self, reuse: Array) -> InsertReuseBuilder<'f, IIB, Array>
where Array: AsRef<[Option<Value>]>
where
Array: AsRef<[Option<Value>]>,
{
InsertReuseBuilder {
inserter: self.inserter,
@@ -134,8 +135,9 @@ impl<'f, IIB: InstInserterBase<'f>> InstBuilderBase<'f> for InsertBuilder<'f, II
/// Builder that inserts a new instruction like `InsertBuilder`, but reusing result values.
pub struct InsertReuseBuilder<'f, IIB, Array>
where IIB: InstInserterBase<'f>,
Array: AsRef<[Option<Value>]>
where
IIB: InstInserterBase<'f>,
Array: AsRef<[Option<Value>]>,
{
inserter: IIB,
reuse: Array,

View File

@@ -89,17 +89,17 @@ impl Display for IntCC {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
use self::IntCC::*;
f.write_str(match *self {
Equal => "eq",
NotEqual => "ne",
SignedGreaterThan => "sgt",
SignedGreaterThanOrEqual => "sge",
SignedLessThan => "slt",
SignedLessThanOrEqual => "sle",
UnsignedGreaterThan => "ugt",
UnsignedGreaterThanOrEqual => "uge",
UnsignedLessThan => "ult",
UnsignedLessThanOrEqual => "ule",
})
Equal => "eq",
NotEqual => "ne",
SignedGreaterThan => "sgt",
SignedGreaterThanOrEqual => "sge",
SignedLessThan => "slt",
SignedLessThanOrEqual => "sle",
UnsignedGreaterThan => "ugt",
UnsignedGreaterThanOrEqual => "uge",
UnsignedLessThan => "ult",
UnsignedLessThanOrEqual => "ule",
})
}
}
@@ -220,21 +220,21 @@ impl Display for FloatCC {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
use self::FloatCC::*;
f.write_str(match *self {
Ordered => "ord",
Unordered => "uno",
Equal => "eq",
NotEqual => "ne",
OrderedNotEqual => "one",
UnorderedOrEqual => "ueq",
LessThan => "lt",
LessThanOrEqual => "le",
GreaterThan => "gt",
GreaterThanOrEqual => "ge",
UnorderedOrLessThan => "ult",
UnorderedOrLessThanOrEqual => "ule",
UnorderedOrGreaterThan => "ugt",
UnorderedOrGreaterThanOrEqual => "uge",
})
Ordered => "ord",
Unordered => "uno",
Equal => "eq",
NotEqual => "ne",
OrderedNotEqual => "one",
UnorderedOrEqual => "ueq",
LessThan => "lt",
LessThanOrEqual => "le",
GreaterThan => "gt",
GreaterThanOrEqual => "ge",
UnorderedOrLessThan => "ult",
UnorderedOrLessThanOrEqual => "ule",
UnorderedOrGreaterThan => "ugt",
UnorderedOrGreaterThanOrEqual => "uge",
})
}
}
@@ -267,16 +267,18 @@ impl FromStr for FloatCC {
mod tests {
use super::*;
static INT_ALL: [IntCC; 10] = [IntCC::Equal,
IntCC::NotEqual,
IntCC::SignedLessThan,
IntCC::SignedGreaterThanOrEqual,
IntCC::SignedGreaterThan,
IntCC::SignedLessThanOrEqual,
IntCC::UnsignedLessThan,
IntCC::UnsignedGreaterThanOrEqual,
IntCC::UnsignedGreaterThan,
IntCC::UnsignedLessThanOrEqual];
static INT_ALL: [IntCC; 10] = [
IntCC::Equal,
IntCC::NotEqual,
IntCC::SignedLessThan,
IntCC::SignedGreaterThanOrEqual,
IntCC::SignedGreaterThan,
IntCC::SignedLessThanOrEqual,
IntCC::UnsignedLessThan,
IntCC::UnsignedGreaterThanOrEqual,
IntCC::UnsignedGreaterThan,
IntCC::UnsignedLessThanOrEqual,
];
#[test]
fn int_inverse() {
@@ -306,20 +308,22 @@ mod tests {
assert_eq!("bogus".parse::<IntCC>(), Err(()));
}
static FLOAT_ALL: [FloatCC; 14] = [FloatCC::Ordered,
FloatCC::Unordered,
FloatCC::Equal,
FloatCC::NotEqual,
FloatCC::OrderedNotEqual,
FloatCC::UnorderedOrEqual,
FloatCC::LessThan,
FloatCC::LessThanOrEqual,
FloatCC::GreaterThan,
FloatCC::GreaterThanOrEqual,
FloatCC::UnorderedOrLessThan,
FloatCC::UnorderedOrLessThanOrEqual,
FloatCC::UnorderedOrGreaterThan,
FloatCC::UnorderedOrGreaterThanOrEqual];
static FLOAT_ALL: [FloatCC; 14] = [
FloatCC::Ordered,
FloatCC::Unordered,
FloatCC::Equal,
FloatCC::NotEqual,
FloatCC::OrderedNotEqual,
FloatCC::UnorderedOrEqual,
FloatCC::LessThan,
FloatCC::LessThanOrEqual,
FloatCC::GreaterThan,
FloatCC::GreaterThanOrEqual,
FloatCC::UnorderedOrLessThan,
FloatCC::UnorderedOrLessThanOrEqual,
FloatCC::UnorderedOrGreaterThan,
FloatCC::UnorderedOrGreaterThanOrEqual,
];
#[test]
fn float_inverse() {

View File

@@ -153,17 +153,21 @@ impl DataFlowGraph {
pub fn value_def(&self, v: Value) -> ValueDef {
match self.values[v] {
ValueData::Inst { inst, num, .. } => {
assert_eq!(Some(v),
self.results[inst].get(num as usize, &self.value_lists),
"Dangling result value {}: {}",
v,
self.display_inst(inst, None));
assert_eq!(
Some(v),
self.results[inst].get(num as usize, &self.value_lists),
"Dangling result value {}: {}",
v,
self.display_inst(inst, None)
);
ValueDef::Res(inst, num as usize)
}
ValueData::Arg { ebb, num, .. } => {
assert_eq!(Some(v),
self.ebbs[ebb].args.get(num as usize, &self.value_lists),
"Dangling EBB argument value");
assert_eq!(
Some(v),
self.ebbs[ebb].args.get(num as usize, &self.value_lists),
"Dangling EBB argument value"
);
ValueDef::Arg(ebb, num as usize)
}
ValueData::Alias { original, .. } => {
@@ -247,19 +251,23 @@ impl DataFlowGraph {
// Try to create short alias chains by finding the original source value.
// This also avoids the creation of loops.
let original = self.resolve_aliases(src);
assert_ne!(dest,
original,
"Aliasing {} to {} would create a loop",
dest,
src);
assert_ne!(
dest,
original,
"Aliasing {} to {} would create a loop",
dest,
src
);
let ty = self.value_type(original);
assert_eq!(self.value_type(dest),
ty,
"Aliasing {} to {} would change its type {} to {}",
dest,
src,
self.value_type(dest),
ty);
assert_eq!(
self.value_type(dest),
ty,
"Aliasing {} to {} would change its type {} to {}",
dest,
src,
self.value_type(dest),
ty
);
self.values[dest] = ValueData::Alias { ty, original };
}
@@ -274,29 +282,36 @@ impl DataFlowGraph {
/// cleared, so it likely needs to be removed from the graph.
///
pub fn replace_with_aliases(&mut self, dest_inst: Inst, src_inst: Inst) {
debug_assert_ne!(dest_inst,
src_inst,
"Replacing {} with itself would create a loop",
dest_inst);
debug_assert_eq!(self.results[dest_inst].len(&self.value_lists),
self.results[src_inst].len(&self.value_lists),
"Replacing {} with {} would produce a different number of results.",
dest_inst,
src_inst);
debug_assert_ne!(
dest_inst,
src_inst,
"Replacing {} with itself would create a loop",
dest_inst
);
debug_assert_eq!(
self.results[dest_inst].len(&self.value_lists),
self.results[src_inst].len(&self.value_lists),
"Replacing {} with {} would produce a different number of results.",
dest_inst,
src_inst
);
for (&dest, &src) in self.results[dest_inst]
.as_slice(&self.value_lists)
.iter()
.zip(self.results[src_inst].as_slice(&self.value_lists)) {
.as_slice(&self.value_lists)
.iter()
.zip(self.results[src_inst].as_slice(&self.value_lists))
{
let original = src;
let ty = self.value_type(original);
assert_eq!(self.value_type(dest),
ty,
"Aliasing {} to {} would change its type {} to {}",
dest,
src,
self.value_type(dest),
ty);
assert_eq!(
self.value_type(dest),
ty,
"Aliasing {} to {} would change its type {} to {}",
dest,
src,
self.value_type(dest),
ty
);
self.values[dest] = ValueData::Alias { ty, original };
}
@@ -371,10 +386,11 @@ impl DataFlowGraph {
}
/// Returns an object that displays `inst`.
pub fn display_inst<'a, I: Into<Option<&'a TargetIsa>>>(&'a self,
inst: Inst,
isa: I)
-> DisplayInst<'a> {
pub fn display_inst<'a, I: Into<Option<&'a TargetIsa>>>(
&'a self,
inst: Inst,
isa: I,
) -> DisplayInst<'a> {
DisplayInst(self, isa.into(), inst)
}
@@ -433,12 +449,14 @@ impl DataFlowGraph {
/// Create a new set of result values for `inst` using `ctrl_typevar` to determine the result
/// types. Any values provided by `reuse` will be reused. When `reuse` is exhausted or when it
/// produces `None`, a new value is created.
pub fn make_inst_results_reusing<I>(&mut self,
inst: Inst,
ctrl_typevar: Type,
reuse: I)
-> usize
where I: Iterator<Item = Option<Value>>
pub fn make_inst_results_reusing<I>(
&mut self,
inst: Inst,
ctrl_typevar: Type,
reuse: I,
) -> usize
where
I: Iterator<Item = Option<Value>>,
{
let mut reuse = reuse.fuse();
let constraints = self.insts[inst].opcode().constraints();
@@ -478,9 +496,10 @@ impl DataFlowGraph {
}
/// Create an `InsertBuilder` that will insert an instruction at the cursor's current position.
pub fn ins<'c, 'fc: 'c, 'fd>(&'fd mut self,
at: &'c mut Cursor<'fc>)
-> InsertBuilder<'fd, LayoutCursorInserter<'c, 'fc, 'fd>> {
pub fn ins<'c, 'fc: 'c, 'fd>(
&'fd mut self,
at: &'c mut Cursor<'fc>,
) -> InsertBuilder<'fd, LayoutCursorInserter<'c, 'fc, 'fd>> {
InsertBuilder::new(LayoutCursorInserter::new(at, self))
}
@@ -537,20 +556,24 @@ impl DataFlowGraph {
_ => panic!("{} is not an instruction result value", old_value),
};
let new_value = self.make_value(ValueData::Inst {
ty: new_type,
num,
inst,
});
let num = num as usize;
let attached = mem::replace(self.results[inst]
.get_mut(num, &mut self.value_lists)
.expect("Replacing detached result"),
new_value);
assert_eq!(attached,
old_value,
"{} wasn't detached from {}",
old_value,
self.display_inst(inst, None));
let attached = mem::replace(
self.results[inst]
.get_mut(num, &mut self.value_lists)
.expect("Replacing detached result"),
new_value,
);
assert_eq!(
attached,
old_value,
"{} wasn't detached from {}",
old_value,
self.display_inst(inst, None)
);
new_value
}
@@ -560,19 +583,19 @@ impl DataFlowGraph {
let num = self.results[inst].push(res, &mut self.value_lists);
assert!(num <= u16::MAX as usize, "Too many result values");
self.make_value(ValueData::Inst {
ty,
inst,
num: num as u16,
})
}
/// Append a new value argument to an instruction.
///
/// Panics if the instruction doesn't support arguments.
pub fn append_inst_arg(&mut self, inst: Inst, new_arg: Value) {
let mut branch_values = self.insts[inst]
.take_value_list()
.expect("the instruction doesn't have value arguments");
let mut branch_values = self.insts[inst].take_value_list().expect(
"the instruction doesn't have value arguments",
);
branch_values.push(new_arg, &mut self.value_lists);
self.insts[inst].put_value_list(branch_values)
}
@@ -581,9 +604,9 @@ impl DataFlowGraph {
///
/// This function panics if the instruction doesn't have any result.
pub fn first_result(&self, inst: Inst) -> Value {
self.results[inst]
.first(&self.value_lists)
.expect("Instruction has no results")
self.results[inst].first(&self.value_lists).expect(
"Instruction has no results",
)
}
/// Test if `inst` has any result values currently.
@@ -613,11 +636,12 @@ impl DataFlowGraph {
/// called first.
///
/// Returns `None` if asked about a result index that is too large.
pub fn compute_result_type(&self,
inst: Inst,
result_idx: usize,
ctrl_typevar: Type)
-> Option<Type> {
pub fn compute_result_type(
&self,
inst: Inst,
result_idx: usize,
ctrl_typevar: Type,
) -> Option<Type> {
let constraints = self.insts[inst].opcode().constraints();
let fixed_results = constraints.fixed_results();
@@ -626,13 +650,12 @@ impl DataFlowGraph {
}
// Not a fixed result, try to extract a return type from the call signature.
self.call_signature(inst)
.and_then(|sigref| {
self.signatures[sigref]
.return_types
.get(result_idx - fixed_results)
.map(|&arg| arg.value_type)
})
self.call_signature(inst).and_then(|sigref| {
self.signatures[sigref]
.return_types
.get(result_idx - fixed_results)
.map(|&arg| arg.value_type)
})
}
/// Get the controlling type variable, or `VOID` if `inst` isn't polymorphic.
@@ -644,8 +667,9 @@ impl DataFlowGraph {
} else if constraints.requires_typevar_operand() {
// Not all instruction formats have a designated operand, but in that case
// `requires_typevar_operand()` should never be true.
self.value_type(self[inst].typevar_operand(&self.value_lists)
.expect("Instruction format doesn't have a designated operand, bad opcode."))
self.value_type(self[inst].typevar_operand(&self.value_lists).expect(
"Instruction format doesn't have a designated operand, bad opcode.",
))
} else {
self.value_type(self.first_result(inst))
}
@@ -691,10 +715,10 @@ impl DataFlowGraph {
let num = self.ebbs[ebb].args.push(arg, &mut self.value_lists);
assert!(num <= u16::MAX as usize, "Too many arguments to EBB");
self.make_value(ValueData::Arg {
ty,
num: num as u16,
ebb,
})
}
/// Removes `val` from `ebb`'s argument by swapping it with the last argument of `ebb`.
@@ -712,9 +736,10 @@ impl DataFlowGraph {
} else {
panic!("{} must be an EBB argument", val);
};
self.ebbs[ebb]
.args
.swap_remove(num as usize, &mut self.value_lists);
self.ebbs[ebb].args.swap_remove(
num as usize,
&mut self.value_lists,
);
if let Some(last_arg_val) = self.ebbs[ebb].args.get(num as usize, &self.value_lists) {
// We update the position of the old last arg.
if let ValueData::Arg { num: ref mut old_num, .. } = self.values[last_arg_val] {
@@ -734,23 +759,26 @@ impl DataFlowGraph {
} else {
panic!("{} must be an EBB argument", val);
};
self.ebbs[ebb]
.args
.remove(num as usize, &mut self.value_lists);
self.ebbs[ebb].args.remove(
num as usize,
&mut self.value_lists,
);
for index in num..(self.ebb_args(ebb).len() as u16) {
match self.values[self.ebbs[ebb]
.args
.get(index as usize, &self.value_lists)
.unwrap()] {
ValueData::Arg { ref mut num, .. } => {
*num -= 1;
}
_ => {
panic!("{} must be an EBB argument",
self.ebbs[ebb]
.args
.get(index as usize, &self.value_lists)
.unwrap())
panic!(
"{} must be an EBB argument",
self.ebbs[ebb]
.args
.get(index as usize, &self.value_lists)
.unwrap()
)
}
}
}
@@ -791,10 +819,10 @@ impl DataFlowGraph {
panic!("{} must be an EBB argument", old_arg);
};
let new_arg = self.make_value(ValueData::Arg {
ty: new_type,
num,
ebb,
});
ty: new_type,
num,
ebb,
});
self.ebbs[ebb].args.as_mut_slice(&mut self.value_lists)[num as usize] = new_arg;
new_arg


@@ -218,7 +218,9 @@ mod tests {
use std::mem;
use packed_option::PackedOption;
// This is the whole point of `PackedOption`.
assert_eq!(mem::size_of::<Value>(),
mem::size_of::<PackedOption<Value>>());
assert_eq!(
mem::size_of::<Value>(),
mem::size_of::<PackedOption<Value>>()
);
}
}
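A side note on the size assertion above, sketched outside the crate: std's `NonZeroU32` niche gives the same "no extra tag byte" property that `PackedOption<Value>` gets from its reserved encoding (the in-crate scheme isn't shown in this hunk, so the parallel is an assumption about intent, not the implementation):

use std::mem::size_of;
use std::num::NonZeroU32;

fn main() {
    // Option<NonZeroU32> reuses the forbidden zero bit pattern as `None`,
    // so the wrapper is exactly as large as the wrapped value.
    assert_eq!(size_of::<NonZeroU32>(), size_of::<Option<NonZeroU32>>());
}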


@@ -55,11 +55,11 @@ impl Signature {
let bytes = self.argument_types
.iter()
.filter_map(|arg| match arg.location {
ArgumentLoc::Stack(offset) if offset >= 0 => {
Some(offset as u32 + arg.value_type.bytes())
}
_ => None,
})
ArgumentLoc::Stack(offset) if offset >= 0 => {
Some(offset as u32 + arg.value_type.bytes())
}
_ => None,
})
.fold(0, cmp::max);
self.argument_bytes = Some(bytes);
}
@@ -73,10 +73,11 @@ impl Signature {
/// Wrapper type capable of displaying a `Signature` with correct register names.
pub struct DisplaySignature<'a>(&'a Signature, Option<&'a RegInfo>);
fn write_list(f: &mut fmt::Formatter,
args: &[ArgumentType],
regs: Option<&RegInfo>)
-> fmt::Result {
fn write_list(
f: &mut fmt::Formatter,
args: &[ArgumentType],
regs: Option<&RegInfo>,
) -> fmt::Result {
match args.split_first() {
None => {}
Some((first, rest)) => {
@@ -310,9 +311,9 @@ impl fmt::Display for CallConv {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::CallConv::*;
f.write_str(match *self {
Native => "native",
SpiderWASM => "spiderwasm",
})
Native => "native",
SpiderWASM => "spiderwasm",
})
}
}
@@ -346,12 +347,14 @@ mod tests {
#[test]
fn argument_purpose() {
let all_purpose = [ArgumentPurpose::Normal,
ArgumentPurpose::StructReturn,
ArgumentPurpose::Link,
ArgumentPurpose::FramePointer,
ArgumentPurpose::CalleeSaved,
ArgumentPurpose::VMContext];
let all_purpose = [
ArgumentPurpose::Normal,
ArgumentPurpose::StructReturn,
ArgumentPurpose::Link,
ArgumentPurpose::FramePointer,
ArgumentPurpose::CalleeSaved,
ArgumentPurpose::VMContext,
];
for (&e, &n) in all_purpose.iter().zip(PURPOSE_NAMES.iter()) {
assert_eq!(e.to_string(), n);
assert_eq!(Ok(e), n.parse());
@@ -373,8 +376,9 @@ mod tests {
assert_eq!(sig.to_string(), "(i32) spiderwasm");
sig.return_types.push(ArgumentType::new(F32));
assert_eq!(sig.to_string(), "(i32) -> f32 spiderwasm");
sig.argument_types
.push(ArgumentType::new(I32.by(4).unwrap()));
sig.argument_types.push(
ArgumentType::new(I32.by(4).unwrap()),
);
assert_eq!(sig.to_string(), "(i32, i32x4) -> f32 spiderwasm");
sig.return_types.push(ArgumentType::new(B8));
assert_eq!(sig.to_string(), "(i32, i32x4) -> f32, b8 spiderwasm");
@@ -391,7 +395,9 @@ mod tests {
assert_eq!(sig.argument_bytes, Some(28));
// Writing ABI-annotated signatures.
assert_eq!(sig.to_string(),
"(i32 [24], i32x4 [8]) -> f32, b8 spiderwasm");
assert_eq!(
sig.to_string(),
"(i32 [24], i32x4 [8]) -> f32, b8 spiderwasm"
);
}
}
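A quick standalone check of the `argument_bytes == Some(28)` assertion above, following the fold at the top of this file (each stack argument contributes its offset plus its size in bytes, and the maximum wins); the 4-byte `i32` and 16-byte `i32x4` sizes are assumed, not shown in this hunk:

fn main() {
    // (stack offset, size in bytes) for the "i32 [24]" and "i32x4 [8]" arguments in the test.
    let stack_args = [(24u32, 4u32), (8, 16)];
    let argument_bytes = stack_args
        .iter()
        .map(|&(offset, bytes)| offset + bytes)
        .max()
        .unwrap_or(0);
    assert_eq!(argument_bytes, 28); // max(24 + 4, 8 + 16) = 28
}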


@@ -30,7 +30,8 @@ impl FunctionName {
/// assert_eq!(name.to_string(), "#0a0908");
/// ```
pub fn new<T>(v: T) -> FunctionName
where T: Into<Vec<u8>>
where
T: Into<Vec<u8>>,
{
let vec = v.into();
if vec.len() <= NAME_LENGTH_THRESHOLD {
@@ -39,9 +40,9 @@ impl FunctionName {
bytes[i] = byte;
}
FunctionName(NameRepr::Short {
length: vec.len() as u8,
bytes: bytes,
})
length: vec.len() as u8,
bytes: bytes,
})
} else {
FunctionName(NameRepr::Long(vec))
}
@@ -114,11 +115,17 @@ mod tests {
assert_eq!(FunctionName::new("x").to_string(), "%x");
assert_eq!(FunctionName::new("x_1").to_string(), "%x_1");
assert_eq!(FunctionName::new(" ").to_string(), "#20");
assert_eq!(FunctionName::new("кретон").to_string(),
"#d0bad180d0b5d182d0bed0bd");
assert_eq!(FunctionName::new("印花棉布").to_string(),
"#e58db0e88ab1e6a389e5b883");
assert_eq!(FunctionName::new(vec![0, 1, 2, 3, 4, 5]).to_string(),
"#000102030405");
assert_eq!(
FunctionName::new("кретон").to_string(),
"#d0bad180d0b5d182d0bed0bd"
);
assert_eq!(
FunctionName::new("印花棉布").to_string(),
"#e58db0e88ab1e6a389e5b883"
);
assert_eq!(
FunctionName::new(vec![0, 1, 2, 3, 4, 5]).to_string(),
"#000102030405"
);
}
}
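The `#...` strings in the test above are just the raw name bytes in lowercase hex. A minimal sketch of that display rule (the crate's Short/Long representation is an internal detail not reproduced here):

fn hex_name(bytes: &[u8]) -> String {
    // '#' followed by two lowercase hex digits per byte, as in the assertions above.
    let mut out = String::from("#");
    for b in bytes {
        out.push_str(&format!("{:02x}", b));
    }
    out
}

fn main() {
    assert_eq!(hex_name(&[0, 1, 2, 3, 4, 5]), "#000102030405");
    assert_eq!(hex_name("кретон".as_bytes()), "#d0bad180d0b5d182d0bed0bd");
}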


@@ -51,9 +51,9 @@ pub enum HeapStyle {
impl fmt::Display for HeapData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match self.style {
HeapStyle::Dynamic { .. } => "dynamic",
HeapStyle::Static { .. } => "static",
})?;
HeapStyle::Dynamic { .. } => "dynamic",
HeapStyle::Static { .. } => "static",
})?;
match self.base {
HeapBase::ReservedReg => write!(f, " reserved_reg")?,


@@ -192,10 +192,10 @@ impl FromStr for Uimm32 {
// Parse a decimal or hexadecimal `Uimm32`, formatted as above.
fn from_str(s: &str) -> Result<Uimm32, &'static str> {
parse_i64(s).and_then(|x| if 0 <= x && x <= u32::MAX as i64 {
Ok(Uimm32(x as u32))
} else {
Err("Uimm32 out of range")
})
Ok(Uimm32(x as u32))
} else {
Err("Uimm32 out of range")
})
}
}
@@ -260,10 +260,10 @@ impl FromStr for Offset32 {
return Err("Offset must begin with sign");
}
parse_i64(s).and_then(|x| if i32::MIN as i64 <= x && x <= i32::MAX as i64 {
Ok(Offset32::new(x as i32))
} else {
Err("Offset out of range")
})
Ok(Offset32::new(x as i32))
} else {
Err("Offset out of range")
})
}
}
@@ -325,10 +325,10 @@ impl FromStr for Uoffset32 {
return Err("Unsigned offset must begin with '+' sign");
}
parse_i64(s).and_then(|x| if 0 <= x && x <= u32::MAX as i64 {
Ok(Uoffset32::new(x as u32))
} else {
Err("Offset out of range")
})
Ok(Uoffset32::new(x as u32))
} else {
Err("Offset out of range")
})
}
}
@@ -458,20 +458,20 @@ fn parse_float(s: &str, w: u8, t: u8) -> Result<u64, &'static str> {
if s2.starts_with("NaN:0x") {
// Quiet NaN with payload.
return match u64::from_str_radix(&s2[6..], 16) {
Ok(payload) if payload < quiet_bit => {
Ok(sign_bit | max_e_bits | quiet_bit | payload)
}
_ => Err("Invalid NaN payload"),
};
Ok(payload) if payload < quiet_bit => {
Ok(sign_bit | max_e_bits | quiet_bit | payload)
}
_ => Err("Invalid NaN payload"),
};
}
if s2.starts_with("sNaN:0x") {
// Signaling NaN with payload.
return match u64::from_str_radix(&s2[7..], 16) {
Ok(payload) if 0 < payload && payload < quiet_bit => {
Ok(sign_bit | max_e_bits | payload)
}
_ => Err("Invalid sNaN payload"),
};
Ok(payload) if 0 < payload && payload < quiet_bit => {
Ok(sign_bit | max_e_bits | payload)
}
_ => Err("Invalid sNaN payload"),
};
}
return Err("Float must be hexadecimal");
@@ -662,7 +662,8 @@ mod tests {
// Verify that `text` can be parsed as a `T` into a value that displays as `want`.
fn parse_ok<T: FromStr + Display>(text: &str, want: &str)
where <T as FromStr>::Err: Display
where
<T as FromStr>::Err: Display,
{
match text.parse::<T>() {
Err(s) => panic!("\"{}\".parse() error: {}", text, s),
@@ -672,7 +673,8 @@ mod tests {
// Verify that `text` fails to parse as `T` with the error `msg`.
fn parse_err<T: FromStr + Display>(text: &str, msg: &str)
where <T as FromStr>::Err: Display
where
<T as FromStr>::Err: Display,
{
match text.parse::<T>() {
Err(s) => assert_eq!(s.to_string(), msg),
@@ -781,18 +783,26 @@ mod tests {
assert_eq!(Ieee32::with_float(1.0).to_string(), "0x1.000000p0");
assert_eq!(Ieee32::with_float(1.5).to_string(), "0x1.800000p0");
assert_eq!(Ieee32::with_float(0.5).to_string(), "0x1.000000p-1");
assert_eq!(Ieee32::with_float(f32::EPSILON).to_string(),
"0x1.000000p-23");
assert_eq!(
Ieee32::with_float(f32::EPSILON).to_string(),
"0x1.000000p-23"
);
assert_eq!(Ieee32::with_float(f32::MIN).to_string(), "-0x1.fffffep127");
assert_eq!(Ieee32::with_float(f32::MAX).to_string(), "0x1.fffffep127");
// Smallest positive normal number.
assert_eq!(Ieee32::with_float(f32::MIN_POSITIVE).to_string(),
"0x1.000000p-126");
assert_eq!(
Ieee32::with_float(f32::MIN_POSITIVE).to_string(),
"0x1.000000p-126"
);
// Subnormals.
assert_eq!(Ieee32::with_float(f32::MIN_POSITIVE / 2.0).to_string(),
"0x0.800000p-126");
assert_eq!(Ieee32::with_float(f32::MIN_POSITIVE * f32::EPSILON).to_string(),
"0x0.000002p-126");
assert_eq!(
Ieee32::with_float(f32::MIN_POSITIVE / 2.0).to_string(),
"0x0.800000p-126"
);
assert_eq!(
Ieee32::with_float(f32::MIN_POSITIVE * f32::EPSILON).to_string(),
"0x0.000002p-126"
);
assert_eq!(Ieee32::with_float(f32::INFINITY).to_string(), "+Inf");
assert_eq!(Ieee32::with_float(f32::NEG_INFINITY).to_string(), "-Inf");
assert_eq!(Ieee32::with_float(f32::NAN).to_string(), "+NaN");
@@ -883,32 +893,48 @@ mod tests {
assert_eq!(Ieee64::with_float(1.0).to_string(), "0x1.0000000000000p0");
assert_eq!(Ieee64::with_float(1.5).to_string(), "0x1.8000000000000p0");
assert_eq!(Ieee64::with_float(0.5).to_string(), "0x1.0000000000000p-1");
assert_eq!(Ieee64::with_float(f64::EPSILON).to_string(),
"0x1.0000000000000p-52");
assert_eq!(Ieee64::with_float(f64::MIN).to_string(),
"-0x1.fffffffffffffp1023");
assert_eq!(Ieee64::with_float(f64::MAX).to_string(),
"0x1.fffffffffffffp1023");
assert_eq!(
Ieee64::with_float(f64::EPSILON).to_string(),
"0x1.0000000000000p-52"
);
assert_eq!(
Ieee64::with_float(f64::MIN).to_string(),
"-0x1.fffffffffffffp1023"
);
assert_eq!(
Ieee64::with_float(f64::MAX).to_string(),
"0x1.fffffffffffffp1023"
);
// Smallest positive normal number.
assert_eq!(Ieee64::with_float(f64::MIN_POSITIVE).to_string(),
"0x1.0000000000000p-1022");
assert_eq!(
Ieee64::with_float(f64::MIN_POSITIVE).to_string(),
"0x1.0000000000000p-1022"
);
// Subnormals.
assert_eq!(Ieee64::with_float(f64::MIN_POSITIVE / 2.0).to_string(),
"0x0.8000000000000p-1022");
assert_eq!(Ieee64::with_float(f64::MIN_POSITIVE * f64::EPSILON).to_string(),
"0x0.0000000000001p-1022");
assert_eq!(
Ieee64::with_float(f64::MIN_POSITIVE / 2.0).to_string(),
"0x0.8000000000000p-1022"
);
assert_eq!(
Ieee64::with_float(f64::MIN_POSITIVE * f64::EPSILON).to_string(),
"0x0.0000000000001p-1022"
);
assert_eq!(Ieee64::with_float(f64::INFINITY).to_string(), "+Inf");
assert_eq!(Ieee64::with_float(f64::NEG_INFINITY).to_string(), "-Inf");
assert_eq!(Ieee64::with_float(f64::NAN).to_string(), "+NaN");
assert_eq!(Ieee64::with_float(-f64::NAN).to_string(), "-NaN");
// Construct some qNaNs with payloads.
assert_eq!(Ieee64(0x7ff8000000000001).to_string(), "+NaN:0x1");
assert_eq!(Ieee64(0x7ffc000000000001).to_string(),
"+NaN:0x4000000000001");
assert_eq!(
Ieee64(0x7ffc000000000001).to_string(),
"+NaN:0x4000000000001"
);
// Signaling NaNs.
assert_eq!(Ieee64(0x7ff0000000000001).to_string(), "+sNaN:0x1");
assert_eq!(Ieee64(0x7ff4000000000001).to_string(),
"+sNaN:0x4000000000001");
assert_eq!(
Ieee64(0x7ff4000000000001).to_string(),
"+sNaN:0x4000000000001"
);
}
#[test]
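As an aside on the constants asserted in these immediates tests, two standalone spot checks: the hex-float exponents shown are exact powers of two, and the `+NaN:0x1` bit pattern is the all-ones exponent plus the quiet bit plus the payload (the 1/11/52-bit binary64 field split is assumed here, matching the `sign_bit | max_e_bits | quiet_bit | payload` expression earlier in the hunk):

fn main() {
    // Hex-float exponents are plain powers of two.
    assert_eq!(f32::EPSILON, 2f32.powi(-23)); // "0x1.000000p-23"
    assert_eq!(f64::MIN_POSITIVE, 2f64.powi(-1022)); // "0x1.0000000000000p-1022"

    // Quiet NaN with payload 1, printed as "+NaN:0x1" above.
    let max_e_bits = 0x7ffu64 << 52; // all-ones exponent
    let quiet_bit = 1u64 << 51; // top significand bit marks a quiet NaN
    assert_eq!(max_e_bits | quiet_bit | 0x1, 0x7ff8000000000001);
}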


@@ -481,7 +481,8 @@ impl OpcodeConstraints {
pub fn result_type(self, n: usize, ctrl_type: Type) -> Type {
assert!(n < self.fixed_results(), "Invalid result index");
if let ResolvedConstraint::Bound(t) =
OPERAND_CONSTRAINTS[self.constraint_offset() + n].resolve(ctrl_type) {
OPERAND_CONSTRAINTS[self.constraint_offset() + n].resolve(ctrl_type)
{
t
} else {
panic!("Result constraints can't be free");
@@ -494,8 +495,10 @@ impl OpcodeConstraints {
/// Unlike results, it is possible for some input values to vary freely within a specific
/// `ValueTypeSet`. This is represented with the `ArgumentConstraint::Free` variant.
pub fn value_argument_constraint(self, n: usize, ctrl_type: Type) -> ResolvedConstraint {
assert!(n < self.fixed_value_arguments(),
"Invalid value argument index");
assert!(
n < self.fixed_value_arguments(),
"Invalid value argument index"
);
let offset = self.constraint_offset() + self.fixed_results();
OPERAND_CONSTRAINTS[offset + n].resolve(ctrl_type)
}
@@ -613,14 +616,14 @@ impl OperandConstraint {
AsBool => Bound(ctrl_type.as_bool()),
HalfWidth => Bound(ctrl_type.half_width().expect("invalid type for half_width")),
DoubleWidth => {
Bound(ctrl_type
.double_width()
.expect("invalid type for double_width"))
Bound(ctrl_type.double_width().expect(
"invalid type for double_width",
))
}
HalfVector => {
Bound(ctrl_type
.half_vector()
.expect("invalid type for half_vector"))
Bound(ctrl_type.half_vector().expect(
"invalid type for half_vector",
))
}
DoubleVector => Bound(ctrl_type.by(2).expect("invalid type for double_vector")),
}
@@ -688,10 +691,14 @@ mod tests {
assert_eq!(a.fixed_value_arguments(), 2);
assert_eq!(a.result_type(0, types::I32), types::I32);
assert_eq!(a.result_type(0, types::I8), types::I8);
assert_eq!(a.value_argument_constraint(0, types::I32),
ResolvedConstraint::Bound(types::I32));
assert_eq!(a.value_argument_constraint(1, types::I32),
ResolvedConstraint::Bound(types::I32));
assert_eq!(
a.value_argument_constraint(0, types::I32),
ResolvedConstraint::Bound(types::I32)
);
assert_eq!(
a.value_argument_constraint(1, types::I32),
ResolvedConstraint::Bound(types::I32)
);
let b = Opcode::Bitcast.constraints();
assert!(!b.use_typevar_operand());


@@ -71,9 +71,9 @@ impl JumpTableData {
/// Checks if any of the entries branch to `ebb`.
pub fn branches_to(&self, ebb: Ebb) -> bool {
self.table
.iter()
.any(|target_ebb| target_ebb.expand() == Some(ebb))
self.table.iter().any(|target_ebb| {
target_ebb.expand() == Some(ebb)
})
}
/// Access the whole table as a mutable slice.
@@ -148,8 +148,10 @@ mod tests {
jt.set_entry(0, e2);
jt.set_entry(10, e1);
assert_eq!(jt.to_string(),
"jump_table ebb2, 0, 0, 0, 0, 0, 0, 0, 0, 0, ebb1");
assert_eq!(
jt.to_string(),
"jump_table ebb2, 0, 0, 0, 0, 0, 0, 0, 0, 0, ebb1"
);
let v: Vec<(usize, Ebb)> = jt.entries().collect();
assert_eq!(v, [(0, e2), (10, e1)]);


@@ -96,8 +96,9 @@ fn test_midpoint() {
impl ProgramOrder for Layout {
fn cmp<A, B>(&self, a: A, b: B) -> cmp::Ordering
where A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>,
{
let a_seq = self.seq(a);
let b_seq = self.seq(b);
@@ -166,8 +167,9 @@ impl Layout {
/// Assign a valid sequence number to `inst` such that the numbers are still monotonic. This may
/// require renumbering.
fn assign_inst_seq(&mut self, inst: Inst) {
let ebb = self.inst_ebb(inst)
.expect("inst must be inserted before assigning a seq");
let ebb = self.inst_ebb(inst).expect(
"inst must be inserted before assigning a seq",
);
// Get the sequence number immediately before `inst`.
let prev_seq = match self.insts[inst].prev.expand() {
@@ -283,8 +285,10 @@ impl Layout {
/// Insert `ebb` as the last EBB in the layout.
pub fn append_ebb(&mut self, ebb: Ebb) {
assert!(!self.is_ebb_inserted(ebb),
"Cannot append EBB that is already in the layout");
assert!(
!self.is_ebb_inserted(ebb),
"Cannot append EBB that is already in the layout"
);
{
let node = &mut self.ebbs[ebb];
assert!(node.first_inst.is_none() && node.last_inst.is_none());
@@ -302,10 +306,14 @@ impl Layout {
/// Insert `ebb` in the layout before the existing EBB `before`.
pub fn insert_ebb(&mut self, ebb: Ebb, before: Ebb) {
assert!(!self.is_ebb_inserted(ebb),
"Cannot insert EBB that is already in the layout");
assert!(self.is_ebb_inserted(before),
"EBB Insertion point not in the layout");
assert!(
!self.is_ebb_inserted(ebb),
"Cannot insert EBB that is already in the layout"
);
assert!(
self.is_ebb_inserted(before),
"EBB Insertion point not in the layout"
);
let after = self.ebbs[before].prev;
{
let node = &mut self.ebbs[ebb];
@@ -322,10 +330,14 @@ impl Layout {
/// Insert `ebb` in the layout *after* the existing EBB `after`.
pub fn insert_ebb_after(&mut self, ebb: Ebb, after: Ebb) {
assert!(!self.is_ebb_inserted(ebb),
"Cannot insert EBB that is already in the layout");
assert!(self.is_ebb_inserted(after),
"EBB Insertion point not in the layout");
assert!(
!self.is_ebb_inserted(ebb),
"Cannot insert EBB that is already in the layout"
);
assert!(
self.is_ebb_inserted(after),
"EBB Insertion point not in the layout"
);
let before = self.ebbs[after].next;
{
let node = &mut self.ebbs[ebb];
@@ -411,7 +423,8 @@ impl Layout {
/// Get the EBB containing the program point `pp`. Panic if `pp` is not in the layout.
pub fn pp_ebb<PP>(&self, pp: PP) -> Ebb
where PP: Into<ExpandedProgramPoint>
where
PP: Into<ExpandedProgramPoint>,
{
match pp.into() {
ExpandedProgramPoint::Ebb(ebb) => ebb,
@@ -424,8 +437,10 @@ impl Layout {
/// Append `inst` to the end of `ebb`.
pub fn append_inst(&mut self, inst: Inst, ebb: Ebb) {
assert_eq!(self.inst_ebb(inst), None);
assert!(self.is_ebb_inserted(ebb),
"Cannot append instructions to EBB not in layout");
assert!(
self.is_ebb_inserted(ebb),
"Cannot append instructions to EBB not in layout"
);
{
let ebb_node = &mut self.ebbs[ebb];
{
@@ -457,8 +472,9 @@ impl Layout {
/// Insert `inst` before the instruction `before` in the same EBB.
pub fn insert_inst(&mut self, inst: Inst, before: Inst) {
assert_eq!(self.inst_ebb(inst), None);
let ebb = self.inst_ebb(before)
.expect("Instruction before insertion point not in the layout");
let ebb = self.inst_ebb(before).expect(
"Instruction before insertion point not in the layout",
);
let after = self.insts[before].prev;
{
let inst_node = &mut self.insts[inst];
@@ -531,8 +547,9 @@ impl Layout {
/// i4
/// ```
pub fn split_ebb(&mut self, new_ebb: Ebb, before: Inst) {
let old_ebb = self.inst_ebb(before)
.expect("The `before` instruction must be in the layout");
let old_ebb = self.inst_ebb(before).expect(
"The `before` instruction must be in the layout",
);
assert!(!self.is_ebb_inserted(new_ebb));
// Insert new_ebb after old_ebb.
@@ -683,7 +700,8 @@ pub trait CursorBase {
/// }
/// ```
fn at_inst(mut self, inst: Inst) -> Self
where Self: Sized
where
Self: Sized,
{
self.goto_inst(inst);
self
@@ -703,7 +721,8 @@ pub trait CursorBase {
/// }
/// ```
fn at_first_inst(mut self, ebb: Ebb) -> Self
where Self: Sized
where
Self: Sized,
{
self.goto_first_inst(ebb);
self
@@ -783,9 +802,9 @@ pub trait CursorBase {
self.layout().first_ebb
};
self.set_position(match next {
Some(ebb) => CursorPosition::Before(ebb),
None => CursorPosition::Nowhere,
});
Some(ebb) => CursorPosition::Before(ebb),
None => CursorPosition::Nowhere,
});
next
}
@@ -816,9 +835,9 @@ pub trait CursorBase {
self.layout().last_ebb
};
self.set_position(match prev {
Some(ebb) => CursorPosition::After(ebb),
None => CursorPosition::Nowhere,
});
Some(ebb) => CursorPosition::After(ebb),
None => CursorPosition::Nowhere,
});
prev
}
@@ -872,9 +891,9 @@ pub trait CursorBase {
self.set_position(At(next));
Some(next)
} else {
let pos = After(self.layout()
.inst_ebb(inst)
.expect("current instruction removed?"));
let pos = After(self.layout().inst_ebb(inst).expect(
"current instruction removed?",
));
self.set_position(pos);
None
}
@@ -925,9 +944,9 @@ pub trait CursorBase {
self.set_position(At(prev));
Some(prev)
} else {
let pos = Before(self.layout()
.inst_ebb(inst)
.expect("current instruction removed?"));
let pos = Before(self.layout().inst_ebb(inst).expect(
"current instruction removed?",
));
self.set_position(pos);
None
}
@@ -1057,9 +1076,10 @@ pub struct LayoutCursorInserter<'c, 'fc: 'c, 'fd> {
impl<'c, 'fc: 'c, 'fd> LayoutCursorInserter<'c, 'fc, 'fd> {
/// Create a new inserter. Don't use this, use `dfg.ins(pos)`.
pub fn new(pos: &'c mut Cursor<'fc>,
dfg: &'fd mut DataFlowGraph)
-> LayoutCursorInserter<'c, 'fc, 'fd> {
pub fn new(
pos: &'c mut Cursor<'fc>,
dfg: &'fd mut DataFlowGraph,
) -> LayoutCursorInserter<'c, 'fc, 'fd> {
LayoutCursorInserter { pos, dfg }
}
}


@@ -123,8 +123,9 @@ pub trait ProgramOrder {
/// directly. Depending on the implementation, there is a good chance performance will be
/// improved for those cases where the type of either argument is known statically.
fn cmp<A, B>(&self, a: A, b: B) -> cmp::Ordering
where A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>;
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>;
/// Is the range from `inst` to `ebb` just the gap between consecutive EBBs?
///


@@ -65,11 +65,11 @@ impl fmt::Display for StackSlotKind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::StackSlotKind::*;
f.write_str(match *self {
Local => "local",
SpillSlot => "spill_slot",
IncomingArg => "incoming_arg",
OutgoingArg => "outgoing_arg",
})
Local => "local",
SpillSlot => "spill_slot",
IncomingArg => "incoming_arg",
OutgoingArg => "outgoing_arg",
})
}
}
@@ -228,9 +228,9 @@ impl StackSlots {
let size = ty.bytes();
// Look for an existing outgoing stack slot with the same offset and size.
let inspos = match self.outgoing
.binary_search_by_key(&(offset, size),
|&ss| (self[ss].offset, self[ss].size)) {
let inspos = match self.outgoing.binary_search_by_key(&(offset, size), |&ss| {
(self[ss].offset, self[ss].size)
}) {
Ok(idx) => return self.outgoing[idx],
Err(idx) => idx,
};
@@ -255,10 +255,14 @@ mod tests {
fn stack_slot() {
let mut func = Function::new();
let ss0 = func.stack_slots
.push(StackSlotData::new(StackSlotKind::IncomingArg, 4));
let ss1 = func.stack_slots
.push(StackSlotData::new(StackSlotKind::SpillSlot, 8));
let ss0 = func.stack_slots.push(StackSlotData::new(
StackSlotKind::IncomingArg,
4,
));
let ss1 = func.stack_slots.push(StackSlotData::new(
StackSlotKind::SpillSlot,
8,
));
assert_eq!(ss0.to_string(), "ss0");
assert_eq!(ss1.to_string(), "ss1");


@@ -7,9 +7,11 @@ use settings as shared_settings;
use super::registers::{S, D, Q, GPR};
/// Legalize `sig`.
pub fn legalize_signature(_sig: &mut ir::Signature,
_flags: &shared_settings::Flags,
_current: bool) {
pub fn legalize_signature(
_sig: &mut ir::Signature,
_flags: &shared_settings::Flags,
_current: bool,
) {
unimplemented!()
}


@@ -29,19 +29,20 @@ pub fn isa_builder() -> IsaBuilder {
}
}
fn isa_constructor(shared_flags: shared_settings::Flags,
builder: &shared_settings::Builder)
-> Box<TargetIsa> {
fn isa_constructor(
shared_flags: shared_settings::Flags,
builder: &shared_settings::Builder,
) -> Box<TargetIsa> {
let level1 = if shared_flags.is_compressed() {
&enc_tables::LEVEL1_T32[..]
} else {
&enc_tables::LEVEL1_A32[..]
};
Box::new(Isa {
isa_flags: settings::Flags::new(&shared_flags, builder),
shared_flags,
cpumode: level1,
})
isa_flags: settings::Flags::new(&shared_flags, builder),
shared_flags,
cpumode: level1,
})
}
impl TargetIsa for Isa {
@@ -61,21 +62,24 @@ impl TargetIsa for Isa {
enc_tables::INFO.clone()
}
fn legal_encodings<'a>(&'a self,
dfg: &'a ir::DataFlowGraph,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type)
-> Encodings<'a> {
lookup_enclist(ctrl_typevar,
inst,
dfg,
self.cpumode,
&enc_tables::LEVEL2[..],
&enc_tables::ENCLISTS[..],
&enc_tables::LEGALIZE_ACTIONS[..],
&enc_tables::RECIPE_PREDICATES[..],
&enc_tables::INST_PREDICATES[..],
self.isa_flags.predicate_view())
fn legal_encodings<'a>(
&'a self,
dfg: &'a ir::DataFlowGraph,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type,
) -> Encodings<'a> {
lookup_enclist(
ctrl_typevar,
inst,
dfg,
self.cpumode,
&enc_tables::LEVEL2[..],
&enc_tables::ENCLISTS[..],
&enc_tables::LEGALIZE_ACTIONS[..],
&enc_tables::RECIPE_PREDICATES[..],
&enc_tables::INST_PREDICATES[..],
self.isa_flags.predicate_view(),
)
}
fn legalize_signature(&self, sig: &mut ir::Signature, current: bool) {
@@ -90,11 +94,13 @@ impl TargetIsa for Isa {
abi::allocatable_registers(func)
}
fn emit_inst(&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut CodeSink) {
fn emit_inst(
&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut CodeSink,
) {
binemit::emit_inst(func, inst, divert, sink)
}


@@ -7,9 +7,11 @@ use settings as shared_settings;
use super::registers::{GPR, FPR};
/// Legalize `sig`.
pub fn legalize_signature(_sig: &mut ir::Signature,
_flags: &shared_settings::Flags,
_current: bool) {
pub fn legalize_signature(
_sig: &mut ir::Signature,
_flags: &shared_settings::Flags,
_current: bool,
) {
unimplemented!()
}


@@ -28,13 +28,14 @@ pub fn isa_builder() -> IsaBuilder {
}
}
fn isa_constructor(shared_flags: shared_settings::Flags,
builder: &shared_settings::Builder)
-> Box<TargetIsa> {
fn isa_constructor(
shared_flags: shared_settings::Flags,
builder: &shared_settings::Builder,
) -> Box<TargetIsa> {
Box::new(Isa {
isa_flags: settings::Flags::new(&shared_flags, builder),
shared_flags,
})
isa_flags: settings::Flags::new(&shared_flags, builder),
shared_flags,
})
}
impl TargetIsa for Isa {
@@ -54,21 +55,24 @@ impl TargetIsa for Isa {
enc_tables::INFO.clone()
}
fn legal_encodings<'a>(&'a self,
dfg: &'a ir::DataFlowGraph,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type)
-> Encodings<'a> {
lookup_enclist(ctrl_typevar,
inst,
dfg,
&enc_tables::LEVEL1_A64[..],
&enc_tables::LEVEL2[..],
&enc_tables::ENCLISTS[..],
&enc_tables::LEGALIZE_ACTIONS[..],
&enc_tables::RECIPE_PREDICATES[..],
&enc_tables::INST_PREDICATES[..],
self.isa_flags.predicate_view())
fn legal_encodings<'a>(
&'a self,
dfg: &'a ir::DataFlowGraph,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type,
) -> Encodings<'a> {
lookup_enclist(
ctrl_typevar,
inst,
dfg,
&enc_tables::LEVEL1_A64[..],
&enc_tables::LEVEL2[..],
&enc_tables::ENCLISTS[..],
&enc_tables::LEGALIZE_ACTIONS[..],
&enc_tables::RECIPE_PREDICATES[..],
&enc_tables::INST_PREDICATES[..],
self.isa_flags.predicate_view(),
)
}
fn legalize_signature(&self, sig: &mut ir::Signature, current: bool) {
@@ -83,11 +87,13 @@ impl TargetIsa for Isa {
abi::allocatable_registers(func)
}
fn emit_inst(&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut CodeSink) {
fn emit_inst(
&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut CodeSink,
) {
binemit::emit_inst(func, inst, divert, sink)
}


@@ -103,19 +103,21 @@ impl<OffT: Into<u32> + Copy> Table<Opcode> for [Level2Entry<OffT>] {
/// list.
///
/// Returns an iterator that produces legal encodings for `inst`.
pub fn lookup_enclist<'a, OffT1, OffT2>(ctrl_typevar: Type,
inst: &'a InstructionData,
dfg: &'a DataFlowGraph,
level1_table: &'static [Level1Entry<OffT1>],
level2_table: &'static [Level2Entry<OffT2>],
enclist: &'static [EncListEntry],
legalize_actions: &'static [Legalize],
recipe_preds: &'static [RecipePredicate],
inst_preds: &'static [InstPredicate],
isa_preds: PredicateView<'a>)
-> Encodings<'a>
where OffT1: Into<u32> + Copy,
OffT2: Into<u32> + Copy
pub fn lookup_enclist<'a, OffT1, OffT2>(
ctrl_typevar: Type,
inst: &'a InstructionData,
dfg: &'a DataFlowGraph,
level1_table: &'static [Level1Entry<OffT1>],
level2_table: &'static [Level2Entry<OffT2>],
enclist: &'static [EncListEntry],
legalize_actions: &'static [Legalize],
recipe_preds: &'static [RecipePredicate],
inst_preds: &'static [InstPredicate],
isa_preds: PredicateView<'a>,
) -> Encodings<'a>
where
OffT1: Into<u32> + Copy,
OffT2: Into<u32> + Copy,
{
let (offset, legalize) = match probe(level1_table, ctrl_typevar, ctrl_typevar.index()) {
Err(l1idx) => {
@@ -144,15 +146,17 @@ pub fn lookup_enclist<'a, OffT1, OffT2>(ctrl_typevar: Type,
// Now we have an offset into `enclist` that is `!0` when no encoding list could be found.
// The default legalization code is always valid.
Encodings::new(offset,
legalize,
inst,
dfg,
enclist,
legalize_actions,
recipe_preds,
inst_preds,
isa_preds)
Encodings::new(
offset,
legalize,
inst,
dfg,
enclist,
legalize_actions,
recipe_preds,
inst_preds,
isa_preds,
)
}
/// Encoding list entry.
@@ -187,16 +191,17 @@ impl<'a> Encodings<'a> {
/// This iterator searches for encodings that apply to the given instruction. The
/// encoding lists are laid out such that the first call to `next` returns a valid entry in
/// the list or `None`.
pub fn new(offset: usize,
legalize: LegalizeCode,
inst: &'a InstructionData,
dfg: &'a DataFlowGraph,
enclist: &'static [EncListEntry],
legalize_actions: &'static [Legalize],
recipe_preds: &'static [RecipePredicate],
inst_preds: &'static [InstPredicate],
isa_preds: PredicateView<'a>)
-> Self {
pub fn new(
offset: usize,
legalize: LegalizeCode,
inst: &'a InstructionData,
dfg: &'a DataFlowGraph,
enclist: &'static [EncListEntry],
legalize_actions: &'static [Legalize],
recipe_preds: &'static [RecipePredicate],
inst_preds: &'static [InstPredicate],
isa_preds: PredicateView<'a>,
) -> Self {
Encodings {
offset,
inst,


@@ -66,10 +66,12 @@ pub struct DisplayEncoding {
impl fmt::Display for DisplayEncoding {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.encoding.is_legal() {
write!(f,
"{}#{:02x}",
self.recipe_names[self.encoding.recipe()],
self.encoding.bits)
write!(
f,
"{}#{:02x}",
self.recipe_names[self.encoding.recipe()],
self.encoding.bits
)
} else {
write!(f, "-")
}


@@ -87,9 +87,7 @@ impl ArgAssigner for Args {
}
/// Legalize `sig`.
pub fn legalize_signature(sig: &mut ir::Signature,
flags: &shared_settings::Flags,
_current: bool) {
pub fn legalize_signature(sig: &mut ir::Signature, flags: &shared_settings::Flags, _current: bool) {
let bits = if flags.is_64bit() { 64 } else { 32 };
let mut args = Args::new(bits, &ARG_GPRS, 8);
@@ -105,9 +103,10 @@ pub fn regclass_for_abi_type(ty: ir::Type) -> RegClass {
}
/// Get the set of allocatable registers for `func`.
pub fn allocatable_registers(_func: &ir::Function,
flags: &shared_settings::Flags)
-> AllocatableSet {
pub fn allocatable_registers(
_func: &ir::Function,
flags: &shared_settings::Flags,
) -> AllocatableSet {
let mut regs = AllocatableSet::new();
regs.take(GPR, RU::rsp as RegUnit);
regs.take(GPR, RU::rbp as RegUnit);


@@ -29,19 +29,20 @@ pub fn isa_builder() -> IsaBuilder {
}
}
fn isa_constructor(shared_flags: shared_settings::Flags,
builder: &shared_settings::Builder)
-> Box<TargetIsa> {
fn isa_constructor(
shared_flags: shared_settings::Flags,
builder: &shared_settings::Builder,
) -> Box<TargetIsa> {
let level1 = if shared_flags.is_64bit() {
&enc_tables::LEVEL1_I64[..]
} else {
&enc_tables::LEVEL1_I32[..]
};
Box::new(Isa {
isa_flags: settings::Flags::new(&shared_flags, builder),
shared_flags,
cpumode: level1,
})
isa_flags: settings::Flags::new(&shared_flags, builder),
shared_flags,
cpumode: level1,
})
}
impl TargetIsa for Isa {
@@ -61,21 +62,24 @@ impl TargetIsa for Isa {
enc_tables::INFO.clone()
}
fn legal_encodings<'a>(&'a self,
dfg: &'a ir::DataFlowGraph,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type)
-> Encodings<'a> {
lookup_enclist(ctrl_typevar,
inst,
dfg,
self.cpumode,
&enc_tables::LEVEL2[..],
&enc_tables::ENCLISTS[..],
&enc_tables::LEGALIZE_ACTIONS[..],
&enc_tables::RECIPE_PREDICATES[..],
&enc_tables::INST_PREDICATES[..],
self.isa_flags.predicate_view())
fn legal_encodings<'a>(
&'a self,
dfg: &'a ir::DataFlowGraph,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type,
) -> Encodings<'a> {
lookup_enclist(
ctrl_typevar,
inst,
dfg,
self.cpumode,
&enc_tables::LEVEL2[..],
&enc_tables::ENCLISTS[..],
&enc_tables::LEGALIZE_ACTIONS[..],
&enc_tables::RECIPE_PREDICATES[..],
&enc_tables::INST_PREDICATES[..],
self.isa_flags.predicate_view(),
)
}
fn legalize_signature(&self, sig: &mut ir::Signature, current: bool) {
@@ -90,11 +94,13 @@ impl TargetIsa for Isa {
abi::allocatable_registers(func, &self.shared_flags)
}
fn emit_inst(&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut CodeSink) {
fn emit_inst(
&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut CodeSink,
) {
binemit::emit_inst(func, inst, divert, sink)
}


@@ -155,11 +155,12 @@ pub trait TargetIsa {
fn register_info(&self) -> RegInfo;
/// Returns an iterator over legal encodings for the instruction.
fn legal_encodings<'a>(&'a self,
dfg: &'a ir::DataFlowGraph,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type)
-> Encodings<'a>;
fn legal_encodings<'a>(
&'a self,
dfg: &'a ir::DataFlowGraph,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type,
) -> Encodings<'a>;
/// Encode an instruction after determining it is legal.
///
@@ -167,11 +168,12 @@ pub trait TargetIsa {
/// Otherwise, return `Legalize` action.
///
/// This is also the main entry point for determining if an instruction is legal.
fn encode(&self,
dfg: &ir::DataFlowGraph,
inst: &ir::InstructionData,
ctrl_typevar: ir::Type)
-> Result<Encoding, Legalize> {
fn encode(
&self,
dfg: &ir::DataFlowGraph,
inst: &ir::InstructionData,
ctrl_typevar: ir::Type,
) -> Result<Encoding, Legalize> {
let mut iter = self.legal_encodings(dfg, inst, ctrl_typevar);
iter.next().ok_or_else(|| iter.legalize().into())
}
@@ -244,11 +246,13 @@ pub trait TargetIsa {
///
/// Note that this will call `put*` methods on the trait object via its vtable, which is not the
/// fastest way of emitting code.
fn emit_inst(&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut binemit::CodeSink);
fn emit_inst(
&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut binemit::CodeSink,
);
/// Emit a whole function into memory.
///


@@ -74,24 +74,23 @@ impl RegBank {
/// Try to parse a regunit name. The name is not expected to begin with `%`.
fn parse_regunit(&self, name: &str) -> Option<RegUnit> {
match self.names.iter().position(|&x| x == name) {
Some(offset) => {
// This is one of the special-cased names.
Some(offset as RegUnit)
}
None => {
// Try a regular prefixed name.
if name.starts_with(self.prefix) {
name[self.prefix.len()..].parse().ok()
} else {
None
}
Some(offset) => {
// This is one of the special-cased names.
Some(offset as RegUnit)
}
None => {
// Try a regular prefixed name.
if name.starts_with(self.prefix) {
name[self.prefix.len()..].parse().ok()
} else {
None
}
}
.and_then(|offset| if offset < self.units {
Some(offset + self.first_unit)
} else {
None
})
}.and_then(|offset| if offset < self.units {
Some(offset + self.first_unit)
} else {
None
})
}
/// Write `regunit` to `w`, assuming that it belongs to this bank.


@@ -86,10 +86,12 @@ impl ArgAssigner for Args {
}
/// Legalize `sig` for RISC-V.
pub fn legalize_signature(sig: &mut ir::Signature,
flags: &shared_settings::Flags,
isa_flags: &settings::Flags,
current: bool) {
pub fn legalize_signature(
sig: &mut ir::Signature,
flags: &shared_settings::Flags,
isa_flags: &settings::Flags,
current: bool,
) {
let bits = if flags.is_64bit() { 64 } else { 32 };
let mut args = Args::new(bits, isa_flags.enable_e());


@@ -29,11 +29,7 @@ impl Into<Reloc> for RelocKind {
/// 25 20 15 12 7 0
///
/// Encoding bits: `opcode[6:2] | (funct3 << 5) | (funct7 << 8)`.
fn put_r<CS: CodeSink + ?Sized>(bits: u16,
rs1: RegUnit,
rs2: RegUnit,
rd: RegUnit,
sink: &mut CS) {
fn put_r<CS: CodeSink + ?Sized>(bits: u16, rs1: RegUnit, rs2: RegUnit, rd: RegUnit, sink: &mut CS) {
let bits = bits as u32;
let opcode5 = bits & 0x1f;
let funct3 = (bits >> 5) & 0x7;
@@ -63,11 +59,13 @@ fn put_r<CS: CodeSink + ?Sized>(bits: u16,
/// Both funct7 and shamt contribute to bit 25. In RV64, shamt uses it for shifts > 31.
///
/// Encoding bits: `opcode[6:2] | (funct3 << 5) | (funct7 << 8)`.
fn put_rshamt<CS: CodeSink + ?Sized>(bits: u16,
rs1: RegUnit,
shamt: i64,
rd: RegUnit,
sink: &mut CS) {
fn put_rshamt<CS: CodeSink + ?Sized>(
bits: u16,
rs1: RegUnit,
shamt: i64,
rd: RegUnit,
sink: &mut CS,
) {
let bits = bits as u32;
let opcode5 = bits & 0x1f;
let funct3 = (bits >> 5) & 0x7;


@@ -29,19 +29,20 @@ pub fn isa_builder() -> IsaBuilder {
}
}
fn isa_constructor(shared_flags: shared_settings::Flags,
builder: &shared_settings::Builder)
-> Box<TargetIsa> {
fn isa_constructor(
shared_flags: shared_settings::Flags,
builder: &shared_settings::Builder,
) -> Box<TargetIsa> {
let level1 = if shared_flags.is_64bit() {
&enc_tables::LEVEL1_RV64[..]
} else {
&enc_tables::LEVEL1_RV32[..]
};
Box::new(Isa {
isa_flags: settings::Flags::new(&shared_flags, builder),
shared_flags,
cpumode: level1,
})
isa_flags: settings::Flags::new(&shared_flags, builder),
shared_flags,
cpumode: level1,
})
}
impl TargetIsa for Isa {
@@ -61,21 +62,24 @@ impl TargetIsa for Isa {
enc_tables::INFO.clone()
}
fn legal_encodings<'a>(&'a self,
dfg: &'a ir::DataFlowGraph,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type)
-> Encodings<'a> {
lookup_enclist(ctrl_typevar,
inst,
dfg,
self.cpumode,
&enc_tables::LEVEL2[..],
&enc_tables::ENCLISTS[..],
&enc_tables::LEGALIZE_ACTIONS[..],
&enc_tables::RECIPE_PREDICATES[..],
&enc_tables::INST_PREDICATES[..],
self.isa_flags.predicate_view())
fn legal_encodings<'a>(
&'a self,
dfg: &'a ir::DataFlowGraph,
inst: &'a ir::InstructionData,
ctrl_typevar: ir::Type,
) -> Encodings<'a> {
lookup_enclist(
ctrl_typevar,
inst,
dfg,
self.cpumode,
&enc_tables::LEVEL2[..],
&enc_tables::ENCLISTS[..],
&enc_tables::LEGALIZE_ACTIONS[..],
&enc_tables::RECIPE_PREDICATES[..],
&enc_tables::INST_PREDICATES[..],
self.isa_flags.predicate_view(),
)
}
fn legalize_signature(&self, sig: &mut ir::Signature, current: bool) {
@@ -90,11 +94,13 @@ impl TargetIsa for Isa {
abi::allocatable_registers(func, &self.isa_flags)
}
fn emit_inst(&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut CodeSink) {
fn emit_inst(
&self,
func: &ir::Function,
inst: ir::Inst,
divert: &mut regalloc::RegDiversions,
sink: &mut CodeSink,
) {
binemit::emit_inst(func, inst, divert, sink)
}


@@ -18,14 +18,16 @@ mod tests {
let shared = settings::Flags::new(&settings::builder());
let b = builder();
let f = Flags::new(&shared, &b);
assert_eq!(f.to_string(),
"[riscv]\n\
assert_eq!(
f.to_string(),
"[riscv]\n\
supports_m = false\n\
supports_a = false\n\
supports_f = false\n\
supports_d = false\n\
enable_m = true\n\
enable_e = false\n");
enable_e = false\n"
);
// Predicates are not part of the Display output.
assert_eq!(f.full_float(), false);
}


@@ -4,40 +4,45 @@
pub trait IteratorExtras: Iterator {
/// Create an iterator that produces adjacent pairs of elements from the iterator.
fn adjacent_pairs(mut self) -> AdjacentPairs<Self>
where Self: Sized,
Self::Item: Clone
where
Self: Sized,
Self::Item: Clone,
{
let elem = self.next();
AdjacentPairs { iter: self, elem }
}
}
impl<T> IteratorExtras for T where T: Iterator {}
impl<T> IteratorExtras for T
where
T: Iterator,
{
}
/// Adjacent pairs iterator returned by `adjacent_pairs()`.
///
/// This wraps another iterator and produces a sequence of adjacent pairs of elements.
pub struct AdjacentPairs<I>
where I: Iterator,
I::Item: Clone
where
I: Iterator,
I::Item: Clone,
{
iter: I,
elem: Option<I::Item>,
}
impl<I> Iterator for AdjacentPairs<I>
where I: Iterator,
I::Item: Clone
where
I: Iterator,
I::Item: Clone,
{
type Item = (I::Item, I::Item);
fn next(&mut self) -> Option<Self::Item> {
self.elem
.take()
.and_then(|e| {
self.elem = self.iter.next();
self.elem.clone().map(|n| (e, n))
})
self.elem.take().and_then(|e| {
self.elem = self.iter.next();
self.elem.clone().map(|n| (e, n))
})
}
}
@@ -47,33 +52,45 @@ mod tests {
fn adjpairs() {
use super::IteratorExtras;
assert_eq!([1, 2, 3, 4]
.iter()
.cloned()
.adjacent_pairs()
.collect::<Vec<_>>(),
vec![(1, 2), (2, 3), (3, 4)]);
assert_eq!([2, 3, 4]
.iter()
.cloned()
.adjacent_pairs()
.collect::<Vec<_>>(),
vec![(2, 3), (3, 4)]);
assert_eq!([2, 3, 4]
.iter()
.cloned()
.adjacent_pairs()
.collect::<Vec<_>>(),
vec![(2, 3), (3, 4)]);
assert_eq!([3, 4].iter().cloned().adjacent_pairs().collect::<Vec<_>>(),
vec![(3, 4)]);
assert_eq!([4].iter().cloned().adjacent_pairs().collect::<Vec<_>>(),
vec![]);
assert_eq!([]
.iter()
.cloned()
.adjacent_pairs()
.collect::<Vec<(i32, i32)>>(),
vec![]);
assert_eq!(
[1, 2, 3, 4]
.iter()
.cloned()
.adjacent_pairs()
.collect::<Vec<_>>(),
vec![(1, 2), (2, 3), (3, 4)]
);
assert_eq!(
[2, 3, 4]
.iter()
.cloned()
.adjacent_pairs()
.collect::<Vec<_>>(),
vec![(2, 3), (3, 4)]
);
assert_eq!(
[2, 3, 4]
.iter()
.cloned()
.adjacent_pairs()
.collect::<Vec<_>>(),
vec![(2, 3), (3, 4)]
);
assert_eq!(
[3, 4].iter().cloned().adjacent_pairs().collect::<Vec<_>>(),
vec![(3, 4)]
);
assert_eq!(
[4].iter().cloned().adjacent_pairs().collect::<Vec<_>>(),
vec![]
);
assert_eq!(
[]
.iter()
.cloned()
.adjacent_pairs()
.collect::<Vec<(i32, i32)>>(),
vec![]
);
}
}
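For the expected outputs in the `adjpairs` test above, a slice-based equivalent makes the pairing easy to see; this is only a sketch of the same behavior, not the crate's `AdjacentPairs` adapter:

fn adjacent_pairs<T: Clone>(xs: &[T]) -> Vec<(T, T)> {
    // Pair each element with its successor: [a, b, c] -> [(a, b), (b, c)].
    xs.iter().cloned().zip(xs.iter().skip(1).cloned()).collect()
}

fn main() {
    assert_eq!(adjacent_pairs(&[1, 2, 3, 4]), vec![(1, 2), (2, 3), (3, 4)]);
    assert_eq!(adjacent_pairs(&[4]), vec![]);
    assert_eq!(adjacent_pairs::<i32>(&[]), vec![]);
}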


@@ -98,9 +98,11 @@ fn legalize_entry_arguments(func: &mut Function, entry: Ebb) {
// Compute the value we want for `arg` from the legalized ABI arguments.
let mut get_arg = |dfg: &mut DataFlowGraph, ty| {
let abi_type = abi_types[abi_arg];
assert_eq!(abi_type.purpose,
ArgumentPurpose::Normal,
"Can't legalize special-purpose argument");
assert_eq!(
abi_type.purpose,
ArgumentPurpose::Normal,
"Can't legalize special-purpose argument"
);
if ty == abi_type.value_type {
abi_arg += 1;
Ok(dfg.append_ebb_arg(entry, ty))
@@ -159,14 +161,17 @@ fn legalize_entry_arguments(func: &mut Function, entry: Ebb) {
/// This function is very similar to the `legalize_entry_arguments` function above.
///
/// Returns the possibly new instruction representing the call.
fn legalize_inst_results<ResType>(dfg: &mut DataFlowGraph,
pos: &mut Cursor,
mut get_abi_type: ResType)
-> Inst
where ResType: FnMut(&DataFlowGraph, usize) -> ArgumentType
fn legalize_inst_results<ResType>(
dfg: &mut DataFlowGraph,
pos: &mut Cursor,
mut get_abi_type: ResType,
) -> Inst
where
ResType: FnMut(&DataFlowGraph, usize) -> ArgumentType,
{
let call = pos.current_inst()
.expect("Cursor must point to a call instruction");
let call = pos.current_inst().expect(
"Cursor must point to a call instruction",
);
// We theoretically allow for call instructions that return a number of fixed results before
// the call return values. In practice, it doesn't happen.
@@ -216,13 +221,15 @@ fn legalize_inst_results<ResType>(dfg: &mut DataFlowGraph,
/// - `Err(arg_type)` if further conversions are needed from the ABI argument `arg_type`.
///
/// If the `into_result` value is provided, the converted result will be written into that value.
fn convert_from_abi<GetArg>(dfg: &mut DataFlowGraph,
pos: &mut Cursor,
ty: Type,
into_result: Option<Value>,
get_arg: &mut GetArg)
-> Value
where GetArg: FnMut(&mut DataFlowGraph, Type) -> Result<Value, ArgumentType>
fn convert_from_abi<GetArg>(
dfg: &mut DataFlowGraph,
pos: &mut Cursor,
ty: Type,
into_result: Option<Value>,
get_arg: &mut GetArg,
) -> Value
where
GetArg: FnMut(&mut DataFlowGraph, Type) -> Result<Value, ArgumentType>,
{
// Terminate the recursion when we get the desired type.
let arg_type = match get_arg(dfg, ty) {
@@ -246,11 +253,13 @@ fn convert_from_abi<GetArg>(dfg: &mut DataFlowGraph,
let abi_ty = ty.half_width().expect("Invalid type for conversion");
let lo = convert_from_abi(dfg, pos, abi_ty, None, get_arg);
let hi = convert_from_abi(dfg, pos, abi_ty, None, get_arg);
dbg!("intsplit {}: {}, {}: {}",
lo,
dfg.value_type(lo),
hi,
dfg.value_type(hi));
dbg!(
"intsplit {}: {}, {}: {}",
lo,
dfg.value_type(lo),
hi,
dfg.value_type(hi)
);
dfg.ins(pos).with_results([into_result]).iconcat(lo, hi)
}
// Construct a `ty` by concatenating two halves of a vector.
@@ -296,12 +305,14 @@ fn convert_from_abi<GetArg>(dfg: &mut DataFlowGraph,
/// 2. If the suggested argument doesn't have the right value type, don't change anything, but
/// return the `Err(ArgumentType)` that is needed.
///
fn convert_to_abi<PutArg>(dfg: &mut DataFlowGraph,
cfg: &ControlFlowGraph,
pos: &mut Cursor,
value: Value,
put_arg: &mut PutArg)
where PutArg: FnMut(&mut DataFlowGraph, Value) -> Result<(), ArgumentType>
fn convert_to_abi<PutArg>(
dfg: &mut DataFlowGraph,
cfg: &ControlFlowGraph,
pos: &mut Cursor,
value: Value,
put_arg: &mut PutArg,
) where
PutArg: FnMut(&mut DataFlowGraph, Value) -> Result<(), ArgumentType>,
{
// Start by invoking the closure to either terminate the recursion or get the argument type
// we're trying to match.
@@ -360,7 +371,8 @@ fn check_call_signature(dfg: &DataFlowGraph, inst: Inst) -> Result<(), SigRef> {
let sig = &dfg.signatures[sig_ref];
if check_arg_types(dfg, args, &sig.argument_types[..]) &&
check_arg_types(dfg, dfg.inst_results(inst), &sig.return_types[..]) {
check_arg_types(dfg, dfg.inst_results(inst), &sig.return_types[..])
{
// All types check out.
Ok(())
} else {
@@ -380,20 +392,23 @@ fn check_return_signature(dfg: &DataFlowGraph, inst: Inst, sig: &Signature) -> b
/// - `get_abi_type` is a closure that can provide the desired `ArgumentType` for a given ABI
/// argument number in `0..abi_args`.
///
fn legalize_inst_arguments<ArgType>(dfg: &mut DataFlowGraph,
cfg: &ControlFlowGraph,
pos: &mut Cursor,
abi_args: usize,
mut get_abi_type: ArgType)
where ArgType: FnMut(&DataFlowGraph, usize) -> ArgumentType
fn legalize_inst_arguments<ArgType>(
dfg: &mut DataFlowGraph,
cfg: &ControlFlowGraph,
pos: &mut Cursor,
abi_args: usize,
mut get_abi_type: ArgType,
) where
ArgType: FnMut(&DataFlowGraph, usize) -> ArgumentType,
{
let inst = pos.current_inst()
.expect("Cursor must point to a call instruction");
let inst = pos.current_inst().expect(
"Cursor must point to a call instruction",
);
// Lift the value list out of the call instruction so we modify it.
let mut vlist = dfg[inst]
.take_value_list()
.expect("Call must have a value list");
let mut vlist = dfg[inst].take_value_list().expect(
"Call must have a value list",
);
// The value list contains all arguments to the instruction, including the callee on an
// indirect call which isn't part of the call arguments that must match the ABI signature.
@@ -474,23 +489,23 @@ pub fn handle_call_abi(mut inst: Inst, func: &mut Function, cfg: &ControlFlowGra
// OK, we need to fix the call arguments to match the ABI signature.
let abi_args = dfg.signatures[sig_ref].argument_types.len();
legalize_inst_arguments(dfg,
cfg,
pos,
abi_args,
|dfg, abi_arg| dfg.signatures[sig_ref].argument_types[abi_arg]);
legalize_inst_arguments(dfg, cfg, pos, abi_args, |dfg, abi_arg| {
dfg.signatures[sig_ref].argument_types[abi_arg]
});
if !dfg.signatures[sig_ref].return_types.is_empty() {
inst = legalize_inst_results(dfg,
pos,
|dfg, abi_res| dfg.signatures[sig_ref].return_types[abi_res]);
inst = legalize_inst_results(dfg, pos, |dfg, abi_res| {
dfg.signatures[sig_ref].return_types[abi_res]
});
}
debug_assert!(check_call_signature(dfg, inst).is_ok(),
"Signature still wrong: {}, {}{}",
dfg.display_inst(inst, None),
sig_ref,
dfg.signatures[sig_ref]);
debug_assert!(
check_call_signature(dfg, inst).is_ok(),
"Signature still wrong: {}, {}{}",
dfg.display_inst(inst, None),
sig_ref,
dfg.signatures[sig_ref]
);
// Go back and insert spills for any stack arguments.
pos.goto_inst(inst);
@@ -519,27 +534,30 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph
.iter()
.rev()
.take_while(|&rt| {
rt.purpose == ArgumentPurpose::Link ||
rt.purpose == ArgumentPurpose::StructReturn ||
rt.purpose == ArgumentPurpose::VMContext
})
rt.purpose == ArgumentPurpose::Link || rt.purpose == ArgumentPurpose::StructReturn ||
rt.purpose == ArgumentPurpose::VMContext
})
.count();
let abi_args = sig.return_types.len() - special_args;
legalize_inst_arguments(dfg,
cfg,
pos,
abi_args,
|_, abi_arg| sig.return_types[abi_arg]);
legalize_inst_arguments(
dfg,
cfg,
pos,
abi_args,
|_, abi_arg| sig.return_types[abi_arg],
);
assert_eq!(dfg.inst_variable_args(inst).len(), abi_args);
// Append special return arguments for any `sret`, `link`, and `vmctx` return values added to
// the legalized signature. These values should simply be propagated from the entry block
// arguments.
if special_args > 0 {
dbg!("Adding {} special-purpose arguments to {}",
special_args,
dfg.display_inst(inst, None));
dbg!(
"Adding {} special-purpose arguments to {}",
special_args,
dfg.display_inst(inst, None)
);
let mut vlist = dfg[inst].take_value_list().unwrap();
for arg in &sig.return_types[abi_args..] {
match arg.purpose {
@@ -565,10 +583,12 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph
dfg[inst].put_value_list(vlist);
}
debug_assert!(check_return_signature(dfg, inst, sig),
"Signature still wrong: {} / signature {}",
dfg.display_inst(inst, None),
sig);
debug_assert!(
check_return_signature(dfg, inst, sig),
"Signature still wrong: {} / signature {}",
dfg.display_inst(inst, None),
sig
);
// Yes, we changed stuff.
true
@@ -579,10 +599,10 @@ pub fn handle_return_abi(inst: Inst, func: &mut Function, cfg: &ControlFlowGraph
/// Values that are passed into the function on the stack must be assigned to an `IncomingArg`
/// stack slot already during legalization.
fn spill_entry_arguments(func: &mut Function, entry: Ebb) {
for (abi, &arg) in func.signature
.argument_types
.iter()
.zip(func.dfg.ebb_args(entry)) {
for (abi, &arg) in func.signature.argument_types.iter().zip(
func.dfg.ebb_args(entry),
)
{
if let ArgumentLoc::Stack(offset) = abi.location {
let ss = func.stack_slots.make_incoming_arg(abi.value_type, offset);
func.locations[arg] = ValueLoc::Stack(ss);
@@ -598,15 +618,18 @@ fn spill_entry_arguments(func: &mut Function, entry: Ebb) {
/// TODO: The outgoing stack slots can be written a bit earlier, as long as there are no branches
/// or calls between writing the stack slots and the call instruction. Writing the slots earlier
/// could help reduce register pressure before the call.
fn spill_call_arguments(dfg: &mut DataFlowGraph,
locations: &mut ValueLocations,
stack_slots: &mut StackSlots,
pos: &mut Cursor)
-> bool {
let inst = pos.current_inst()
.expect("Cursor must point to a call instruction");
let sig_ref = dfg.call_signature(inst)
.expect("Call instruction expected.");
fn spill_call_arguments(
dfg: &mut DataFlowGraph,
locations: &mut ValueLocations,
stack_slots: &mut StackSlots,
pos: &mut Cursor,
) -> bool {
let inst = pos.current_inst().expect(
"Cursor must point to a call instruction",
);
let sig_ref = dfg.call_signature(inst).expect(
"Call instruction expected.",
);
// Start by building a list of stack slots and arguments to be replaced.
// This requires borrowing `dfg`, so we can't change anything.


@@ -35,12 +35,14 @@ pub fn expand_heap_addr(inst: ir::Inst, func: &mut ir::Function, _cfg: &mut Cont
}
/// Expand a `heap_addr` for a dynamic heap.
fn dynamic_addr(inst: ir::Inst,
heap: ir::Heap,
offset: ir::Value,
size: u32,
bound_gv: ir::GlobalVar,
func: &mut ir::Function) {
fn dynamic_addr(
inst: ir::Inst,
heap: ir::Heap,
offset: ir::Value,
size: u32,
bound_gv: ir::GlobalVar,
func: &mut ir::Function,
) {
let size = size as i64;
let offset_ty = func.dfg.value_type(offset);
let addr_ty = func.dfg.value_type(func.dfg.first_result(inst));
@@ -54,21 +56,30 @@ fn dynamic_addr(inst: ir::Inst,
let oob;
if size == 1 {
// `offset > bound - 1` is the same as `offset >= bound`.
oob = pos.ins()
.icmp(IntCC::UnsignedGreaterThanOrEqual, offset, bound);
oob = pos.ins().icmp(
IntCC::UnsignedGreaterThanOrEqual,
offset,
bound,
);
} else if size <= min_size {
// We know that bound >= min_size, so here we can compare `offset > bound - size` without
// wrapping.
let adj_bound = pos.ins().iadd_imm(bound, -size);
oob = pos.ins()
.icmp(IntCC::UnsignedGreaterThan, offset, adj_bound);
oob = pos.ins().icmp(
IntCC::UnsignedGreaterThan,
offset,
adj_bound,
);
} else {
// We need an overflow check for the adjusted offset.
let size_val = pos.ins().iconst(offset_ty, size);
let (adj_offset, overflow) = pos.ins().iadd_cout(offset, size_val);
pos.ins().trapnz(overflow);
oob = pos.ins()
.icmp(IntCC::UnsignedGreaterThan, adj_offset, bound);
oob = pos.ins().icmp(
IntCC::UnsignedGreaterThan,
adj_offset,
bound,
);
}
pos.ins().trapnz(oob);
@@ -76,12 +87,14 @@ fn dynamic_addr(inst: ir::Inst,
}
/// Expand a `heap_addr` for a static heap.
fn static_addr(inst: ir::Inst,
heap: ir::Heap,
offset: ir::Value,
size: u32,
bound: i64,
func: &mut ir::Function) {
fn static_addr(
inst: ir::Inst,
heap: ir::Heap,
offset: ir::Value,
size: u32,
bound: i64,
func: &mut ir::Function,
) {
let size = size as i64;
let offset_ty = func.dfg.value_type(offset);
let addr_ty = func.dfg.value_type(func.dfg.first_result(inst));
@@ -104,11 +117,17 @@ fn static_addr(inst: ir::Inst,
let oob = if limit & 1 == 1 {
// Prefer testing `offset >= limit - 1` when limit is odd because an even number is
// likely to be a convenient constant on ARM and other RISC architectures.
pos.ins()
.icmp_imm(IntCC::UnsignedGreaterThanOrEqual, offset, limit - 1)
pos.ins().icmp_imm(
IntCC::UnsignedGreaterThanOrEqual,
offset,
limit - 1,
)
} else {
pos.ins()
.icmp_imm(IntCC::UnsignedGreaterThan, offset, limit)
pos.ins().icmp_imm(
IntCC::UnsignedGreaterThan,
offset,
limit,
)
};
pos.ins().trapnz(oob);
}
@@ -119,12 +138,14 @@ fn static_addr(inst: ir::Inst,
/// Emit code for the base address computation of a `heap_addr` instruction.
///
///
fn offset_addr(inst: ir::Inst,
heap: ir::Heap,
addr_ty: ir::Type,
mut offset: ir::Value,
offset_ty: ir::Type,
func: &mut ir::Function) {
fn offset_addr(
inst: ir::Inst,
heap: ir::Heap,
addr_ty: ir::Type,
mut offset: ir::Value,
offset_ty: ir::Type,
func: &mut ir::Function,
) {
let mut pos = FuncCursor::new(func).at_inst(inst);
// Convert `offset` to `addr_ty`.
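The bound-check rewrites commented in `dynamic_addr` above (`offset > bound - 1` is the same as `offset >= bound`, and `offset > bound - size` stands in for `offset + size > bound` when `size <= bound`, so the subtraction can't wrap) can be brute-force checked in isolation; a small sketch with the `size <= bound` assumption made explicit:

fn main() {
    let bound: u64 = 16;
    for &size in &[1u64, 4, 16] {
        // The rewrites rely on size <= bound (size <= min_size <= bound in the pass).
        assert!(size <= bound);
        for offset in 0..64u64 {
            // Real out-of-bounds condition: the end of the access exceeds the bound.
            let oob = offset + size > bound;
            let rewritten = if size == 1 {
                offset >= bound
            } else {
                offset > bound - size
            };
            assert_eq!(oob, rewritten);
        }
    }
}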


@@ -66,9 +66,11 @@ pub fn legalize_function(func: &mut ir::Function, cfg: &mut ControlFlowGraph, is
split::simplify_branch_arguments(&mut pos.func.dfg, inst);
}
match isa.encode(&pos.func.dfg,
&pos.func.dfg[inst],
pos.func.dfg.ctrl_typevar(inst)) {
match isa.encode(
&pos.func.dfg,
&pos.func.dfg[inst],
pos.func.dfg.ctrl_typevar(inst),
) {
Ok(encoding) => pos.func.encodings[inst] = encoding,
Err(action) => {
// We should transform the instruction into legal equivalents.


@@ -71,21 +71,23 @@ use std::iter;
/// Split `value` into two values using the `isplit` semantics. Do this by reusing existing values
/// if possible.
pub fn isplit(dfg: &mut DataFlowGraph,
cfg: &ControlFlowGraph,
pos: &mut Cursor,
value: Value)
-> (Value, Value) {
pub fn isplit(
dfg: &mut DataFlowGraph,
cfg: &ControlFlowGraph,
pos: &mut Cursor,
value: Value,
) -> (Value, Value) {
split_any(dfg, cfg, pos, value, Opcode::Iconcat)
}
/// Split `value` into halves using the `vsplit` semantics. Do this by reusing existing values if
/// possible.
pub fn vsplit(dfg: &mut DataFlowGraph,
cfg: &ControlFlowGraph,
pos: &mut Cursor,
value: Value)
-> (Value, Value) {
pub fn vsplit(
dfg: &mut DataFlowGraph,
cfg: &ControlFlowGraph,
pos: &mut Cursor,
value: Value,
) -> (Value, Value) {
split_any(dfg, cfg, pos, value, Opcode::Vconcat)
}
@@ -107,12 +109,13 @@ struct Repair {
}
/// Generic version of `isplit` and `vsplit` controlled by the `concat` opcode.
fn split_any(dfg: &mut DataFlowGraph,
cfg: &ControlFlowGraph,
pos: &mut Cursor,
value: Value,
concat: Opcode)
-> (Value, Value) {
fn split_any(
dfg: &mut DataFlowGraph,
cfg: &ControlFlowGraph,
pos: &mut Cursor,
value: Value,
concat: Opcode,
) -> (Value, Value) {
let saved_pos = pos.position();
let mut repairs = Vec::new();
let result = split_value(dfg, pos, value, concat, &mut repairs);
@@ -121,17 +124,20 @@ fn split_any(dfg: &mut DataFlowGraph,
while let Some(repair) = repairs.pop() {
for &(_, inst) in cfg.get_predecessors(repair.ebb) {
let branch_opc = dfg[inst].opcode();
assert!(branch_opc.is_branch(),
"Predecessor not a branch: {}",
dfg.display_inst(inst, None));
assert!(
branch_opc.is_branch(),
"Predecessor not a branch: {}",
dfg.display_inst(inst, None)
);
let fixed_args = branch_opc.constraints().fixed_value_arguments();
let mut args = dfg[inst]
.take_value_list()
.expect("Branches must have value lists.");
let mut args = dfg[inst].take_value_list().expect(
"Branches must have value lists.",
);
let num_args = args.len(&dfg.value_lists);
// Get the old value passed to the EBB argument we're repairing.
let old_arg = args.get(fixed_args + repair.num, &dfg.value_lists)
.expect("Too few branch arguments");
let old_arg = args.get(fixed_args + repair.num, &dfg.value_lists).expect(
"Too few branch arguments",
);
// It's possible that the CFG's predecessor list has duplicates. Detect them here.
if dfg.value_type(old_arg) == repair.split_type {
@@ -145,19 +151,21 @@ fn split_any(dfg: &mut DataFlowGraph,
// The `lo` part replaces the original argument.
*args.get_mut(fixed_args + repair.num, &mut dfg.value_lists)
.unwrap() = lo;
.unwrap() = lo;
// The `hi` part goes at the end. Since multiple repairs may have been scheduled to the
// same EBB, there could be multiple arguments missing.
if num_args > fixed_args + repair.hi_num {
*args.get_mut(fixed_args + repair.hi_num, &mut dfg.value_lists)
.unwrap() = hi;
.unwrap() = hi;
} else {
// We need to append one or more arguments. If we're adding more than one argument,
// there must be pending repairs on the stack that will fill in the correct values
// instead of `hi`.
args.extend(iter::repeat(hi).take(1 + fixed_args + repair.hi_num - num_args),
&mut dfg.value_lists);
args.extend(
iter::repeat(hi).take(1 + fixed_args + repair.hi_num - num_args),
&mut dfg.value_lists,
);
}
// Put the value list back after manipulating it.
@@ -175,12 +183,13 @@ fn split_any(dfg: &mut DataFlowGraph,
/// instruction.
///
/// Return the two new values representing the parts of `value`.
fn split_value(dfg: &mut DataFlowGraph,
pos: &mut Cursor,
value: Value,
concat: Opcode,
repairs: &mut Vec<Repair>)
-> (Value, Value) {
fn split_value(
dfg: &mut DataFlowGraph,
pos: &mut Cursor,
value: Value,
concat: Opcode,
repairs: &mut Vec<Repair>,
) -> (Value, Value) {
let value = dfg.resolve_copies(value);
let mut reuse = None;
@@ -228,9 +237,12 @@ fn split_value(dfg: &mut DataFlowGraph,
// need to insert a split instruction before returning.
pos.goto_top(ebb);
pos.next_inst();
dfg.ins(pos)
.with_result(value)
.Binary(concat, split_type, lo, hi);
dfg.ins(pos).with_result(value).Binary(
concat,
split_type,
lo,
hi,
);
// Finally, splitting the EBB argument is not enough. We also have to repair all
// of the predecessor instructions that branch here.
@@ -254,19 +266,21 @@ fn split_value(dfg: &mut DataFlowGraph,
}
// Add a repair entry to the work list.
fn add_repair(concat: Opcode,
split_type: Type,
ebb: Ebb,
num: usize,
hi_num: usize,
repairs: &mut Vec<Repair>) {
fn add_repair(
concat: Opcode,
split_type: Type,
ebb: Ebb,
num: usize,
hi_num: usize,
repairs: &mut Vec<Repair>,
) {
repairs.push(Repair {
concat,
split_type,
ebb,
num,
hi_num,
});
concat,
split_type,
ebb,
num,
hi_num,
});
}
/// Strip concat-split chains. Return a simpler way of computing the same value.
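As a side note, the numeric contract behind an `isplit`/`iconcat` pair is easy to state over plain integers. A minimal standalone sketch, assuming 64-bit values split into 32-bit halves (the pass itself operates on IR values, not integers, and the names here are made up for illustration):

// Numeric picture of isplit/iconcat: split a 64-bit value into (lo, hi)
// halves and concatenate them back.
fn isplit_demo(x: u64) -> (u32, u32) {
    ((x & 0xffff_ffff) as u32, (x >> 32) as u32) // (lo, hi)
}

fn iconcat_demo(lo: u32, hi: u32) -> u64 {
    (lo as u64) | ((hi as u64) << 32)
}

fn main() {
    let x = 0x0123_4567_89ab_cdef_u64;
    let (lo, hi) = isplit_demo(x);
    assert_eq!((lo, hi), (0x89ab_cdef, 0x0123_4567));
    assert_eq!(iconcat_demo(lo, hi), x);
}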

View File

@@ -10,10 +10,12 @@ use loop_analysis::{Loop, LoopAnalysis};
/// Performs the LICM pass by detecting loops within the CFG and moving
/// loop-invariant instructions out of them.
/// Changes the CFG and domtree in-place during the operation.
pub fn do_licm(func: &mut Function,
cfg: &mut ControlFlowGraph,
domtree: &mut DominatorTree,
loop_analysis: &mut LoopAnalysis) {
pub fn do_licm(
func: &mut Function,
cfg: &mut ControlFlowGraph,
domtree: &mut DominatorTree,
loop_analysis: &mut LoopAnalysis,
) {
loop_analysis.compute(func, cfg, domtree);
for lp in loop_analysis.loops() {
// For each loop that we want to optimize we determine the set of loop-invariant
@@ -53,11 +55,12 @@ pub fn do_licm(func: &mut Function,
// Insert a pre-header before the header, modifying the function layout and CFG to reflect it.
// A jump instruction to the header is placed at the end of the pre-header.
fn create_pre_header(header: Ebb,
func: &mut Function,
cfg: &mut ControlFlowGraph,
domtree: &DominatorTree)
-> Ebb {
fn create_pre_header(
header: Ebb,
func: &mut Function,
cfg: &mut ControlFlowGraph,
domtree: &DominatorTree,
) -> Ebb {
let pool = &mut ListPool::<Value>::new();
let header_args_values: Vec<Value> = func.dfg.ebb_args(header).into_iter().cloned().collect();
let header_args_types: Vec<Type> = header_args_values
@@ -82,9 +85,10 @@ fn create_pre_header(header: Ebb,
// Inserts the pre-header at the right place in the layout.
pos.insert_ebb(pre_header);
pos.next_inst();
func.dfg
.ins(&mut pos)
.jump(header, pre_header_args_value.as_slice(pool));
func.dfg.ins(&mut pos).jump(
header,
pre_header_args_value.as_slice(pool),
);
}
pre_header
}
@@ -94,11 +98,12 @@ fn create_pre_header(header: Ebb,
// A loop header has a pre-header if there is only one predecessor that the header doesn't
// dominate.
// Returns the pre-header Ebb and the instruction jumping to the header.
fn has_pre_header(layout: &Layout,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
header: Ebb)
-> Option<(Ebb, Inst)> {
fn has_pre_header(
layout: &Layout,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
header: Ebb,
) -> Option<(Ebb, Inst)> {
let mut result = None;
let mut found = false;
for &(pred_ebb, last_inst) in cfg.get_predecessors(header) {
@@ -129,11 +134,12 @@ fn change_branch_jump_destination(inst: Inst, new_ebb: Ebb, func: &mut Function)
// Traverses a loop in reverse post-order from a header EBB and identify loop-invariant
// instructions. These loop-invariant instructions are then removed from the code and returned
// (in reverse post-order) for later use.
fn remove_loop_invariant_instructions(lp: Loop,
func: &mut Function,
cfg: &ControlFlowGraph,
loop_analysis: &LoopAnalysis)
-> Vec<Inst> {
fn remove_loop_invariant_instructions(
lp: Loop,
func: &mut Function,
cfg: &ControlFlowGraph,
loop_analysis: &LoopAnalysis,
) -> Vec<Inst> {
let mut loop_values: HashSet<Value> = HashSet::new();
let mut invariant_inst: Vec<Inst> = Vec::new();
let mut pos = Cursor::new(&mut func.layout);
@@ -146,10 +152,10 @@ fn remove_loop_invariant_instructions(lp: Loop,
pos.goto_top(*ebb);
while let Some(inst) = pos.next_inst() {
if func.dfg.has_results(inst) &&
func.dfg
.inst_args(inst)
.into_iter()
.all(|arg| !loop_values.contains(arg)) {
func.dfg.inst_args(inst).into_iter().all(|arg| {
!loop_values.contains(arg)
})
{
// If all the instruction's arguments are defined outside the loop
// then this instruction is loop-invariant
invariant_inst.push(inst);
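The transformation this pass performs can be pictured in plain Rust: a loop-invariant computation is evaluated once before the loop, which is what hoisting it into the pre-header achieves at the IR level. A simplified sketch with made-up names:

fn sum_scaled(xs: &[i64], a: i64, b: i64) -> i64 {
    // `a * b` does not depend on the loop, so it is computed once up front,
    // mirroring what LICM does by moving the multiply into the pre-header.
    let scale = a * b;
    xs.iter().map(|&x| x * scale).sum()
}

fn main() {
    assert_eq!(sum_scaled(&[1, 2, 3], 2, 5), 60);
}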

View File

@@ -105,10 +105,12 @@ impl LoopAnalysis {
// Traverses the CFG in reverse postorder and creates a loop object for every EBB having a
// back edge.
fn find_loop_headers(&mut self,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
layout: &Layout) {
fn find_loop_headers(
&mut self,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
layout: &Layout,
) {
// We traverse the CFG in reverse postorder
for &ebb in domtree.cfg_postorder().iter().rev() {
for &(_, pred_inst) in cfg.get_predecessors(ebb) {
@@ -127,10 +129,12 @@ impl LoopAnalysis {
// Intended to be called after `find_loop_headers`. For each detected loop header,
// discovers all the EBBs belonging to the loop and its inner loops. After a call to this
// function, the loop tree is fully constructed.
fn discover_loop_blocks(&mut self,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
layout: &Layout) {
fn discover_loop_blocks(
&mut self,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
layout: &Layout,
) {
let mut stack: Vec<Ebb> = Vec::new();
// We handle each loop header in reverse order, corresponding to a pseudo postorder
// traversal of the graph.

View File

@@ -38,7 +38,8 @@ impl<T: ReservedValue> PackedOption<T> {
/// Maps a `PackedOption<T>` to `Option<U>` by applying a function to a contained value.
pub fn map<U, F>(self, f: F) -> Option<U>
where F: FnOnce(T) -> U
where
F: FnOnce(T) -> U,
{
self.expand().map(f)
}
@@ -69,8 +70,10 @@ impl<T: ReservedValue> Default for PackedOption<T> {
impl<T: ReservedValue> From<T> for PackedOption<T> {
/// Convert `t` into a packed `Some(x)`.
fn from(t: T) -> PackedOption<T> {
debug_assert!(t != T::reserved_value(),
"Can't make a PackedOption from the reserved value.");
debug_assert!(
t != T::reserved_value(),
"Can't make a PackedOption from the reserved value."
);
PackedOption(t)
}
}
@@ -92,7 +95,8 @@ impl<T: ReservedValue> Into<Option<T>> for PackedOption<T> {
}
impl<T> fmt::Debug for PackedOption<T>
where T: ReservedValue + fmt::Debug
where
T: ReservedValue + fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.is_none() {
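A reduced sketch of the reserved-value idea behind `PackedOption`, using a hypothetical `PackedU32` in place of the generic type (the real type is generic over `ReservedValue`; everything below is illustrative only):

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct PackedU32(u32); // `u32::MAX` plays the role of the reserved "none" value.

impl PackedU32 {
    const RESERVED: u32 = u32::MAX;

    fn none() -> Self {
        PackedU32(Self::RESERVED)
    }

    fn some(x: u32) -> Self {
        // Mirrors the debug_assert in `From<T>`: the reserved value can't be packed.
        assert_ne!(x, Self::RESERVED, "can't pack the reserved value");
        PackedU32(x)
    }

    fn expand(self) -> Option<u32> {
        if self.0 == Self::RESERVED { None } else { Some(self.0) }
    }

    fn map<U, F: FnOnce(u32) -> U>(self, f: F) -> Option<U> {
        self.expand().map(f)
    }
}

fn main() {
    assert_eq!(PackedU32::some(7).map(|x| x * 2), Some(14));
    assert_eq!(PackedU32::none().map(|x| x * 2), None);
    // The packed form is no bigger than the bare value, unlike Option<u32>.
    assert_eq!(std::mem::size_of::<PackedU32>(), std::mem::size_of::<u32>());
}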

View File

@@ -7,7 +7,8 @@
///
/// Returns the number of elements where `p(t)` is true.
pub fn partition_slice<'a, T: 'a, F>(s: &'a mut [T], mut p: F) -> usize
where F: FnMut(&T) -> bool
where
F: FnMut(&T) -> bool,
{
// Count the length of the prefix where `p` returns true.
let mut count = match s.iter().position(|t| !p(t)) {
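The contract of `partition_slice` can be exercised with a small self-contained stand-in. The version below is an unstable in-place partition, written purely to illustrate the return value; the real implementation differs:

fn partition_demo<T, F: FnMut(&T) -> bool>(s: &mut [T], mut p: F) -> usize {
    // Move every element satisfying `p` to the front of the slice and
    // return how many there are; order within the two halves may change.
    let mut count = 0;
    for i in 0..s.len() {
        if p(&s[i]) {
            s.swap(i, count);
            count += 1;
        }
    }
    count
}

fn main() {
    let mut v = [3, 8, 1, 9, 4, 6];
    let n = partition_demo(&mut v, |&x| x < 5);
    assert_eq!(n, 3);
    assert!(v[..n].iter().all(|&x| x < 5));
    assert!(v[n..].iter().all(|&x| x >= 5));
}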

View File

@@ -91,7 +91,8 @@ impl Affinity {
// If the preferred register class is a subclass of the constraint, there's no need
// to change anything.
if constraint.kind != ConstraintKind::Stack &&
!constraint.regclass.has_subclass(rc) {
!constraint.regclass.has_subclass(rc)
{
// If the register classes don't overlap, `intersect` returns `None`, and we
// just keep our previous affinity.
if let Some(subclass) = constraint.regclass.intersect(reg_info.rc(rc)) {

View File

@@ -86,10 +86,9 @@ impl AllocatableSet {
///
/// This assumes that unused bits are 1.
pub fn interferes_with(&self, other: &AllocatableSet) -> bool {
self.avail
.iter()
.zip(&other.avail)
.any(|(&x, &y)| (x | y) != !0)
self.avail.iter().zip(&other.avail).any(
|(&x, &y)| (x | y) != !0,
)
}
/// Intersect this set of allocatable registers with `other`. This has the effect of removing

View File

@@ -132,14 +132,15 @@ impl DomForest {
///
/// If the merge succeeds, returns `Ok(())`. The merged sequence can be extracted with
/// `swap()`.
pub fn try_merge(&mut self,
va: &[Value],
vb: &[Value],
dfg: &DataFlowGraph,
layout: &Layout,
domtree: &DominatorTree,
liveness: &Liveness)
-> Result<(), (Value, Value)> {
pub fn try_merge(
&mut self,
va: &[Value],
vb: &[Value],
dfg: &DataFlowGraph,
layout: &Layout,
domtree: &DominatorTree,
liveness: &Liveness,
) -> Result<(), (Value, Value)> {
self.stack.clear();
self.values.clear();
self.values.reserve(va.len() + vb.len());
@@ -154,16 +155,16 @@ impl DomForest {
for node in merged {
if let Some(parent) = self.push_node(node, layout, domtree) {
// Check if `parent` live range contains `node.def`.
let lr = liveness
.get(parent)
.expect("No live range for parent value");
let lr = liveness.get(parent).expect(
"No live range for parent value",
);
if lr.overlaps_def(node.def, layout.pp_ebb(node.def), layout) {
// Interference detected. Get the `(a, b)` order right in the error.
return Err(if node.set == 0 {
(node.value, parent)
} else {
(parent, node.value)
});
(node.value, parent)
} else {
(parent, node.value)
});
}
}
}
@@ -177,8 +178,9 @@ impl DomForest {
/// Given two ordered sequences of nodes, yield an ordered sequence containing all of them.
/// Duplicates are removed.
struct MergedNodes<'a, IA, IB>
where IA: Iterator<Item = Node>,
IB: Iterator<Item = Node>
where
IA: Iterator<Item = Node>,
IB: Iterator<Item = Node>,
{
a: Peekable<IA>,
b: Peekable<IB>,
@@ -187,8 +189,9 @@ struct MergedNodes<'a, IA, IB>
}
impl<'a, IA, IB> Iterator for MergedNodes<'a, IA, IB>
where IA: Iterator<Item = Node>,
IB: Iterator<Item = Node>
where
IA: Iterator<Item = Node>,
IB: Iterator<Item = Node>,
{
type Item = Node;
@@ -198,9 +201,12 @@ impl<'a, IA, IB> Iterator for MergedNodes<'a, IA, IB>
// If the two values are defined at the same point, compare value numbers instead;
// this is going to cause an interference conflict unless it's actually the same
// value appearing in both streams.
self.domtree
.rpo_cmp(a.def, b.def, self.layout)
.then(Ord::cmp(&a.value, &b.value))
self.domtree.rpo_cmp(a.def, b.def, self.layout).then(
Ord::cmp(
&a.value,
&b.value,
),
)
}
(Some(_), None) => Ordering::Less,
(None, Some(_)) => Ordering::Greater,
@@ -256,13 +262,15 @@ impl Coalescing {
}
/// Convert `func` to conventional SSA form and build virtual registers in the process.
pub fn conventional_ssa(&mut self,
isa: &TargetIsa,
func: &mut Function,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
liveness: &mut Liveness,
virtregs: &mut VirtRegs) {
pub fn conventional_ssa(
&mut self,
isa: &TargetIsa,
func: &mut Function,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
liveness: &mut Liveness,
virtregs: &mut VirtRegs,
) {
dbg!("Coalescing for:\n{}", func.display(isa));
let mut context = Context {
isa,
@@ -329,9 +337,11 @@ impl<'a> Context<'a> {
//
// Try to catch infinite splitting loops. The values created by splitting should never
// have irreconcilable interferences.
assert!(!self.split_values.contains(&bad_value),
"{} was already isolated",
bad_value);
assert!(
!self.split_values.contains(&bad_value),
"{} was already isolated",
bad_value
);
let split_len = self.split_values.len();
// The bad value can be both the successor value and a predecessor value at the same
@@ -349,18 +359,22 @@ impl<'a> Context<'a> {
}
// Second loop check.
assert_ne!(split_len,
self.split_values.len(),
"Couldn't isolate {}",
bad_value);
assert_ne!(
split_len,
self.split_values.len(),
"Couldn't isolate {}",
bad_value
);
}
let vreg = self.virtregs.unify(self.values);
dbg!("Coalesced {} arg {} into {} = {}",
ebb,
argnum,
vreg,
DisplayList(self.virtregs.values(vreg)));
dbg!(
"Coalesced {} arg {} into {} = {}",
ebb,
argnum,
vreg,
DisplayList(self.virtregs.values(vreg))
);
}
/// Reset `self.values` to just the set of split values.
@@ -369,21 +383,21 @@ impl<'a> Context<'a> {
self.values.extend_from_slice(self.split_values);
let domtree = &self.domtree;
let func = &self.func;
self.values
.sort_by(|&a, &b| {
domtree.rpo_cmp(func.dfg.value_def(a), func.dfg.value_def(b), &func.layout)
});
self.values.sort_by(|&a, &b| {
domtree.rpo_cmp(func.dfg.value_def(a), func.dfg.value_def(b), &func.layout)
});
}
/// Try coalescing predecessors with `succ_val`.
///
/// Returns a value from a congruence class that needs to be split before starting over, or
/// `None` if everything was successfully coalesced into `self.values`.
fn try_coalesce(&mut self,
argnum: usize,
succ_val: Value,
preds: &[BasicBlock])
-> Option<Value> {
fn try_coalesce(
&mut self,
argnum: usize,
succ_val: Value,
preds: &[BasicBlock],
) -> Option<Value> {
// Initialize the value list with the split values. These are guaranteed to be
// interference free, and anything that interferes with them must be split away.
self.reset_values();
@@ -397,19 +411,22 @@ impl<'a> Context<'a> {
for &(pred_ebb, pred_inst) in preds {
let pred_val = self.func.dfg.inst_variable_args(pred_inst)[argnum];
dbg!("Checking {}: {}: {}",
pred_val,
pred_ebb,
self.func.dfg.display_inst(pred_inst, self.isa));
dbg!(
"Checking {}: {}: {}",
pred_val,
pred_ebb,
self.func.dfg.display_inst(pred_inst, self.isa)
);
// Never coalesce incoming function arguments on the stack. These arguments are
// pre-spilled, and the rest of the virtual register would be forced to spill to the
// `incoming_arg` stack slot too.
if let ValueDef::Arg(def_ebb, def_num) = self.func.dfg.value_def(pred_val) {
if Some(def_ebb) == self.func.layout.entry_block() &&
self.func.signature.argument_types[def_num]
.location
.is_stack() {
self.func.signature.argument_types[def_num]
.location
.is_stack()
{
dbg!("Isolating incoming stack parameter {}", pred_val);
let new_val = self.split_pred(pred_inst, pred_ebb, argnum, pred_val);
assert!(self.add_class(new_val).is_ok());
@@ -424,9 +441,10 @@ impl<'a> Context<'a> {
//
// Check if the `a` live range is fundamentally incompatible with `pred_inst`.
if self.liveness
.get(a)
.expect("No live range for interfering value")
.reaches_use(pred_inst, pred_ebb, &self.func.layout) {
.get(a)
.expect("No live range for interfering value")
.reaches_use(pred_inst, pred_ebb, &self.func.layout)
{
// Splitting at `pred_inst` wouldn't resolve the interference, so we need to
// start over.
return Some(a);
@@ -435,8 +453,10 @@ impl<'a> Context<'a> {
// The local conflict could be avoided by splitting at this predecessor, so try
// that. This split is not necessarily required, but it allows us to make progress.
let new_val = self.split_pred(pred_inst, pred_ebb, argnum, pred_val);
assert!(self.add_class(new_val).is_ok(),
"Splitting didn't resolve conflict.");
assert!(
self.add_class(new_val).is_ok(),
"Splitting didn't resolve conflict."
);
}
}
@@ -447,42 +467,52 @@ impl<'a> Context<'a> {
///
/// Leave `self.values` unchanged on failure.
fn add_class(&mut self, value: Value) -> Result<(), (Value, Value)> {
self.forest
.try_merge(&self.values,
self.virtregs.congruence_class(&value),
&self.func.dfg,
&self.func.layout,
self.domtree,
self.liveness)?;
self.forest.try_merge(
&self.values,
self.virtregs.congruence_class(&value),
&self.func.dfg,
&self.func.layout,
self.domtree,
self.liveness,
)?;
self.forest.swap(&mut self.values);
Ok(())
}
/// Split the congruence class for the `argnum` argument to `pred_inst` by inserting a copy.
fn split_pred(&mut self,
pred_inst: Inst,
pred_ebb: Ebb,
argnum: usize,
pred_val: Value)
-> Value {
fn split_pred(
&mut self,
pred_inst: Inst,
pred_ebb: Ebb,
argnum: usize,
pred_val: Value,
) -> Value {
let mut pos = EncCursor::new(self.func, self.isa).at_inst(pred_inst);
let copy = pos.ins().copy(pred_val);
let inst = pos.built_inst();
dbg!("Inserted {}, before {}: {}",
pos.display_inst(inst),
pred_ebb,
pos.display_inst(pred_inst));
dbg!(
"Inserted {}, before {}: {}",
pos.display_inst(inst),
pred_ebb,
pos.display_inst(pred_inst)
);
// Create a live range for the new value.
let affinity = Affinity::new(&self.encinfo
.operand_constraints(pos.func.encodings[inst])
.expect("Bad copy encoding")
.outs
[0]);
let affinity = Affinity::new(
&self.encinfo
.operand_constraints(pos.func.encodings[inst])
.expect("Bad copy encoding")
.outs
[0],
);
self.liveness.create_dead(copy, inst, affinity);
self.liveness
.extend_locally(copy, pred_ebb, pred_inst, &pos.func.layout);
self.liveness.extend_locally(
copy,
pred_ebb,
pred_inst,
&pos.func.layout,
);
pos.func.dfg.inst_variable_args_mut(pred_inst)[argnum] = copy;
self.split_values.push(copy);
@@ -500,21 +530,29 @@ impl<'a> Context<'a> {
let inst = pos.built_inst();
self.liveness.move_def_locally(succ_val, inst);
dbg!("Inserted {}, following {}({}: {})",
pos.display_inst(inst),
ebb,
new_val,
ty);
dbg!(
"Inserted {}, following {}({}: {})",
pos.display_inst(inst),
ebb,
new_val,
ty
);
// Create a live range for the new value.
let affinity = Affinity::new(&self.encinfo
.operand_constraints(pos.func.encodings[inst])
.expect("Bad copy encoding")
.outs
[0]);
let affinity = Affinity::new(
&self.encinfo
.operand_constraints(pos.func.encodings[inst])
.expect("Bad copy encoding")
.outs
[0],
);
self.liveness.create_dead(new_val, ebb, affinity);
self.liveness
.extend_locally(new_val, ebb, inst, &pos.func.layout);
self.liveness.extend_locally(
new_val,
ebb,
inst,
&pos.func.layout,
);
self.split_values.push(new_val);
new_val
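The `MergedNodes` iterator earlier in this file merges two ordered node sequences and drops duplicates. The same idea over plain integers, as a simplified sketch (both inputs assumed sorted):

fn merge_dedup(a: &[u32], b: &[u32]) -> Vec<u32> {
    // Walk both sorted inputs in lockstep, emitting the smaller head each
    // time and consuming from both sides when the heads are equal.
    let mut out = Vec::with_capacity(a.len() + b.len());
    let (mut i, mut j) = (0, 0);
    while i < a.len() || j < b.len() {
        let next = match (a.get(i), b.get(j)) {
            (Some(&x), Some(&y)) if x <= y => {
                i += 1;
                if x == y {
                    j += 1; // duplicate: consume from both sides
                }
                x
            }
            (_, Some(&y)) => {
                j += 1;
                y
            }
            (Some(&x), None) => {
                i += 1;
                x
            }
            (None, None) => unreachable!(),
        };
        out.push(next);
    }
    out
}

fn main() {
    assert_eq!(merge_dedup(&[1, 3, 5], &[2, 3, 6]), vec![1, 2, 3, 5, 6]);
}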

View File

@@ -105,12 +105,14 @@ impl Coloring {
}
/// Run the coloring algorithm over `func`.
pub fn run(&mut self,
isa: &TargetIsa,
func: &mut Function,
domtree: &DominatorTree,
liveness: &mut Liveness,
tracker: &mut LiveValueTracker) {
pub fn run(
&mut self,
isa: &TargetIsa,
func: &mut Function,
domtree: &DominatorTree,
liveness: &mut Liveness,
tracker: &mut LiveValueTracker,
) {
dbg!("Coloring for:\n{}", func.display(isa));
let mut ctx = Context {
isa,
@@ -150,15 +152,17 @@ impl<'a> Context<'a> {
pos.goto_top(ebb);
while let Some(inst) = pos.next_inst() {
if let Some(constraints) = self.encinfo.operand_constraints(func.encodings[inst]) {
self.visit_inst(inst,
constraints,
&mut pos,
&mut func.dfg,
tracker,
&mut regs,
&mut func.locations,
&mut func.encodings,
&func.signature);
self.visit_inst(
inst,
constraints,
&mut pos,
&mut func.dfg,
tracker,
&mut regs,
&mut func.locations,
&mut func.encodings,
&func.signature,
);
} else {
let (_throughs, kills) = tracker.process_ghost(inst);
self.process_ghost_kills(kills, &mut regs, &func.locations);
@@ -170,11 +174,12 @@ impl<'a> Context<'a> {
/// Visit the `ebb` header.
///
/// Initialize the set of live registers and color the arguments to `ebb`.
fn visit_ebb_header(&self,
ebb: Ebb,
func: &mut Function,
tracker: &mut LiveValueTracker)
-> AllocatableSet {
fn visit_ebb_header(
&self,
ebb: Ebb,
func: &mut Function,
tracker: &mut LiveValueTracker,
) -> AllocatableSet {
// Reposition the live value tracker and deal with the EBB arguments.
tracker.ebb_top(ebb, &func.dfg, self.liveness, &func.layout, self.domtree);
@@ -204,10 +209,12 @@ impl<'a> Context<'a> {
.get(value)
.expect("No live range for live-in")
.affinity;
dbg!("Live-in: {}:{} in {}",
value,
affinity.display(&self.reginfo),
func.locations[value].display(&self.reginfo));
dbg!(
"Live-in: {}:{} in {}",
value,
affinity.display(&self.reginfo),
func.locations[value].display(&self.reginfo)
);
if let Affinity::Reg(rci) = affinity {
let rc = self.reginfo.rc(rci);
let loc = func.locations[value];
@@ -230,11 +237,12 @@ impl<'a> Context<'a> {
/// function signature.
///
/// Return the set of remaining allocatable registers after filtering out the dead arguments.
fn color_entry_args(&self,
sig: &Signature,
args: &[LiveValue],
locations: &mut ValueLocations)
-> AllocatableSet {
fn color_entry_args(
&self,
sig: &Signature,
args: &[LiveValue],
locations: &mut ValueLocations,
) -> AllocatableSet {
assert_eq!(sig.argument_types.len(), args.len());
let mut regs = self.usable_regs.clone();
@@ -250,10 +258,12 @@ impl<'a> Context<'a> {
locations[lv.value] = ValueLoc::Reg(reg);
} else {
// This should have been fixed by the reload pass.
panic!("Entry arg {} has {} affinity, but ABI {}",
lv.value,
lv.affinity.display(&self.reginfo),
abi.display(&self.reginfo));
panic!(
"Entry arg {} has {} affinity, but ABI {}",
lv.value,
lv.affinity.display(&self.reginfo),
abi.display(&self.reginfo)
);
}
}
@@ -273,19 +283,23 @@ impl<'a> Context<'a> {
///
/// Update `regs` to reflect the allocated registers after `inst`, including removing any dead
/// or killed values from the set.
fn visit_inst(&mut self,
inst: Inst,
constraints: &RecipeConstraints,
pos: &mut Cursor,
dfg: &mut DataFlowGraph,
tracker: &mut LiveValueTracker,
regs: &mut AllocatableSet,
locations: &mut ValueLocations,
encodings: &mut InstEncodings,
func_signature: &Signature) {
dbg!("Coloring {}\n {}",
dfg.display_inst(inst, self.isa),
regs.display(&self.reginfo));
fn visit_inst(
&mut self,
inst: Inst,
constraints: &RecipeConstraints,
pos: &mut Cursor,
dfg: &mut DataFlowGraph,
tracker: &mut LiveValueTracker,
regs: &mut AllocatableSet,
locations: &mut ValueLocations,
encodings: &mut InstEncodings,
func_signature: &Signature,
) {
dbg!(
"Coloring {}\n {}",
dfg.display_inst(inst, self.isa),
regs.display(&self.reginfo)
);
// EBB whose arguments should be colored to match the current branch instruction's
// arguments.
@@ -310,10 +324,12 @@ impl<'a> Context<'a> {
} else {
// This is a multi-way branch like `br_table`. We only support arguments on
// single-destination branches.
assert_eq!(dfg.inst_variable_args(inst).len(),
0,
"Can't handle EBB arguments: {}",
dfg.display_inst(inst, self.isa));
assert_eq!(
dfg.inst_variable_args(inst).len(),
0,
"Can't handle EBB arguments: {}",
dfg.display_inst(inst, self.isa)
);
self.undivert_regs(|lr| !lr.is_local());
}
}
@@ -329,10 +345,11 @@ impl<'a> Context<'a> {
// Get rid of the killed values.
for lv in kills {
if let Affinity::Reg(rci) = lv.affinity {
self.solver
.add_kill(lv.value,
self.reginfo.rc(rci),
self.divert.reg(lv.value, locations));
self.solver.add_kill(
lv.value,
self.reginfo.rc(rci),
self.divert.reg(lv.value, locations),
);
}
}
@@ -350,9 +367,9 @@ impl<'a> Context<'a> {
// Finally, we've fully programmed the constraint solver.
// We expect a quick solution in most cases.
let mut output_regs = self.solver
.quick_solve()
.unwrap_or_else(|_| self.iterate_solution());
let mut output_regs = self.solver.quick_solve().unwrap_or_else(
|_| self.iterate_solution(),
);
// The solution and/or fixed input constraints may require us to shuffle the set of live
@@ -399,30 +416,42 @@ impl<'a> Context<'a> {
}
/// Program the input-side constraints for `inst` into the constraint solver.
fn program_input_constraints(&mut self,
inst: Inst,
constraints: &[OperandConstraint],
dfg: &DataFlowGraph,
locations: &ValueLocations) {
for (op, &value) in constraints
.iter()
.zip(dfg.inst_args(inst))
.filter(|&(op, _)| op.kind != ConstraintKind::Stack) {
fn program_input_constraints(
&mut self,
inst: Inst,
constraints: &[OperandConstraint],
dfg: &DataFlowGraph,
locations: &ValueLocations,
) {
for (op, &value) in constraints.iter().zip(dfg.inst_args(inst)).filter(
|&(op, _)| {
op.kind != ConstraintKind::Stack
},
)
{
// Reload pass is supposed to ensure that all arguments to register operands are
// already in a register.
let cur_reg = self.divert.reg(value, locations);
match op.kind {
ConstraintKind::FixedReg(regunit) => {
if regunit != cur_reg {
self.solver
.reassign_in(value, op.regclass, cur_reg, regunit);
self.solver.reassign_in(
value,
op.regclass,
cur_reg,
regunit,
);
}
}
ConstraintKind::Reg |
ConstraintKind::Tied(_) => {
if !op.regclass.contains(cur_reg) {
self.solver
.add_var(value, op.regclass, cur_reg, &self.reginfo);
self.solver.add_var(
value,
op.regclass,
cur_reg,
&self.reginfo,
);
}
}
ConstraintKind::Stack => unreachable!(),
@@ -433,18 +462,21 @@ impl<'a> Context<'a> {
/// Program the input-side ABI constraints for `inst` into the constraint solver.
///
/// ABI constraints are the fixed register assignments used for calls and returns.
fn program_input_abi(&mut self,
inst: Inst,
abi_types: &[ArgumentType],
dfg: &DataFlowGraph,
locations: &ValueLocations) {
fn program_input_abi(
&mut self,
inst: Inst,
abi_types: &[ArgumentType],
dfg: &DataFlowGraph,
locations: &ValueLocations,
) {
for (abi, &value) in abi_types.iter().zip(dfg.inst_variable_args(inst)) {
if let ArgumentLoc::Reg(reg) = abi.location {
if let Affinity::Reg(rci) =
self.liveness
.get(value)
.expect("ABI register must have live range")
.affinity {
.affinity
{
let rc = self.reginfo.rc(rci);
let cur_reg = self.divert.reg(value, locations);
self.solver.reassign_in(value, rc, cur_reg, reg);
@@ -464,13 +496,14 @@ impl<'a> Context<'a> {
///
/// Returns true if this is the first time a branch to `dest` is seen, so the `dest` argument
/// values should be colored after `shuffle_inputs`.
fn program_ebb_arguments(&mut self,
inst: Inst,
dest: Ebb,
dfg: &DataFlowGraph,
layout: &Layout,
locations: &ValueLocations)
-> bool {
fn program_ebb_arguments(
&mut self,
inst: Inst,
dest: Ebb,
dfg: &DataFlowGraph,
layout: &Layout,
locations: &ValueLocations,
) -> bool {
// Find diverted registers that are live-in to `dest` and reassign them to their global
// home.
//
@@ -523,11 +556,13 @@ impl<'a> Context<'a> {
/// register state.
///
/// This function is only called when `program_ebb_arguments()` returned `true`.
fn color_ebb_arguments(&mut self,
inst: Inst,
dest: Ebb,
dfg: &DataFlowGraph,
locations: &mut ValueLocations) {
fn color_ebb_arguments(
&mut self,
inst: Inst,
dest: Ebb,
dfg: &DataFlowGraph,
locations: &mut ValueLocations,
) {
let br_args = dfg.inst_variable_args(inst);
let dest_args = dfg.ebb_args(dest);
assert_eq!(br_args.len(), dest_args.len());
@@ -549,20 +584,23 @@ impl<'a> Context<'a> {
/// Find all diverted registers where `pred` returns `true` and undo their diversion so they
/// are reallocated to their global register assignments.
fn undivert_regs<Pred>(&mut self, mut pred: Pred)
where Pred: FnMut(&LiveRange) -> bool
where
Pred: FnMut(&LiveRange) -> bool,
{
for rdiv in self.divert.all() {
let lr = self.liveness
.get(rdiv.value)
.expect("Missing live range for diverted register");
let lr = self.liveness.get(rdiv.value).expect(
"Missing live range for diverted register",
);
if pred(lr) {
if let Affinity::Reg(rci) = lr.affinity {
let rc = self.reginfo.rc(rci);
self.solver.reassign_in(rdiv.value, rc, rdiv.to, rdiv.from);
} else {
panic!("Diverted register {} with {} affinity",
rdiv.value,
lr.affinity.display(&self.reginfo));
panic!(
"Diverted register {} with {} affinity",
rdiv.value,
lr.affinity.display(&self.reginfo)
);
}
}
}
@@ -570,9 +608,7 @@ impl<'a> Context<'a> {
// Find existing live values that conflict with the fixed input register constraints programmed
// into the constraint solver. Convert them to solver variables so they can be diverted.
fn divert_fixed_input_conflicts(&mut self,
live: &[LiveValue],
locations: &mut ValueLocations) {
fn divert_fixed_input_conflicts(&mut self, live: &[LiveValue], locations: &mut ValueLocations) {
for lv in live {
if let Affinity::Reg(rci) = lv.affinity {
let rc = self.reginfo.rc(rci);
@@ -587,11 +623,13 @@ impl<'a> Context<'a> {
/// Program any fixed-register output constraints into the solver. This may also detect
/// conflicts between live-through registers and fixed output registers. These live-through
/// values need to be turned into solver variables so they can be reassigned.
fn program_fixed_outputs(&mut self,
constraints: &[OperandConstraint],
defs: &[LiveValue],
throughs: &[LiveValue],
locations: &mut ValueLocations) {
fn program_fixed_outputs(
&mut self,
constraints: &[OperandConstraint],
defs: &[LiveValue],
throughs: &[LiveValue],
locations: &mut ValueLocations,
) {
for (op, lv) in constraints.iter().zip(defs) {
if let ConstraintKind::FixedReg(reg) = op.kind {
self.add_fixed_output(lv.value, op.regclass, reg, throughs, locations);
@@ -602,11 +640,13 @@ impl<'a> Context<'a> {
/// Program the output-side ABI constraints for `inst` into the constraint solver.
///
/// That means return values for a call instruction.
fn program_output_abi(&mut self,
abi_types: &[ArgumentType],
defs: &[LiveValue],
throughs: &[LiveValue],
locations: &mut ValueLocations) {
fn program_output_abi(
&mut self,
abi_types: &[ArgumentType],
defs: &[LiveValue],
throughs: &[LiveValue],
locations: &mut ValueLocations,
) {
// It's technically possible for a call instruction to have fixed results before the
// variable list of results, but we have no known instances of that.
// Just assume all results are variable return values.
@@ -624,12 +664,14 @@ impl<'a> Context<'a> {
}
/// Add a single fixed output value to the solver.
fn add_fixed_output(&mut self,
value: Value,
rc: RegClass,
reg: RegUnit,
throughs: &[LiveValue],
locations: &mut ValueLocations) {
fn add_fixed_output(
&mut self,
value: Value,
rc: RegClass,
reg: RegUnit,
throughs: &[LiveValue],
locations: &mut ValueLocations,
) {
if !self.solver.add_fixed_output(rc, reg) {
// The fixed output conflicts with some of the live-through registers.
for lv in throughs {
@@ -656,12 +698,14 @@ impl<'a> Context<'a> {
/// Program the output-side constraints for `inst` into the constraint solver.
///
/// It is assumed that all fixed outputs have already been handled.
fn program_output_constraints(&mut self,
inst: Inst,
constraints: &[OperandConstraint],
defs: &[LiveValue],
dfg: &mut DataFlowGraph,
locations: &mut ValueLocations) {
fn program_output_constraints(
&mut self,
inst: Inst,
constraints: &[OperandConstraint],
defs: &[LiveValue],
dfg: &mut DataFlowGraph,
locations: &mut ValueLocations,
) {
for (op, lv) in constraints.iter().zip(defs) {
match op.kind {
ConstraintKind::FixedReg(_) |
@@ -673,8 +717,11 @@ impl<'a> Context<'a> {
// Find the input operand we're tied to.
// The solver doesn't care about the output value.
let arg = dfg.inst_args(inst)[num as usize];
self.solver
.add_tied_input(arg, op.regclass, self.divert.reg(arg, locations));
self.solver.add_tied_input(
arg,
op.regclass,
self.divert.reg(arg, locations),
);
}
}
}
@@ -695,11 +742,13 @@ impl<'a> Context<'a> {
/// before.
///
/// The solver needs to be reminded of the available registers before any moves are inserted.
fn shuffle_inputs(&mut self,
pos: &mut Cursor,
dfg: &mut DataFlowGraph,
regs: &mut AllocatableSet,
encodings: &mut InstEncodings) {
fn shuffle_inputs(
&mut self,
pos: &mut Cursor,
dfg: &mut DataFlowGraph,
regs: &mut AllocatableSet,
encodings: &mut InstEncodings,
) {
self.solver.schedule_moves(regs);
for m in self.solver.moves() {
@@ -729,10 +778,12 @@ impl<'a> Context<'a> {
/// Process kills on a ghost instruction.
/// - Forget diversions.
/// - Free killed registers.
fn process_ghost_kills(&mut self,
kills: &[LiveValue],
regs: &mut AllocatableSet,
locations: &ValueLocations) {
fn process_ghost_kills(
&mut self,
kills: &[LiveValue],
regs: &mut AllocatableSet,
locations: &ValueLocations,
) {
for lv in kills {
if let Affinity::Reg(rci) = lv.affinity {
let rc = self.reginfo.rc(rci);

View File

@@ -53,12 +53,13 @@ impl Context {
///
/// After register allocation, all values in `func` have been assigned to a register or stack
/// location that is consistent with instruction encoding constraints.
pub fn run(&mut self,
isa: &TargetIsa,
func: &mut Function,
cfg: &ControlFlowGraph,
domtree: &DominatorTree)
-> CtonResult {
pub fn run(
&mut self,
isa: &TargetIsa,
func: &mut Function,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
) -> CtonResult {
// `Liveness` and `Coloring` are self-clearing.
self.virtregs.clear();
@@ -74,13 +75,14 @@ impl Context {
}
// Pass: Coalesce and create conventional SSA form.
self.coalescing
.conventional_ssa(isa,
func,
cfg,
domtree,
&mut self.liveness,
&mut self.virtregs);
self.coalescing.conventional_ssa(
isa,
func,
cfg,
domtree,
&mut self.liveness,
&mut self.virtregs,
);
if isa.flags().enable_verifier() {
verify_context(func, cfg, domtree, Some(isa))?;
@@ -90,14 +92,15 @@ impl Context {
// Pass: Spilling.
self.spilling
.run(isa,
func,
domtree,
&mut self.liveness,
&self.virtregs,
&mut self.topo,
&mut self.tracker);
self.spilling.run(
isa,
func,
domtree,
&mut self.liveness,
&self.virtregs,
&mut self.topo,
&mut self.tracker,
);
if isa.flags().enable_verifier() {
verify_context(func, cfg, domtree, Some(isa))?;
@@ -106,13 +109,14 @@ impl Context {
}
// Pass: Reload.
self.reload
.run(isa,
func,
domtree,
&mut self.liveness,
&mut self.topo,
&mut self.tracker);
self.reload.run(
isa,
func,
domtree,
&mut self.liveness,
&mut self.topo,
&mut self.tracker,
);
if isa.flags().enable_verifier() {
verify_context(func, cfg, domtree, Some(isa))?;
@@ -121,8 +125,13 @@ impl Context {
}
// Pass: Coloring.
self.coloring
.run(isa, func, domtree, &mut self.liveness, &mut self.tracker);
self.coloring.run(
isa,
func,
domtree,
&mut self.liveness,
&mut self.tracker,
);
if isa.flags().enable_verifier() {
verify_context(func, cfg, domtree, Some(isa))?;

View File

@@ -93,10 +93,11 @@ impl RegDiversions {
///
/// Returns the `to` register of the removed diversion.
pub fn remove(&mut self, value: Value) -> Option<RegUnit> {
self.current
.iter()
.position(|d| d.value == value)
.map(|i| self.current.swap_remove(i).to)
self.current.iter().position(|d| d.value == value).map(
|i| {
self.current.swap_remove(i).to
},
)
}
}
@@ -113,12 +114,14 @@ mod tests {
let v2 = Value::new(2);
divs.regmove(v1, 10, 12);
assert_eq!(divs.diversion(v1),
Some(&Diversion {
value: v1,
from: 10,
to: 12,
}));
assert_eq!(
divs.diversion(v1),
Some(&Diversion {
value: v1,
from: 10,
to: 12,
})
);
assert_eq!(divs.diversion(v2), None);
divs.regmove(v1, 12, 11);

View File

@@ -74,14 +74,13 @@ impl LiveValueVec {
/// Add a new live value to `values`. Copy some properties from `lr`.
fn push(&mut self, value: Value, endpoint: Inst, lr: &LiveRange) {
self.values
.push(LiveValue {
value,
endpoint,
affinity: lr.affinity,
is_local: lr.is_local(),
is_dead: lr.is_dead(),
});
self.values.push(LiveValue {
value,
endpoint,
affinity: lr.affinity,
is_local: lr.is_local(),
is_dead: lr.is_dead(),
});
}
/// Remove all elements.
@@ -157,13 +156,14 @@ impl LiveValueTracker {
/// from the immediate dominator. The second slice is the set of `ebb` arguments that are live.
///
/// Dead arguments with no uses are included in `args`. Call `drop_dead_args()` to remove them.
pub fn ebb_top(&mut self,
ebb: Ebb,
dfg: &DataFlowGraph,
liveness: &Liveness,
layout: &Layout,
domtree: &DominatorTree)
-> (&[LiveValue], &[LiveValue]) {
pub fn ebb_top(
&mut self,
ebb: Ebb,
dfg: &DataFlowGraph,
liveness: &Liveness,
layout: &Layout,
domtree: &DominatorTree,
) -> (&[LiveValue], &[LiveValue]) {
// Start over, compute the set of live values at the top of the EBB from two sources:
//
// 1. Values that were live before `ebb`'s immediate dominator, filtered for those that are
@@ -179,14 +179,14 @@ impl LiveValueTracker {
// If the immediate dominator exists, we must have a stored list for it. This is a
// requirement on the order EBBs are visited: All dominators must have been processed
// before the current EBB.
let idom_live_list = self.idom_sets
.get(&idom)
.expect("No stored live set for dominator");
let idom_live_list = self.idom_sets.get(&idom).expect(
"No stored live set for dominator",
);
// Get just the values that are live-in to `ebb`.
for &value in idom_live_list.as_slice(&self.idom_pool) {
let lr = liveness
.get(value)
.expect("Immediate dominator value has no live range");
let lr = liveness.get(value).expect(
"Immediate dominator value has no live range",
);
// Check if this value is live-in here.
if let Some(endpoint) = lr.livein_local_end(ebb, layout) {
@@ -198,9 +198,9 @@ impl LiveValueTracker {
// Now add all the live arguments to `ebb`.
let first_arg = self.live.values.len();
for &value in dfg.ebb_args(ebb) {
let lr = liveness
.get(value)
.expect("EBB argument value has no live range");
let lr = liveness.get(value).expect(
"EBB argument value has no live range",
);
assert_eq!(lr.def(), ebb.into());
match lr.def_local_end().into() {
ExpandedProgramPoint::Inst(endpoint) => {
@@ -209,13 +209,18 @@ impl LiveValueTracker {
ExpandedProgramPoint::Ebb(local_ebb) => {
// This is a dead EBB argument which is not even live into the first
// instruction in the EBB.
assert_eq!(local_ebb,
ebb,
"EBB argument live range ends at wrong EBB header");
assert_eq!(
local_ebb,
ebb,
"EBB argument live range ends at wrong EBB header"
);
// Give this value a fake endpoint that is the first instruction in the EBB.
// We expect it to be removed by calling `drop_dead_args()`.
self.live
.push(value, layout.first_inst(ebb).expect("Empty EBB"), lr);
self.live.push(
value,
layout.first_inst(ebb).expect("Empty EBB"),
lr,
);
}
}
}
@@ -241,11 +246,12 @@ impl LiveValueTracker {
///
/// The `drop_dead()` method must be called next to actually remove the dead values from the
/// tracked set after the two returned slices are no longer needed.
pub fn process_inst(&mut self,
inst: Inst,
dfg: &DataFlowGraph,
liveness: &Liveness)
-> (&[LiveValue], &[LiveValue], &[LiveValue]) {
pub fn process_inst(
&mut self,
inst: Inst,
dfg: &DataFlowGraph,
liveness: &Liveness,
) -> (&[LiveValue], &[LiveValue], &[LiveValue]) {
// Save a copy of the live values before any branches or jumps that could be somebody's
// immediate dominator.
match dfg[inst].analyze_branch(&dfg.value_lists) {
@@ -272,9 +278,11 @@ impl LiveValueTracker {
}
}
(&self.live.values[0..first_kill],
&self.live.values[first_kill..first_def],
&self.live.values[first_def..])
(
&self.live.values[0..first_kill],
&self.live.values[first_kill..first_def],
&self.live.values[first_def..],
)
}
/// Prepare to move past a ghost instruction.
@@ -310,7 +318,8 @@ impl LiveValueTracker {
/// Any values where `f` returns true are spilled and will be treated as if their affinity was
/// `Stack`.
pub fn process_spills<F>(&mut self, mut f: F)
where F: FnMut(Value) -> bool
where
F: FnMut(Value) -> bool,
{
for lv in &mut self.live.values {
if f(lv.value) {
@@ -324,12 +333,10 @@ impl LiveValueTracker {
let values = self.live.values.iter().map(|lv| lv.value);
let pool = &mut self.idom_pool;
// If there already is a set saved for `idom`, just keep it.
self.idom_sets
.entry(idom)
.or_insert_with(|| {
let mut list = ValueList::default();
list.extend(values, pool);
list
});
self.idom_sets.entry(idom).or_insert_with(|| {
let mut list = ValueList::default();
list.extend(values, pool);
list
});
}
}

View File

@@ -190,12 +190,13 @@ type LiveRangeSet = SparseMap<Value, LiveRange>;
/// Get a mutable reference to the live range for `value`.
/// Create it if necessary.
fn get_or_create<'a>(lrset: &'a mut LiveRangeSet,
value: Value,
isa: &TargetIsa,
func: &Function,
enc_info: &EncInfo)
-> &'a mut LiveRange {
fn get_or_create<'a>(
lrset: &'a mut LiveRangeSet,
value: Value,
isa: &TargetIsa,
func: &Function,
enc_info: &EncInfo,
) -> &'a mut LiveRange {
// It would be better to use `get_mut()` here, but that leads to borrow checker fighting
// which can probably only be resolved by non-lexical lifetimes.
// https://github.com/rust-lang/rfcs/issues/811
@@ -233,12 +234,14 @@ fn get_or_create<'a>(lrset: &'a mut LiveRangeSet,
}
/// Extend the live range for `value` so it reaches `to` which must live in `ebb`.
fn extend_to_use(lr: &mut LiveRange,
ebb: Ebb,
to: Inst,
worklist: &mut Vec<Ebb>,
func: &Function,
cfg: &ControlFlowGraph) {
fn extend_to_use(
lr: &mut LiveRange,
ebb: Ebb,
to: Inst,
worklist: &mut Vec<Ebb>,
func: &Function,
cfg: &ControlFlowGraph,
) {
// This is our scratch working space, and we'll leave it empty when we return.
assert!(worklist.is_empty());
@@ -309,10 +312,12 @@ impl Liveness {
///
/// This asserts that `value` does not have an existing live range.
pub fn create_dead<PP>(&mut self, value: Value, def: PP, affinity: Affinity)
where PP: Into<ProgramPoint>
where
PP: Into<ProgramPoint>,
{
let old = self.ranges
.insert(LiveRange::new(value, def.into(), affinity));
let old = self.ranges.insert(
LiveRange::new(value, def.into(), affinity),
);
assert!(old.is_none(), "{} already has a live range", value);
}
@@ -320,7 +325,8 @@ impl Liveness {
///
/// The old and new def points must be in the same EBB, and before the end of the live range.
pub fn move_def_locally<PP>(&mut self, value: Value, def: PP)
where PP: Into<ProgramPoint>
where
PP: Into<ProgramPoint>,
{
let mut lr = self.ranges.get_mut(value).expect("Value has no live range");
lr.move_def_locally(def.into());
@@ -331,12 +337,13 @@ impl Liveness {
/// It is assumed the `value` is already live before `user` in `ebb`.
///
/// Returns a mutable reference to the value's affinity in case that also needs to be updated.
pub fn extend_locally(&mut self,
value: Value,
ebb: Ebb,
user: Inst,
layout: &Layout)
-> &mut Affinity {
pub fn extend_locally(
&mut self,
value: Value,
ebb: Ebb,
user: Inst,
layout: &Layout,
) -> &mut Affinity {
debug_assert_eq!(Some(ebb), layout.inst_ebb(user));
let mut lr = self.ranges.get_mut(value).expect("Value has no live range");
let livein = lr.extend_in_ebb(ebb, user, layout);
@@ -401,7 +408,8 @@ impl Liveness {
if let Some(constraint) = operand_constraints.next() {
lr.affinity.merge(constraint, &reg_info);
} else if lr.affinity.is_none() && encoding.is_legal() &&
!func.dfg[inst].opcode().is_branch() {
!func.dfg[inst].opcode().is_branch()
{
// This is a real encoded instruction using a value that doesn't yet have a
// concrete affinity. Most likely a call argument or a return value. Give
// the value a register affinity matching the ABI type.

View File

@@ -224,13 +224,13 @@ impl LiveRange {
self.liveins
.binary_search_by(|intv| order.cmp(intv.begin, ebb))
.or_else(|n| {
// The interval at `n-1` may cover `ebb`.
if n > 0 && order.cmp(self.liveins[n - 1].end, ebb) == Ordering::Greater {
Ok(n - 1)
} else {
Err(n)
}
})
// The interval at `n-1` may cover `ebb`.
if n > 0 && order.cmp(self.liveins[n - 1].end, ebb) == Ordering::Greater {
Ok(n - 1)
} else {
Err(n)
}
})
}
/// Extend the local interval for `ebb` so it reaches `to` which must belong to `ebb`.
@@ -250,11 +250,14 @@ impl LiveRange {
// We're assuming here that `to` never precedes `def_begin` in the same EBB, but we can't
// check it without a method for getting `to`'s EBB.
if order.cmp(ebb, self.def_end) != Ordering::Greater &&
order.cmp(to, self.def_begin) != Ordering::Less {
order.cmp(to, self.def_begin) != Ordering::Less
{
let to_pp = to.into();
assert_ne!(to_pp,
self.def_begin,
"Can't use value in the defining instruction.");
assert_ne!(
to_pp,
self.def_begin,
"Can't use value in the defining instruction."
);
if order.cmp(to, self.def_end) == Ordering::Greater {
self.def_end = to_pp;
}
@@ -288,8 +291,10 @@ impl LiveRange {
let prev = n.checked_sub(1).and_then(|i| self.liveins.get(i));
let next = self.liveins.get(n);
(prev.map_or(false, |prev| order.is_ebb_gap(prev.end, ebb)),
next.map_or(false, |next| order.is_ebb_gap(to, next.begin)))
(
prev.map_or(false, |prev| order.is_ebb_gap(prev.end, ebb)),
next.map_or(false, |next| order.is_ebb_gap(to, next.begin)),
)
};
match (coalesce_prev, coalesce_next) {
@@ -309,12 +314,13 @@ impl LiveRange {
}
// Cannot coalesce; insert new interval
(false, false) => {
self.liveins
.insert(n,
Interval {
begin: ebb,
end: to,
});
self.liveins.insert(
n,
Interval {
begin: ebb,
end: to,
},
);
}
}
@@ -372,9 +378,9 @@ impl LiveRange {
/// answer, but it is also possible that an even later program point is returned. So don't
/// depend on the returned `Inst` to belong to `ebb`.
pub fn livein_local_end<PO: ProgramOrder>(&self, ebb: Ebb, order: &PO) -> Option<Inst> {
self.find_ebb_interval(ebb, order)
.ok()
.map(|n| self.liveins[n].end)
self.find_ebb_interval(ebb, order).ok().map(|n| {
self.liveins[n].end
})
}
/// Get all the live-in intervals.
@@ -384,11 +390,13 @@ impl LiveRange {
/// Check if this live range overlaps a definition in `ebb`.
pub fn overlaps_def<PO>(&self, def: ExpandedProgramPoint, ebb: Ebb, order: &PO) -> bool
where PO: ProgramOrder
where
PO: ProgramOrder,
{
// Check for an overlap with the local range.
if order.cmp(def, self.def_begin) != Ordering::Less &&
order.cmp(def, self.def_end) == Ordering::Less {
order.cmp(def, self.def_end) == Ordering::Less
{
return true;
}
@@ -401,11 +409,13 @@ impl LiveRange {
/// Check if this live range reaches a use at `user` in `ebb`.
pub fn reaches_use<PO>(&self, user: Inst, ebb: Ebb, order: &PO) -> bool
where PO: ProgramOrder
where
PO: ProgramOrder,
{
// Check for an overlap with the local range.
if order.cmp(user, self.def_begin) == Ordering::Greater &&
order.cmp(user, self.def_end) != Ordering::Greater {
order.cmp(user, self.def_end) != Ordering::Greater
{
return true;
}
@@ -418,7 +428,8 @@ impl LiveRange {
/// Check if this live range is killed at `user` in `ebb`.
pub fn killed_at<PO>(&self, user: Inst, ebb: Ebb, order: &PO) -> bool
where PO: ProgramOrder
where
PO: ProgramOrder,
{
self.def_local_end() == user.into() || self.livein_local_end(ebb, order) == Some(user)
}
@@ -447,8 +458,9 @@ mod tests {
impl ProgramOrder for ProgOrder {
fn cmp<A, B>(&self, a: A, b: B) -> Ordering
where A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>,
{
fn idx(pp: ExpandedProgramPoint) -> usize {
match pp {
@@ -505,9 +517,11 @@ mod tests {
assert_eq!(self.cmp(e, li.begin), Ordering::Less);
}
assert!(self.cmp(lr.def_end, li.begin) == Ordering::Less ||
assert!(
self.cmp(lr.def_end, li.begin) == Ordering::Less ||
self.cmp(lr.def_begin, li.end) == Ordering::Greater,
"Interval can't overlap the def EBB");
"Interval can't overlap the def EBB"
);
// Save for next round.
prev_end = Some(li.end);
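The binary search in `find_ebb_interval` and `livein_local_end` can be pictured over plain integer program points. A sketch with a made-up interval representation, assuming intervals are sorted by their begin point:

fn livein_end(intervals: &[(u32, u32)], point: u32) -> Option<u32> {
    // Look for an interval beginning exactly at `point`; otherwise check
    // whether the preceding interval still covers it.
    match intervals.binary_search_by(|iv| iv.0.cmp(&point)) {
        Ok(i) => Some(intervals[i].1),
        Err(i) => i.checked_sub(1)
            .map(|p| intervals[p])
            .filter(|iv| iv.1 > point)
            .map(|iv| iv.1),
    }
}

fn main() {
    let ivs = [(10, 20), (30, 40)];
    assert_eq!(livein_end(&ivs, 10), Some(20));
    assert_eq!(livein_end(&ivs, 15), Some(20));
    assert_eq!(livein_end(&ivs, 25), None);
    assert_eq!(livein_end(&ivs, 35), Some(40));
}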

View File

@@ -103,10 +103,10 @@ impl Pressure {
}
// Compute per-class limits from `usable`.
for (toprc, rc) in p.toprc
.iter_mut()
.take_while(|t| t.num_toprcs > 0)
.zip(reginfo.classes) {
for (toprc, rc) in p.toprc.iter_mut().take_while(|t| t.num_toprcs > 0).zip(
reginfo.classes,
)
{
toprc.limit = usable.iter(rc).len() as u32;
toprc.width = rc.width;
}

View File

@@ -54,13 +54,15 @@ impl Reload {
}
/// Run the reload algorithm over `func`.
pub fn run(&mut self,
isa: &TargetIsa,
func: &mut Function,
domtree: &DominatorTree,
liveness: &mut Liveness,
topo: &mut TopoOrder,
tracker: &mut LiveValueTracker) {
pub fn run(
&mut self,
isa: &TargetIsa,
func: &mut Function,
domtree: &DominatorTree,
liveness: &mut Liveness,
topo: &mut TopoOrder,
tracker: &mut LiveValueTracker,
) {
dbg!("Reload for:\n{}", func.display(isa));
let mut ctx = Context {
cur: EncCursor::new(func, isa),
@@ -125,11 +127,13 @@ impl<'a> Context<'a> {
/// Process the EBB parameters. Move to the next instruction in the EBB to be processed.
fn visit_ebb_header(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
let (liveins, args) = tracker.ebb_top(ebb,
&self.cur.func.dfg,
self.liveness,
&self.cur.func.layout,
self.domtree);
let (liveins, args) = tracker.ebb_top(
ebb,
&self.cur.func.dfg,
self.liveness,
&self.cur.func.layout,
self.domtree,
);
if self.cur.func.layout.entry_block() == Some(ebb) {
assert_eq!(liveins.len(), 0);
@@ -172,15 +176,17 @@ impl<'a> Context<'a> {
/// Process the instruction pointed to by `pos`, and advance the cursor to the next instruction
/// that needs processing.
fn visit_inst(&mut self,
ebb: Ebb,
inst: Inst,
encoding: Encoding,
tracker: &mut LiveValueTracker) {
fn visit_inst(
&mut self,
ebb: Ebb,
inst: Inst,
encoding: Encoding,
tracker: &mut LiveValueTracker,
) {
// Get the operand constraints for `inst` that we are trying to satisfy.
let constraints = self.encinfo
.operand_constraints(encoding)
.expect("Missing instruction encoding");
let constraints = self.encinfo.operand_constraints(encoding).expect(
"Missing instruction encoding",
);
// Identify reload candidates.
assert!(self.candidates.is_empty());
@@ -195,17 +201,20 @@ impl<'a> Context<'a> {
let reg = self.cur.ins().fill(cand.value);
let fill = self.cur.built_inst();
self.reloads
.insert(ReloadedValue {
stack: cand.value,
reg: reg,
});
self.reloads.insert(ReloadedValue {
stack: cand.value,
reg: reg,
});
// Create a live range for the new reload.
let affinity = Affinity::Reg(cand.regclass.into());
self.liveness.create_dead(reg, fill, affinity);
self.liveness
.extend_locally(reg, ebb, inst, &self.cur.func.layout);
self.liveness.extend_locally(
reg,
ebb,
inst,
&self.cur.func.layout,
);
}
// Rewrite arguments.
@@ -218,8 +227,8 @@ impl<'a> Context<'a> {
// TODO: Reuse reloads for future instructions.
self.reloads.clear();
let (_throughs, _kills, defs) = tracker
.process_inst(inst, &self.cur.func.dfg, self.liveness);
let (_throughs, _kills, defs) =
tracker.process_inst(inst, &self.cur.func.dfg, self.liveness);
// Advance to the next instruction so we can insert any spills after the instruction.
self.cur.next_inst();
@@ -255,11 +264,10 @@ impl<'a> Context<'a> {
for (op, &arg) in constraints.ins.iter().zip(args) {
if op.kind != ConstraintKind::Stack {
if self.liveness[arg].affinity.is_stack() {
self.candidates
.push(ReloadCandidate {
value: arg,
regclass: op.regclass,
})
self.candidates.push(ReloadCandidate {
value: arg,
regclass: op.regclass,
})
}
}
}
@@ -272,17 +280,21 @@ impl<'a> Context<'a> {
// Handle ABI arguments.
if let Some(sig) = self.cur.func.dfg.call_signature(inst) {
handle_abi_args(self.candidates,
&self.cur.func.dfg.signatures[sig].argument_types,
var_args,
self.cur.isa,
self.liveness);
handle_abi_args(
self.candidates,
&self.cur.func.dfg.signatures[sig].argument_types,
var_args,
self.cur.isa,
self.liveness,
);
} else if self.cur.func.dfg[inst].opcode().is_return() {
handle_abi_args(self.candidates,
&self.cur.func.signature.return_types,
var_args,
self.cur.isa,
self.liveness);
handle_abi_args(
self.candidates,
&self.cur.func.signature.return_types,
var_args,
self.cur.isa,
self.liveness,
);
}
}
@@ -297,27 +309,33 @@ impl<'a> Context<'a> {
// Update live ranges.
self.liveness.move_def_locally(stack, inst);
self.liveness
.extend_locally(reg, ebb, inst, &self.cur.func.layout);
self.liveness.extend_locally(
reg,
ebb,
inst,
&self.cur.func.layout,
);
}
}
/// Find reload candidates in the instruction's ABI variable arguments. This handles both
/// return values and call arguments.
fn handle_abi_args(candidates: &mut Vec<ReloadCandidate>,
abi_types: &[ArgumentType],
var_args: &[Value],
isa: &TargetIsa,
liveness: &Liveness) {
fn handle_abi_args(
candidates: &mut Vec<ReloadCandidate>,
abi_types: &[ArgumentType],
var_args: &[Value],
isa: &TargetIsa,
liveness: &Liveness,
) {
assert_eq!(abi_types.len(), var_args.len());
for (abi, &arg) in abi_types.iter().zip(var_args) {
if abi.location.is_reg() {
let lv = liveness.get(arg).expect("Missing live range for ABI arg");
if lv.affinity.is_stack() {
candidates.push(ReloadCandidate {
value: arg,
regclass: isa.regclass_for_abi_type(abi.value_type),
});
value: arg,
regclass: isa.regclass_for_abi_type(abi.value_type),
});
}
}
}

View File

@@ -231,12 +231,14 @@ impl SparseMapValue<Value> for Assignment {
impl fmt::Display for Assignment {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"{}:{}(%{} -> %{})",
self.value,
self.rc,
self.from,
self.to)
write!(
f,
"{}:{}(%{} -> %{})",
self.value,
self.rc,
self.from,
self.to
)
}
}
@@ -244,7 +246,7 @@ impl fmt::Display for Assignment {
impl PartialEq for Assignment {
fn eq(&self, other: &Assignment) -> bool {
self.value == other.value && self.from == other.from && self.to == other.to &&
self.rc.index == other.rc.index
self.rc.index == other.rc.index
}
}
@@ -363,22 +365,23 @@ impl Solver {
dbg!("-> converting variable {} to a fixed constraint", v);
// The spiller is responsible for ensuring that all constraints on the uses of a
// value are compatible.
assert!(v.constraint.contains(to),
"Incompatible constraints for {}",
value);
assert!(
v.constraint.contains(to),
"Incompatible constraints for {}",
value
);
} else {
panic!("Invalid from register for fixed {} constraint", value);
}
}
self.regs_in.free(rc, from);
self.regs_out.take(rc, to);
self.assignments
.insert(Assignment {
value,
rc,
from,
to,
});
self.assignments.insert(Assignment {
value,
rc,
from,
to,
});
}
/// Add a variable representing an input side value with an existing register assignment.
@@ -388,18 +391,22 @@ impl Solver {
///
/// It is assumed initially that the value is also live on the output side of the instruction.
/// This can be changed by calling `add_kill()`.
pub fn add_var(&mut self,
value: Value,
constraint: RegClass,
from: RegUnit,
reginfo: &RegInfo) {
pub fn add_var(
&mut self,
value: Value,
constraint: RegClass,
from: RegUnit,
reginfo: &RegInfo,
) {
// Check for existing entries for this value.
if self.regs_in.is_avail(constraint, from) {
dbg!("add_var({}:{}, from={}/%{}) for existing entry",
value,
constraint,
reginfo.display_regunit(from),
from);
dbg!(
"add_var({}:{}, from={}/%{}) for existing entry",
value,
constraint,
reginfo.display_regunit(from),
from
);
// There could be an existing variable entry.
if let Some(v) = self.vars.iter_mut().find(|v| v.value == value) {
@@ -419,9 +426,11 @@ impl Solver {
// No variable, then it must be a fixed reassignment.
if let Some(a) = self.assignments.get(value) {
dbg!("-> already fixed assignment {}", a);
assert!(constraint.contains(a.to),
"Incompatible constraints for {}",
value);
assert!(
constraint.contains(a.to),
"Incompatible constraints for {}",
value
);
return;
}
@@ -430,12 +439,14 @@ impl Solver {
}
let new_var = Variable::new_live(value, constraint, from);
dbg!("add_var({}:{}, from={}/%{}) new entry: {}",
value,
constraint,
reginfo.display_regunit(from),
from,
new_var);
dbg!(
"add_var({}:{}, from={}/%{}) new entry: {}",
value,
constraint,
reginfo.display_regunit(from),
from,
new_var
);
self.regs_in.free(constraint, from);
if self.inputs_done {
@@ -623,23 +634,20 @@ impl Solver {
// Collect moves from the chosen solution for all non-define variables.
for v in &self.vars {
if let Some(from) = v.from {
self.moves
.push(Assignment {
value: v.value,
from,
to: v.solution,
rc: v.constraint,
});
self.moves.push(Assignment {
value: v.value,
from,
to: v.solution,
rc: v.constraint,
});
}
}
// Convert all of the fixed register assignments into moves, but omit the ones that are
// already in the right register.
self.moves
.extend(self.assignments
.values()
.cloned()
.filter(|v| v.from != v.to));
self.moves.extend(self.assignments.values().cloned().filter(
|v| v.from != v.to,
));
dbg!("collect_moves: {}", DisplayList(self.moves.as_slice()));
}
@@ -661,9 +669,10 @@ impl Solver {
let mut i = 0;
while i < self.moves.len() {
// Find the first move that can be executed now.
if let Some(j) = self.moves[i..]
.iter()
.position(|m| avail.is_avail(m.rc, m.to)) {
if let Some(j) = self.moves[i..].iter().position(
|m| avail.is_avail(m.rc, m.to),
)
{
// This move can be executed now.
self.moves.swap(i, i + j);
let m = &self.moves[i];
@@ -709,17 +718,16 @@ impl Solver {
// Append a fixup move so we end up in the right place. This move will be scheduled
// later. That's ok because it is the single remaining move of `m.value` after the
// next iteration.
self.moves
.push(Assignment {
value: m.value,
rc: m.rc,
from: reg,
to: m.to,
});
// TODO: What if allocating an extra register is not enough to break a cycle? This
// can happen when there are registers of different widths in a cycle. For ARM, we
// may have to move two S-registers out of the way before we can resolve a cycle
// involving a D-register.
self.moves.push(Assignment {
value: m.value,
rc: m.rc,
from: reg,
to: m.to,
});
// TODO: What if allocating an extra register is not enough to break a cycle? This
// can happen when there are registers of different widths in a cycle. For ARM, we
// may have to move two S-registers out of the way before we can resolve a cycle
// involving a D-register.
} else {
panic!("Not enough registers in {} to schedule moves", m.rc);
}
@@ -738,9 +746,11 @@ impl Solver {
impl fmt::Display for Solver {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Solver {{ inputs_done: {},", self.inputs_done)?;
writeln!(f,
" assignments: {}",
DisplayList(self.assignments.as_slice()))?;
writeln!(
f,
" assignments: {}",
DisplayList(self.assignments.as_slice())
)?;
writeln!(f, " vars: {}", DisplayList(self.vars.as_slice()))?;
writeln!(f, " moves: {}", DisplayList(self.moves.as_slice()))?;
writeln!(f, "}}")
@@ -817,8 +827,10 @@ mod tests {
solver.inputs_done();
assert!(solver.quick_solve().is_ok());
assert_eq!(solver.schedule_moves(&regs), 0);
assert_eq!(solver.moves(),
&[mov(v11, gpr, r1, r2), mov(v10, gpr, r0, r1)]);
assert_eq!(
solver.moves(),
&[mov(v11, gpr, r1, r2), mov(v10, gpr, r0, r1)]
);
// Swap r0 and r1 in three moves using r2 as a scratch.
solver.reset(&regs);
@@ -827,10 +839,14 @@ mod tests {
solver.inputs_done();
assert!(solver.quick_solve().is_ok());
assert_eq!(solver.schedule_moves(&regs), 0);
assert_eq!(solver.moves(),
&[mov(v10, gpr, r0, r2),
mov(v11, gpr, r1, r0),
mov(v10, gpr, r2, r1)]);
assert_eq!(
solver.moves(),
&[
mov(v10, gpr, r0, r2),
mov(v11, gpr, r1, r0),
mov(v10, gpr, r2, r1),
]
);
}
#[test]
@@ -862,11 +878,15 @@ mod tests {
solver.inputs_done();
assert!(solver.quick_solve().is_ok());
assert_eq!(solver.schedule_moves(&regs), 0);
assert_eq!(solver.moves(),
&[mov(v10, d, d0, d2),
mov(v11, s, s2, s0),
mov(v12, s, s3, s1),
mov(v10, d, d2, d1)]);
assert_eq!(
solver.moves(),
&[
mov(v10, d, d0, d2),
mov(v11, s, s2, s0),
mov(v12, s, s3, s1),
mov(v10, d, d2, d1),
]
);
// Same problem in the other direction: Swap (s0, s1) <-> d1.
//
@@ -879,10 +899,14 @@ mod tests {
solver.inputs_done();
assert!(solver.quick_solve().is_ok());
assert_eq!(solver.schedule_moves(&regs), 0);
assert_eq!(solver.moves(),
&[mov(v10, d, d1, d2),
mov(v12, s, s1, s3),
mov(v11, s, s0, s2),
mov(v10, d, d2, d0)]);
assert_eq!(
solver.moves(),
&[
mov(v10, d, d1, d2),
mov(v12, s, s1, s3),
mov(v11, s, s0, s2),
mov(v10, d, d2, d0),
]
);
}
}


@@ -71,14 +71,16 @@ impl Spilling {
}
/// Run the spilling algorithm over `func`.
pub fn run(&mut self,
isa: &TargetIsa,
func: &mut Function,
domtree: &DominatorTree,
liveness: &mut Liveness,
virtregs: &VirtRegs,
topo: &mut TopoOrder,
tracker: &mut LiveValueTracker) {
pub fn run(
&mut self,
isa: &TargetIsa,
func: &mut Function,
domtree: &DominatorTree,
liveness: &mut Liveness,
virtregs: &VirtRegs,
topo: &mut TopoOrder,
tracker: &mut LiveValueTracker,
) {
dbg!("Spilling for:\n{}", func.display(isa));
let reginfo = isa.register_info();
let usable_regs = isa.allocatable_registers(func);
@@ -114,8 +116,10 @@ impl<'a> Context<'a> {
while let Some(inst) = self.cur.next_inst() {
if let Some(constraints) =
self.encinfo
.operand_constraints(self.cur.func.encodings[inst]) {
self.encinfo.operand_constraints(
self.cur.func.encodings[inst],
)
{
self.visit_inst(inst, ebb, constraints, tracker);
} else {
let (_throughs, kills) = tracker.process_ghost(inst);
@@ -150,11 +154,13 @@ impl<'a> Context<'a> {
}
fn visit_ebb_header(&mut self, ebb: Ebb, tracker: &mut LiveValueTracker) {
let (liveins, args) = tracker.ebb_top(ebb,
&self.cur.func.dfg,
self.liveness,
&self.cur.func.layout,
self.domtree);
let (liveins, args) = tracker.ebb_top(
ebb,
&self.cur.func.dfg,
self.liveness,
&self.cur.func.layout,
self.domtree,
);
// Count the live-in registers. These should already fit in registers; they did at the
// dominator.
@@ -167,16 +173,20 @@ impl<'a> Context<'a> {
if let Affinity::Reg(rci) = lv.affinity {
let rc = self.reginfo.rc(rci);
'try_take: while let Err(mask) = self.pressure.take_transient(rc) {
dbg!("Need {} reg for EBB argument {} from {} live-ins",
rc,
lv.value,
liveins.len());
dbg!(
"Need {} reg for EBB argument {} from {} live-ins",
rc,
lv.value,
liveins.len()
);
match self.spill_candidate(mask, liveins) {
Some(cand) => {
dbg!("Spilling live-in {} to make room for {} EBB argument {}",
cand,
rc,
lv.value);
dbg!(
"Spilling live-in {} to make room for {} EBB argument {}",
cand,
rc,
lv.value
);
self.spill_reg(cand);
}
None => {
@@ -199,11 +209,13 @@ impl<'a> Context<'a> {
self.pressure.preserve_transient();
}
fn visit_inst(&mut self,
inst: Inst,
ebb: Ebb,
constraints: &RecipeConstraints,
tracker: &mut LiveValueTracker) {
fn visit_inst(
&mut self,
inst: Inst,
ebb: Ebb,
constraints: &RecipeConstraints,
tracker: &mut LiveValueTracker,
) {
dbg!("Inst {}, {}", self.cur.display_inst(inst), self.pressure);
debug_assert_eq!(self.cur.current_inst(), Some(inst));
debug_assert_eq!(self.cur.current_ebb(), Some(ebb));
@@ -250,9 +262,11 @@ impl<'a> Context<'a> {
match self.spill_candidate(mask, throughs) {
Some(cand) => self.spill_reg(cand),
None => {
panic!("Ran out of {} registers for {}",
op.regclass,
self.cur.display_inst(inst))
panic!(
"Ran out of {} registers for {}",
op.regclass,
self.cur.display_inst(inst)
)
}
}
}
@@ -313,12 +327,16 @@ impl<'a> Context<'a> {
.argument_types
.iter()
.zip(args)
.enumerate() {
.enumerate()
{
if abi.location.is_reg() {
let (rci, spilled) = match self.liveness[arg].affinity {
Affinity::Reg(rci) => (rci, false),
Affinity::Stack => {
(self.cur.isa.regclass_for_abi_type(abi.value_type).into(), true)
(
self.cur.isa.regclass_for_abi_type(abi.value_type).into(),
true,
)
}
Affinity::None => panic!("Missing affinity for {}", arg),
};
@@ -373,17 +391,19 @@ impl<'a> Context<'a> {
// Spill a live register that is *not* used by the current instruction.
// Spilling a use wouldn't help.
match {
let args = self.cur.func.dfg.inst_args(inst);
self.spill_candidate(mask,
tracker.live().iter().filter(|lv| {
!args.contains(&lv.value)
}))
} {
let args = self.cur.func.dfg.inst_args(inst);
self.spill_candidate(
mask,
tracker.live().iter().filter(|lv| !args.contains(&lv.value)),
)
} {
Some(cand) => self.spill_reg(cand),
None => {
panic!("Ran out of {} registers when inserting copy before {}",
rc,
self.cur.display_inst(inst))
panic!(
"Ran out of {} registers when inserting copy before {}",
rc,
self.cur.display_inst(inst)
)
}
}
}
@@ -395,7 +415,8 @@ impl<'a> Context<'a> {
// Find a spill candidate from `candidates` whose top-level register class is in `mask`.
fn spill_candidate<'ii, II>(&self, mask: RegClassMask, candidates: II) -> Option<Value>
where II: IntoIterator<Item = &'ii LiveValue>
where
II: IntoIterator<Item = &'ii LiveValue>,
{
// Find the best viable spill candidate.
//
@@ -420,12 +441,13 @@ impl<'a> Context<'a> {
None
})
.min_by(|&a, &b| {
// Find the minimum candidate according to the RPO of their defs.
self.domtree
.rpo_cmp(self.cur.func.dfg.value_def(a),
self.cur.func.dfg.value_def(b),
&self.cur.func.layout)
})
// Find the minimum candidate according to the RPO of their defs.
self.domtree.rpo_cmp(
self.cur.func.dfg.value_def(a),
self.cur.func.dfg.value_def(b),
&self.cur.func.layout,
)
})
}
/// Spill `value` immediately by
@@ -447,10 +469,9 @@ impl<'a> Context<'a> {
}
// Assign a spill slot for the whole virtual register.
let ss = self.cur
.func
.stack_slots
.make_spill_slot(self.cur.func.dfg.value_type(value));
let ss = self.cur.func.stack_slots.make_spill_slot(
self.cur.func.dfg.value_type(value),
);
for &v in self.virtregs.congruence_class(&value) {
self.liveness.spill(v);
self.cur.func.locations[v] = ValueLoc::Stack(ss);
@@ -481,11 +502,12 @@ impl<'a> Context<'a> {
// Update live ranges.
self.liveness.create_dead(copy, inst, Affinity::Reg(rci));
self.liveness
.extend_locally(copy,
self.cur.func.layout.pp_ebb(inst),
self.cur.current_inst().expect("must be at an instruction"),
&self.cur.func.layout);
self.liveness.extend_locally(
copy,
self.cur.func.layout.pp_ebb(inst),
self.cur.current_inst().expect("must be at an instruction"),
&self.cur.func.layout,
);
copy
}


@@ -81,11 +81,12 @@ impl VirtRegs {
/// If `value` belongs to a virtual register, the congruence class is the values of the virtual
/// register. Otherwise it is just the value itself.
pub fn congruence_class<'a, 'b>(&'a self, value: &'b Value) -> &'b [Value]
where 'a: 'b
where
'a: 'b,
{
self.get(*value)
.map(|vr| self.values(vr))
.unwrap_or(ref_slice(value))
self.get(*value).map(|vr| self.values(vr)).unwrap_or(
ref_slice(value),
)
}
/// Check if `a` and `b` belong to the same congruence class.
@@ -126,9 +127,11 @@ impl VirtRegs {
.min()
.unwrap_or_else(|| self.vregs.push(Default::default()));
assert_eq!(values.len(),
singletons + cleared,
"Can't unify partial virtual registers");
assert_eq!(
values.len(),
singletons + cleared,
"Can't unify partial virtual registers"
);
self.vregs[vreg].extend(values.iter().cloned(), &mut self.pool);
for &v in values {


@@ -137,8 +137,8 @@ impl Configurable for Builder {
self.bytes[offset] = value.parse().map_err(|_| Error::BadValue)?;
}
Detail::Enum { last, enumerators } => {
self.bytes[offset] = parse_enum_value(value,
self.template.enums(last, enumerators))?;
self.bytes[offset] =
parse_enum_value(value, self.template.enums(last, enumerators))?;
}
Detail::Preset => return Err(Error::BadName),
}
@@ -218,11 +218,12 @@ pub mod detail {
/// Format a setting value as a TOML string. This is mostly for use by the generated
/// `Display` implementation.
pub fn format_toml_value(&self,
detail: Detail,
byte: u8,
f: &mut fmt::Formatter)
-> fmt::Result {
pub fn format_toml_value(
&self,
detail: Detail,
byte: u8,
f: &mut fmt::Formatter,
) -> fmt::Result {
match detail {
Detail::Bool { bit } => write!(f, "{}", (byte & (1 << bit)) != 0),
Detail::Num => write!(f, "{}", byte),
@@ -312,15 +313,17 @@ mod tests {
fn display_default() {
let b = builder();
let f = Flags::new(&b);
assert_eq!(f.to_string(),
"[shared]\n\
assert_eq!(
f.to_string(),
"[shared]\n\
opt_level = \"default\"\n\
enable_verifier = false\n\
is_64bit = false\n\
is_compressed = false\n\
enable_float = true\n\
enable_simd = true\n\
enable_atomics = true\n");
enable_atomics = true\n"
);
assert_eq!(f.opt_level(), super::OptLevel::Default);
assert_eq!(f.enable_simd(), true);
}


@@ -8,7 +8,7 @@ use std::collections::HashMap;
/// Test whether the given opcode is unsafe to even consider for GVN.
fn trivially_unsafe_for_gvn(opcode: Opcode) -> bool {
opcode.is_call() || opcode.is_branch() || opcode.is_terminator() || opcode.is_return() ||
opcode.can_trap() || opcode.other_side_effects()
opcode.can_trap() || opcode.other_side_effects()
}
/// Perform simple GVN on `func`.


@@ -51,9 +51,9 @@ pub fn layout_stack(frame: &mut StackSlots, alignment: StackSize) -> Result<Stac
incoming_min = min(incoming_min, slot.offset);
}
StackSlotKind::OutgoingArg => {
let offset = slot.offset
.checked_add(slot.size as StackOffset)
.ok_or(CtonError::ImplLimitExceeded)?;
let offset = slot.offset.checked_add(slot.size as StackOffset).ok_or(
CtonError::ImplLimitExceeded,
)?;
outgoing_max = max(outgoing_max, offset);
}
StackSlotKind::SpillSlot | StackSlotKind::Local => {
@@ -82,9 +82,9 @@ pub fn layout_stack(frame: &mut StackSlots, alignment: StackSize) -> Result<Stac
_ => continue,
}
offset = offset
.checked_sub(slot.size as StackOffset)
.ok_or(CtonError::ImplLimitExceeded)?;
offset = offset.checked_sub(slot.size as StackOffset).ok_or(
CtonError::ImplLimitExceeded,
)?;
// Aligning the negative offset can never cause overflow. We're only clearing bits.
offset &= -(min_align as StackOffset);
@@ -96,9 +96,9 @@ pub fn layout_stack(frame: &mut StackSlots, alignment: StackSize) -> Result<Stac
}
// Finally, make room for the outgoing arguments.
offset = offset
.checked_sub(outgoing_max)
.ok_or(CtonError::ImplLimitExceeded)?;
offset = offset.checked_sub(outgoing_max).ok_or(
CtonError::ImplLimitExceeded,
)?;
offset &= -(alignment as StackOffset);
let frame_size = (offset as StackSize).wrapping_neg();


@@ -38,7 +38,8 @@ impl TopoOrder {
/// Reset and initialize with a preferred sequence of EBBs. The resulting topological order is
/// guaranteed to contain all of the EBBs in `preferred` as well as any dominators.
pub fn reset<Ebbs>(&mut self, preferred: Ebbs)
where Ebbs: IntoIterator<Item = Ebb>
where
Ebbs: IntoIterator<Item = Ebb>,
{
self.preferred.clear();
self.preferred.extend(preferred);


@@ -22,12 +22,13 @@ use verifier::Result;
/// - The values in a virtual register are ordered according to the dominator tree's `rpo_cmp()`.
///
/// We don't verify that virtual registers are minimal. Minimal CSSA is not required.
pub fn verify_cssa(func: &Function,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
liveness: &Liveness,
virtregs: &VirtRegs)
-> Result {
pub fn verify_cssa(
func: &Function,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
liveness: &Liveness,
virtregs: &VirtRegs,
) -> Result {
let verifier = CssaVerifier {
func,
cfg,
@@ -77,10 +78,12 @@ impl<'a> CssaVerifier<'a> {
return err!(val, "Value in {} has same def as {}", vreg, prev_val);
}
Ordering::Greater => {
return err!(val,
"Value in {} in wrong order relative to {}",
vreg,
prev_val);
return err!(
val,
"Value in {} in wrong order relative to {}",
vreg,
prev_val
);
}
}
@@ -102,16 +105,20 @@ impl<'a> CssaVerifier<'a> {
for &(_, pred) in self.cfg.get_predecessors(ebb) {
let pred_args = self.func.dfg.inst_variable_args(pred);
// This should have been caught by an earlier verifier pass.
assert_eq!(ebb_args.len(),
pred_args.len(),
"Wrong arguments on branch.");
assert_eq!(
ebb_args.len(),
pred_args.len(),
"Wrong arguments on branch."
);
for (&ebb_arg, &pred_arg) in ebb_args.iter().zip(pred_args) {
if !self.virtregs.same_class(ebb_arg, pred_arg) {
return err!(pred,
"{} and {} must be in the same virtual register",
ebb_arg,
pred_arg);
return err!(
pred,
"{} and {} must be in the same virtual register",
ebb_arg,
pred_arg
);
}
}
}


@@ -21,11 +21,12 @@ use verifier::Result;
///
/// We don't verify that live ranges are minimal. This would require recomputing live ranges for
/// all values.
pub fn verify_liveness(isa: &TargetIsa,
func: &Function,
cfg: &ControlFlowGraph,
liveness: &Liveness)
-> Result {
pub fn verify_liveness(
isa: &TargetIsa,
func: &Function,
cfg: &ControlFlowGraph,
liveness: &Liveness,
) -> Result {
let verifier = LivenessVerifier {
isa,
func,
@@ -76,18 +77,22 @@ impl<'a> LivenessVerifier<'a> {
if encoding.is_legal() {
// A legal instruction is not allowed to define ghost values.
if lr.affinity.is_none() {
return err!(inst,
"{} is a ghost value defined by a real [{}] instruction",
val,
self.isa.encoding_info().display(encoding));
return err!(
inst,
"{} is a ghost value defined by a real [{}] instruction",
val,
self.isa.encoding_info().display(encoding)
);
}
} else {
// A non-encoded instruction can only define ghost values.
if !lr.affinity.is_none() {
return err!(inst,
"{} is a real {} value defined by a ghost instruction",
val,
lr.affinity.display(&self.isa.register_info()));
return err!(
inst,
"{} is a real {} value defined by a ghost instruction",
val,
lr.affinity.display(&self.isa.register_info())
);
}
}
}
@@ -108,10 +113,12 @@ impl<'a> LivenessVerifier<'a> {
// A branch argument can be a ghost value if the corresponding destination
// EBB argument is a ghost value.
if lr.affinity.is_none() && !self.is_ghost_branch_argument(inst, idx) {
return err!(inst,
"{} is a ghost value used by a real [{}] instruction",
val,
self.isa.encoding_info().display(encoding));
return err!(
inst,
"{} is a ghost value used by a real [{}] instruction",
val,
self.isa.encoding_info().display(encoding)
);
}
}
}
@@ -126,7 +133,8 @@ impl<'a> LivenessVerifier<'a> {
// Check if `inst` is in the def range, not including the def itself.
if l.cmp(lr.def(), inst) == Ordering::Less &&
l.cmp(inst, lr.def_local_end()) != Ordering::Greater {
l.cmp(inst, lr.def_local_end()) != Ordering::Greater
{
return true;
}
@@ -205,11 +213,13 @@ impl<'a> LivenessVerifier<'a> {
let end_ebb = match l.inst_ebb(livein.end) {
Some(e) => e,
None => {
return err!(loc,
"{} livein for {} ends at {} which is not in the layout",
val,
ebb,
livein.end)
return err!(
loc,
"{} livein for {} ends at {} which is not in the layout",
val,
ebb,
livein.end
)
}
};
@@ -218,10 +228,12 @@ impl<'a> LivenessVerifier<'a> {
// If `val` is live-in at `ebb`, it must be live at all the predecessors.
for &(_, pred) in self.cfg.get_predecessors(ebb) {
if !self.live_at_use(lr, pred) {
return err!(pred,
"{} is live in to {} but not live at predecessor",
val,
ebb);
return err!(
pred,
"{} is live in to {} but not live at predecessor",
val,
ebb
);
}
}


@@ -127,11 +127,12 @@ pub fn verify_function(func: &Function, isa: Option<&TargetIsa>) -> Result {
/// Verify `func` after checking the integrity of associated context data structures `cfg` and
/// `domtree`.
pub fn verify_context(func: &Function,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
isa: Option<&TargetIsa>)
-> Result {
pub fn verify_context(
func: &Function,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
isa: Option<&TargetIsa>,
) -> Result {
let verifier = Verifier::new(func, isa);
verifier.cfg_integrity(cfg)?;
if domtree.is_valid() {
@@ -187,9 +188,11 @@ impl<'a> Verifier<'a> {
if is_terminator && !is_last_inst {
// Terminating instructions only occur at the end of blocks.
return err!(inst,
"a terminator instruction was encountered before the end of {}",
ebb);
return err!(
inst,
"a terminator instruction was encountered before the end of {}",
ebb
);
}
if is_last_inst && !is_terminator {
return err!(ebb, "block does not end in a terminator instruction!");
@@ -237,10 +240,12 @@ impl<'a> Verifier<'a> {
// All result values for multi-valued instructions are created
let got_results = dfg.inst_results(inst).len();
if got_results != total_results {
return err!(inst,
"expected {} result values, found {}",
total_results,
got_results);
return err!(
inst,
"expected {} result values, found {}",
total_results,
got_results
);
}
self.verify_entity_references(inst)
@@ -407,22 +412,30 @@ impl<'a> Verifier<'a> {
ValueDef::Res(def_inst, _) => {
// Value is defined by an instruction that exists.
if !dfg.inst_is_valid(def_inst) {
return err!(loc_inst,
"{} is defined by invalid instruction {}",
v,
def_inst);
return err!(
loc_inst,
"{} is defined by invalid instruction {}",
v,
def_inst
);
}
// Defining instruction is inserted in an EBB.
if self.func.layout.inst_ebb(def_inst) == None {
return err!(loc_inst,
"{} is defined by {} which has no EBB",
v,
def_inst);
return err!(
loc_inst,
"{} is defined by {} which has no EBB",
v,
def_inst
);
}
// Defining instruction dominates the instruction that uses the value.
if self.domtree.is_reachable(self.func.layout.pp_ebb(loc_inst)) &&
!self.domtree
.dominates(def_inst, loc_inst, &self.func.layout) {
!self.domtree.dominates(
def_inst,
loc_inst,
&self.func.layout,
)
{
return err!(loc_inst, "uses value from non-dominating {}", def_inst);
}
}
@@ -433,14 +446,17 @@ impl<'a> Verifier<'a> {
}
// Defining EBB is inserted in the layout
if !self.func.layout.is_ebb_inserted(ebb) {
return err!(loc_inst,
"{} is defined by {} which is not in the layout",
v,
ebb);
return err!(
loc_inst,
"{} is defined by {} which is not in the layout",
v,
ebb
);
}
// The defining EBB dominates the instruction using this value.
if self.domtree.is_reachable(ebb) &&
!self.domtree.dominates(ebb, loc_inst, &self.func.layout) {
!self.domtree.dominates(ebb, loc_inst, &self.func.layout)
{
return err!(loc_inst, "uses value arg from non-dominating {}", ebb);
}
}
@@ -456,39 +472,48 @@ impl<'a> Verifier<'a> {
let expected = domtree.idom(ebb);
let got = self.domtree.idom(ebb);
if got != expected {
return err!(ebb,
"invalid domtree, expected idom({}) = {:?}, got {:?}",
ebb,
expected,
got);
return err!(
ebb,
"invalid domtree, expected idom({}) = {:?}, got {:?}",
ebb,
expected,
got
);
}
}
// We also verify if the postorder defined by `DominatorTree` is sane
if self.domtree.cfg_postorder().len() != domtree.cfg_postorder().len() {
return err!(AnyEntity::Function,
"incorrect number of Ebbs in postorder traversal");
return err!(
AnyEntity::Function,
"incorrect number of Ebbs in postorder traversal"
);
}
for (index, (&true_ebb, &test_ebb)) in
self.domtree
.cfg_postorder()
.iter()
.zip(domtree.cfg_postorder().iter())
.enumerate() {
.enumerate()
{
if true_ebb != test_ebb {
return err!(test_ebb,
"invalid domtree, postorder ebb number {} should be {}, got {}",
index,
true_ebb,
test_ebb);
return err!(
test_ebb,
"invalid domtree, postorder ebb number {} should be {}, got {}",
index,
true_ebb,
test_ebb
);
}
}
// We verify rpo_cmp on pairs of adjacent ebbs in the postorder
for (&prev_ebb, &next_ebb) in self.domtree.cfg_postorder().iter().adjacent_pairs() {
if domtree.rpo_cmp(prev_ebb, next_ebb, &self.func.layout) != Ordering::Greater {
return err!(next_ebb,
"invalid domtree, rpo_cmp does not says {} is greater than {}",
prev_ebb,
next_ebb);
return err!(
next_ebb,
"invalid domtree, rpo_cmp does not says {} is greater than {}",
prev_ebb,
next_ebb
);
}
}
Ok(())
@@ -506,11 +531,13 @@ impl<'a> Verifier<'a> {
for (i, &arg) in self.func.dfg.ebb_args(ebb).iter().enumerate() {
let arg_type = self.func.dfg.value_type(arg);
if arg_type != expected_types[i].value_type {
return err!(ebb,
"entry block argument {} expected to have type {}, got {}",
i,
expected_types[i],
arg_type);
return err!(
ebb,
"entry block argument {} expected to have type {}, got {}",
i,
expected_types[i],
arg_type
);
}
}
}
@@ -551,12 +578,14 @@ impl<'a> Verifier<'a> {
let expected_type = self.func.dfg.compute_result_type(inst, i, ctrl_type);
if let Some(expected_type) = expected_type {
if result_type != expected_type {
return err!(inst,
"expected result {} ({}) to have type {}, found {}",
i,
result,
expected_type,
result_type);
return err!(
inst,
"expected result {} ({}) to have type {}, found {}",
i,
result,
expected_type,
result_type
);
}
} else {
return err!(inst, "has more result values than expected");
@@ -579,22 +608,26 @@ impl<'a> Verifier<'a> {
match constraints.value_argument_constraint(i, ctrl_type) {
ResolvedConstraint::Bound(expected_type) => {
if arg_type != expected_type {
return err!(inst,
"arg {} ({}) has type {}, expected {}",
i,
arg,
arg_type,
expected_type);
return err!(
inst,
"arg {} ({}) has type {}, expected {}",
i,
arg,
arg_type,
expected_type
);
}
}
ResolvedConstraint::Free(type_set) => {
if !type_set.contains(arg_type) {
return err!(inst,
"arg {} ({}) with type {} failed to satisfy type set {:?}",
i,
arg,
arg_type,
type_set);
return err!(
inst,
"arg {} ({}) with type {} failed to satisfy type set {:?}",
i,
arg,
arg_type,
type_set
);
}
}
}
@@ -605,21 +638,21 @@ impl<'a> Verifier<'a> {
fn typecheck_variable_args(&self, inst: Inst) -> Result {
match self.func.dfg[inst].analyze_branch(&self.func.dfg.value_lists) {
BranchInfo::SingleDest(ebb, _) => {
let iter = self.func
.dfg
.ebb_args(ebb)
.iter()
.map(|&v| self.func.dfg.value_type(v));
let iter = self.func.dfg.ebb_args(ebb).iter().map(|&v| {
self.func.dfg.value_type(v)
});
self.typecheck_variable_args_iterator(inst, iter)?;
}
BranchInfo::Table(table) => {
for (_, ebb) in self.func.jump_tables[table].entries() {
let arg_count = self.func.dfg.num_ebb_args(ebb);
if arg_count != 0 {
return err!(inst,
"takes no arguments, but had target {} with {} arguments",
ebb,
arg_count);
return err!(
inst,
"takes no arguments, but had target {} with {} arguments",
ebb,
arg_count
);
}
}
}
@@ -649,10 +682,11 @@ impl<'a> Verifier<'a> {
Ok(())
}
fn typecheck_variable_args_iterator<I: Iterator<Item = Type>>(&self,
inst: Inst,
iter: I)
-> Result {
fn typecheck_variable_args_iterator<I: Iterator<Item = Type>>(
&self,
inst: Inst,
iter: I,
) -> Result {
let variable_args = self.func.dfg.inst_variable_args(inst);
let mut i = 0;
@@ -665,20 +699,24 @@ impl<'a> Verifier<'a> {
let arg = variable_args[i];
let arg_type = self.func.dfg.value_type(arg);
if expected_type != arg_type {
return err!(inst,
"arg {} ({}) has type {}, expected {}",
i,
variable_args[i],
arg_type,
expected_type);
return err!(
inst,
"arg {} ({}) has type {}, expected {}",
i,
variable_args[i],
arg_type,
expected_type
);
}
i += 1;
}
if i != variable_args.len() {
return err!(inst,
"mismatched argument count, got {}, expected {}",
variable_args.len(),
i);
return err!(
inst,
"mismatched argument count, got {}, expected {}",
variable_args.len(),
i
);
}
Ok(())
}
@@ -707,34 +745,42 @@ impl<'a> Verifier<'a> {
self.verify_stack_slot(inst, ss)?;
let slot = &self.func.stack_slots[ss];
if slot.kind != StackSlotKind::OutgoingArg {
return err!(inst,
"Outgoing stack argument {} in wrong stack slot: {} = {}",
arg,
ss,
slot);
return err!(
inst,
"Outgoing stack argument {} in wrong stack slot: {} = {}",
arg,
ss,
slot
);
}
if slot.offset != offset {
return err!(inst,
"Outgoing stack argument {} should have offset {}: {} = {}",
arg,
offset,
ss,
slot);
return err!(
inst,
"Outgoing stack argument {} should have offset {}: {} = {}",
arg,
offset,
ss,
slot
);
}
if slot.size != abi.value_type.bytes() {
return err!(inst,
"Outgoing stack argument {} wrong size for {}: {} = {}",
arg,
abi.value_type,
ss,
slot);
return err!(
inst,
"Outgoing stack argument {} wrong size for {}: {} = {}",
arg,
abi.value_type,
ss,
slot
);
}
} else {
let reginfo = self.isa.map(|i| i.register_info());
return err!(inst,
"Outgoing stack argument {} in wrong location: {}",
arg,
arg_loc.display(reginfo.as_ref()));
return err!(
inst,
"Outgoing stack argument {} in wrong location: {}",
arg,
arg_loc.display(reginfo.as_ref())
);
}
}
}
@@ -751,12 +797,14 @@ impl<'a> Verifier<'a> {
for (i, (&arg, &expected_type)) in args.iter().zip(expected_types).enumerate() {
let arg_type = self.func.dfg.value_type(arg);
if arg_type != expected_type.value_type {
return err!(inst,
"arg {} ({}) has type {}, must match function signature of {}",
i,
arg,
arg_type,
expected_type);
return err!(
inst,
"arg {} ({}) has type {}, must match function signature of {}",
i,
arg,
arg_type,
expected_type
);
}
}
}
@@ -775,9 +823,11 @@ impl<'a> Verifier<'a> {
let missing_succs: Vec<Ebb> = expected_succs.difference(&got_succs).cloned().collect();
if !missing_succs.is_empty() {
return err!(ebb,
"cfg lacked the following successor(s) {:?}",
missing_succs);
return err!(
ebb,
"cfg lacked the following successor(s) {:?}",
missing_succs
);
}
let excess_succs: Vec<Ebb> = got_succs.difference(&expected_succs).cloned().collect();
@@ -790,9 +840,11 @@ impl<'a> Verifier<'a> {
let missing_preds: Vec<Inst> = expected_preds.difference(&got_preds).cloned().collect();
if !missing_preds.is_empty() {
return err!(ebb,
"cfg lacked the following predecessor(s) {:?}",
missing_preds);
return err!(
ebb,
"cfg lacked the following predecessor(s) {:?}",
missing_preds
);
}
let excess_preds: Vec<Inst> = got_preds.difference(&expected_preds).cloned().collect();
@@ -826,23 +878,28 @@ impl<'a> Verifier<'a> {
let encoding = self.func.encodings[inst];
if encoding.is_legal() {
let verify_encoding =
isa.encode(&self.func.dfg,
&self.func.dfg[inst],
self.func.dfg.ctrl_typevar(inst));
let verify_encoding = isa.encode(
&self.func.dfg,
&self.func.dfg[inst],
self.func.dfg.ctrl_typevar(inst),
);
match verify_encoding {
Ok(verify_encoding) => {
if verify_encoding != encoding {
return err!(inst,
"Instruction re-encoding {} doesn't match {}",
isa.encoding_info().display(verify_encoding),
isa.encoding_info().display(encoding));
return err!(
inst,
"Instruction re-encoding {} doesn't match {}",
isa.encoding_info().display(verify_encoding),
isa.encoding_info().display(encoding)
);
}
}
Err(_) => {
return err!(inst,
"Instruction failed to re-encode {}",
isa.encoding_info().display(encoding))
return err!(
inst,
"Instruction failed to re-encode {}",
isa.encoding_info().display(encoding)
)
}
}
return Ok(());
@@ -932,9 +989,9 @@ mod tests {
let mut func = Function::new();
let ebb0 = func.dfg.make_ebb();
func.layout.append_ebb(ebb0);
let nullary_with_bad_opcode =
func.dfg
.make_inst(InstructionData::Nullary { opcode: Opcode::Jump });
let nullary_with_bad_opcode = func.dfg.make_inst(
InstructionData::Nullary { opcode: Opcode::Jump },
);
func.layout.append_inst(nullary_with_bad_opcode, ebb0);
let verifier = Verifier::new(&func, None);
assert_err_with_msg!(verifier.run(), "instruction format");


@@ -38,10 +38,11 @@ fn write_spec(w: &mut Write, func: &Function, regs: Option<&RegInfo>) -> Result
write!(w, "function {}{}", func.name, func.signature.display(regs))
}
fn write_preamble(w: &mut Write,
func: &Function,
regs: Option<&RegInfo>)
-> result::Result<bool, Error> {
fn write_preamble(
w: &mut Write,
func: &Function,
regs: Option<&RegInfo>,
) -> result::Result<bool, Error> {
let mut any = false;
for ss in func.stack_slots.keys() {
@@ -63,10 +64,12 @@ fn write_preamble(w: &mut Write,
// signatures.
for sig in func.dfg.signatures.keys() {
any = true;
writeln!(w,
" {} = {}",
sig,
func.dfg.signatures[sig].display(regs))?;
writeln!(
w,
" {} = {}",
sig,
func.dfg.signatures[sig].display(regs)
)?;
}
for fnref in func.dfg.ext_funcs.keys() {
@@ -163,8 +166,10 @@ fn type_suffix(func: &Function, inst: Inst) -> Option<Type> {
}
let rtype = func.dfg.ctrl_typevar(inst);
assert!(!rtype.is_void(),
"Polymorphic instruction must produce a result");
assert!(
!rtype.is_void(),
"Polymorphic instruction must produce a result"
);
Some(rtype)
}
@@ -179,11 +184,12 @@ fn write_value_aliases(w: &mut Write, func: &Function, inst: Inst, indent: usize
Ok(())
}
fn write_instruction(w: &mut Write,
func: &Function,
isa: Option<&TargetIsa>,
inst: Inst)
-> Result {
fn write_instruction(
w: &mut Write,
func: &Function,
isa: Option<&TargetIsa>,
inst: Inst,
) -> Result {
// Indent all instructions to col 24 if any encodings are present.
let indent = if func.encodings.is_empty() { 4 } else { 24 };
@@ -240,11 +246,12 @@ fn write_instruction(w: &mut Write,
}
/// Write the operands of `inst` to `w` with a prepended space.
pub fn write_operands(w: &mut Write,
dfg: &DataFlowGraph,
isa: Option<&TargetIsa>,
inst: Inst)
-> Result {
pub fn write_operands(
w: &mut Write,
dfg: &DataFlowGraph,
isa: Option<&TargetIsa>,
inst: Inst,
) -> Result {
let pool = &dfg.value_lists;
use ir::instructions::InstructionData::*;
match dfg[inst] {
@@ -278,10 +285,12 @@ pub fn write_operands(w: &mut Write,
if args.is_empty() {
write!(w, " {}", destination)
} else {
write!(w,
" {}({})",
destination,
DisplayValues(args.as_slice(pool)))
write!(
w,
" {}({})",
destination,
DisplayValues(args.as_slice(pool))
)
}
}
Branch {
@@ -315,11 +324,13 @@ pub fn write_operands(w: &mut Write,
}
IndirectCall { sig_ref, ref args, .. } => {
let args = args.as_slice(pool);
write!(w,
" {}, {}({})",
sig_ref,
args[0],
DisplayValues(&args[1..]))
write!(
w,
" {}, {}({})",
sig_ref,
args[0],
DisplayValues(&args[1..])
)
}
StackLoad { stack_slot, offset, .. } => write!(w, " {}{}", stack_slot, offset),
StackStore {
@@ -341,11 +352,13 @@ pub fn write_operands(w: &mut Write,
RegMove { arg, src, dst, .. } => {
if let Some(isa) = isa {
let regs = isa.register_info();
write!(w,
" {}, {} -> {}",
arg,
regs.display_regunit(src),
regs.display_regunit(dst))
write!(
w,
" {}, {} -> {}",
arg,
regs.display_regunit(src),
regs.display_regunit(dst)
)
} else {
write!(w, " {}, %{} -> %{}", arg, src, dst)
}
@@ -382,22 +395,31 @@ mod tests {
f.name = FunctionName::new("foo");
assert_eq!(f.to_string(), "function %foo() native {\n}\n");
f.stack_slots
.push(StackSlotData::new(StackSlotKind::Local, 4));
assert_eq!(f.to_string(),
"function %foo() native {\n ss0 = local 4\n}\n");
f.stack_slots.push(
StackSlotData::new(StackSlotKind::Local, 4),
);
assert_eq!(
f.to_string(),
"function %foo() native {\n ss0 = local 4\n}\n"
);
let ebb = f.dfg.make_ebb();
f.layout.append_ebb(ebb);
assert_eq!(f.to_string(),
"function %foo() native {\n ss0 = local 4\n\nebb0:\n}\n");
assert_eq!(
f.to_string(),
"function %foo() native {\n ss0 = local 4\n\nebb0:\n}\n"
);
f.dfg.append_ebb_arg(ebb, types::I8);
assert_eq!(f.to_string(),
"function %foo() native {\n ss0 = local 4\n\nebb0(v0: i8):\n}\n");
assert_eq!(
f.to_string(),
"function %foo() native {\n ss0 = local 4\n\nebb0(v0: i8):\n}\n"
);
f.dfg.append_ebb_arg(ebb, types::F32.by(4).unwrap());
assert_eq!(f.to_string(),
"function %foo() native {\n ss0 = local 4\n\nebb0(v0: i8, v1: f32x4):\n}\n");
assert_eq!(
f.to_string(),
"function %foo() native {\n ss0 = local 4\n\nebb0(v0: i8, v1: f32x4):\n}\n"
);
}
}


@@ -47,9 +47,11 @@ impl Directive {
"unordered" => Ok(Directive::Unordered(pat)),
"not" => {
if !pat.defs().is_empty() {
let msg = format!("can't define variables '$({}=...' in not: {}",
pat.defs()[0],
rest);
let msg = format!(
"can't define variables '$({}=...' in not: {}",
pat.defs()[0],
rest
);
Err(Error::DuplicateDef(msg))
} else {
Ok(Directive::Not(pat))
@@ -63,16 +65,23 @@ impl Directive {
fn regex(rest: &str) -> Result<Directive> {
let varlen = varname_prefix(rest);
if varlen == 0 {
return Err(Error::Syntax(format!("invalid variable name in regex: {}", rest)));
return Err(Error::Syntax(
format!("invalid variable name in regex: {}", rest),
));
}
let var = rest[0..varlen].to_string();
if !rest[varlen..].starts_with('=') {
return Err(Error::Syntax(format!("expected '=' after variable '{}' in regex: {}",
var,
rest)));
return Err(Error::Syntax(format!(
"expected '=' after variable '{}' in regex: {}",
var,
rest
)));
}
// Ignore trailing white space in the regex, including CR.
Ok(Directive::Regex(var, rest[varlen + 1..].trim_right().to_string()))
Ok(Directive::Regex(
var,
rest[varlen + 1..].trim_right().to_string(),
))
}
}
@@ -183,13 +192,13 @@ impl Checker {
continue;
}
Directive::Regex(ref var, ref rx) => {
state
.vars
.insert(var.clone(),
VarDef {
value: Value::Regex(Cow::Borrowed(rx)),
offset: 0,
});
state.vars.insert(
var.clone(),
VarDef {
value: Value::Regex(Cow::Borrowed(rx)),
offset: 0,
},
);
continue;
}
};
@@ -210,15 +219,16 @@ impl Checker {
state.recorder.directive(not_idx);
if let Some(mat) = rx.find(&text[not_begin..match_begin]) {
// Matched `not:` pattern.
state
.recorder
.matched_not(rx.as_str(),
(not_begin + mat.start(), not_begin + mat.end()));
state.recorder.matched_not(rx.as_str(), (
not_begin + mat.start(),
not_begin + mat.end(),
));
return Ok(false);
} else {
state
.recorder
.missed_not(rx.as_str(), (not_begin, match_begin));
state.recorder.missed_not(
rx.as_str(),
(not_begin, match_begin),
);
}
}
}
@@ -354,13 +364,13 @@ impl<'a> State<'a> {
})
};
Ok(if let Some(mat) = matched_range {
let r = (range.0 + mat.start(), range.0 + mat.end());
self.recorder.matched_check(rx.as_str(), r);
Some(r)
} else {
self.recorder.missed_check(rx.as_str(), range);
None
})
let r = (range.0 + mat.start(), range.0 + mat.end());
self.recorder.matched_check(rx.as_str(), r);
Some(r)
} else {
self.recorder.missed_check(rx.as_str(), range);
None
})
}
}
@@ -413,20 +423,32 @@ mod tests {
let mut b = CheckerBuilder::new();
assert_eq!(b.directive("not here: more text").map_err(e2s), Ok(false));
assert_eq!(b.directive("not here: regex: X=more text").map_err(e2s),
Ok(true));
assert_eq!(b.directive("regex: X = tommy").map_err(e2s),
Err("expected '=' after variable 'X' in regex: X = tommy".to_string()));
assert_eq!(b.directive("[arm]not: patt $x $(y) here").map_err(e2s),
Ok(true));
assert_eq!(b.directive("[x86]sameln: $x $(y=[^]]*) there").map_err(e2s),
Ok(true));
assert_eq!(
b.directive("not here: regex: X=more text").map_err(e2s),
Ok(true)
);
assert_eq!(
b.directive("regex: X = tommy").map_err(e2s),
Err(
"expected '=' after variable 'X' in regex: X = tommy".to_string(),
)
);
assert_eq!(
b.directive("[arm]not: patt $x $(y) here").map_err(e2s),
Ok(true)
);
assert_eq!(
b.directive("[x86]sameln: $x $(y=[^]]*) there").map_err(e2s),
Ok(true)
);
// Windows line ending sneaking in.
assert_eq!(b.directive("regex: Y=foo\r").map_err(e2s), Ok(true));
let c = b.finish();
assert_eq!(c.to_string(),
"#0 regex: X=more text\n#1 not: patt $(x) $(y) here\n#2 sameln: $(x) \
$(y=[^]]*) there\n#3 regex: Y=foo\n");
assert_eq!(
c.to_string(),
"#0 regex: X=more text\n#1 not: patt $(x) $(y) here\n#2 sameln: $(x) \
$(y=[^]]*) there\n#3 regex: Y=foo\n"
);
}
}


@@ -111,16 +111,21 @@ impl<'a> Display for Explainer<'a> {
}
// Emit the match message itself.
writeln!(f,
"{} #{}{}: {}",
if m.is_match { "Matched" } else { "Missed" },
m.directive,
if m.is_not { " not" } else { "" },
m.regex)?;
writeln!(
f,
"{} #{}{}: {}",
if m.is_match { "Matched" } else { "Missed" },
m.directive,
if m.is_not { " not" } else { "" },
m.regex
)?;
// Emit any variable definitions.
if let Ok(found) = self.vardefs
.binary_search_by_key(&m.directive, |v| v.directive) {
if let Ok(found) = self.vardefs.binary_search_by_key(
&m.directive,
|v| v.directive,
)
{
let mut first = found;
while first > 0 && self.vardefs[first - 1].directive == m.directive {
first -= 1;
@@ -148,55 +153,50 @@ impl<'a> Recorder for Explainer<'a> {
}
fn matched_check(&mut self, regex: &str, matched: MatchRange) {
self.matches
.push(Match {
directive: self.directive,
is_match: true,
is_not: false,
regex: regex.to_owned(),
range: matched,
});
self.matches.push(Match {
directive: self.directive,
is_match: true,
is_not: false,
regex: regex.to_owned(),
range: matched,
});
}
fn matched_not(&mut self, regex: &str, matched: MatchRange) {
self.matches
.push(Match {
directive: self.directive,
is_match: true,
is_not: true,
regex: regex.to_owned(),
range: matched,
});
self.matches.push(Match {
directive: self.directive,
is_match: true,
is_not: true,
regex: regex.to_owned(),
range: matched,
});
}
fn missed_check(&mut self, regex: &str, searched: MatchRange) {
self.matches
.push(Match {
directive: self.directive,
is_match: false,
is_not: false,
regex: regex.to_owned(),
range: searched,
});
self.matches.push(Match {
directive: self.directive,
is_match: false,
is_not: false,
regex: regex.to_owned(),
range: searched,
});
}
fn missed_not(&mut self, regex: &str, searched: MatchRange) {
self.matches
.push(Match {
directive: self.directive,
is_match: false,
is_not: true,
regex: regex.to_owned(),
range: searched,
});
self.matches.push(Match {
directive: self.directive,
is_match: false,
is_not: true,
regex: regex.to_owned(),
range: searched,
});
}
fn defined_var(&mut self, varname: &str, value: &str) {
self.vardefs
.push(VarDef {
directive: self.directive,
varname: varname.to_owned(),
value: value.to_owned(),
});
self.vardefs.push(VarDef {
directive: self.directive,
varname: varname.to_owned(),
value: value.to_owned(),
});
}
}


@@ -70,7 +70,9 @@ impl Pattern {
/// Return the allocated def number.
fn add_def(&mut self, v: &str) -> Result<usize> {
if self.defines_var(v) {
Err(Error::DuplicateDef(format!("duplicate definition of ${} in same pattern", v)))
Err(Error::DuplicateDef(
format!("duplicate definition of ${} in same pattern", v),
))
} else {
let idx = self.defs.len();
self.defs.push(v.to_string());
@@ -111,8 +113,10 @@ impl Pattern {
// All remaining possibilities start with `$(`.
if s.len() < 2 || !s.starts_with("$(") {
return Err(Error::Syntax("pattern syntax error, use $$ to match a single $"
.to_string()));
return Err(Error::Syntax(
"pattern syntax error, use $$ to match a single $"
.to_string(),
));
}
// Match the variable name, allowing for an empty varname in `$()`, or `$(=...)`.
@@ -137,7 +141,9 @@ impl Pattern {
// Variable definition. Fall through.
}
Some(ch) => {
return Err(Error::Syntax(format!("syntax error in $({}... '{}'", varname, ch)));
return Err(Error::Syntax(
format!("syntax error in $({}... '{}'", varname, ch),
));
}
}
@@ -155,23 +161,31 @@ impl Pattern {
let refname_begin = varname_end + 2;
let refname_end = refname_begin + varname_prefix(&s[refname_begin..]);
if refname_begin == refname_end {
return Err(Error::Syntax(format!("expected variable name in $({}=$...", varname)));
return Err(Error::Syntax(
format!("expected variable name in $({}=$...", varname),
));
}
if !s[refname_end..].starts_with(')') {
return Err(Error::Syntax(format!("expected ')' after $({}=${}...",
varname,
&s[refname_begin..refname_end])));
return Err(Error::Syntax(format!(
"expected ')' after $({}=${}...",
varname,
&s[refname_begin..refname_end]
)));
}
let refname = s[refname_begin..refname_end].to_string();
return if let Some(defidx) = def {
Ok((Part::DefVar {
def: defidx,
var: refname,
},
refname_end + 1))
} else {
Err(Error::Syntax(format!("expected variable name in $(=${})", refname)))
};
Ok((
Part::DefVar {
def: defidx,
var: refname,
},
refname_end + 1,
))
} else {
Err(Error::Syntax(
format!("expected variable name in $(=${})", refname),
))
};
}
// Last case: `$(var=...)` where `...` is a regular expression, possibly containing matched
@@ -193,9 +207,11 @@ impl Pattern {
};
Ok((part, rx_end + 1))
} else {
Err(Error::Syntax(format!("missing ')' after regex in $({}={}",
varname,
&s[rx_begin..rx_end])))
Err(Error::Syntax(format!(
"missing ')' after regex in $({}={}",
varname,
&s[rx_begin..rx_end]
)))
}
}
}
@@ -273,9 +289,11 @@ impl FromStr for Pattern {
let (part, len) = pat.parse_part(&s[pos..])?;
if let Some(v) = part.ref_var() {
if pat.defines_var(v) {
return Err(Error::Backref(format!("unsupported back-reference to '${}' \
return Err(Error::Backref(format!(
"unsupported back-reference to '${}' \
defined in same pattern",
v)));
v
)));
}
}
pat.parts.push(part);
@@ -410,49 +428,87 @@ mod tests {
// This is dubious, should we panic instead?
assert_eq!(pat.parse_part("").unwrap(), (Part::Text("".to_string()), 0));
assert_eq!(pat.parse_part("x").unwrap(),
(Part::Text("x".to_string()), 1));
assert_eq!(pat.parse_part("x2").unwrap(),
(Part::Text("x2".to_string()), 2));
assert_eq!(pat.parse_part("x$").unwrap(),
(Part::Text("x".to_string()), 1));
assert_eq!(pat.parse_part("x$$").unwrap(),
(Part::Text("x".to_string()), 1));
assert_eq!(
pat.parse_part("x").unwrap(),
(Part::Text("x".to_string()), 1)
);
assert_eq!(pat.parse_part("x2").unwrap(), (
Part::Text("x2".to_string()),
2,
));
assert_eq!(pat.parse_part("x$").unwrap(), (
Part::Text("x".to_string()),
1,
));
assert_eq!(pat.parse_part("x$$").unwrap(), (
Part::Text("x".to_string()),
1,
));
assert_eq!(pat.parse_part("$").unwrap_err().to_string(),
"pattern syntax error, use $$ to match a single $");
assert_eq!(
pat.parse_part("$").unwrap_err().to_string(),
"pattern syntax error, use $$ to match a single $"
);
assert_eq!(pat.parse_part("$$").unwrap(),
(Part::Text("$".to_string()), 2));
assert_eq!(pat.parse_part("$$ ").unwrap(),
(Part::Text("$".to_string()), 2));
assert_eq!(pat.parse_part("$$").unwrap(), (
Part::Text("$".to_string()),
2,
));
assert_eq!(pat.parse_part("$$ ").unwrap(), (
Part::Text("$".to_string()),
2,
));
assert_eq!(pat.parse_part("$0").unwrap(),
(Part::Var("0".to_string()), 2));
assert_eq!(pat.parse_part("$xx=").unwrap(),
(Part::Var("xx".to_string()), 3));
assert_eq!(pat.parse_part("$xx$").unwrap(),
(Part::Var("xx".to_string()), 3));
assert_eq!(
pat.parse_part("$0").unwrap(),
(Part::Var("0".to_string()), 2)
);
assert_eq!(pat.parse_part("$xx=").unwrap(), (
Part::Var("xx".to_string()),
3,
));
assert_eq!(pat.parse_part("$xx$").unwrap(), (
Part::Var("xx".to_string()),
3,
));
assert_eq!(pat.parse_part("$(0)").unwrap(),
(Part::Var("0".to_string()), 4));
assert_eq!(pat.parse_part("$()").unwrap(),
(Part::Text("".to_string()), 3));
assert_eq!(pat.parse_part("$(0)").unwrap(), (
Part::Var("0".to_string()),
4,
));
assert_eq!(pat.parse_part("$()").unwrap(), (
Part::Text("".to_string()),
3,
));
assert_eq!(pat.parse_part("$(0").unwrap_err().to_string(),
("unterminated $(0..."));
assert_eq!(pat.parse_part("$(foo:").unwrap_err().to_string(),
("syntax error in $(foo... ':'"));
assert_eq!(pat.parse_part("$(foo =").unwrap_err().to_string(),
("syntax error in $(foo... ' '"));
assert_eq!(pat.parse_part("$(eo0=$bar").unwrap_err().to_string(),
("expected ')' after $(eo0=$bar..."));
assert_eq!(pat.parse_part("$(eo1=$bar}").unwrap_err().to_string(),
("expected ')' after $(eo1=$bar..."));
assert_eq!(pat.parse_part("$(eo2=$)").unwrap_err().to_string(),
("expected variable name in $(eo2=$..."));
assert_eq!(pat.parse_part("$(eo3=$-)").unwrap_err().to_string(),
("expected variable name in $(eo3=$..."));
assert_eq!(
pat.parse_part("$(0").unwrap_err().to_string(),
("unterminated $(0...")
);
assert_eq!(
pat.parse_part("$(foo:").unwrap_err().to_string(),
("syntax error in $(foo... ':'")
);
assert_eq!(
pat.parse_part("$(foo =").unwrap_err().to_string(),
("syntax error in $(foo... ' '")
);
assert_eq!(
pat.parse_part("$(eo0=$bar").unwrap_err().to_string(),
("expected ')' after $(eo0=$bar...")
);
assert_eq!(
pat.parse_part("$(eo1=$bar}").unwrap_err().to_string(),
("expected ')' after $(eo1=$bar...")
);
assert_eq!(
pat.parse_part("$(eo2=$)").unwrap_err().to_string(),
("expected variable name in $(eo2=$...")
);
assert_eq!(
pat.parse_part("$(eo3=$-)").unwrap_err().to_string(),
("expected variable name in $(eo3=$...")
);
}
#[test]
@@ -460,48 +516,65 @@ mod tests {
use super::{Pattern, Part};
let mut pat = Pattern::new();
assert_eq!(pat.parse_part("$(foo=$bar)").unwrap(),
(Part::DefVar {
def: 0,
var: "bar".to_string(),
},
11));
assert_eq!(pat.parse_part("$(foo=$bar)").unwrap_err().to_string(),
"duplicate definition of $foo in same pattern");
assert_eq!(pat.parse_part("$(foo=$bar)").unwrap(), (
Part::DefVar {
def: 0,
var: "bar".to_string(),
},
11,
));
assert_eq!(
pat.parse_part("$(foo=$bar)").unwrap_err().to_string(),
"duplicate definition of $foo in same pattern"
);
assert_eq!(pat.parse_part("$(fxo=$bar)x").unwrap(),
(Part::DefVar {
def: 1,
var: "bar".to_string(),
},
11));
assert_eq!(pat.parse_part("$(fxo=$bar)x").unwrap(), (
Part::DefVar {
def: 1,
var: "bar".to_string(),
},
11,
));
assert_eq!(pat.parse_part("$(fo2=[a-z])").unwrap(),
(Part::DefLit {
def: 2,
regex: "(?P<fo2>[a-z])".to_string(),
},
12));
assert_eq!(pat.parse_part("$(fo3=[a-)])").unwrap(),
(Part::DefLit {
def: 3,
regex: "(?P<fo3>[a-)])".to_string(),
},
12));
assert_eq!(pat.parse_part("$(fo4=)").unwrap(),
(Part::DefLit {
def: 4,
regex: "(?P<fo4>)".to_string(),
},
7));
assert_eq!(pat.parse_part("$(fo2=[a-z])").unwrap(), (
Part::DefLit {
def: 2,
regex: "(?P<fo2>[a-z])".to_string(),
},
12,
));
assert_eq!(pat.parse_part("$(fo3=[a-)])").unwrap(), (
Part::DefLit {
def: 3,
regex: "(?P<fo3>[a-)])".to_string(),
},
12,
));
assert_eq!(pat.parse_part("$(fo4=)").unwrap(), (
Part::DefLit {
def: 4,
regex: "(?P<fo4>)".to_string(),
},
7,
));
assert_eq!(pat.parse_part("$(=.*)").unwrap(),
(Part::Regex("(?:.*)".to_string()), 6));
assert_eq!(pat.parse_part("$(=.*)").unwrap(), (
Part::Regex(
"(?:.*)".to_string(),
),
6,
));
assert_eq!(pat.parse_part("$(=)").unwrap(),
(Part::Regex("(?:)".to_string()), 4));
assert_eq!(pat.parse_part("$()").unwrap(),
(Part::Text("".to_string()), 3));
assert_eq!(pat.parse_part("$(=)").unwrap(), (
Part::Regex(
"(?:)".to_string(),
),
4,
));
assert_eq!(pat.parse_part("$()").unwrap(), (
Part::Text("".to_string()),
3,
));
}
#[test]
@@ -512,7 +585,9 @@ mod tests {
assert_eq!(format!("{:?}", p.parts), "[Text(\"Hello world!\")]");
let p: Pattern = " $foo=$(bar) ".parse().unwrap();
assert_eq!(format!("{:?}", p.parts),
"[Var(\"foo\"), Text(\"=\"), Var(\"bar\")]");
assert_eq!(
format!("{:?}", p.parts),
"[Var(\"foo\"), Text(\"=\"), Var(\"bar\")]"
);
}
}


@@ -42,10 +42,12 @@ fn no_matches() {
#[test]
fn simple() {
let c = CheckerBuilder::new()
.text("
.text(
"
check: one
check: two
")
",
)
.unwrap()
.finish();
@@ -71,10 +73,12 @@ fn simple() {
#[test]
fn sameln() {
let c = CheckerBuilder::new()
.text("
.text(
"
check: one
sameln: two
")
",
)
.unwrap()
.finish();
@@ -106,10 +110,12 @@ fn sameln() {
#[test]
fn nextln() {
let c = CheckerBuilder::new()
.text("
.text(
"
check: one
nextln: two
")
",
)
.unwrap()
.finish();
@@ -149,10 +155,12 @@ fn leading_nextln() {
// A leading nextln directive should match from line 2.
// This is somewhat arbitrary, but consistent with a preceding 'check: $()' directive.
let c = CheckerBuilder::new()
.text("
.text(
"
nextln: one
nextln: two
")
",
)
.unwrap()
.finish();
@@ -174,10 +182,12 @@ fn leading_nextln() {
fn leading_sameln() {
// A leading sameln directive should match from line 1.
let c = CheckerBuilder::new()
.text("
.text(
"
sameln: one
sameln: two
")
",
)
.unwrap()
.finish();
@@ -197,11 +207,13 @@ fn leading_sameln() {
#[test]
fn not() {
let c = CheckerBuilder::new()
.text("
.text(
"
check: one$()
not: $()eat$()
check: $()two
")
",
)
.unwrap()
.finish();
@@ -221,12 +233,14 @@ fn not() {
#[test]
fn notnot() {
let c = CheckerBuilder::new()
.text("
.text(
"
check: one$()
not: $()eat$()
not: half
check: $()two
")
",
)
.unwrap()
.finish();
@@ -254,87 +268,135 @@ fn notnot() {
#[test]
fn unordered() {
let c = CheckerBuilder::new()
.text("
.text(
"
check: one
unordered: two
unordered: three
check: four
")
",
)
.unwrap()
.finish();
assert_eq!(c.check("one two three four", NO_VARIABLES).map_err(e2s),
Ok(true));
assert_eq!(c.check("one three two four", NO_VARIABLES).map_err(e2s),
Ok(true));
assert_eq!(
c.check("one two three four", NO_VARIABLES).map_err(e2s),
Ok(true)
);
assert_eq!(
c.check("one three two four", NO_VARIABLES).map_err(e2s),
Ok(true)
);
assert_eq!(c.check("one two four three four", NO_VARIABLES)
.map_err(e2s),
Ok(true));
assert_eq!(c.check("one three four two four", NO_VARIABLES)
.map_err(e2s),
Ok(true));
assert_eq!(
c.check("one two four three four", NO_VARIABLES).map_err(
e2s,
),
Ok(true)
);
assert_eq!(
c.check("one three four two four", NO_VARIABLES).map_err(
e2s,
),
Ok(true)
);
assert_eq!(c.check("one two four three", NO_VARIABLES).map_err(e2s),
Ok(false));
assert_eq!(c.check("one three four two", NO_VARIABLES).map_err(e2s),
Ok(false));
assert_eq!(
c.check("one two four three", NO_VARIABLES).map_err(e2s),
Ok(false)
);
assert_eq!(
c.check("one three four two", NO_VARIABLES).map_err(e2s),
Ok(false)
);
}
#[test]
fn leading_unordered() {
let c = CheckerBuilder::new()
.text("
.text(
"
unordered: two
unordered: three
check: four
")
",
)
.unwrap()
.finish();
assert_eq!(c.check("one two three four", NO_VARIABLES).map_err(e2s),
Ok(true));
assert_eq!(c.check("one three two four", NO_VARIABLES).map_err(e2s),
Ok(true));
assert_eq!(
c.check("one two three four", NO_VARIABLES).map_err(e2s),
Ok(true)
);
assert_eq!(
c.check("one three two four", NO_VARIABLES).map_err(e2s),
Ok(true)
);
assert_eq!(c.check("one two four three four", NO_VARIABLES)
.map_err(e2s),
Ok(true));
assert_eq!(c.check("one three four two four", NO_VARIABLES)
.map_err(e2s),
Ok(true));
assert_eq!(
c.check("one two four three four", NO_VARIABLES).map_err(
e2s,
),
Ok(true)
);
assert_eq!(
c.check("one three four two four", NO_VARIABLES).map_err(
e2s,
),
Ok(true)
);
assert_eq!(c.check("one two four three", NO_VARIABLES).map_err(e2s),
Ok(false));
assert_eq!(c.check("one three four two", NO_VARIABLES).map_err(e2s),
Ok(false));
assert_eq!(
c.check("one two four three", NO_VARIABLES).map_err(e2s),
Ok(false)
);
assert_eq!(
c.check("one three four two", NO_VARIABLES).map_err(e2s),
Ok(false)
);
}
#[test]
fn trailing_unordered() {
let c = CheckerBuilder::new()
.text("
.text(
"
check: one
unordered: two
unordered: three
")
",
)
.unwrap()
.finish();
assert_eq!(c.check("one two three four", NO_VARIABLES).map_err(e2s),
Ok(true));
assert_eq!(c.check("one three two four", NO_VARIABLES).map_err(e2s),
Ok(true));
assert_eq!(
c.check("one two three four", NO_VARIABLES).map_err(e2s),
Ok(true)
);
assert_eq!(
c.check("one three two four", NO_VARIABLES).map_err(e2s),
Ok(true)
);
assert_eq!(c.check("one two four three four", NO_VARIABLES)
.map_err(e2s),
Ok(true));
assert_eq!(c.check("one three four two four", NO_VARIABLES)
.map_err(e2s),
Ok(true));
assert_eq!(
c.check("one two four three four", NO_VARIABLES).map_err(
e2s,
),
Ok(true)
);
assert_eq!(
c.check("one three four two four", NO_VARIABLES).map_err(
e2s,
),
Ok(true)
);
assert_eq!(c.check("one two four three", NO_VARIABLES).map_err(e2s),
Ok(true));
assert_eq!(c.check("one three four two", NO_VARIABLES).map_err(e2s),
Ok(true));
assert_eq!(
c.check("one two four three", NO_VARIABLES).map_err(e2s),
Ok(true)
);
assert_eq!(
c.check("one three four two", NO_VARIABLES).map_err(e2s),
Ok(true)
);
}


@@ -12,7 +12,8 @@ use std::hash::Hash;
/// Permanent structure used for translating into Cretonne IL.
pub struct ILBuilder<Variable>
where Variable: EntityRef + Hash + Default
where
Variable: EntityRef + Hash + Default,
{
ssa: SSABuilder<Variable>,
ebbs: EntityMap<Ebb, EbbData>,
@@ -23,7 +24,8 @@ pub struct ILBuilder<Variable>
/// Temporary object used to build a Cretonne IL `Function`.
pub struct FunctionBuilder<'a, Variable: 'a>
where Variable: EntityRef + Hash + Default
where
Variable: EntityRef + Hash + Default,
{
func: &'a mut Function,
builder: &'a mut ILBuilder<Variable>,
@@ -44,7 +46,8 @@ struct Position {
}
impl<Variable> ILBuilder<Variable>
where Variable: EntityRef + Hash + Default
where
Variable: EntityRef + Hash + Default,
{
/// Creates an ILBuilder structure. The structure is automatically cleared each time it is
/// passed to a [`FunctionBuilder`](struct.FunctionBuilder.html) for creation.
@@ -68,7 +71,8 @@ impl<Variable> ILBuilder<Variable>
/// Implementation of the [`InstBuilder`](../cretonne/ir/builder/trait.InstBuilder.html) that has
/// one convenience method per Cretonne IL instruction.
pub struct FuncInstBuilder<'short, 'long: 'short, Variable: 'long>
where Variable: EntityRef + Hash + Default
where
Variable: EntityRef + Hash + Default,
{
builder: &'short mut FunctionBuilder<'long, Variable>,
ebb: Ebb,
@@ -103,7 +107,7 @@ impl<'short, 'long, Variable> InstBuilderBase<'short> for FuncInstBuilder<'short
self.builder
.check_return_args(data.arguments(&self.builder.func.dfg.value_lists))
}
// We only insert the Ebb in the layout when an instruction is added to it
// We only insert the Ebb in the layout when an instruction is added to it
if self.builder.builder.ebbs[self.builder.position.ebb].pristine {
if !self.builder
.func
@@ -125,9 +129,9 @@ impl<'short, 'long, Variable> InstBuilderBase<'short> for FuncInstBuilder<'short
if data.opcode().is_branch() {
match data.branch_destination() {
Some(dest_ebb) => {
// If the user has supplied jump arguments we must adapt the arguments of
// the destination ebb
// TODO: find a way not to allocate a vector
// If the user has supplied jump arguments we must adapt the arguments of
// the destination ebb
// TODO: find a way not to allocate a vector
let args_types: Vec<Type> =
match data.analyze_branch(&self.builder.func.dfg.value_lists) {
BranchInfo::SingleDest(_, args) => {
@@ -142,14 +146,14 @@ impl<'short, 'long, Variable> InstBuilderBase<'short> for FuncInstBuilder<'short
self.builder.declare_successor(dest_ebb, inst);
}
None => {
// branch_destination() doesn't detect jump_tables
// branch_destination() doesn't detect jump_tables
match data {
// If jump table we declare all entries successor
// TODO: not collect with vector?
// If jump table we declare all entries successor
// TODO: not collect with vector?
InstructionData::BranchTable { table, .. } => {
// Unlike all other jumps/branches, jump tables are
// capable of having the same successor appear
// multiple times. Use a HashSet to deduplicate.
// Unlike all other jumps/branches, jump tables are
// capable of having the same successor appear
// multiple times. Use a HashSet to deduplicate.
let mut unique = HashSet::new();
for dest_ebb in self.builder
.func
@@ -163,7 +167,7 @@ impl<'short, 'long, Variable> InstBuilderBase<'short> for FuncInstBuilder<'short
self.builder.declare_successor(dest_ebb, inst)
}
}
// If not we do nothing
// If not we do nothing
_ => {}
}
}
@@ -211,13 +215,15 @@ impl<'short, 'long, Variable> InstBuilderBase<'short> for FuncInstBuilder<'short
/// `Ebb` when you haven't filled the current one with a terminator instruction, inserting a
/// return instruction with arguments that don't match the function's signature.
impl<'a, Variable> FunctionBuilder<'a, Variable>
where Variable: EntityRef + Hash + Default
where
Variable: EntityRef + Hash + Default,
{
/// Creates a new FunctionBuilder structure that will operate on a `Function` using a
/// `IlBuilder`.
pub fn new(func: &'a mut Function,
builder: &'a mut ILBuilder<Variable>)
-> FunctionBuilder<'a, Variable> {
pub fn new(
func: &'a mut Function,
builder: &'a mut ILBuilder<Variable>,
) -> FunctionBuilder<'a, Variable> {
builder.clear();
FunctionBuilder {
func: func,
@@ -255,12 +261,16 @@ impl<'a, Variable> FunctionBuilder<'a, Variable>
}
if !self.builder.ebbs[self.position.ebb].pristine {
// First we check that the previous block has been filled.
debug_assert!(self.is_unreachable() || self.builder.ebbs[self.position.ebb].filled,
"you have to fill your block before switching");
debug_assert!(
self.is_unreachable() || self.builder.ebbs[self.position.ebb].filled,
"you have to fill your block before switching"
);
}
// We cannot switch to a filled block
debug_assert!(!self.builder.ebbs[ebb].filled,
"you cannot switch to a block which is already filled");
debug_assert!(
!self.builder.ebbs[ebb].filled,
"you cannot switch to a block which is already filled"
);
let basic_block = self.builder.ssa.header_block(ebb);
// Then we change the cursor position.
@@ -278,12 +288,12 @@ impl<'a, Variable> FunctionBuilder<'a, Variable>
/// created. Forgetting to call this method on every block will cause inconsistencies in the
/// produced functions.
pub fn seal_block(&mut self, ebb: Ebb) {
let side_effects = self.builder
.ssa
.seal_ebb_header_block(ebb,
&mut self.func.dfg,
&mut self.func.layout,
&mut self.func.jump_tables);
let side_effects = self.builder.ssa.seal_ebb_header_block(
ebb,
&mut self.func.dfg,
&mut self.func.layout,
&mut self.func.jump_tables,
);
self.handle_ssa_side_effects(side_effects);
}
@@ -295,18 +305,17 @@ impl<'a, Variable> FunctionBuilder<'a, Variable>
/// Returns the Cretonne IL value corresponding to the utilization at the current program
/// position of a previously defined user variable.
pub fn use_var(&mut self, var: Variable) -> Value {
let ty = *self.builder
.types
.get(var)
.expect("this variable is used but its type has not been declared");
let (val, side_effects) = self.builder
.ssa
.use_var(&mut self.func.dfg,
&mut self.func.layout,
&mut self.func.jump_tables,
var,
ty,
self.position.basic_block);
let ty = *self.builder.types.get(var).expect(
"this variable is used but its type has not been declared",
);
let (val, side_effects) = self.builder.ssa.use_var(
&mut self.func.dfg,
&mut self.func.layout,
&mut self.func.jump_tables,
var,
ty,
self.position.basic_block,
);
self.handle_ssa_side_effects(side_effects);
val
}
@@ -314,11 +323,15 @@ impl<'a, Variable> FunctionBuilder<'a, Variable>
/// Register a new definition of a user variable. Panics if the type of the value is not the
/// same as the type registered for the variable.
pub fn def_var(&mut self, var: Variable, val: Value) {
debug_assert!(self.func.dfg.value_type(val) == self.builder.types[var],
"the type of the value is not the type registered for the variable");
self.builder
.ssa
.def_var(var, val, self.position.basic_block);
debug_assert!(
self.func.dfg.value_type(val) == self.builder.types[var],
"the type of the value is not the type registered for the variable"
);
self.builder.ssa.def_var(
var,
val,
self.position.basic_block,
);
}
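A brief, hedged sketch of the round trip described by `def_var` and `use_var`; the type-declaration call is an assumption inferred from the "its type has not been declared" message and may have a different name:

builder.declare_var(x_var, I32);           // assumed declaration step for the variable's type
builder.def_var(x_var, some_value);        // bind the variable to an SSA value here
let x_now: Value = builder.use_var(x_var); // later reads yield the reaching definition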
/// Returns the value corresponding to the `i`-th argument of the function as defined by
@@ -369,7 +382,8 @@ impl<'a, Variable> FunctionBuilder<'a, Variable>
/// function. The functions below help you inspect the function you're creating and modify it
/// in ways that can be unsafe if used incorrectly.
impl<'a, Variable> FunctionBuilder<'a, Variable>
where Variable: EntityRef + Hash + Default
where
Variable: EntityRef + Hash + Default,
{
/// Retrieves all the arguments for an `Ebb` currently inferred from the jump instructions
/// inserted that target it and the SSA construction.
@@ -402,15 +416,16 @@ impl<'a, Variable> FunctionBuilder<'a, Variable>
/// **Note:** You are responsible for maintaining the coherence with the arguments of
/// other jump instructions.
pub fn change_jump_destination(&mut self, inst: Inst, new_dest: Ebb) {
let old_dest =
self.func.dfg[inst]
.branch_destination_mut()
.expect("you want to change the jump destination of a non-jump instruction");
let old_dest = self.func.dfg[inst].branch_destination_mut().expect(
"you want to change the jump destination of a non-jump instruction",
);
let pred = self.builder.ssa.remove_ebb_predecessor(*old_dest, inst);
*old_dest = new_dest;
self.builder
.ssa
.declare_ebb_predecessor(new_dest, pred, inst);
self.builder.ssa.declare_ebb_predecessor(
new_dest,
pred,
inst,
);
}
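A short usage sketch, assuming `branch_inst` was previously emitted through this builder and already targets some Ebb:

// Redirect the branch; SSA predecessor bookkeeping is updated internally,
// but keeping the jump arguments coherent remains the caller's job.
builder.change_jump_destination(branch_inst, new_dest_ebb);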
/// Returns `true` if and only if the current `Ebb` is sealed and has no predecessors declared.
@@ -422,7 +437,7 @@ impl<'a, Variable> FunctionBuilder<'a, Variable>
Some(entry) => self.position.ebb == entry,
};
(!is_entry && self.builder.ssa.is_sealed(self.position.ebb) &&
self.builder.ssa.predecessors(self.position.ebb).is_empty())
self.builder.ssa.predecessors(self.position.ebb).is_empty())
}
/// Returns `true` if and only if no instructions have been added since the last call to
@@ -446,31 +461,31 @@ impl<'a, Variable> FunctionBuilder<'a, Variable>
}
impl<'a, Variable> Drop for FunctionBuilder<'a, Variable>
where Variable: EntityRef + Hash + Default
where
Variable: EntityRef + Hash + Default,
{
/// When a `FunctionBuilder` goes out of scope, it means that the function is fully built.
/// We then proceed to check if all the `Ebb`s are filled and sealed
fn drop(&mut self) {
debug_assert!(self.builder
.ebbs
.keys()
.all(|ebb| {
self.builder.ebbs[ebb].pristine ||
(self.builder.ssa.is_sealed(ebb) &&
self.builder.ebbs[ebb].filled)
}),
"all blocks should be filled and sealed before dropping a FunctionBuilder")
debug_assert!(
self.builder.ebbs.keys().all(|ebb| {
self.builder.ebbs[ebb].pristine ||
(self.builder.ssa.is_sealed(ebb) && self.builder.ebbs[ebb].filled)
}),
"all blocks should be filled and sealed before dropping a FunctionBuilder"
)
}
}
// Helper functions
impl<'a, Variable> FunctionBuilder<'a, Variable>
where Variable: EntityRef + Hash + Default
where
Variable: EntityRef + Hash + Default,
{
fn move_to_next_basic_block(&mut self) {
self.position.basic_block = self.builder
.ssa
.declare_ebb_body_block(self.position.basic_block);
self.position.basic_block = self.builder.ssa.declare_ebb_body_block(
self.position.basic_block,
);
}
fn fill_current_block(&mut self) {
@@ -478,30 +493,36 @@ impl<'a, Variable> FunctionBuilder<'a, Variable>
}
fn declare_successor(&mut self, dest_ebb: Ebb, jump_inst: Inst) {
self.builder
.ssa
.declare_ebb_predecessor(dest_ebb, self.position.basic_block, jump_inst);
self.builder.ssa.declare_ebb_predecessor(
dest_ebb,
self.position.basic_block,
jump_inst,
);
}
fn check_return_args(&self, args: &[Value]) {
debug_assert_eq!(args.len(),
self.func.signature.return_types.len(),
"the number of returned values doesn't match the function signature ");
debug_assert_eq!(
args.len(),
self.func.signature.return_types.len(),
"the number of returned values doesn't match the function signature "
);
for (i, arg) in args.iter().enumerate() {
let valty = self.func.dfg.value_type(*arg);
debug_assert_eq!(valty,
self.func.signature.return_types[i].value_type,
"the types of the values returned don't match the \
function signature");
debug_assert_eq!(
valty,
self.func.signature.return_types[i].value_type,
"the types of the values returned don't match the \
function signature"
);
}
}
fn fill_function_args_values(&mut self, ebb: Ebb) {
debug_assert!(self.pristine);
for argtyp in &self.func.signature.argument_types {
self.builder
.function_args_values
.push(self.func.dfg.append_ebb_arg(ebb, argtyp.value_type));
self.builder.function_args_values.push(
self.func.dfg.append_ebb_arg(ebb, argtyp.value_type),
);
}
self.pristine = false;
}
@@ -510,48 +531,56 @@ impl<'a, Variable> FunctionBuilder<'a, Variable>
fn ebb_args_adjustement(&mut self, dest_ebb: Ebb, jump_args: &[Type]) {
let ty_to_append: Option<Vec<Type>> =
if self.builder.ssa.predecessors(dest_ebb).len() == 0 ||
self.builder.ebbs[dest_ebb].pristine {
self.builder.ebbs[dest_ebb].pristine
{
// This is the first jump instruction targeting this Ebb
// so the jump arguments supplied here are this Ebb's arguments
// However some of the arguments might already be there
// in the Ebb so we have to check they're consistent
let dest_ebb_args = self.func.dfg.ebb_args(dest_ebb);
debug_assert!(dest_ebb_args
.iter()
.zip(jump_args.iter().take(dest_ebb_args.len()))
.all(|(dest_arg, jump_arg)| {
*jump_arg == self.func.dfg.value_type(*dest_arg)
}),
"the jump argument supplied has not the \
same type as the corresponding dest ebb argument");
debug_assert!(
dest_ebb_args
.iter()
.zip(jump_args.iter().take(dest_ebb_args.len()))
.all(|(dest_arg, jump_arg)| {
*jump_arg == self.func.dfg.value_type(*dest_arg)
}),
"the jump argument supplied has not the \
same type as the corresponding dest ebb argument"
);
self.builder.ebbs[dest_ebb].user_arg_count = jump_args.len();
Some(jump_args
.iter()
.skip(dest_ebb_args.len())
.cloned()
.collect())
Some(
jump_args
.iter()
.skip(dest_ebb_args.len())
.cloned()
.collect(),
)
} else {
let dest_ebb_args = self.func.dfg.ebb_args(dest_ebb);
// The Ebb already has predecessors
// We check that the arguments supplied match those supplied
// previously.
debug_assert!(jump_args.len() == self.builder.ebbs[dest_ebb].user_arg_count,
"the jump instruction doesn't have the same \
debug_assert!(
jump_args.len() == self.builder.ebbs[dest_ebb].user_arg_count,
"the jump instruction doesn't have the same \
number of arguments as its destination Ebb \
({} vs {}).",
jump_args.len(),
dest_ebb_args.len());
debug_assert!(jump_args
.iter()
.zip(dest_ebb_args
.iter()
.take(self.builder.ebbs[dest_ebb].user_arg_count)
)
.all(|(jump_arg, dest_arg)| {
*jump_arg == self.func.dfg.value_type(*dest_arg)
}),
"the jump argument supplied has not the \
same type as the corresponding dest ebb argument");
jump_args.len(),
dest_ebb_args.len()
);
debug_assert!(
jump_args
.iter()
.zip(dest_ebb_args.iter().take(
self.builder.ebbs[dest_ebb].user_arg_count,
))
.all(|(jump_arg, dest_arg)| {
*jump_arg == self.func.dfg.value_type(*dest_arg)
}),
"the jump argument supplied has not the \
same type as the corresponding dest ebb argument"
);
None
};
if let Some(ty_args) = ty_to_append {

View File

@@ -33,7 +33,8 @@ use std::collections::HashMap;
/// and it is said _sealed_ if all of its predecessors have been declared. Only filled predecessors
/// can be declared.
pub struct SSABuilder<Variable>
where Variable: EntityRef + Default
where
Variable: EntityRef + Default,
{
// Records for every variable and for every relevant block, the last definition of
// the variable in the block.
@@ -133,7 +134,8 @@ impl ReservedValue for Block {
}
impl<Variable> SSABuilder<Variable>
where Variable: EntityRef + Default
where
Variable: EntityRef + Default,
{
/// Allocate a new blank SSA builder struct. Use the API function to interact with the struct.
pub fn new() -> SSABuilder<Variable> {
@@ -191,7 +193,8 @@ enum UseVarCases {
/// Phi functions.
///
impl<Variable> SSABuilder<Variable>
where Variable: EntityRef + Hash + Default
where
Variable: EntityRef + Hash + Default,
{
/// Declares a new definition of a variable in a given basic block.
/// The SSA value is passed as an argument because it should be created with
@@ -207,14 +210,15 @@ impl<Variable> SSABuilder<Variable>
/// If the variable has never been defined in this block or recursively in its predecessors,
/// this method will silently create an initializer with `iconst` or `fconst`. You are
/// responsible for making sure that you initialize your variables.
pub fn use_var(&mut self,
dfg: &mut DataFlowGraph,
layout: &mut Layout,
jts: &mut JumpTables,
var: Variable,
ty: Type,
block: Block)
-> (Value, SideEffects) {
pub fn use_var(
&mut self,
dfg: &mut DataFlowGraph,
layout: &mut Layout,
jts: &mut JumpTables,
var: Variable,
ty: Type,
block: Block,
) -> (Value, SideEffects) {
// First we lookup for the current definition of the variable in this block
if let Some(var_defs) = self.variables.get(var) {
if let Some(val) = var_defs.get(&block) {
@@ -281,13 +285,12 @@ impl<Variable> SSABuilder<Variable>
/// here and the block is not sealed.
/// Predecessors have to be added with `declare_ebb_predecessor`.
pub fn declare_ebb_header_block(&mut self, ebb: Ebb) -> Block {
let block = self.blocks
.push(BlockData::EbbHeader(EbbHeaderBlockData {
predecessors: Vec::new(),
sealed: false,
ebb: ebb,
undef_variables: Vec::new(),
}));
let block = self.blocks.push(BlockData::EbbHeader(EbbHeaderBlockData {
predecessors: Vec::new(),
sealed: false,
ebb: ebb,
undef_variables: Vec::new(),
}));
self.ebb_headers[ebb] = block.into();
block
}
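For orientation, a hedged sketch of the low-level `SSABuilder` flow these methods support, mirroring the shape of the tests further down; the `ebb0`/`ebb1`, `x_var`, `x_val`, and `branch_inst` bindings are assumed to come from a setup like the one in those tests:

let block0 = ssa.declare_ebb_header_block(ebb0);
ssa.def_var(x_var, x_val, block0);
// ... emit a branch from block0 to ebb1, captured here as branch_inst ...
let block1 = ssa.declare_ebb_header_block(ebb1);
ssa.declare_ebb_predecessor(ebb1, block0, branch_inst);
// Sealing resolves pending lookups and may append Ebb arguments (the phi functions).
ssa.seal_ebb_header_block(ebb1, &mut func.dfg, &mut func.layout, &mut func.jump_tables);
let (x_here, _side_effects) =
    ssa.use_var(&mut func.dfg, &mut func.layout, &mut func.jump_tables, x_var, I32, block1);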
@@ -331,12 +334,13 @@ impl<Variable> SSABuilder<Variable>
///
/// This method modifies the function's `Layout` by adding arguments to the `Ebb`s to
/// take into account the Phi functions placed by the SSA algorithm.
pub fn seal_ebb_header_block(&mut self,
ebb: Ebb,
dfg: &mut DataFlowGraph,
layout: &mut Layout,
jts: &mut JumpTables)
-> SideEffects {
pub fn seal_ebb_header_block(
&mut self,
ebb: Ebb,
dfg: &mut DataFlowGraph,
layout: &mut Layout,
jts: &mut JumpTables,
) -> SideEffects {
let block = self.header_block(ebb);
// Sanity check
@@ -362,19 +366,24 @@ impl<Variable> SSABuilder<Variable>
// jump argument to the branch instruction.
// Panics if called with a non-header block.
// Returns the list of newly created ebbs for critical edge splitting.
fn resolve_undef_vars(&mut self,
block: Block,
dfg: &mut DataFlowGraph,
layout: &mut Layout,
jts: &mut JumpTables)
-> SideEffects {
fn resolve_undef_vars(
&mut self,
block: Block,
dfg: &mut DataFlowGraph,
layout: &mut Layout,
jts: &mut JumpTables,
) -> SideEffects {
// TODO: find a way to not allocate vectors
let (predecessors, undef_vars, ebb): (Vec<(Block, Inst)>,
Vec<(Variable, Value)>,
Ebb) = match self.blocks[block] {
BlockData::EbbBody { .. } => panic!("this should not happen"),
BlockData::EbbHeader(ref mut data) => {
(data.predecessors.clone(), data.undef_variables.clone(), data.ebb)
(
data.predecessors.clone(),
data.undef_variables.clone(),
data.ebb,
)
}
};
@@ -384,12 +393,13 @@ impl<Variable> SSABuilder<Variable>
for (var, val) in undef_vars {
let (_, mut local_side_effects) =
self.predecessors_lookup(dfg, layout, jts, val, var, ebb, &predecessors);
side_effects
.split_ebbs_created
.append(&mut local_side_effects.split_ebbs_created);
side_effects
.instructions_added_to_ebbs
.append(&mut local_side_effects.instructions_added_to_ebbs);
side_effects.split_ebbs_created.append(
&mut local_side_effects
.split_ebbs_created,
);
side_effects.instructions_added_to_ebbs.append(
&mut local_side_effects.instructions_added_to_ebbs,
);
}
// Then we clear the undef_vars and mark the block as sealed.
@@ -405,15 +415,16 @@ impl<Variable> SSABuilder<Variable>
/// Looks up the def for a value in the predecessors of an Ebb and decides whether or not
/// to keep the ebb arg, acting accordingly. Returns the chosen value and optionally a
/// list of Ebbs that are in the middle of newly created critical edge splits.
fn predecessors_lookup(&mut self,
dfg: &mut DataFlowGraph,
layout: &mut Layout,
jts: &mut JumpTables,
temp_arg_val: Value,
temp_arg_var: Variable,
dest_ebb: Ebb,
preds: &[(Block, Inst)])
-> (Value, SideEffects) {
fn predecessors_lookup(
&mut self,
dfg: &mut DataFlowGraph,
layout: &mut Layout,
jts: &mut JumpTables,
temp_arg_val: Value,
temp_arg_var: Variable,
dest_ebb: Ebb,
preds: &[(Block, Inst)],
) -> (Value, SideEffects) {
let mut pred_values: ZeroOneOrMore<Value> = ZeroOneOrMore::Zero();
// TODO: find a way to not allocate a vector
let mut jump_args_to_append: Vec<(Block, Inst, Value)> = Vec::new();
@@ -442,12 +453,13 @@ impl<Variable> SSABuilder<Variable>
ZeroOneOrMore::More() => ZeroOneOrMore::More(),
};
jump_args_to_append.push((pred, last_inst, pred_val));
side_effects
.split_ebbs_created
.append(&mut local_side_effects.split_ebbs_created);
side_effects
.instructions_added_to_ebbs
.append(&mut local_side_effects.instructions_added_to_ebbs);
side_effects.split_ebbs_created.append(
&mut local_side_effects
.split_ebbs_created,
);
side_effects.instructions_added_to_ebbs.append(
&mut local_side_effects.instructions_added_to_ebbs,
);
}
match pred_values {
ZeroOneOrMore::Zero() => {
@@ -486,14 +498,16 @@ impl<Variable> SSABuilder<Variable>
// There is disagreement in the predecessors on which value to use so we have
// to keep the ebb argument.
for (pred_block, last_inst, pred_val) in jump_args_to_append {
match self.append_jump_argument(dfg,
layout,
last_inst,
pred_block,
dest_ebb,
pred_val,
temp_arg_var,
jts) {
match self.append_jump_argument(
dfg,
layout,
last_inst,
pred_block,
dest_ebb,
pred_val,
temp_arg_var,
jts,
) {
None => (),
Some(middle_ebb) => side_effects.split_ebbs_created.push(middle_ebb),
};
@@ -505,16 +519,17 @@ impl<Variable> SSABuilder<Variable>
/// Appends a jump argument to a jump instruction, returning the Ebb created in case of
/// critical edge splitting.
fn append_jump_argument(&mut self,
dfg: &mut DataFlowGraph,
layout: &mut Layout,
jump_inst: Inst,
jump_inst_block: Block,
dest_ebb: Ebb,
val: Value,
var: Variable,
jts: &mut JumpTables)
-> Option<Ebb> {
fn append_jump_argument(
&mut self,
dfg: &mut DataFlowGraph,
layout: &mut Layout,
jump_inst: Inst,
jump_inst_block: Block,
dest_ebb: Ebb,
val: Value,
var: Variable,
jts: &mut JumpTables,
) -> Option<Ebb> {
match dfg[jump_inst].analyze_branch(&dfg.value_lists) {
BranchInfo::NotABranch => {
panic!("you have declared a non-branch instruction as a predecessor to an ebb");
@@ -529,14 +544,17 @@ impl<Variable> SSABuilder<Variable>
// In the case of a jump table, the situation is tricky because br_table doesn't
// support arguments.
// We have to split the critical edge
let indexes: Vec<usize> = jts[jt]
.entries()
.fold(Vec::new(), |mut acc, (index, dest)| if dest == dest_ebb {
let indexes: Vec<usize> = jts[jt].entries().fold(
Vec::new(),
|mut acc, (index, dest)| if dest ==
dest_ebb
{
acc.push(index);
acc
} else {
acc
});
},
);
let middle_ebb = dfg.make_ebb();
layout.append_ebb(middle_ebb);
let block = self.declare_ebb_header_block(middle_ebb);
@@ -632,79 +650,95 @@ mod tests {
};
ssa.def_var(y_var, y_ssa, block);
assert_eq!(ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block)
.0,
x_ssa);
assert_eq!(ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block)
.0,
y_ssa);
assert_eq!(
ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block,
).0,
x_ssa
);
assert_eq!(
ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block,
).0,
y_ssa
);
let z_var = Variable(2);
let x_use1 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block)
.0;
let y_use1 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block)
.0;
let x_use1 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block,
).0;
let y_use1 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block,
).0;
let z1_ssa = {
let cur = &mut Cursor::new(&mut func.layout);
cur.goto_bottom(ebb0);
func.dfg.ins(cur).iadd(x_use1, y_use1)
};
ssa.def_var(z_var, z1_ssa, block);
assert_eq!(ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block)
.0,
z1_ssa);
let x_use2 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block)
.0;
let z_use1 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block)
.0;
assert_eq!(
ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block,
).0,
z1_ssa
);
let x_use2 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block,
).0;
let z_use1 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block,
).0;
let z2_ssa = {
let cur = &mut Cursor::new(&mut func.layout);
cur.goto_bottom(ebb0);
func.dfg.ins(cur).iadd(x_use2, z_use1)
};
ssa.def_var(z_var, z2_ssa, block);
assert_eq!(ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block)
.0,
z2_ssa);
assert_eq!(
ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block,
).0,
z2_ssa
);
}
#[test]
@@ -740,79 +774,93 @@ mod tests {
func.dfg.ins(cur).iconst(I32, 2)
};
ssa.def_var(y_var, y_ssa, block0);
assert_eq!(ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block0)
.0,
x_ssa);
assert_eq!(ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block0)
.0,
y_ssa);
assert_eq!(
ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block0,
).0,
x_ssa
);
assert_eq!(
ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block0,
).0,
y_ssa
);
let z_var = Variable(2);
let x_use1 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block0)
.0;
let y_use1 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block0)
.0;
let x_use1 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block0,
).0;
let y_use1 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block0,
).0;
let z1_ssa = {
let cur = &mut Cursor::new(&mut func.layout);
cur.goto_bottom(ebb0);
func.dfg.ins(cur).iadd(x_use1, y_use1)
};
ssa.def_var(z_var, z1_ssa, block0);
assert_eq!(ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block0)
.0,
z1_ssa);
let y_use2 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block0)
.0;
assert_eq!(
ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block0,
).0,
z1_ssa
);
let y_use2 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block0,
).0;
let jump_inst: Inst = {
let cur = &mut Cursor::new(&mut func.layout);
cur.goto_bottom(ebb0);
func.dfg.ins(cur).brnz(y_use2, ebb1, &[])
};
let block1 = ssa.declare_ebb_body_block(block0);
let x_use2 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block1)
.0;
let x_use2 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block1,
).0;
assert_eq!(x_use2, x_ssa);
let z_use1 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block1)
.0;
let z_use1 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block1,
).0;
assert_eq!(z_use1, z1_ssa);
let z2_ssa = {
let cur = &mut Cursor::new(&mut func.layout);
@@ -820,33 +868,38 @@ mod tests {
func.dfg.ins(cur).iadd(x_use2, z_use1)
};
ssa.def_var(z_var, z2_ssa, block1);
assert_eq!(ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block1)
.0,
z2_ssa);
assert_eq!(
ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block1,
).0,
z2_ssa
);
ssa.seal_ebb_header_block(ebb0, &mut func.dfg, &mut func.layout, &mut func.jump_tables);
let block2 = ssa.declare_ebb_header_block(ebb1);
ssa.declare_ebb_predecessor(ebb1, block0, jump_inst);
ssa.seal_ebb_header_block(ebb1, &mut func.dfg, &mut func.layout, &mut func.jump_tables);
let x_use3 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block2)
.0;
let x_use3 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block2,
).0;
assert_eq!(x_ssa, x_use3);
let y_use3 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block2)
.0;
let y_use3 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block2,
).0;
assert_eq!(y_ssa, y_use3);
let y2_ssa = {
let cur = &mut Cursor::new(&mut func.layout);
@@ -897,14 +950,17 @@ mod tests {
func.dfg.ins(cur).iconst(I32, 1)
};
ssa.def_var(x_var, x1, block0);
assert_eq!(ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block0)
.0,
x1);
assert_eq!(
ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block0,
).0,
x1
);
let y_var = Variable(1);
let y1 = {
let cur = &mut Cursor::new(&mut func.layout);
@@ -912,30 +968,35 @@ mod tests {
func.dfg.ins(cur).iconst(I32, 2)
};
ssa.def_var(y_var, y1, block0);
assert_eq!(ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block0)
.0,
y1);
assert_eq!(
ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block0,
).0,
y1
);
let z_var = Variable(2);
let x2 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block0)
.0;
let x2 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block0,
).0;
assert_eq!(x2, x1);
let y2 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block0)
.0;
let y2 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block0,
).0;
assert_eq!(y2, y1);
let z1 = {
let cur = &mut Cursor::new(&mut func.layout);
@@ -950,33 +1011,36 @@ mod tests {
};
let block1 = ssa.declare_ebb_header_block(ebb1);
ssa.declare_ebb_predecessor(ebb1, block0, jump_ebb0_ebb1);
let z2 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block1)
.0;
let y3 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block1)
.0;
let z2 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block1,
).0;
let y3 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block1,
).0;
let z3 = {
let cur = &mut Cursor::new(&mut func.layout);
cur.goto_bottom(ebb1);
func.dfg.ins(cur).iadd(z2, y3)
};
ssa.def_var(z_var, z3, block1);
let y4 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block1)
.0;
let y4 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block1,
).0;
assert_eq!(y4, y3);
let jump_ebb1_ebb2 = {
let cur = &mut Cursor::new(&mut func.layout);
@@ -984,34 +1048,37 @@ mod tests {
func.dfg.ins(cur).brnz(y4, ebb2, &[])
};
let block2 = ssa.declare_ebb_body_block(block1);
let z4 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block2)
.0;
let z4 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block2,
).0;
assert_eq!(z4, z3);
let x3 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block2)
.0;
let x3 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block2,
).0;
let z5 = {
let cur = &mut Cursor::new(&mut func.layout);
cur.goto_bottom(ebb1);
func.dfg.ins(cur).isub(z4, x3)
};
ssa.def_var(z_var, z5, block2);
let y5 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block2)
.0;
let y5 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block2,
).0;
assert_eq!(y5, y3);
{
let cur = &mut Cursor::new(&mut func.layout);
@@ -1022,21 +1089,23 @@ mod tests {
let block3 = ssa.declare_ebb_header_block(ebb2);
ssa.declare_ebb_predecessor(ebb2, block1, jump_ebb1_ebb2);
ssa.seal_ebb_header_block(ebb2, &mut func.dfg, &mut func.layout, &mut func.jump_tables);
let y6 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block3)
.0;
let y6 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block3,
).0;
assert_eq!(y6, y3);
let x4 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block3)
.0;
let x4 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block3,
).0;
assert_eq!(x4, x3);
let y7 = {
let cur = &mut Cursor::new(&mut func.layout);
@@ -1089,13 +1158,14 @@ mod tests {
let mut jt_data = JumpTableData::new();
jt_data.set_entry(0, ebb1);
let jt = func.jump_tables.push(jt_data);
ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block0)
.0;
ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block0,
).0;
let br_table = {
let cur = &mut Cursor::new(&mut func.layout);
cur.goto_bottom(ebb0);
@@ -1117,13 +1187,14 @@ mod tests {
ssa.declare_ebb_predecessor(ebb1, block1, jump_inst);
ssa.declare_ebb_predecessor(ebb1, block0, br_table);
ssa.seal_ebb_header_block(ebb1, &mut func.dfg, &mut func.layout, &mut func.jump_tables);
let x4 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block2)
.0;
let x4 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block2,
).0;
{
let cur = &mut Cursor::new(&mut func.layout);
cur.goto_bottom(ebb1);
@@ -1189,21 +1260,23 @@ mod tests {
};
let block1 = ssa.declare_ebb_header_block(ebb1);
ssa.declare_ebb_predecessor(ebb1, block0, jump_inst);
let z2 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block1)
.0;
let z2 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
z_var,
I32,
block1,
).0;
assert_eq!(func.dfg.ebb_args(ebb1)[0], z2);
let x2 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block1)
.0;
let x2 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block1,
).0;
assert_eq!(func.dfg.ebb_args(ebb1)[1], x2);
let x3 = {
let cur = &mut Cursor::new(&mut func.layout);
@@ -1211,20 +1284,22 @@ mod tests {
func.dfg.ins(cur).iadd(x2, z2)
};
ssa.def_var(x_var, x3, block1);
let x4 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block1)
.0;
let y3 = ssa.use_var(&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block1)
.0;
let x4 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
x_var,
I32,
block1,
).0;
let y3 = ssa.use_var(
&mut func.dfg,
&mut func.layout,
&mut func.jump_tables,
y_var,
I32,
block1,
).0;
assert_eq!(func.dfg.ebb_args(ebb1)[2], y3);
let y4 = {
let cur = &mut Cursor::new(&mut func.layout);

View File

@@ -36,7 +36,8 @@ impl IsaSpec {
/// Parse an iterator of command line options and apply them to `config`.
pub fn parse_options<'a, I>(iter: I, config: &mut Configurable, loc: &Location) -> Result<()>
where I: Iterator<Item = &'a str>
where
I: Iterator<Item = &'a str>,
{
for opt in iter.map(TestOption::new) {
match opt {

Some files were not shown because too many files have changed in this diff.