Remove rss prediction from api_calls fuzzer (#3156)
This functionality is now subsumed by the limiter built into all fuzzing stores, so there's no longer any need for it. It was also triggering arithmetic overflows in fuzzing, so instead of fixing it I'm removing it!
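For context, the limiter in question caps actual resource consumption at runtime rather than predicting it up front. Here's a minimal sketch of that mechanism against wasmtime's public `StoreLimitsBuilder`/`Store::limiter` API (the fuzzing stores configure this internally; the specific limit values below are illustrative):

```rust
use anyhow::Result;
use wasmtime::{Engine, Instance, Module, Store, StoreLimits, StoreLimitsBuilder};

struct StoreState {
    limits: StoreLimits,
}

fn main() -> Result<()> {
    let engine = Engine::default();

    // Cap what the store may actually allocate, rather than guessing from a
    // module's memory/table sections ahead of time.
    let limits = StoreLimitsBuilder::new()
        .memory_size(1 << 30) // at most 1 GiB of linear memory
        .instances(100) // and a bounded number of live instances
        .build();

    let mut store = Store::new(&engine, StoreState { limits });
    store.limiter(|state| &mut state.limits);

    // Instantiation or `memory.grow` beyond these limits now fails with an
    // error instead of OOM-ing the fuzz process.
    let module = Module::new(&engine, "(module (memory 2))")?;
    let _instance = Instance::new(&mut store, &module, &[])?;
    Ok(())
}
```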
@@ -15,9 +15,7 @@
 //! [swarm testing]: https://www.cs.utah.edu/~regehr/papers/swarm12.pdf
 
 use arbitrary::{Arbitrary, Unstructured};
-use std::collections::BTreeMap;
-use std::mem;
-use wasmparser::*;
+use std::collections::BTreeSet;
 
 #[derive(Arbitrary, Debug)]
 struct Swarm {
@@ -63,17 +61,8 @@ use ApiCall::*;
 #[derive(Default)]
 struct Scope {
     id_counter: usize,
-
-    /// Map from a module id to the predicted amount of rss it will take to
-    /// instantiate.
-    modules: BTreeMap<usize, usize>,
-
-    /// Map from an instance id to the amount of rss it's expected to be using.
-    instances: BTreeMap<usize, usize>,
-
-    /// The rough predicted maximum RSS of executing all of our generated API
-    /// calls thus far.
-    predicted_rss: usize,
+    modules: BTreeSet<usize>,
+    instances: BTreeSet<usize>,
 }
 
 impl Scope {
@@ -103,7 +92,6 @@ impl<'a> Arbitrary<'a> for ApiCalls {
         calls.push(StoreNew);
 
         let mut scope = Scope::default();
-        let max_rss = 1 << 30; // 1GB
 
         // Total limit on number of API calls we'll generate. This exists to
         // avoid libFuzzer timeouts.
@@ -121,41 +109,38 @@ impl<'a> Arbitrary<'a> for ApiCalls {
                 let id = scope.next_id();
                 let mut wasm = super::GeneratedModule::arbitrary(input)?;
                 wasm.ensure_termination(1000);
-                let predicted_rss = predict_rss(&wasm.to_bytes()).unwrap_or(0);
-                scope.modules.insert(id, predicted_rss);
+                scope.modules.insert(id);
                 Ok(ModuleNew { id, wasm })
             });
         }
         if swarm.module_drop && !scope.modules.is_empty() {
             choices.push(|input, scope| {
-                let modules: Vec<_> = scope.modules.keys().collect();
+                let modules: Vec<_> = scope.modules.iter().collect();
                 let id = **input.choose(&modules)?;
                 scope.modules.remove(&id);
                 Ok(ModuleDrop { id })
             });
         }
-        if swarm.instance_new && !scope.modules.is_empty() && scope.predicted_rss < max_rss {
+        if swarm.instance_new && !scope.modules.is_empty() {
             choices.push(|input, scope| {
                 let modules: Vec<_> = scope.modules.iter().collect();
-                let (&module, &predicted_rss) = *input.choose(&modules)?;
+                let module = **input.choose(&modules)?;
                 let id = scope.next_id();
-                scope.instances.insert(id, predicted_rss);
-                scope.predicted_rss += predicted_rss;
+                scope.instances.insert(id);
                 Ok(InstanceNew { id, module })
             });
         }
         if swarm.instance_drop && !scope.instances.is_empty() {
             choices.push(|input, scope| {
                 let instances: Vec<_> = scope.instances.iter().collect();
-                let (&id, &rss) = *input.choose(&instances)?;
+                let id = **input.choose(&instances)?;
                 scope.instances.remove(&id);
-                scope.predicted_rss -= rss;
                 Ok(InstanceDrop { id })
             });
         }
         if swarm.call_exported_func && !scope.instances.is_empty() {
             choices.push(|input, scope| {
-                let instances: Vec<_> = scope.instances.keys().collect();
+                let instances: Vec<_> = scope.instances.iter().collect();
                 let instance = **input.choose(&instances)?;
                 let nth = usize::arbitrary(input)?;
                 Ok(CallExportedFunc { instance, nth })
@@ -212,46 +197,3 @@ fn arbitrary_config(
 
     Ok(())
 }
-
-/// Attempt to heuristically predict how much rss instantiating the `wasm`
-/// provided will take in wasmtime.
-///
-/// The intention of this function is to prevent out-of-memory situations from
-/// trivially instantiating a bunch of modules. We're basically taking any
-/// random sequence of fuzz inputs and generating API calls, but if we
-/// instantiate a million things we'd reasonably expect that to exceed the fuzz
-/// limit of 2GB because, well, instantiation does take a bit of memory.
-///
-/// This prediction will prevent new instances from being created once we've
-/// created a bunch of instances. Once instances start being dropped, though,
-/// it'll free up new slots to start making new instances.
-fn predict_rss(wasm: &[u8]) -> Result<usize> {
-    let mut prediction = 0;
-    for payload in Parser::new(0).parse_all(wasm) {
-        match payload? {
-            // For each declared memory we'll have to map that all in, so add in
-            // the minimum amount of memory to our predicted rss.
-            Payload::MemorySection(s) => {
-                for entry in s {
-                    let initial = entry?.initial as usize;
-                    prediction += initial * 64 * 1024;
-                }
-            }
-
-            // We'll need to allocate tables and space for table elements, and
-            // currently this is 3 pointers per table entry.
-            Payload::TableSection(s) => {
-                for entry in s {
-                    let initial = entry?.initial as usize;
-                    prediction += initial * 3 * mem::size_of::<usize>();
-                }
-            }
-
-            // ... and for now nothing else is counted. If we run into issues
-            // with the fuzzers though we can always try to take into account
-            // more things
-            _ => {}
-        }
-    }
-    Ok(prediction)
-}
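For reference, here's the arithmetic the removed `predict_rss` heuristic performed, and how it can overflow on fuzz-generated inputs (the concrete numbers are illustrative, and pinning the overflow on this multiply is an inference from the commit message):

```rust
fn main() {
    // A module declaring `(memory 16)` and `(table 1000 funcref)` on a
    // 64-bit target would have been predicted as:
    let memory_pages: usize = 16;
    let table_elements: usize = 1000;

    let memory_rss = memory_pages * 64 * 1024; // 16 * 64 KiB = 1_048_576 bytes
    let table_rss = table_elements * 3 * std::mem::size_of::<usize>(); // 1000 * 24 = 24_000 bytes
    assert_eq!(memory_rss + table_rss, 1_072_576);

    // Fuzz inputs can declare absurdly large initial sizes, though, at which
    // point `initial * 64 * 1024` wraps around `usize` (a panic in debug
    // builds). A checked version makes the failure explicit:
    let huge: usize = usize::MAX / 2;
    assert!(huge.checked_mul(64 * 1024).is_none());
}
```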