fuzzgen: Add scalar float support (#4467)
* fuzzgen: Add float support

  Add support for generating floats and some float instructions.

* fuzzgen: Enable NaN Canonicalization

  Both IEEE 754 and the Wasm spec are somewhat loose about what may be returned
  from NaN-producing operations, and in practice the results differ between x86,
  AArch64, and other architectures. Even on the same host machine, the interpreter
  may produce a code sequence different from Cranelift's that generates different
  NaNs while still producing legal results according to the spec.

  These differences cause spurious failures in the fuzzer. To fix this, we enable
  the NaN canonicalization pass, which replaces any NaN produced with a single
  fixed canonical NaN value (see the sketch after this message).

* fuzzgen: Use `MultiAry` when inserting opcodes

  This deduplicates a few inserters!
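A hedged illustration of the NaN point above, not part of the patch: two results of the
same NaN-producing operation can both be spec-legal yet differ bit-for-bit, which is
exactly what trips up differential fuzzing. The canonical bit pattern used below
(0x7fc00000) is the usual f32 quiet NaN; treat the helper closure as a sketch of the
idea, not the pass's actual implementation.

    // Two NaNs that are both legal results of e.g. 0.0 / 0.0, but with different bits.
    fn main() {
        let a = f32::from_bits(0x7fc0_0000); // canonical quiet NaN
        let b = f32::from_bits(0xffc0_0001); // quiet NaN with sign and payload bits set
        assert!(a.is_nan() && b.is_nan());
        assert_ne!(a.to_bits(), b.to_bits()); // bitwise different, so naive comparison fails

        // Canonicalizing every NaN to one fixed value makes backend and interpreter
        // results comparable again.
        let canon = |x: f32| if x.is_nan() { f32::from_bits(0x7fc0_0000) } else { x };
        assert_eq!(canon(a).to_bits(), canon(b).to_bits());
    }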

Cargo.lock (generated)
@@ -584,6 +584,7 @@ dependencies = [
  "anyhow",
  "arbitrary",
  "cranelift",
+ "cranelift-native",
 ]

 [[package]]

cranelift/fuzzgen/Cargo.toml
@@ -12,6 +12,7 @@ publish = false

 [dependencies]
 cranelift = { path = "../umbrella", version = "0.87.0" }
+cranelift-native = { path = "../native", version = "0.87.0" }

 anyhow = "1.0.19"
 arbitrary = "1.0.0"

cranelift/fuzzgen/src/function_generator.rs
@@ -1,3 +1,4 @@
+use crate::codegen::ir::ValueList;
 use crate::config::Config;
 use anyhow::Result;
 use arbitrary::{Arbitrary, Unstructured};
@@ -14,36 +15,26 @@ use std::ops::RangeInclusive;

 type BlockSignature = Vec<Type>;

-fn insert_opcode_arity_0(
-    _fgen: &mut FunctionGenerator,
-    builder: &mut FunctionBuilder,
-    opcode: Opcode,
-    _args: &'static [Type],
-    _rets: &'static [Type],
-) -> Result<()> {
-    builder.ins().NullAry(opcode, INVALID);
-    Ok(())
-}
-
-fn insert_opcode_arity_2(
+fn insert_opcode(
     fgen: &mut FunctionGenerator,
     builder: &mut FunctionBuilder,
     opcode: Opcode,
     args: &'static [Type],
     rets: &'static [Type],
 ) -> Result<()> {
-    let arg0 = fgen.get_variable_of_type(args[0])?;
-    let arg0 = builder.use_var(arg0);
+    let mut arg_vals = ValueList::new();
+    for &arg in args.into_iter() {
+        let var = fgen.get_variable_of_type(arg)?;
+        let val = builder.use_var(var);
+        arg_vals.push(val, &mut builder.func.dfg.value_lists);
+    }

-    let arg1 = fgen.get_variable_of_type(args[1])?;
-    let arg1 = builder.use_var(arg1);
-
-    let typevar = rets[0];
-    let (inst, dfg) = builder.ins().Binary(opcode, typevar, arg0, arg1);
+    let typevar = rets.first().copied().unwrap_or(INVALID);
+    let (inst, dfg) = builder.ins().MultiAry(opcode, typevar, arg_vals);
     let results = dfg.inst_results(inst).to_vec();

-    for (val, ty) in results.into_iter().zip(rets) {
-        let var = fgen.get_variable_of_type(*ty)?;
+    for (val, &ty) in results.into_iter().zip(rets) {
+        let var = fgen.get_variable_of_type(ty)?;
         builder.def_var(var, val);
     }
     Ok(())
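A hedged aside, not part of the patch: for a concrete entry such as
(Opcode::Fadd, &[F32, F32], &[F32], insert_opcode), the generic MultiAry path above ends
up emitting the same instruction that the ordinary typed FunctionBuilder call would. The
helper below is purely illustrative; `builder`, `x`, and `y` are assumed to already exist.

    use cranelift::frontend::FunctionBuilder;
    use cranelift::prelude::{InstBuilder, Value};

    // Emit `fadd x, y` through the typed builder API; insert_opcode reaches the same
    // instruction generically via a ValueList and the opcode's controlling type.
    fn emit_fadd(builder: &mut FunctionBuilder, x: Value, y: Value) -> Value {
        builder.ins().fadd(x, y)
    }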
@@ -104,32 +95,84 @@ const OPCODE_SIGNATURES: &'static [(
     &'static [Type], // Rets
     OpcodeInserter,
 )] = &[
-    (Opcode::Nop, &[], &[], insert_opcode_arity_0),
+    (Opcode::Nop, &[], &[], insert_opcode),
     // Iadd
-    (Opcode::Iadd, &[I8, I8], &[I8], insert_opcode_arity_2),
-    (Opcode::Iadd, &[I16, I16], &[I16], insert_opcode_arity_2),
-    (Opcode::Iadd, &[I32, I32], &[I32], insert_opcode_arity_2),
-    (Opcode::Iadd, &[I64, I64], &[I64], insert_opcode_arity_2),
+    (Opcode::Iadd, &[I8, I8], &[I8], insert_opcode),
+    (Opcode::Iadd, &[I16, I16], &[I16], insert_opcode),
+    (Opcode::Iadd, &[I32, I32], &[I32], insert_opcode),
+    (Opcode::Iadd, &[I64, I64], &[I64], insert_opcode),
     // Isub
-    (Opcode::Isub, &[I8, I8], &[I8], insert_opcode_arity_2),
-    (Opcode::Isub, &[I16, I16], &[I16], insert_opcode_arity_2),
-    (Opcode::Isub, &[I32, I32], &[I32], insert_opcode_arity_2),
-    (Opcode::Isub, &[I64, I64], &[I64], insert_opcode_arity_2),
+    (Opcode::Isub, &[I8, I8], &[I8], insert_opcode),
+    (Opcode::Isub, &[I16, I16], &[I16], insert_opcode),
+    (Opcode::Isub, &[I32, I32], &[I32], insert_opcode),
+    (Opcode::Isub, &[I64, I64], &[I64], insert_opcode),
     // Imul
-    (Opcode::Imul, &[I8, I8], &[I8], insert_opcode_arity_2),
-    (Opcode::Imul, &[I16, I16], &[I16], insert_opcode_arity_2),
-    (Opcode::Imul, &[I32, I32], &[I32], insert_opcode_arity_2),
-    (Opcode::Imul, &[I64, I64], &[I64], insert_opcode_arity_2),
+    (Opcode::Imul, &[I8, I8], &[I8], insert_opcode),
+    (Opcode::Imul, &[I16, I16], &[I16], insert_opcode),
+    (Opcode::Imul, &[I32, I32], &[I32], insert_opcode),
+    (Opcode::Imul, &[I64, I64], &[I64], insert_opcode),
     // Udiv
-    (Opcode::Udiv, &[I8, I8], &[I8], insert_opcode_arity_2),
-    (Opcode::Udiv, &[I16, I16], &[I16], insert_opcode_arity_2),
-    (Opcode::Udiv, &[I32, I32], &[I32], insert_opcode_arity_2),
-    (Opcode::Udiv, &[I64, I64], &[I64], insert_opcode_arity_2),
+    (Opcode::Udiv, &[I8, I8], &[I8], insert_opcode),
+    (Opcode::Udiv, &[I16, I16], &[I16], insert_opcode),
+    (Opcode::Udiv, &[I32, I32], &[I32], insert_opcode),
+    (Opcode::Udiv, &[I64, I64], &[I64], insert_opcode),
     // Sdiv
-    (Opcode::Sdiv, &[I8, I8], &[I8], insert_opcode_arity_2),
-    (Opcode::Sdiv, &[I16, I16], &[I16], insert_opcode_arity_2),
-    (Opcode::Sdiv, &[I32, I32], &[I32], insert_opcode_arity_2),
-    (Opcode::Sdiv, &[I64, I64], &[I64], insert_opcode_arity_2),
+    (Opcode::Sdiv, &[I8, I8], &[I8], insert_opcode),
+    (Opcode::Sdiv, &[I16, I16], &[I16], insert_opcode),
+    (Opcode::Sdiv, &[I32, I32], &[I32], insert_opcode),
+    (Opcode::Sdiv, &[I64, I64], &[I64], insert_opcode),
+    // Fadd
+    (Opcode::Fadd, &[F32, F32], &[F32], insert_opcode),
+    (Opcode::Fadd, &[F64, F64], &[F64], insert_opcode),
+    // Fmul
+    (Opcode::Fmul, &[F32, F32], &[F32], insert_opcode),
+    (Opcode::Fmul, &[F64, F64], &[F64], insert_opcode),
+    // Fsub
+    (Opcode::Fsub, &[F32, F32], &[F32], insert_opcode),
+    (Opcode::Fsub, &[F64, F64], &[F64], insert_opcode),
+    // Fdiv
+    (Opcode::Fdiv, &[F32, F32], &[F32], insert_opcode),
+    (Opcode::Fdiv, &[F64, F64], &[F64], insert_opcode),
+    // Fmin
+    (Opcode::Fmin, &[F32, F32], &[F32], insert_opcode),
+    (Opcode::Fmin, &[F64, F64], &[F64], insert_opcode),
+    // Fmax
+    (Opcode::Fmax, &[F32, F32], &[F32], insert_opcode),
+    (Opcode::Fmax, &[F64, F64], &[F64], insert_opcode),
+    // FminPseudo
+    (Opcode::FminPseudo, &[F32, F32], &[F32], insert_opcode),
+    (Opcode::FminPseudo, &[F64, F64], &[F64], insert_opcode),
+    // FmaxPseudo
+    (Opcode::FmaxPseudo, &[F32, F32], &[F32], insert_opcode),
+    (Opcode::FmaxPseudo, &[F64, F64], &[F64], insert_opcode),
+    // Fcopysign
+    (Opcode::Fcopysign, &[F32, F32], &[F32], insert_opcode),
+    (Opcode::Fcopysign, &[F64, F64], &[F64], insert_opcode),
+    // Fma
+    // TODO: Missing on X86, see https://github.com/bytecodealliance/wasmtime/pull/4460
+    // (Opcode::Fma, &[F32, F32, F32], &[F32], insert_opcode),
+    // (Opcode::Fma, &[F64, F64, F64], &[F64], insert_opcode),
+    // Fabs
+    (Opcode::Fabs, &[F32], &[F32], insert_opcode),
+    (Opcode::Fabs, &[F64], &[F64], insert_opcode),
+    // Fneg
+    (Opcode::Fneg, &[F32], &[F32], insert_opcode),
+    (Opcode::Fneg, &[F64], &[F64], insert_opcode),
+    // Sqrt
+    (Opcode::Sqrt, &[F32], &[F32], insert_opcode),
+    (Opcode::Sqrt, &[F64], &[F64], insert_opcode),
+    // Ceil
+    (Opcode::Ceil, &[F32], &[F32], insert_opcode),
+    (Opcode::Ceil, &[F64], &[F64], insert_opcode),
+    // Floor
+    (Opcode::Floor, &[F32], &[F32], insert_opcode),
+    (Opcode::Floor, &[F64], &[F64], insert_opcode),
+    // Trunc
+    (Opcode::Trunc, &[F32], &[F32], insert_opcode),
+    (Opcode::Trunc, &[F64], &[F64], insert_opcode),
+    // Nearest
+    (Opcode::Nearest, &[F32], &[F32], insert_opcode),
+    (Opcode::Nearest, &[F64], &[F64], insert_opcode),
     // Stack Access
     (Opcode::StackStore, &[I8], &[], insert_stack_store),
     (Opcode::StackStore, &[I16], &[], insert_stack_store),
@@ -202,9 +245,8 @@ where
         let scalars = [
             // IFLAGS, FFLAGS,
             B1, // B8, B16, B32, B64, B128,
-            I8, I16, I32, I64,
-            // I128,
-            // F32, F64,
+            I8, I16, I32, I64, // I128,
+            F32, F64,
             // R32, R64,
         ];
         // TODO: vector types
@@ -284,6 +326,14 @@ where
                 builder.ins().iconst(ty, imm64)
             }
             ty if ty.is_bool() => builder.ins().bconst(ty, bool::arbitrary(self.u)?),
+            // f{32,64}::arbitrary does not generate a bunch of important values
+            // such as Signaling NaN's / NaN's with payload, so generate floats from integers.
+            F32 => builder
+                .ins()
+                .f32const(f32::from_bits(u32::arbitrary(self.u)?)),
+            F64 => builder
+                .ins()
+                .f64const(f64::from_bits(u64::arbitrary(self.u)?)),
             _ => unimplemented!(),
         })
     }
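A hedged illustration, not part of the patch, of why the new float constants are built
from raw bits rather than from f32::arbitrary / f64::arbitrary: driving from_bits with an
arbitrary integer reaches bit patterns such as signaling NaNs and NaNs with payloads,
which a "nice" float generator is unlikely to ever produce.

    fn main() {
        let snan = f32::from_bits(0x7f80_0001); // signaling NaN: quiet bit clear, payload set
        let payload_nan = f32::from_bits(0x7fc0_1234); // quiet NaN carrying a payload
        assert!(snan.is_nan() && payload_nan.is_nan());
        assert_ne!(snan.to_bits(), payload_nan.to_bits()); // distinct NaN encodings
    }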

cranelift/fuzzgen/src/lib.rs
@@ -5,7 +5,9 @@ use arbitrary::{Arbitrary, Unstructured};
 use cranelift::codegen::data_value::DataValue;
 use cranelift::codegen::ir::types::*;
 use cranelift::codegen::ir::Function;
+use cranelift::codegen::Context;
 use cranelift::prelude::*;
+use cranelift_native::builder_with_options;

 mod config;
 mod function_generator;
@@ -60,6 +62,10 @@ where
                 DataValue::from_integer(imm, ty)?
             }
             ty if ty.is_bool() => DataValue::B(bool::arbitrary(self.u)?),
+            // f{32,64}::arbitrary does not generate a bunch of important values
+            // such as Signaling NaN's / NaN's with payload, so generate floats from integers.
+            F32 => DataValue::F32(Ieee32::with_bits(u32::arbitrary(self.u)?)),
+            F64 => DataValue::F64(Ieee64::with_bits(u64::arbitrary(self.u)?)),
             _ => unimplemented!(),
         })
     }
@@ -81,10 +87,42 @@ where
         Ok(inputs)
     }

+    fn run_func_passes(&self, func: Function) -> Result<Function> {
+        // Do a NaN Canonicalization pass on the generated function.
+        //
+        // Both IEEE754 and the Wasm spec are somewhat loose about what is allowed
+        // to be returned from NaN producing operations. And in practice this changes
+        // from X86 to Aarch64 and others. Even in the same host machine, the
+        // interpreter may produce a code sequence different from cranelift that
+        // generates different NaN's but produces legal results according to the spec.
+        //
+        // These differences cause spurious failures in the fuzzer. To fix this
+        // we enable the NaN Canonicalization pass that replaces any NaN's produced
+        // with a single fixed canonical NaN value.
+        //
+        // This is something that we can enable via flags for the compiled version, however
+        // the interpreter won't get that version, so call that pass manually here.
+
+        let mut ctx = Context::for_function(func);
+        // Assume that we are generating this function for the current ISA
+        // this is only used for the verifier after `canonicalize_nans` so
+        // it's not too important.
+        let flags = settings::Flags::new(settings::builder());
+        let isa = builder_with_options(false)
+            .expect("Unable to build a TargetIsa for the current host")
+            .finish(flags)?;
+
+        ctx.canonicalize_nans(isa.as_ref())?;
+
+        Ok(ctx.func)
+    }
+
     pub fn generate_test(mut self) -> Result<TestCase> {
         let func = FunctionGenerator::new(&mut self.u, &self.config).generate()?;
         let inputs = self.generate_test_inputs(&func.signature)?;
+
+        let func = self.run_func_passes(func)?;

         Ok(TestCase { func, inputs })
     }
 }
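A hedged sketch related to the comment in run_func_passes, not part of the patch: for the
compiled path, the same behaviour can be requested through Cranelift's shared
`enable_nan_canonicalization` setting instead of invoking the pass manually; the
interpreter does not go through those backend flags, which is why the pass is called
explicitly above. The helper name below is illustrative only.

    use cranelift::codegen::settings::{self, Configurable};

    // Build a Flags value with NaN canonicalization turned on for code generation.
    fn flags_with_canonical_nans() -> settings::Flags {
        let mut builder = settings::builder();
        builder
            .set("enable_nan_canonicalization", "true")
            .expect("enable_nan_canonicalization is a shared Cranelift setting");
        settings::Flags::new(builder)
    }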