Update to the latest spec_testsuite and dependencies. (#803)

* Update to the latest spec_testsuite and dependencies.

Update to target-lexicon 0.10, cranelift 0.54, wast 0.6, faerie 0.14,
and the latest spec_testsuite.

For wast and cranelift-wasm, update the code for API changes.

* Factor out the code for matching f32, f64, and v128.

This takes the idea from #802 of splitting out `f32_matches`, `f64_matches`,
and `v128_matches` functions, which factor the matching logic so that it is
shared between the scalar and vector cases.
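
For context, here is a minimal, self-contained sketch of that shared scalar/vector matching idea. The simplified `NanPattern` enum and the `f32x4_matches` helper are stand-ins introduced only for illustration; the actual code uses the `wast` crate's `NanPattern`/`V128Pattern` types, as shown in the diff below.

```rust
// Sketch only: simplified stand-in for wast::NanPattern<Float32>.
enum NanPattern {
    CanonicalNan,
    ArithmeticNan,
    Value(u32), // expected exact bit pattern
}

// Canonical NaN: exponent all ones, only the most-significant payload bit set.
fn is_canonical_f32_nan(bits: u32) -> bool {
    bits & 0x7fff_ffff == 0x7fc0_0000
}

// Arithmetic NaN: exponent all ones and the most-significant payload bit set.
fn is_arithmetic_f32_nan(bits: u32) -> bool {
    bits & 0x7fc0_0000 == 0x7fc0_0000
}

// Scalar matcher: checks a NaN class, or compares raw bits.
fn f32_matches(actual: u32, expected: &NanPattern) -> bool {
    match expected {
        NanPattern::CanonicalNan => is_canonical_f32_nan(actual),
        NanPattern::ArithmeticNan => is_arithmetic_f32_nan(actual),
        NanPattern::Value(bits) => actual == *bits,
    }
}

// Vector matcher: reuses the scalar matcher lane by lane (lane 0 is the
// least-significant 32 bits of the u128).
fn f32x4_matches(actual: u128, expected: &[NanPattern; 4]) -> bool {
    expected
        .iter()
        .enumerate()
        .all(|(i, p)| f32_matches((actual >> (i * 32)) as u32, p))
}

fn main() {
    let canonical = 0x7fc0_0000u32;
    assert!(f32_matches(canonical, &NanPattern::CanonicalNan));
    assert!(f32_matches(1.5f32.to_bits(), &NanPattern::Value(1.5f32.to_bits())));

    // A vector whose four lanes are all the canonical NaN bit pattern.
    let v = (0..4).fold(0u128, |acc, i| acc | (canonical as u128) << (i * 32));
    assert!(f32x4_matches(
        v,
        &[
            NanPattern::CanonicalNan,
            NanPattern::ArithmeticNan, // canonical NaN is also arithmetic
            NanPattern::CanonicalNan,
            NanPattern::Value(canonical),
        ]
    ));
}
```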
Author:       Dan Gohman
Date:         2020-01-10 13:57:38 -08:00
Committed by: GitHub
Parent:       28a938a62f
Commit:       ef2177ed3a

22 changed files with 167 additions and 220 deletions


@@ -191,10 +191,10 @@ impl WastContext {
         Ok(Outcome::Ok(vec![global.get()]))
     }
 
-    fn assert_return(&self, result: Outcome, results: &[Val]) -> Result<()> {
+    fn assert_return(&self, result: Outcome, results: &[wast::AssertExpression]) -> Result<()> {
         let values = result.into_result()?;
         for (v, e) in values.iter().zip(results) {
-            if values_equal(v, e)? {
+            if val_matches(v, e)? {
                 continue;
             }
             bail!("expected {:?}, got {:?}", e, v)
@@ -263,10 +263,6 @@ impl WastContext {
                 exec,
                 results,
             } => {
-                let results = results
-                    .iter()
-                    .map(runtime_value)
-                    .collect::<Result<Vec<_>>>()?;
                 let result = self.perform_execute(exec)?;
                 self.assert_return(result, &results)?;
             }
@@ -286,92 +282,6 @@ impl WastContext {
                 let result = self.perform_invoke(call)?;
                 self.assert_trap(result, message)?;
             }
-            AssertReturnCanonicalNan { span: _, invoke } => {
-                for v in self.perform_invoke(invoke)?.into_result()? {
-                    match v {
-                        Val::F32(x) => {
-                            if !is_canonical_f32_nan(x) {
-                                bail!("expected canonical NaN");
-                            }
-                        }
-                        Val::F64(x) => {
-                            if !is_canonical_f64_nan(x) {
-                                bail!("expected canonical NaN");
-                            }
-                        }
-                        other => bail!("expected float, got {:?}", other),
-                    };
-                }
-            }
-            AssertReturnCanonicalNanF32x4 { span: _, invoke } => {
-                for v in self.perform_invoke(invoke)?.into_result()? {
-                    let val = match v {
-                        Val::V128(x) => x,
-                        other => bail!("expected v128, got {:?}", other),
-                    };
-                    for l in 0..4 {
-                        if !is_canonical_f32_nan(extract_lane_as_u32(val, l)?) {
-                            bail!("expected f32x4 canonical NaN in lane {}", l)
-                        }
-                    }
-                }
-            }
-            AssertReturnCanonicalNanF64x2 { span: _, invoke } => {
-                for v in self.perform_invoke(invoke)?.into_result()? {
-                    let val = match v {
-                        Val::V128(x) => x,
-                        other => bail!("expected v128, got {:?}", other),
-                    };
-                    for l in 0..4 {
-                        if !is_canonical_f64_nan(extract_lane_as_u64(val, l)?) {
-                            bail!("expected f64x2 canonical NaN in lane {}", l)
-                        }
-                    }
-                }
-            }
-            AssertReturnArithmeticNan { span: _, invoke } => {
-                for v in self.perform_invoke(invoke)?.into_result()? {
-                    match v {
-                        Val::F32(x) => {
-                            if !is_arithmetic_f32_nan(x) {
-                                bail!("expected arithmetic NaN");
-                            }
-                        }
-                        Val::F64(x) => {
-                            if !is_arithmetic_f64_nan(x) {
-                                bail!("expected arithmetic NaN");
-                            }
-                        }
-                        other => bail!("expected float, got {:?}", other),
-                    }
-                }
-            }
-            AssertReturnArithmeticNanF32x4 { span: _, invoke } => {
-                for v in self.perform_invoke(invoke)?.into_result()? {
-                    let val = match v {
-                        Val::V128(x) => x,
-                        other => bail!("expected v128, got {:?}", other),
-                    };
-                    for l in 0..4 {
-                        if !is_arithmetic_f32_nan(extract_lane_as_u32(val, l)?) {
-                            bail!("expected f32x4 arithmetic NaN in lane {}", l)
-                        }
-                    }
-                }
-            }
-            AssertReturnArithmeticNanF64x2 { span: _, invoke } => {
-                for v in self.perform_invoke(invoke)?.into_result()? {
-                    let val = match v {
-                        Val::V128(x) => x,
-                        other => bail!("expected v128, got {:?}", other),
-                    };
-                    for l in 0..4 {
-                        if !is_arithmetic_f64_nan(extract_lane_as_u64(val, l)?) {
-                            bail!("expected f64x2 arithmetic NaN in lane {}", l)
-                        }
-                    }
-                }
-            }
             AssertInvalid {
                 span: _,
                 mut module,
@@ -449,12 +359,20 @@ impl WastContext {
     }
 }
 
-fn extract_lane_as_u32(bytes: u128, lane: usize) -> Result<u32> {
-    Ok((bytes >> (lane * 32)) as u32)
+fn extract_lane_as_i8(bytes: u128, lane: usize) -> i8 {
+    (bytes >> (lane * 8)) as i8
 }
 
-fn extract_lane_as_u64(bytes: u128, lane: usize) -> Result<u64> {
-    Ok((bytes >> (lane * 64)) as u64)
+fn extract_lane_as_i16(bytes: u128, lane: usize) -> i16 {
+    (bytes >> (lane * 16)) as i16
 }
 
+fn extract_lane_as_i32(bytes: u128, lane: usize) -> i32 {
+    (bytes >> (lane * 32)) as i32
+}
+
+fn extract_lane_as_i64(bytes: u128, lane: usize) -> i64 {
+    (bytes >> (lane * 64)) as i64
+}
+
 fn is_canonical_f32_nan(bits: u32) -> bool {
@@ -475,15 +393,64 @@ fn is_arithmetic_f64_nan(bits: u64) -> bool {
     (bits & AF64_NAN) == AF64_NAN
 }
 
-fn values_equal(v1: &Val, v2: &Val) -> Result<bool> {
-    Ok(match (v1, v2) {
-        (Val::I32(a), Val::I32(b)) => a == b,
-        (Val::I64(a), Val::I64(b)) => a == b,
+fn val_matches(actual: &Val, expected: &wast::AssertExpression) -> Result<bool> {
+    Ok(match (actual, expected) {
+        (Val::I32(a), wast::AssertExpression::I32(b)) => a == b,
+        (Val::I64(a), wast::AssertExpression::I64(b)) => a == b,
         // Note that these float comparisons are comparing bits, not float
         // values, so we're testing for bit-for-bit equivalence
-        (Val::F32(a), Val::F32(b)) => a == b,
-        (Val::F64(a), Val::F64(b)) => a == b,
-        (Val::V128(a), Val::V128(b)) => a == b,
-        _ => bail!("don't know how to compare {:?} and {:?} yet", v1, v2),
+        (Val::F32(a), wast::AssertExpression::F32(b)) => f32_matches(*a, b),
+        (Val::F64(a), wast::AssertExpression::F64(b)) => f64_matches(*a, b),
+        (Val::V128(a), wast::AssertExpression::V128(b)) => v128_matches(*a, b),
+        _ => bail!(
+            "don't know how to compare {:?} and {:?} yet",
+            actual,
+            expected
+        ),
     })
 }
+
+fn f32_matches(actual: u32, expected: &wast::NanPattern<wast::Float32>) -> bool {
+    match expected {
+        wast::NanPattern::CanonicalNan => is_canonical_f32_nan(actual),
+        wast::NanPattern::ArithmeticNan => is_arithmetic_f32_nan(actual),
+        wast::NanPattern::Value(expected_value) => actual == expected_value.bits,
+    }
+}
+
+fn f64_matches(actual: u64, expected: &wast::NanPattern<wast::Float64>) -> bool {
+    match expected {
+        wast::NanPattern::CanonicalNan => is_canonical_f64_nan(actual),
+        wast::NanPattern::ArithmeticNan => is_arithmetic_f64_nan(actual),
+        wast::NanPattern::Value(expected_value) => actual == expected_value.bits,
+    }
+}
+
+fn v128_matches(actual: u128, expected: &wast::V128Pattern) -> bool {
+    match expected {
+        wast::V128Pattern::I8x16(b) => b
+            .iter()
+            .enumerate()
+            .all(|(i, b)| *b == extract_lane_as_i8(actual, i)),
+        wast::V128Pattern::I16x8(b) => b
+            .iter()
+            .enumerate()
+            .all(|(i, b)| *b == extract_lane_as_i16(actual, i)),
+        wast::V128Pattern::I32x4(b) => b
+            .iter()
+            .enumerate()
+            .all(|(i, b)| *b == extract_lane_as_i32(actual, i)),
+        wast::V128Pattern::I64x2(b) => b
+            .iter()
+            .enumerate()
+            .all(|(i, b)| *b == extract_lane_as_i64(actual, i)),
+        wast::V128Pattern::F32x4(b) => b.iter().enumerate().all(|(i, b)| {
+            let a = extract_lane_as_i32(actual, i) as u32;
+            f32_matches(a, b)
+        }),
+        wast::V128Pattern::F64x2(b) => b.iter().enumerate().all(|(i, b)| {
+            let a = extract_lane_as_i64(actual, i) as u64;
+            f64_matches(a, b)
+        }),
+    }
+}
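
As a quick illustration of the lane-extraction shifts in the diff above, here is a hedged, self-contained example (not part of the commit; the `main` test driver is hypothetical). Lane 0 lives in the least-significant bits of the `u128`, and the truncating `as` cast keeps each lane's signed value.

```rust
// Same shape as the helper added in the diff: shift the lane into the low
// bits, then truncate to the lane's integer type.
fn extract_lane_as_i16(bytes: u128, lane: usize) -> i16 {
    (bytes >> (lane * 16)) as i16
}

fn main() {
    // Eight 16-bit lanes holding 1..=8, written most-significant lane first.
    let v: u128 = 0x0008_0007_0006_0005_0004_0003_0002_0001;
    assert_eq!(extract_lane_as_i16(v, 0), 1); // lowest 16 bits
    assert_eq!(extract_lane_as_i16(v, 7), 8); // highest 16 bits
    assert_eq!(extract_lane_as_i16(0xffff, 0), -1); // sign-preserving truncation
}
```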