More infrastructure.

Improve handling of memory.grow/size, add a standalone wast runner,
test harness improvements.
This commit is contained in:
Dan Gohman
2018-12-03 04:59:40 -08:00
parent 83f8a31010
commit 7faa15d7ac
15 changed files with 316 additions and 82 deletions

View File

@@ -13,6 +13,10 @@ publish = false
name = "wasmtime" name = "wasmtime"
path = "src/wasmtime.rs" path = "src/wasmtime.rs"
[[bin]]
name = "run_wast"
path = "src/run_wast.rs"
[[bin]] [[bin]]
name = "wasm2obj" name = "wasm2obj"
path = "src/wasm2obj.rs" path = "src/wasm2obj.rs"

View File

@@ -8,7 +8,7 @@ use cranelift_codegen::isa;
use cranelift_codegen::Context; use cranelift_codegen::Context;
use cranelift_entity::{EntityRef, PrimaryMap}; use cranelift_entity::{EntityRef, PrimaryMap};
use cranelift_wasm::{DefinedFuncIndex, FuncIndex, FuncTranslator}; use cranelift_wasm::{DefinedFuncIndex, FuncIndex, FuncTranslator};
use environ::{get_func_name, ModuleTranslation}; use environ::{get_func_name, get_memory_grow_name, get_memory_size_name, ModuleTranslation};
use std::string::{String, ToString}; use std::string::{String, ToString};
use std::vec::Vec; use std::vec::Vec;
@@ -49,13 +49,13 @@ impl binemit::RelocSink for RelocSink {
name: &ExternalName, name: &ExternalName,
addend: binemit::Addend, addend: binemit::Addend,
) { ) {
let reloc_target = if let ExternalName::User { namespace, index } = *name { let reloc_target = if *name == get_memory_grow_name() {
RelocationTarget::MemoryGrow
} else if *name == get_memory_size_name() {
RelocationTarget::MemorySize
} else if let ExternalName::User { namespace, index } = *name {
debug_assert!(namespace == 0); debug_assert!(namespace == 0);
RelocationTarget::UserFunc(FuncIndex::new(index as usize)) RelocationTarget::UserFunc(FuncIndex::new(index as usize))
} else if *name == ExternalName::testcase("wasmtime_memory_grow") {
RelocationTarget::MemoryGrow
} else if *name == ExternalName::testcase("wasmtime_memory_size") {
RelocationTarget::MemorySize
} else if let ExternalName::LibCall(libcall) = *name { } else if let ExternalName::LibCall(libcall) = *name {
RelocationTarget::LibCall(libcall) RelocationTarget::LibCall(libcall)
} else { } else {

View File

@@ -3,7 +3,7 @@ use cranelift_codegen::ir;
use cranelift_codegen::ir::immediates::{Imm64, Offset32, Uimm64}; use cranelift_codegen::ir::immediates::{Imm64, Offset32, Uimm64};
use cranelift_codegen::ir::types::*; use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{ use cranelift_codegen::ir::{
AbiParam, ArgumentPurpose, ExtFuncData, ExternalName, FuncRef, Function, InstBuilder, Signature, AbiParam, ArgumentPurpose, ExtFuncData, FuncRef, Function, InstBuilder, Signature,
}; };
use cranelift_codegen::isa; use cranelift_codegen::isa;
use cranelift_entity::EntityRef; use cranelift_entity::EntityRef;
@@ -26,6 +26,16 @@ pub fn get_func_name(func_index: FuncIndex) -> ir::ExternalName {
ir::ExternalName::user(0, func_index.as_u32()) ir::ExternalName::user(0, func_index.as_u32())
} }
/// Compute a `ir::ExternalName` for the `memory.grow` libcall.
pub fn get_memory_grow_name() -> ir::ExternalName {
ir::ExternalName::user(1, 0)
}
/// Compute a `ir::ExternalName` for the `memory.size` libcall.
pub fn get_memory_size_name() -> ir::ExternalName {
ir::ExternalName::user(1, 1)
}
/// Object containing the standalone environment information. To be passed after creation as /// Object containing the standalone environment information. To be passed after creation as
/// argument to `compile_module`. /// argument to `compile_module`.
pub struct ModuleEnvironment<'data, 'module> { pub struct ModuleEnvironment<'data, 'module> {
@@ -97,11 +107,11 @@ pub struct FuncEnvironment<'module_environment> {
/// The Cranelift global holding the base address of the globals vector. /// The Cranelift global holding the base address of the globals vector.
globals_base: Option<ir::GlobalValue>, globals_base: Option<ir::GlobalValue>,
/// The external function declaration for implementing wasm's `current_memory`. /// The external function declaration for implementing wasm's `memory.size`.
current_memory_extfunc: Option<FuncRef>, memory_size_extfunc: Option<FuncRef>,
/// The external function declaration for implementing wasm's `grow_memory`. /// The external function declaration for implementing wasm's `memory.grow`.
grow_memory_extfunc: Option<FuncRef>, memory_grow_extfunc: Option<FuncRef>,
/// Offsets to struct fields accessed by JIT code. /// Offsets to struct fields accessed by JIT code.
offsets: VMOffsets, offsets: VMOffsets,
@@ -119,8 +129,8 @@ impl<'module_environment> FuncEnvironment<'module_environment> {
memories_base: None, memories_base: None,
tables_base: None, tables_base: None,
globals_base: None, globals_base: None,
current_memory_extfunc: None, memory_size_extfunc: None,
grow_memory_extfunc: None, memory_grow_extfunc: None,
offsets: VMOffsets::new(isa.frontend_config().pointer_bytes()), offsets: VMOffsets::new(isa.frontend_config().pointer_bytes()),
} }
} }
@@ -484,7 +494,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
_heap: ir::Heap, _heap: ir::Heap,
val: ir::Value, val: ir::Value,
) -> WasmResult<ir::Value> { ) -> WasmResult<ir::Value> {
let grow_mem_func = self.grow_memory_extfunc.unwrap_or_else(|| { let memory_grow_func = self.memory_grow_extfunc.unwrap_or_else(|| {
let sig_ref = pos.func.import_signature(Signature { let sig_ref = pos.func.import_signature(Signature {
call_conv: self.isa.frontend_config().default_call_conv, call_conv: self.isa.frontend_config().default_call_conv,
params: vec![ params: vec![
@@ -497,17 +507,18 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
// We currently allocate all code segments independently, so nothing // We currently allocate all code segments independently, so nothing
// is colocated. // is colocated.
let colocated = false; let colocated = false;
// FIXME: Use a real ExternalName system.
pos.func.import_function(ExtFuncData { pos.func.import_function(ExtFuncData {
name: ExternalName::testcase("grow_memory"), name: get_memory_grow_name(),
signature: sig_ref, signature: sig_ref,
colocated, colocated,
}) })
}); });
self.grow_memory_extfunc = Some(grow_mem_func); self.memory_grow_extfunc = Some(memory_grow_func);
let memory_index = pos.ins().iconst(I32, index.index() as i64); let memory_index = pos.ins().iconst(I32, index.index() as i64);
let vmctx = pos.func.special_param(ArgumentPurpose::VMContext).unwrap(); let vmctx = pos.func.special_param(ArgumentPurpose::VMContext).unwrap();
let call_inst = pos.ins().call(grow_mem_func, &[val, memory_index, vmctx]); let call_inst = pos
.ins()
.call(memory_grow_func, &[val, memory_index, vmctx]);
Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap())
} }
@@ -517,7 +528,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
index: MemoryIndex, index: MemoryIndex,
_heap: ir::Heap, _heap: ir::Heap,
) -> WasmResult<ir::Value> { ) -> WasmResult<ir::Value> {
let cur_mem_func = self.current_memory_extfunc.unwrap_or_else(|| { let memory_size_func = self.memory_size_extfunc.unwrap_or_else(|| {
let sig_ref = pos.func.import_signature(Signature { let sig_ref = pos.func.import_signature(Signature {
call_conv: self.isa.frontend_config().default_call_conv, call_conv: self.isa.frontend_config().default_call_conv,
params: vec![ params: vec![
@@ -529,17 +540,16 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
// We currently allocate all code segments independently, so nothing // We currently allocate all code segments independently, so nothing
// is colocated. // is colocated.
let colocated = false; let colocated = false;
// FIXME: Use a real ExternalName system.
pos.func.import_function(ExtFuncData { pos.func.import_function(ExtFuncData {
name: ExternalName::testcase("current_memory"), name: get_memory_size_name(),
signature: sig_ref, signature: sig_ref,
colocated, colocated,
}) })
}); });
self.current_memory_extfunc = Some(cur_mem_func); self.memory_size_extfunc = Some(memory_size_func);
let memory_index = pos.ins().iconst(I32, index.index() as i64); let memory_index = pos.ins().iconst(I32, index.index() as i64);
let vmctx = pos.func.special_param(ArgumentPurpose::VMContext).unwrap(); let vmctx = pos.func.special_param(ArgumentPurpose::VMContext).unwrap();
let call_inst = pos.ins().call(cur_mem_func, &[memory_index, vmctx]); let call_inst = pos.ins().call(memory_size_func, &[memory_index, vmctx]);
Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap())
} }
} }

View File

@@ -49,6 +49,7 @@ pub use compilation::{
pub use environ::{ModuleEnvironment, ModuleTranslation}; pub use environ::{ModuleEnvironment, ModuleTranslation};
pub use module::{DataInitializer, Export, MemoryPlan, MemoryStyle, Module, TableElements}; pub use module::{DataInitializer, Export, MemoryPlan, MemoryStyle, Module, TableElements};
pub use tunables::Tunables; pub use tunables::Tunables;
pub use vmoffsets::VMOffsets;
/// WebAssembly page sizes are defined to be 64KiB. /// WebAssembly page sizes are defined to be 64KiB.
pub const WASM_PAGE_SIZE: u32 = 0x10000; pub const WASM_PAGE_SIZE: u32 = 0x10000;

View File

@@ -52,17 +52,23 @@ pub enum MemoryStyle {
impl MemoryStyle { impl MemoryStyle {
/// Decide on an implementation style for the given `Memory`. /// Decide on an implementation style for the given `Memory`.
pub fn for_memory(memory: Memory, tunables: &Tunables) -> Self { pub fn for_memory(memory: Memory, tunables: &Tunables) -> (Self, u64) {
if let Some(maximum) = memory.maximum { if let Some(maximum) = memory.maximum {
// A heap with a declared maximum is prepared to be used with // A heap with a declared maximum is prepared to be used with
// threads and therefore be immovable, so make it static. // threads and therefore be immovable, so make it static.
(
MemoryStyle::Static { MemoryStyle::Static {
bound: cmp::max(tunables.static_memory_bound, maximum), bound: cmp::max(tunables.static_memory_bound, maximum),
} },
tunables.static_memory_offset_guard_size,
)
} else { } else {
// A heap without a declared maximum is likely to want to be small // A heap without a declared maximum is likely to want to be small
// at least some of the time, so make it dynamic. // at least some of the time, so make it dynamic.
MemoryStyle::Dynamic (
MemoryStyle::Dynamic,
tunables.dynamic_memory_offset_guard_size,
)
} }
} }
} }
@@ -82,10 +88,11 @@ pub struct MemoryPlan {
impl MemoryPlan { impl MemoryPlan {
/// Draw up a plan for implementing a `Memory`. /// Draw up a plan for implementing a `Memory`.
pub fn for_memory(memory: Memory, tunables: &Tunables) -> Self { pub fn for_memory(memory: Memory, tunables: &Tunables) -> Self {
let (style, offset_guard_size) = MemoryStyle::for_memory(memory, tunables);
Self { Self {
memory, memory,
style: MemoryStyle::for_memory(memory, tunables), style,
offset_guard_size: tunables.offset_guard_size, offset_guard_size,
} }
} }
} }

View File

@@ -4,8 +4,11 @@ pub struct Tunables {
/// For static heaps, the size of the heap protected by bounds checking. /// For static heaps, the size of the heap protected by bounds checking.
pub static_memory_bound: u32, pub static_memory_bound: u32,
/// The size of the offset guard. /// The size of the offset guard for static heaps.
pub offset_guard_size: u64, pub static_memory_offset_guard_size: u64,
/// The size of the offset guard for dynamic heaps.
pub dynamic_memory_offset_guard_size: u64,
} }
impl Default for Tunables { impl Default for Tunables {
@@ -17,11 +20,17 @@ impl Default for Tunables {
/// need for explicit bounds checks. /// need for explicit bounds checks.
static_memory_bound: 0x1_0000, static_memory_bound: 0x1_0000,
/// Size in bytes of the offset guard. /// Size in bytes of the offset guard for static memories.
/// ///
/// Allocating 2 GiB of address space lets us translate wasm /// Allocating 2 GiB of address space lets us translate wasm
/// offsets into x86 offsets as aggressively as we can. /// offsets into x86 offsets as aggressively as we can.
offset_guard_size: 0x8000_0000, static_memory_offset_guard_size: 0x8000_0000,
/// Size in bytes of the offset guard for dynamic memories.
///
/// Allocate a small guard to optimize common cases but without
/// wasting too much memory.
dynamic_memory_offset_guard_size: 0x1_0000,
} }
} }
} }

View File

@@ -72,16 +72,10 @@ impl VMOffsets {
2 * self.pointer_size 2 * self.pointer_size
} }
/// The offset of the `instance` field.
#[allow(dead_code)]
pub fn vmctx_instance(&self) -> u8 {
3 * self.pointer_size
}
/// Return the size of `VMContext`. /// Return the size of `VMContext`.
#[allow(dead_code)] #[allow(dead_code)]
pub fn size_of_vmctx(&self) -> u8 { pub fn size_of_vmctx(&self) -> u8 {
4 * self.pointer_size 3 * self.pointer_size
} }
/// Return the offset from the `memories` pointer to `VMMemory` index `index`. /// Return the offset from the `memories` pointer to `VMMemory` index `index`.

View File

@@ -19,7 +19,6 @@ region = "1.0.0"
lazy_static = "1.2.0" lazy_static = "1.2.0"
libc = { version = "0.2.44", default-features = false } libc = { version = "0.2.44", default-features = false }
errno = "0.2.4" errno = "0.2.4"
cast = { version = "0.2.2", default-features = false }
memoffset = "0.2.1" memoffset = "0.2.1"
[build-dependencies] [build-dependencies]

View File

@@ -36,6 +36,10 @@ where
Ok(compilation) Ok(compilation)
} }
extern "C" {
pub fn __rust_probestack();
}
/// Performs the relocations inside the function bytecode, provided the necessary metadata /// Performs the relocations inside the function bytecode, provided the necessary metadata
fn relocate<F>( fn relocate<F>(
compilation: &mut Compilation, compilation: &mut Compilation,
@@ -76,6 +80,7 @@ fn relocate<F>(
FloorF64 => wasmtime_f64_floor as usize, FloorF64 => wasmtime_f64_floor as usize,
TruncF64 => wasmtime_f64_trunc as usize, TruncF64 => wasmtime_f64_trunc as usize,
NearestF64 => wasmtime_f64_nearest as usize, NearestF64 => wasmtime_f64_nearest as usize,
Probestack => __rust_probestack as usize,
other => panic!("unexpected libcall: {}", other), other => panic!("unexpected libcall: {}", other),
} }
} }

View File

@@ -42,7 +42,6 @@ extern crate lazy_static;
extern crate libc; extern crate libc;
#[macro_use] #[macro_use]
extern crate memoffset; extern crate memoffset;
extern crate cast;
mod code; mod code;
mod execute; mod execute;

View File

@@ -2,7 +2,6 @@
//! //!
//! `LinearMemory` is to WebAssembly linear memories what `Table` is to WebAssembly tables. //! `LinearMemory` is to WebAssembly linear memories what `Table` is to WebAssembly tables.
use cast;
use mmap::Mmap; use mmap::Mmap;
use region; use region;
use std::string::String; use std::string::String;
@@ -63,9 +62,7 @@ impl LinearMemory {
/// Returns the number of allocated wasm pages. /// Returns the number of allocated wasm pages.
pub fn size(&self) -> u32 { pub fn size(&self) -> u32 {
assert_eq!(self.mmap.len() % WASM_PAGE_SIZE as usize, 0); self.current
let num_pages = self.mmap.len() / WASM_PAGE_SIZE as usize;
cast::u32(num_pages).unwrap()
} }
/// Grow memory by the specified amount of pages. /// Grow memory by the specified amount of pages.
@@ -97,27 +94,25 @@ impl LinearMemory {
let new_bytes = new_pages as usize * WASM_PAGE_SIZE as usize; let new_bytes = new_pages as usize * WASM_PAGE_SIZE as usize;
if new_bytes > self.mmap.len() { if new_bytes > self.mmap.len() - self.offset_guard_size {
// If we have no maximum, this is a "dynamic" heap, and it's allowed to move. // If we have no maximum, this is a "dynamic" heap, and it's allowed to move.
assert!(self.maximum.is_none()); assert!(self.maximum.is_none());
let mapped_pages = self.current as usize;
let mapped_bytes = mapped_pages * WASM_PAGE_SIZE as usize;
let guard_bytes = self.offset_guard_size; let guard_bytes = self.offset_guard_size;
let request_bytes = new_bytes.checked_add(guard_bytes)?;
let mut new_mmap = Mmap::with_size(new_bytes).ok()?; let mut new_mmap = Mmap::with_size(request_bytes).ok()?;
// Make the offset-guard pages inaccessible. // Make the offset-guard pages inaccessible.
unsafe { unsafe {
region::protect( region::protect(
new_mmap.as_ptr().add(mapped_bytes), new_mmap.as_ptr().add(new_bytes),
guard_bytes, guard_bytes,
region::Protection::Read, region::Protection::None,
).expect("unable to make memory readonly"); ).expect("unable to make memory inaccessible");
} }
new_mmap let copy_len = self.mmap.len() - self.offset_guard_size;
.as_mut_slice() new_mmap.as_mut_slice()[..copy_len].copy_from_slice(&self.mmap.as_slice()[..copy_len]);
.copy_from_slice(self.mmap.as_slice());
self.mmap = new_mmap; self.mmap = new_mmap;
} }

View File

@@ -18,17 +18,25 @@ pub struct VMMemory {
} }
#[cfg(test)] #[cfg(test)]
mod test { mod test_vmmemory {
use super::VMMemory;
use std::mem::size_of;
use wasmtime_environ::VMOffsets; use wasmtime_environ::VMOffsets;
#[test] #[test]
fn check_vmmemory_offsets() { fn check_vmmemory_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>()); let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(size_of::<VMMemory>(), offsets.size_of_vmmemory()); assert_eq!(
assert_eq!(offset_of!(VMMemory, base), offsets.vmmemory_base()); size_of::<VMMemory>(),
usize::from(offsets.size_of_vmmemory())
);
assert_eq!(
offset_of!(VMMemory, base),
usize::from(offsets.vmmemory_base())
);
assert_eq!( assert_eq!(
offset_of!(VMMemory, current_length), offset_of!(VMMemory, current_length),
offsets.vmmemory_current_length() usize::from(offsets.vmmemory_current_length())
); );
} }
} }
@@ -74,13 +82,14 @@ pub struct VMGlobal {
} }
#[cfg(test)] #[cfg(test)]
mod test { mod test_vmglobal {
use std::mem::align_of; use super::VMGlobal;
use std::mem::{align_of, size_of};
use wasmtime_environ::VMOffsets; use wasmtime_environ::VMOffsets;
#[test] #[test]
fn check_vmglobal_alignment() { fn check_vmglobal_alignment() {
assert!(align_of::<VMGlobal>() <= align_of::<i32>()); assert!(align_of::<VMGlobal>() >= align_of::<i32>());
assert!(align_of::<VMGlobal>() >= align_of::<i64>()); assert!(align_of::<VMGlobal>() >= align_of::<i64>());
assert!(align_of::<VMGlobal>() >= align_of::<f32>()); assert!(align_of::<VMGlobal>() >= align_of::<f32>());
assert!(align_of::<VMGlobal>() >= align_of::<f64>()); assert!(align_of::<VMGlobal>() >= align_of::<f64>());
@@ -88,8 +97,11 @@ mod test {
#[test] #[test]
fn check_vmglobal_offsets() { fn check_vmglobal_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>()); let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(size_of::<VMGlobal>(), offsets.size_of_vmglobal()); assert_eq!(
size_of::<VMGlobal>(),
usize::from(offsets.size_of_vmglobal())
);
} }
} }
@@ -110,17 +122,22 @@ pub struct VMTable {
} }
#[cfg(test)] #[cfg(test)]
mod test { mod test_vmtable {
use super::VMTable;
use std::mem::size_of;
use wasmtime_environ::VMOffsets; use wasmtime_environ::VMOffsets;
#[test] #[test]
fn check_vmtable_offsets() { fn check_vmtable_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>()); let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(size_of::<VMTable>(), offsets.size_of_vmtable()); assert_eq!(size_of::<VMTable>(), usize::from(offsets.size_of_vmtable()));
assert_eq!(offset_of!(VMTable, base), offsets.vmtable_base()); assert_eq!(
offset_of!(VMTable, base),
usize::from(offsets.vmtable_base())
);
assert_eq!( assert_eq!(
offset_of!(VMTable, current_elements), offset_of!(VMTable, current_elements),
offsets.vmtable_current_elements() usize::from(offsets.vmtable_current_elements())
); );
} }
} }
@@ -173,16 +190,26 @@ pub struct VMContext {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::VMContext;
use std::mem::size_of;
use wasmtime_environ::VMOffsets; use wasmtime_environ::VMOffsets;
#[test] #[test]
fn check_vmctx_offsets() { fn check_vmctx_offsets() {
let offsets = VMOffsets::new(size_of::<*mut u8>()); let offsets = VMOffsets::new(size_of::<*mut u8>() as u8);
assert_eq!(size_of::<VMContext>(), offsets.size_of_vmctx()); assert_eq!(size_of::<VMContext>(), usize::from(offsets.size_of_vmctx()));
assert_eq!(offset_of!(VMContext, memories), offsets.vmctx_memories()); assert_eq!(
assert_eq!(offset_of!(VMContext, globals), offsets.vmctx_globals()); offset_of!(VMContext, memories),
assert_eq!(offset_of!(VMContext, tables), offsets.vmctx_tables()); usize::from(offsets.vmctx_memories())
assert_eq!(offset_of!(VMContext, instance), offsets.vmctx_instance()); );
assert_eq!(
offset_of!(VMContext, globals),
usize::from(offsets.vmctx_globals())
);
assert_eq!(
offset_of!(VMContext, tables),
usize::from(offsets.vmctx_tables())
);
} }
} }

View File

@@ -29,10 +29,14 @@ fn main() {
writeln!( writeln!(
out, out,
"fn {}() {{", "fn {}() {{",
path.file_stem() avoid_keywords(
&path
.file_stem()
.expect("file_stem") .expect("file_stem")
.to_str() .to_str()
.expect("to_str") .expect("to_str")
.replace("-", "_")
)
); );
writeln!( writeln!(
out, out,
@@ -44,3 +48,14 @@ fn main() {
writeln!(out); writeln!(out);
} }
} }
/// Map a wast file stem onto a valid Rust identifier for the generated
/// test function, appending an underscore when the stem collides with a
/// Rust keyword (e.g. `if.wast` -> `fn if_()`).
///
/// Non-keyword names are returned unchanged. The original table only
/// covered the five keywords currently present in the spec test suite;
/// this covers the full set of strict keywords so newly-added test files
/// can't silently generate uncompilable code.
fn avoid_keywords(name: &str) -> &str {
    match name {
        "as" => "as_",
        "break" => "break_",
        "const" => "const_",
        "continue" => "continue_",
        "crate" => "crate_",
        "else" => "else_",
        "enum" => "enum_",
        "extern" => "extern_",
        "false" => "false_",
        "fn" => "fn_",
        "for" => "for_",
        "if" => "if_",
        "impl" => "impl_",
        "in" => "in_",
        "let" => "let_",
        "loop" => "loop_",
        "match" => "match_",
        "mod" => "mod_",
        "move" => "move_",
        "mut" => "mut_",
        "pub" => "pub_",
        "ref" => "ref_",
        "return" => "return_",
        "self" => "self_",
        "static" => "static_",
        "struct" => "struct_",
        "super" => "super_",
        "trait" => "trait_",
        "true" => "true_",
        "type" => "type_",
        "unsafe" => "unsafe_",
        "use" => "use_",
        "where" => "where_",
        "while" => "while_",
        other => other,
    }
}

View File

@@ -155,6 +155,72 @@ pub fn wast_buffer(name: &str, isa: &isa::TargetIsa, wast: &[u8]) {
} }
} }
} }
CommandKind::AssertReturnCanonicalNan { action } => {
match instances.perform_action(&*isa, action) {
InvokeOutcome::Returned { values } => {
for v in values.iter() {
match v {
Value::I32(_) | Value::I64(_) => {
panic!("unexpected integer type in NaN test");
}
Value::F32(x) => assert_eq!(
x & 0x7fffffff,
0x7fc00000,
"expected canonical NaN at {}:{}",
name,
line
),
Value::F64(x) => assert_eq!(
x & 0x7fffffffffffffff,
0x7ff8000000000000,
"expected canonical NaN at {}:{}",
name,
line
),
};
}
}
InvokeOutcome::Trapped { message } => {
panic!(
"{}:{}: expected canonical NaN return, but a trap occurred: {}",
name, line, message
);
}
}
}
CommandKind::AssertReturnArithmeticNan { action } => {
match instances.perform_action(&*isa, action) {
InvokeOutcome::Returned { values } => {
for v in values.iter() {
match v {
Value::I32(_) | Value::I64(_) => {
panic!("unexpected integer type in NaN test");
}
Value::F32(x) => assert_eq!(
x & 0x00400000,
0x00400000,
"expected arithmetic NaN at {}:{}",
name,
line
),
Value::F64(x) => assert_eq!(
x & 0x0008000000000000,
0x0008000000000000,
"expected arithmetic NaN at {}:{}",
name,
line
),
};
}
}
InvokeOutcome::Trapped { message } => {
panic!(
"{}:{}: expected arithmetic NaN return, but a trap occurred: {}",
name, line, message
);
}
}
}
command => { command => {
println!("{}:{}: TODO: implement {:?}", name, line, command); println!("{}:{}: TODO: implement {:?}", name, line, command);
} }

103
src/run_wast.rs Normal file
View File

@@ -0,0 +1,103 @@
//! CLI tool to run wast tests using the wasmtime libraries.
#![deny(
missing_docs,
trivial_numeric_casts,
unused_extern_crates,
unstable_features
)]
#![warn(unused_import_braces)]
#![cfg_attr(
feature = "clippy",
plugin(clippy(conf_file = "../../clippy.toml"))
)]
#![cfg_attr(
feature = "cargo-clippy",
allow(new_without_default, new_without_default_derive)
)]
#![cfg_attr(
feature = "cargo-clippy",
warn(
float_arithmetic,
mut_mut,
nonminimal_bool,
option_map_unwrap_or,
option_map_unwrap_or_else,
unicode_not_nfc,
use_self
)
)]
extern crate cranelift_codegen;
extern crate cranelift_native;
extern crate docopt;
extern crate wasmtime_wast;
#[macro_use]
extern crate serde_derive;
extern crate file_per_thread_logger;
extern crate pretty_env_logger;
use cranelift_codegen::settings;
use cranelift_codegen::settings::Configurable;
use docopt::Docopt;
use std::path::Path;
use wasmtime_wast::wast_file;
/// Prefix for the per-thread log files written when `--debug` is not given.
static LOG_FILENAME_PREFIX: &str = "cranelift.dbg.";

/// Docopt usage string; the `Args` struct below is deserialized from the
/// command line according to this grammar.
const USAGE: &str = "
Wast test runner.
Usage:
run_wast [-do] <file>...
run_wast --help | --version
Options:
-h, --help print this help message
--version print the Cranelift version
-o, --optimize runs optimization passes on the translated functions
-d, --debug enable debug output on stderr/stdout
";

/// Parsed command-line arguments, populated by docopt from `USAGE`.
#[derive(Deserialize, Debug, Clone)]
struct Args {
    // One or more `.wast` files to run.
    arg_file: Vec<String>,
    // `-d/--debug`: log to stderr/stdout instead of per-thread files.
    flag_debug: bool,
    // NOTE(review): no `--function` option appears in USAGE, so docopt has
    // nothing to populate this field from — confirm whether it is vestigial
    // (copied from another tool's Args) and should be removed.
    flag_function: Option<String>,
    // `-o/--optimize`: set Cranelift's opt_level to "best".
    flag_optimize: bool,
}
/// Entry point: parse command-line arguments, configure a Cranelift ISA for
/// the host machine, then run each `.wast` file named on the command line.
fn main() {
    let args: Args = Docopt::new(USAGE)
        .and_then(|d| {
            d.help(true)
                .version(Some(String::from("0.0.0")))
                .deserialize()
        }).unwrap_or_else(|e| e.exit());

    let isa_builder = cranelift_native::builder().unwrap_or_else(|_| {
        panic!("host machine is not a supported target");
    });
    let mut flag_builder = settings::builder();

    // Enable verifier passes in debug mode.
    if cfg!(debug_assertions) {
        flag_builder.enable("enable_verifier").unwrap();
    }

    // Route logging either to the console or to per-thread files.
    if args.flag_debug {
        pretty_env_logger::init();
    } else {
        file_per_thread_logger::initialize(LOG_FILENAME_PREFIX);
    }

    // Enable optimization if requested.
    if args.flag_optimize {
        flag_builder.set("opt_level", "best").unwrap();
    }

    let isa = isa_builder.finish(settings::Flags::new(flag_builder));

    for filename in &args.arg_file {
        let path = Path::new(&filename);
        // `.expect(&format!(...))` would allocate the message even on
        // success (clippy: expect_fun_call) and would drop the underlying
        // error; build the panic message lazily and include the error.
        wast_file(path, &*isa)
            .unwrap_or_else(|e| panic!("error reading file {}: {:?}", path.display(), e));
    }
}