Clippy fixes (#692)

Author: XAMPPRocky
Date: 2019-12-24 20:50:07 +00:00
Committed by: Dan Gohman
Parent: 6c97cfed1e
Commit: 907e7aac01
35 changed files with 390 additions and 417 deletions


@@ -56,6 +56,7 @@ struct ModuleCacheEntryInner<'config, 'worker> {
worker: &'worker Worker,
}
/// Cached compilation data of a Wasm module.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct ModuleCacheData {
compilation: Compilation,
@@ -66,7 +67,8 @@ pub struct ModuleCacheData {
traps: Traps,
}
type ModuleCacheDataTupleType = (
/// A type alias over the module cache data as a tuple.
pub type ModuleCacheDataTupleType = (
Compilation,
Relocations,
ModuleAddressMap,
@@ -102,7 +104,7 @@ impl<'config, 'worker> ModuleCacheEntry<'config, 'worker> {
}
#[cfg(test)]
fn from_inner<'data>(inner: ModuleCacheEntryInner<'config, 'worker>) -> Self {
fn from_inner(inner: ModuleCacheEntryInner<'config, 'worker>) -> Self {
Self(Some(inner))
}
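
Note on the hunks above: the new doc comments and the `pub` on the tuple alias presumably silence missing-documentation warnings and let the alias appear in the public `Compiler` trait later in this diff, while dropping `'data` from `from_inner` looks like clippy's `extra_unused_lifetimes` lint. A minimal standalone sketch of the lifetime case, with hypothetical `Inner`/`Entry` types:

    // The lifetime in the commented-out signature is declared but never tied to
    // any parameter or return type, so it can simply be removed.
    struct Inner {
        id: u32,
    }

    struct Entry(Option<Inner>);

    impl Entry {
        // Before: fn from_inner<'data>(inner: Inner) -> Self
        fn from_inner(inner: Inner) -> Self {
            Self(Some(inner))
        }
    }

    fn main() {
        let entry = Entry::from_inner(Inner { id: 1 });
        println!("id: {}", entry.0.unwrap().id);
    }
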
@@ -119,10 +121,9 @@ impl<'config, 'worker> ModuleCacheEntry<'config, 'worker> {
pub fn update_data(&self, data: &ModuleCacheData) {
if let Some(inner) = &self.0 {
inner.update_data(data).map(|val| {
if inner.update_data(data).is_some() {
inner.worker.on_cache_update_async(&inner.mod_cache_path); // call on success
val
});
}
}
}
}
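
The old `update_data` used `Option::map` purely for its side effect and then discarded the result; the rewrite branches on `is_some()` instead, which is the shape clippy's map-for-side-effect lints push toward. A small sketch of the pattern with hypothetical `try_update`/`notify` helpers:

    fn try_update(flag: bool) -> Option<u32> {
        if flag {
            Some(42)
        } else {
            None
        }
    }

    fn notify() {
        println!("cache updated");
    }

    fn main() {
        // Before (discouraged): the closure exists only for its side effect and
        // the resulting Option is thrown away.
        // let _ = try_update(true).map(|val| { notify(); val });

        // After: branch on success explicitly.
        if try_update(true).is_some() {
            notify(); // call on success
        }
    }
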
@@ -236,7 +237,7 @@ impl ModuleCacheData {
}
}
pub fn to_tuple(self) -> ModuleCacheDataTupleType {
pub fn into_tuple(self) -> ModuleCacheDataTupleType {
(
self.compilation,
self.relocations,
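
Renaming `to_tuple` to `into_tuple` follows clippy's `wrong_self_convention`: a conversion that takes `self` by value should be named `into_*`, while `to_*` implies a borrow. A simplified sketch (the `Data` struct here is a stand-in for `ModuleCacheData`):

    struct Data {
        compilation: Vec<u8>,
        traps: Vec<u8>,
    }

    impl Data {
        // Takes `self` by value and consumes it, so the idiomatic name is into_*.
        fn into_tuple(self) -> (Vec<u8>, Vec<u8>) {
            (self.compilation, self.traps)
        }
    }

    fn main() {
        let data = Data { compilation: vec![0x0b], traps: vec![] };
        let (compilation, traps) = data.into_tuple();
        println!("{} bytes of code, {} traps", compilation.len(), traps.len());
    }
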


@@ -148,10 +148,10 @@ pub fn create_new_config<P: AsRef<Path> + Debug>(
)?;
if config_file.exists() {
Err(format!(
return Err(format!(
"Specified config file already exists! Path: {}",
config_file.display()
))?;
));
}
let parent_dir = config_file
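
This hunk looks like a fix for clippy's `try_err` lint: applying `?` to a freshly built `Err(...)` is just an obfuscated early return. A standalone sketch of the same shape (the path and message here are illustrative):

    use std::path::Path;

    fn check_config(config_file: &Path) -> Result<(), String> {
        if config_file.exists() {
            // Before (flagged): Err(format!("..."))?;
            return Err(format!(
                "Specified config file already exists! Path: {}",
                config_file.display()
            ));
        }
        Ok(())
    }

    fn main() {
        match check_config(Path::new("wasmtime-cache-config.toml")) {
            Ok(()) => println!("path is free"),
            Err(msg) => eprintln!("{}", msg),
        }
    }
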


@@ -120,26 +120,28 @@ impl Worker {
#[inline]
fn send_cache_event(&self, event: CacheEvent) {
#[cfg(test)]
let mut stats = self
.stats
.0
.lock()
.expect("Failed to acquire worker stats lock");
match self.sender.try_send(event.clone()) {
Ok(()) => {
#[cfg(test)]
let _ = stats.sent += 1;
}
Err(err) => {
info!(
"Failed to send asynchronously message to worker thread, \
event: {:?}, error: {}",
event, err
);
let sent_event = self.sender.try_send(event.clone());
#[cfg(test)]
let _ = stats.dropped += 1;
if let Err(ref err) = sent_event {
info!(
"Failed to send asynchronously message to worker thread, \
event: {:?}, error: {}",
event, err
);
}
#[cfg(test)]
{
let mut stats = self
.stats
.0
.lock()
.expect("Failed to acquire worker stats lock");
if sent_event.is_ok() {
stats.sent += 1;
} else {
stats.dropped += 1;
}
}
}
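
The rewrite of `send_cache_event` captures the `try_send` result once, logs on failure, and moves the test-only counters into a single `#[cfg(test)]` block, replacing the old `let _ = stats.sent += 1;` statements (which bind the `()` produced by `+=` and trip clippy). A rough, self-contained sketch of the resulting shape, using a plain `sync_channel` and a simplified `Stats`:

    use std::sync::mpsc::sync_channel;

    #[derive(Default)]
    struct Stats {
        sent: u32,
        dropped: u32,
    }

    fn main() {
        // Bounded channel with capacity 1 and no consumer: the second try_send
        // fails, so both branches below are exercised.
        let (sender, _receiver) = sync_channel::<&'static str>(1);
        let mut stats = Stats::default();

        for event in ["first", "second"] {
            let sent_event = sender.try_send(event);
            if let Err(ref err) = sent_event {
                eprintln!("failed to send {:?}: {}", event, err);
            }
            // In the real worker this bookkeeping lives in a #[cfg(test)] block.
            if sent_event.is_ok() {
                stats.sent += 1;
            } else {
                stats.dropped += 1;
            }
        }
        println!("sent: {}, dropped: {}", stats.sent, stats.dropped);
    }
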
@@ -194,6 +196,18 @@ enum CacheEntry {
},
}
macro_rules! unwrap_or_warn {
($result:expr, $cont:stmt, $err_msg:expr, $path:expr) => {
match $result {
Ok(val) => val,
Err(err) => {
warn!("{}, path: {}, msg: {}", $err_msg, $path.display(), err);
$cont
}
}
};
}
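
`unwrap_or_warn!` packages the repeated "log a warning and bail out" pattern used throughout the worker below. A compact sketch of how a call site reads, with `warn!` swapped for `eprintln!` so it runs without the `log` crate, and a hypothetical `read_cache` wrapper:

    use std::fs;
    use std::path::Path;

    macro_rules! unwrap_or_warn {
        ($result:expr, $cont:stmt, $err_msg:expr, $path:expr) => {
            match $result {
                Ok(val) => val,
                Err(err) => {
                    eprintln!("{}, path: {}, msg: {}", $err_msg, $path.display(), err);
                    $cont
                }
            }
        };
    }

    fn read_cache(path: &Path) -> Option<Vec<u8>> {
        // Expands to a match that either yields the bytes or logs and returns.
        let bytes = unwrap_or_warn!(
            fs::read(path),
            return None,
            "Failed to read cache file",
            path
        );
        Some(bytes)
    }

    fn main() {
        let len = read_cache(Path::new("missing.bin")).map(|b| b.len());
        println!("{:?}", len);
    }
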
impl WorkerThread {
fn run(self, init_file_per_thread_logger: Option<&'static str>) {
#[cfg(not(test))] // We want to test the worker without relying on init() being called
@@ -321,91 +335,85 @@ impl WorkerThread {
// recompress, write to other file, rename (it's atomic file content exchange)
// and update the stats file
fs::read(&path)
.map_err(|err| {
warn!(
"Failed to read old cache file, path: {}, err: {}",
path.display(),
err
)
})
.ok()
.and_then(|compressed_cache_bytes| {
zstd::decode_all(&compressed_cache_bytes[..])
.map_err(|err| warn!("Failed to decompress cached code: {}", err))
.ok()
})
.and_then(|cache_bytes| {
zstd::encode_all(&cache_bytes[..], opt_compr_lvl)
.map_err(|err| warn!("Failed to compress cached code: {}", err))
.ok()
})
.and_then(|recompressed_cache_bytes| {
fs::write(&lock_path, &recompressed_cache_bytes)
.map_err(|err| {
warn!(
"Failed to write recompressed cache, path: {}, err: {}",
lock_path.display(),
err
)
})
.ok()
})
.and_then(|()| {
fs::rename(&lock_path, &path)
.map_err(|err| {
warn!(
"Failed to rename recompressed cache, path from: {}, path to: {}, err: {}",
lock_path.display(),
path.display(),
err
);
if let Err(err) = fs::remove_file(&lock_path) {
warn!(
"Failed to clean up (remove) recompressed cache, path {}, err: {}",
lock_path.display(),
err
);
}
})
.ok()
})
.map(|()| {
// update stats file (reload it! recompression can take some time)
if let Some(mut new_stats) = read_stats_file(stats_path.as_ref()) {
if new_stats.compression_level >= opt_compr_lvl {
// Rare race:
// two instances with different opt_compr_lvl: we don't know in which order they updated
// the cache file and the stats file (they are not updated together atomically)
// Possible solution is to use directories per cache entry, but it complicates the system
// and is not worth it.
debug!(
"DETECTED task did more than once (or race with new file): \
recompression of {}. Note: if optimized compression level setting \
has changed in the meantine, the stats file might contain \
inconsistent compression level due to race.",
path.display()
);
} else {
new_stats.compression_level = opt_compr_lvl;
let _ = write_stats_file(stats_path.as_ref(), &new_stats);
}
let compressed_cache_bytes = unwrap_or_warn!(
fs::read(&path),
return,
"Failed to read old cache file",
path
);
if new_stats.usages < stats.usages {
debug!(
"DETECTED lower usage count (new file or race with counter \
increasing): file {}",
path.display()
);
}
} else {
debug!(
"Can't read stats file again to update compression level (it might got \
cleaned up): file {}",
stats_path.display()
let cache_bytes = unwrap_or_warn!(
zstd::decode_all(&compressed_cache_bytes[..]),
return,
"Failed to decompress cached code",
path
);
let recompressed_cache_bytes = unwrap_or_warn!(
zstd::encode_all(&cache_bytes[..], opt_compr_lvl),
return,
"Failed to compress cached code",
path
);
unwrap_or_warn!(
fs::write(&lock_path, &recompressed_cache_bytes),
return,
"Failed to write recompressed cache",
lock_path
);
unwrap_or_warn!(
fs::rename(&lock_path, &path),
{
if let Err(error) = fs::remove_file(&lock_path) {
warn!(
"Failed to clean up (remove) recompressed cache, path {}, err: {}",
lock_path.display(),
error
);
}
});
return;
},
"Failed to rename recompressed cache",
lock_path
);
// update stats file (reload it! recompression can take some time)
if let Some(mut new_stats) = read_stats_file(stats_path.as_ref()) {
if new_stats.compression_level >= opt_compr_lvl {
// Rare race:
// two instances with different opt_compr_lvl: we don't know in which order they updated
// the cache file and the stats file (they are not updated together atomically)
// Possible solution is to use directories per cache entry, but it complicates the system
// and is not worth it.
debug!(
"DETECTED task did more than once (or race with new file): \
recompression of {}. Note: if optimized compression level setting \
has changed in the meantine, the stats file might contain \
inconsistent compression level due to race.",
path.display()
);
} else {
new_stats.compression_level = opt_compr_lvl;
let _ = write_stats_file(stats_path.as_ref(), &new_stats);
}
if new_stats.usages < stats.usages {
debug!(
"DETECTED lower usage count (new file or race with counter \
increasing): file {}",
path.display()
);
}
} else {
debug!(
"Can't read stats file again to update compression level (it might got \
cleaned up): file {}",
stats_path.display()
);
}
trace!("Task finished: recompress file: {}", path.display());
}
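
Net effect of this hunk: the recompression task's long `.map_err(...).ok().and_then(...)` pipeline becomes straight-line code where each step either yields a value or returns early via `unwrap_or_warn!`. A generic before/after sketch with hypothetical `load`/`store` helpers:

    fn load() -> Result<Vec<u8>, String> {
        Ok(vec![1, 2, 3])
    }

    fn store(bytes: &[u8]) -> Result<(), String> {
        println!("stored {} bytes", bytes.len());
        Ok(())
    }

    // Before-style: each step is a closure in a combinator chain.
    fn run_chained() {
        let _ = load()
            .map_err(|err| eprintln!("load failed: {}", err))
            .ok()
            .and_then(|bytes| {
                store(&bytes)
                    .map_err(|err| eprintln!("store failed: {}", err))
                    .ok()
            });
    }

    // After-style: straight-line code with early returns, which is roughly what
    // the unwrap_or_warn! calls expand to.
    fn run_flat() {
        let bytes = match load() {
            Ok(bytes) => bytes,
            Err(err) => {
                eprintln!("load failed: {}", err);
                return;
            }
        };
        if let Err(err) = store(&bytes) {
            eprintln!("store failed: {}", err);
        }
    }

    fn main() {
        run_chained();
        run_flat();
    }
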
@@ -508,10 +516,10 @@ impl WorkerThread {
};
total_size += size;
if start_delete_idx_if_deleting_recognized_items.is_none() {
if total_size > tsl_if_deleting || (idx + 1) as u64 > fcl_if_deleting {
start_delete_idx_if_deleting_recognized_items = Some(idx);
}
if start_delete_idx_if_deleting_recognized_items.is_none()
&& (total_size > tsl_if_deleting || (idx + 1) as u64 > fcl_if_deleting)
{
start_delete_idx_if_deleting_recognized_items = Some(idx);
}
if total_size > total_size_limit || (idx + 1) as u64 > file_count_limit {
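
This looks like clippy's `collapsible_if`: a nested `if` whose body is the only statement of the outer `if` can be merged into one condition with `&&`. A tiny sketch with made-up values:

    fn main() {
        let start_idx: Option<usize> = None;
        let total_size = 150_u64;
        let limit = 100_u64;

        // Before (flagged):
        // if start_idx.is_none() {
        //     if total_size > limit {
        //         println!("would start deleting here");
        //     }
        // }

        // After:
        if start_idx.is_none() && total_size > limit {
            println!("would start deleting here");
        }
    }
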
@@ -554,26 +562,6 @@ impl WorkerThread {
level: u8,
cache_config: &CacheConfig,
) {
macro_rules! unwrap_or {
($result:expr, $cont:stmt, $err_msg:expr) => {
unwrap_or!($result, $cont, $err_msg, dir_path)
};
($result:expr, $cont:stmt, $err_msg:expr, $path:expr) => {
match $result {
Ok(val) => val,
Err(err) => {
warn!(
"{}, level: {}, path: {}, msg: {}",
$err_msg,
level,
$path.display(),
err
);
$cont
}
}
};
}
macro_rules! add_unrecognized {
(file: $path:expr) => {
add_unrecognized!(false, $path)
@@ -591,10 +579,24 @@ impl WorkerThread {
macro_rules! add_unrecognized_and {
([ $( $ty:ident: $path:expr ),* ], $cont:stmt) => {{
$( add_unrecognized!($ty: $path); )*
$cont
$cont
}};
}
macro_rules! unwrap_or {
($result:expr, $cont:stmt, $err_msg:expr) => {
unwrap_or!($result, $cont, $err_msg, dir_path)
};
($result:expr, $cont:stmt, $err_msg:expr, $path:expr) => {
unwrap_or_warn!(
$result,
$cont,
format!("{}, level: {}", $err_msg, level),
$path
)
};
}
// If we fail to list a directory, something bad is happening anyway
// (something touches our cache or we have disk failure)
// Try to delete it, so we can stay within soft limits of the cache size.
@@ -619,43 +621,41 @@ impl WorkerThread {
match (level, path.is_dir()) {
(0..=1, true) => enter_dir(vec, &path, level + 1, cache_config),
(0..=1, false) => {
if level == 0 && path.file_stem() == Some(OsStr::new(".cleanup")) {
if path.extension().is_some() {
if level == 0
&& path.file_stem() == Some(OsStr::new(".cleanup"))
&& path.extension().is_some()
// assume it's cleanup lock
if !is_fs_lock_expired(
&& !is_fs_lock_expired(
Some(&entry),
&path,
cache_config.cleanup_interval(),
cache_config.allowed_clock_drift_for_files_from_future(),
) {
continue; // skip active lock
}
}
)
{
continue; // skip active lock
}
add_unrecognized!(file: path);
}
(2, false) => {
let ext = path.extension();
if ext.is_none() || ext == Some(OsStr::new("stats")) {
match path.extension().and_then(OsStr::to_str) {
// mod or stats file
cache_files.insert(path, entry);
} else {
let recognized = if let Some(ext_str) = ext.unwrap().to_str() {
None | Some("stats") => {
cache_files.insert(path, entry);
}
Some(ext) => {
// check if valid lock
ext_str.starts_with("wip-")
let recognized = ext.starts_with("wip-")
&& !is_fs_lock_expired(
Some(&entry),
&path,
cache_config.optimizing_compression_task_timeout(),
cache_config.allowed_clock_drift_for_files_from_future(),
)
} else {
// if it's None, i.e. not valid UTF-8 string, then that's not our lock for sure
false
};
);
if !recognized {
add_unrecognized!(file: path);
if !recognized {
add_unrecognized!(file: path);
}
}
}
}
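
The level-2 branch now matches once on `path.extension().and_then(OsStr::to_str)`, so "no extension" and "extension is not valid UTF-8" both fall into the `None` arm instead of being handled by separate `if`/`else` paths. A simplified, runnable sketch of that match shape (the `classify` helper and its categories are illustrative):

    use std::ffi::OsStr;
    use std::path::Path;

    fn classify(path: &Path) -> &'static str {
        match path.extension().and_then(OsStr::to_str) {
            // no extension (or not valid UTF-8), or a stats file
            None | Some("stats") => "cache or stats file",
            Some(ext) if ext.starts_with("wip-") => "in-progress lock",
            Some(_) => "unrecognized",
        }
    }

    fn main() {
        for name in ["0-module", "0-module.stats", "0-module.wip-zstd", "junk.tmp"] {
            println!("{:>20} -> {}", name, classify(Path::new(name)));
        }
    }
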


@@ -1,7 +1,7 @@
//! A `Compilation` contains the compiled function bodies for a WebAssembly
//! module.
use crate::address_map::{ModuleAddressMap, ValueLabelsRanges};
use crate::cache::ModuleCacheDataTupleType;
use crate::module;
use crate::module_environ::FunctionBodyData;
use cranelift_codegen::{binemit, ir, isa};
@@ -66,6 +66,11 @@ impl Compilation {
self.functions.len()
}
/// Returns whether there are no functions defined.
pub fn is_empty(&self) -> bool {
self.functions.is_empty()
}
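
Adding `is_empty` alongside the existing `len` presumably addresses clippy's `len_without_is_empty` lint on public types. A minimal stand-in (the `Compilation` here is simplified, holding only function bodies):

    pub struct Compilation {
        functions: Vec<Vec<u8>>,
    }

    impl Compilation {
        pub fn len(&self) -> usize {
            self.functions.len()
        }

        // Without this, clippy::len_without_is_empty fires on the type.
        pub fn is_empty(&self) -> bool {
            self.functions.is_empty()
        }
    }

    fn main() {
        let compilation = Compilation { functions: vec![] };
        assert!(compilation.is_empty());
        println!("{} functions", compilation.len());
    }
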
/// Gets functions jump table offsets.
pub fn get_jt_offsets(&self) -> PrimaryMap<DefinedFuncIndex, ir::JumpTableOffsets> {
self.functions
@@ -172,15 +177,5 @@ pub trait Compiler {
function_body_inputs: PrimaryMap<DefinedFuncIndex, FunctionBodyData<'data>>,
isa: &dyn isa::TargetIsa,
generate_debug_info: bool,
) -> Result<
(
Compilation,
Relocations,
ModuleAddressMap,
ValueLabelsRanges,
PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
Traps,
),
CompileError,
>;
) -> Result<ModuleCacheDataTupleType, CompileError>;
}
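
Replacing the six-element tuple in the `Compiler` trait's return type with the `ModuleCacheDataTupleType` alias is the usual remedy for clippy's `type_complexity` warning, and it keeps the signature in sync with the cache code that produces the tuple. A simplified sketch with stand-in element types:

    type Compilation = Vec<u8>;
    type Relocations = Vec<u32>;
    type Traps = Vec<&'static str>;

    // Alias over the tuple, mirroring ModuleCacheDataTupleType in the diff.
    type ModuleCacheDataTupleType = (Compilation, Relocations, Traps);

    trait Compiler {
        // Before: -> Result<(Compilation, Relocations, Traps), String>
        fn compile_module(&self) -> Result<ModuleCacheDataTupleType, String>;
    }

    struct Dummy;

    impl Compiler for Dummy {
        fn compile_module(&self) -> Result<ModuleCacheDataTupleType, String> {
            Ok((vec![0x0b], vec![], vec![]))
        }
    }

    fn main() {
        let (code, relocs, traps) = Dummy.compile_module().unwrap();
        println!("{} bytes, {} relocs, {} traps", code.len(), relocs.len(), traps.len());
    }
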


@@ -1,12 +1,9 @@
//! Support for compiling with Cranelift.
use crate::address_map::{
FunctionAddressMap, InstructionAddressMap, ModuleAddressMap, ValueLabelsRanges,
};
use crate::cache::{ModuleCacheData, ModuleCacheEntry};
use crate::address_map::{FunctionAddressMap, InstructionAddressMap};
use crate::cache::{ModuleCacheData, ModuleCacheDataTupleType, ModuleCacheEntry};
use crate::compilation::{
Compilation, CompileError, CompiledFunction, Relocation, RelocationTarget, Relocations,
TrapInformation, Traps,
Compilation, CompileError, CompiledFunction, Relocation, RelocationTarget, TrapInformation,
};
use crate::func_environ::{
get_func_name, get_imported_memory32_grow_name, get_imported_memory32_size_name,
@@ -14,12 +11,9 @@ use crate::func_environ::{
};
use crate::module::Module;
use crate::module_environ::FunctionBodyData;
use cranelift_codegen::binemit;
use cranelift_codegen::ir;
use cranelift_codegen::ir::ExternalName;
use cranelift_codegen::isa;
use cranelift_codegen::ir::{self, ExternalName};
use cranelift_codegen::print_errors::pretty_error;
use cranelift_codegen::Context;
use cranelift_codegen::{binemit, isa, Context};
use cranelift_entity::PrimaryMap;
use cranelift_wasm::{DefinedFuncIndex, FuncIndex, FuncTranslator, ModuleTranslationState};
use rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
@@ -181,17 +175,7 @@ impl crate::compilation::Compiler for Cranelift {
function_body_inputs: PrimaryMap<DefinedFuncIndex, FunctionBodyData<'data>>,
isa: &dyn isa::TargetIsa,
generate_debug_info: bool,
) -> Result<
(
Compilation,
Relocations,
ModuleAddressMap,
ValueLabelsRanges,
PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
Traps,
),
CompileError,
> {
) -> Result<ModuleCacheDataTupleType, CompileError> {
let cache_entry = ModuleCacheEntry::new(
module,
&function_body_inputs,
@@ -214,82 +198,75 @@ impl crate::compilation::Compiler for Cranelift {
.into_iter()
.collect::<Vec<(DefinedFuncIndex, &FunctionBodyData<'data>)>>()
.par_iter()
.map_init(
|| FuncTranslator::new(),
|func_translator, (i, input)| {
let func_index = module.func_index(*i);
let mut context = Context::new();
context.func.name = get_func_name(func_index);
context.func.signature =
module.signatures[module.functions[func_index]].clone();
if generate_debug_info {
context.func.collect_debug_info();
}
.map_init(FuncTranslator::new, |func_translator, (i, input)| {
let func_index = module.func_index(*i);
let mut context = Context::new();
context.func.name = get_func_name(func_index);
context.func.signature =
module.signatures[module.functions[func_index]].clone();
if generate_debug_info {
context.func.collect_debug_info();
}
func_translator.translate(
module_translation,
input.data,
input.module_offset,
&mut context.func,
&mut FuncEnvironment::new(isa.frontend_config(), module),
)?;
func_translator.translate(
module_translation,
input.data,
input.module_offset,
&mut context.func,
&mut FuncEnvironment::new(isa.frontend_config(), module),
)?;
let mut code_buf: Vec<u8> = Vec::new();
let mut unwind_info = Vec::new();
let mut reloc_sink = RelocSink::new(func_index);
let mut trap_sink = TrapSink::new();
let mut stackmap_sink = binemit::NullStackmapSink {};
context
.compile_and_emit(
isa,
&mut code_buf,
&mut reloc_sink,
&mut trap_sink,
&mut stackmap_sink,
)
.map_err(|error| {
let mut code_buf: Vec<u8> = Vec::new();
let mut unwind_info = Vec::new();
let mut reloc_sink = RelocSink::new(func_index);
let mut trap_sink = TrapSink::new();
let mut stackmap_sink = binemit::NullStackmapSink {};
context
.compile_and_emit(
isa,
&mut code_buf,
&mut reloc_sink,
&mut trap_sink,
&mut stackmap_sink,
)
.map_err(|error| {
CompileError::Codegen(pretty_error(&context.func, Some(isa), error))
})?;
context.emit_unwind_info(isa, &mut unwind_info);
let address_transform = if generate_debug_info {
let body_len = code_buf.len();
Some(get_function_address_map(&context, input, body_len, isa))
} else {
None
};
let ranges = if generate_debug_info {
let ranges =
context.build_value_labels_ranges(isa).map_err(|error| {
CompileError::Codegen(pretty_error(
&context.func,
Some(isa),
error,
))
})?;
Some(ranges)
} else {
None
};
context.emit_unwind_info(isa, &mut unwind_info);
let address_transform = if generate_debug_info {
let body_len = code_buf.len();
Some(get_function_address_map(&context, input, body_len, isa))
} else {
None
};
let ranges = if generate_debug_info {
let ranges =
context.build_value_labels_ranges(isa).map_err(|error| {
CompileError::Codegen(pretty_error(
&context.func,
Some(isa),
error,
))
})?;
Some(ranges)
} else {
None
};
Ok((
code_buf,
context.func.jt_offsets,
reloc_sink.func_relocs,
address_transform,
ranges,
context.func.stack_slots,
trap_sink.traps,
unwind_info,
))
},
)
Ok((
code_buf,
context.func.jt_offsets,
reloc_sink.func_relocs,
address_transform,
ranges,
context.func.stack_slots,
trap_sink.traps,
unwind_info,
))
})
.collect::<Result<Vec<_>, CompileError>>()?
.into_iter()
.for_each(
@@ -333,6 +310,6 @@ impl crate::compilation::Compiler for Cranelift {
}
};
Ok(data.to_tuple())
Ok(data.into_tuple())
}
}
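
Two small things in this file besides the reformatting: `map_init(|| FuncTranslator::new(), ...)` becomes `map_init(FuncTranslator::new, ...)` (clippy's `redundant_closure`, since the closure only forwarded to `new`), and the call site switches to the renamed `into_tuple`. A std-only sketch of the closure case, with a simplified `FuncTranslator` stand-in and `unwrap_or_else` in place of rayon's `map_init`:

    #[derive(Default)]
    struct FuncTranslator {
        scratch: Vec<u8>,
    }

    impl FuncTranslator {
        fn new() -> Self {
            Self::default()
        }
    }

    fn main() {
        let maybe: Option<FuncTranslator> = None;

        // Before (flagged): maybe.unwrap_or_else(|| FuncTranslator::new())
        // After: pass the function item directly.
        let translator = maybe.unwrap_or_else(FuncTranslator::new);
        println!("scratch capacity: {}", translator.scratch.capacity());
    }
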


@@ -450,9 +450,9 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
// allocated up front and never moved.
let (offset_guard_size, heap_style, readonly_base) = match self.module.memory_plans[index] {
MemoryPlan {
memory: _,
style: MemoryStyle::Dynamic,
offset_guard_size,
memory: _,
} => {
let heap_bound = func.create_global_value(ir::GlobalValueData::Load {
base: ptr,
@@ -469,9 +469,9 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m
)
}
MemoryPlan {
memory: _,
style: MemoryStyle::Static { bound },
offset_guard_size,
memory: _,
} => (
Uimm64::new(offset_guard_size),
ir::HeapStyle::Static {


@@ -1,5 +1,6 @@
//! Support for compiling with Lightbeam.
use crate::cache::ModuleCacheDataTupleType;
use crate::compilation::{Compilation, CompileError, Relocations, Traps};
use crate::func_environ::FuncEnvironment;
use crate::module::Module;
@@ -24,17 +25,7 @@ impl crate::compilation::Compiler for Lightbeam {
isa: &dyn isa::TargetIsa,
// TODO
generate_debug_info: bool,
) -> Result<
(
Compilation,
Relocations,
ModuleAddressMap,
ValueLabelsRanges,
PrimaryMap<DefinedFuncIndex, ir::StackSlots>,
Traps,
),
CompileError,
> {
) -> Result<ModuleCacheDataTupleType, CompileError> {
if generate_debug_info {
return Err(CompileError::DebugInfoNotSupported);
}


@@ -17,10 +17,7 @@ fn cast_to_u32(sz: usize) -> u32 {
}
#[cfg(target_pointer_width = "64")]
fn cast_to_u32(sz: usize) -> u32 {
match u32::try_from(sz) {
Ok(x) => x,
Err(_) => panic!("overflow in cast from usize to u32"),
}
u32::try_from(sz).expect("overflow in cast from usize to u32")
}
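
The hand-rolled `match` on `u32::try_from` was doing exactly what `expect` does, so it collapses to one line. For reference, the equivalent before/after in isolation:

    use std::convert::TryFrom;

    fn cast_to_u32(sz: usize) -> u32 {
        // Before:
        // match u32::try_from(sz) {
        //     Ok(x) => x,
        //     Err(_) => panic!("overflow in cast from usize to u32"),
        // }
        u32::try_from(sz).expect("overflow in cast from usize to u32")
    }

    fn main() {
        println!("{}", cast_to_u32(4096));
    }
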
/// Align an offset used in this module to a specific byte-width by rounding up