Use the more-asserts crate in more places.
This provides assert_le!, assert_lt!, and so on, which print the values of both operands when an assertion fails.
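For example (a hypothetical snippet, not part of this change), a failing assert_lt! reports the values of both operands, while a plain assert! only reports the stringified condition:

    // Sketch using the more-asserts crate; `usages` and `threshold` are made-up values.
    use more_asserts::assert_lt;

    fn main() {
        let usages: u32 = 3;
        let threshold: u32 = 5;
        // Both checks verify the same condition, but if it ever fails,
        // assert_lt! panics with a message that includes both values.
        assert!(usages < threshold);
        assert_lt!(usages, threshold);
    }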
crates/environ/src/cache/worker/tests.rs (18 changed lines)

@@ -1,6 +1,7 @@
 use super::*;
 use crate::cache::config::tests::test_prolog;
 use core::iter::repeat;
+use more_asserts::{assert_ge, assert_gt, assert_lt};
 use std::process;
 // load_config! comes from crate::cache(::config::tests);
 
@@ -149,7 +150,10 @@ fn test_on_get_recompress_with_mod_file() {
     let scenarios = [(4, false), (7, true), (2, false)];
 
     let mut usages = start_stats.usages;
-    assert!(usages < cache_config.optimized_compression_usage_counter_threshold());
+    assert_lt!(
+        usages,
+        cache_config.optimized_compression_usage_counter_threshold()
+    );
     let mut tested_higher_opt_compr_lvl = false;
     for (times_used, lower_compr_lvl) in &scenarios {
         for _ in 0..*times_used {
@@ -176,13 +180,19 @@ fn test_on_get_recompress_with_mod_file() {
         assert_eq!(decoded_data, mod_data.as_bytes());
 
         if *lower_compr_lvl {
-            assert!(usages >= cache_config.optimized_compression_usage_counter_threshold());
+            assert_ge!(
+                usages,
+                cache_config.optimized_compression_usage_counter_threshold()
+            );
             tested_higher_opt_compr_lvl = true;
             stats.compression_level -= 1;
             assert!(write_stats_file(&stats_file, &stats));
         }
     }
-    assert!(usages >= cache_config.optimized_compression_usage_counter_threshold());
+    assert_ge!(
+        usages,
+        cache_config.optimized_compression_usage_counter_threshold()
+    );
     assert!(tested_higher_opt_compr_lvl);
 }
 
@@ -418,7 +428,7 @@ fn test_on_update_cleanup_limits_trash_locks() {
             "past",
             &Duration::from_secs(secs_ago),
         );
-        assert!(secs_ago > 0);
+        assert_gt!(secs_ago, 0);
         secs_ago -= 1;
     }
 

@@ -59,7 +59,7 @@ impl binemit::RelocSink for RelocSink {
         } else if *name == get_imported_memory32_size_name() {
             RelocationTarget::ImportedMemory32Size
         } else if let ExternalName::User { namespace, index } = *name {
-            debug_assert!(namespace == 0);
+            debug_assert_eq!(namespace, 0);
             RelocationTarget::UserFunc(FuncIndex::from_u32(index))
         } else if let ExternalName::LibCall(libcall) = *name {
             RelocationTarget::LibCall(libcall)

@@ -13,6 +13,7 @@ use cranelift_wasm::{
     GlobalIndex, Memory, MemoryIndex, SignatureIndex, Table, TableIndex,
 };
 use indexmap::IndexMap;
+use more_asserts::assert_ge;
 
 /// A WebAssembly table initializer.
 #[derive(Clone, Debug, Hash)]
@@ -59,7 +60,7 @@ impl MemoryStyle {
         if maximum <= tunables.static_memory_bound {
             // A heap with a declared maximum can be immovable, so make
             // it static.
-            assert!(tunables.static_memory_bound >= memory.minimum);
+            assert_ge!(tunables.static_memory_bound, memory.minimum);
             return (
                 Self::Static {
                     bound: tunables.static_memory_bound,

@@ -9,6 +9,7 @@ use cranelift_wasm::{
     DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, FuncIndex, GlobalIndex, MemoryIndex,
     SignatureIndex, TableIndex,
 };
+use more_asserts::assert_lt;
 
 #[cfg(target_pointer_width = "32")]
 fn cast_to_u32(sz: usize) -> u32 {
@@ -365,7 +366,7 @@ impl VMOffsets {
 
     /// Return the offset to `VMSharedSignatureId` index `index`.
     pub fn vmctx_vmshared_signature_id(&self, index: SignatureIndex) -> u32 {
-        assert!(index.as_u32() < self.num_signature_ids);
+        assert_lt!(index.as_u32(), self.num_signature_ids);
         self.vmctx_signature_ids_begin()
             .checked_add(
                 index
@@ -378,7 +379,7 @@ impl VMOffsets {
 
     /// Return the offset to `VMFunctionImport` index `index`.
     pub fn vmctx_vmfunction_import(&self, index: FuncIndex) -> u32 {
-        assert!(index.as_u32() < self.num_imported_functions);
+        assert_lt!(index.as_u32(), self.num_imported_functions);
         self.vmctx_imported_functions_begin()
             .checked_add(
                 index
@@ -391,7 +392,7 @@ impl VMOffsets {
 
     /// Return the offset to `VMTableImport` index `index`.
     pub fn vmctx_vmtable_import(&self, index: TableIndex) -> u32 {
-        assert!(index.as_u32() < self.num_imported_tables);
+        assert_lt!(index.as_u32(), self.num_imported_tables);
         self.vmctx_imported_tables_begin()
             .checked_add(
                 index
@@ -404,7 +405,7 @@ impl VMOffsets {
 
     /// Return the offset to `VMMemoryImport` index `index`.
    pub fn vmctx_vmmemory_import(&self, index: MemoryIndex) -> u32 {
-        assert!(index.as_u32() < self.num_imported_memories);
+        assert_lt!(index.as_u32(), self.num_imported_memories);
         self.vmctx_imported_memories_begin()
             .checked_add(
                 index
@@ -417,7 +418,7 @@ impl VMOffsets {
 
     /// Return the offset to `VMGlobalImport` index `index`.
     pub fn vmctx_vmglobal_import(&self, index: GlobalIndex) -> u32 {
-        assert!(index.as_u32() < self.num_imported_globals);
+        assert_lt!(index.as_u32(), self.num_imported_globals);
         self.vmctx_imported_globals_begin()
             .checked_add(
                 index
@@ -430,7 +431,7 @@ impl VMOffsets {
 
     /// Return the offset to `VMTableDefinition` index `index`.
     pub fn vmctx_vmtable_definition(&self, index: DefinedTableIndex) -> u32 {
-        assert!(index.as_u32() < self.num_defined_tables);
+        assert_lt!(index.as_u32(), self.num_defined_tables);
         self.vmctx_tables_begin()
             .checked_add(
                 index
@@ -443,7 +444,7 @@ impl VMOffsets {
 
     /// Return the offset to `VMMemoryDefinition` index `index`.
     pub fn vmctx_vmmemory_definition(&self, index: DefinedMemoryIndex) -> u32 {
-        assert!(index.as_u32() < self.num_defined_memories);
+        assert_lt!(index.as_u32(), self.num_defined_memories);
         self.vmctx_memories_begin()
             .checked_add(
                 index
@@ -456,7 +457,7 @@ impl VMOffsets {
 
     /// Return the offset to the `VMGlobalDefinition` index `index`.
     pub fn vmctx_vmglobal_definition(&self, index: DefinedGlobalIndex) -> u32 {
-        assert!(index.as_u32() < self.num_defined_globals);
+        assert_lt!(index.as_u32(), self.num_defined_globals);
         self.vmctx_globals_begin()
             .checked_add(
                 index