Remove all global state from the caching system (#863)

* Remove all global state from the caching system

This commit is a continuation of an effort to remove usages of
`lazy_static!` and similar global state macros which can otherwise be
accommodated by passing objects around. Previously there was a global
cache system initialized per-process, but it was initialized in a bit of
a roundabout way and wasn't actually reachable from the `wasmtime` crate
itself. The changes here remove all global state, refactor many of the
internals of the cache system, and make configuration possible through
the `wasmtime` crate.

Specifically, some of the changes here are:

* The `lazy_static!` usage and many `static` items in the cache module
  have all been removed.
* The global `cache_config()`, `worker()`, and `init()` functions have
  all been removed. Instead a `CacheConfig` is a "root object" which
  internally owns its worker, and passing the `CacheConfig` around is
  required for cache usage (see the sketch after this list).
* The `wasmtime::Config` structure has grown options to load and parse
  cache configuration files at runtime. Currently only loading files is
  supported, although programmatic configuration APIs can likely be
  supported eventually as well (see the embedder sketch after this
  list).
* The `spin` crate is no longer used and the dependency has been removed.
* The internal `errors` field of `CacheConfig` has been removed; all
  relevant methods now return a `Result<()>` rather than storing errors
  internally.
* Tests have all been updated with the new interfaces and APIs.
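
As a rough sketch of the new crate-internal flow (mirroring the updated
tests in this change), a `CacheConfig` is parsed from a file, validation
returns a `Result`, and the worker is owned by the config itself rather
than by a global. The path below is a made-up example:

    use std::path::Path;
    use anyhow::Result;

    // Sketch only, assuming we are inside the cache crate where
    // `CacheConfig` and its crate-internal `worker()` accessor are visible.
    fn use_cache(config_file: &Path) -> Result<()> {
        // Parsing and validating now return a `Result` instead of
        // stashing error strings inside the config.
        let cache_config = CacheConfig::from_file(Some(config_file))?;
        if cache_config.enabled() {
            // The worker lives inside the config; there is no global
            // `worker()` function to call anymore.
            cache_config.worker().on_cache_update_async(config_file);
        }
        Ok(())
    }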
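
For embedders, the new `wasmtime::Config` option points the engine at a
cache configuration file. The method name used below is an assumption
based on the description above, not something shown in this diff:

    use wasmtime::Config;

    fn configure() -> anyhow::Result<Config> {
        let mut config = Config::new();
        // Assumed method name: load and parse a cache configuration file
        // at runtime (only file-based configuration is supported so far).
        config.cache_config_load("path/to/cache-config.toml")?;
        Ok(config)
    }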

Functionally no real change is intended here. Usage of the `wasmtime`
CLI, for example, should still enable the cache by default.

* Fix lightbeam compilation
Alex Crichton
2020-02-06 13:11:06 -06:00
committed by GitHub
parent 4ff8257b17
commit 70345aff31
24 changed files with 283 additions and 479 deletions

View File

@@ -1,20 +1,16 @@
//! Module for configuring the cache system.
use super::worker;
use super::Worker;
use anyhow::{anyhow, bail, Context, Result};
use directories::ProjectDirs;
use lazy_static::lazy_static;
use log::{debug, error, trace, warn};
use log::{trace, warn};
use serde::{
de::{self, Deserializer},
Deserialize,
};
use spin::Once;
use std::fmt::Debug;
use std::fs;
use std::mem;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;
// wrapped, so we have named section in config,
@@ -25,12 +21,10 @@ struct Config {
cache: CacheConfig,
}
/// Global configuration for how the cache is managed
#[derive(Deserialize, Debug, Clone)]
#[serde(deny_unknown_fields)]
pub struct CacheConfig {
#[serde(skip)]
errors: Vec<String>,
enabled: bool,
directory: Option<PathBuf>,
#[serde(
@@ -91,49 +85,9 @@ pub struct CacheConfig {
deserialize_with = "deserialize_percent"
)]
files_total_size_limit_percent_if_deleting: Option<u8>,
}
// Private static, so only internal function can access it.
static CONFIG: Once<CacheConfig> = Once::new();
static INIT_CALLED: AtomicBool = AtomicBool::new(false);
/// Returns cache configuration.
///
/// If system has not been initialized, it disables it.
/// You mustn't call init() after it.
pub fn cache_config() -> &'static CacheConfig {
CONFIG.call_once(CacheConfig::new_cache_disabled)
}
/// Initializes the cache system. Should be called exactly once,
/// and before using the cache system. Otherwise it can panic.
/// Returns list of errors. If empty, initialization succeeded.
pub fn init<P: AsRef<Path> + Debug>(
enabled: bool,
config_file: Option<P>,
init_file_per_thread_logger: Option<&'static str>,
) -> &'static Vec<String> {
INIT_CALLED
.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
.expect("Cache system init must be called at most once");
assert!(
CONFIG.r#try().is_none(),
"Cache system init must be called before using the system."
);
let conf_file_str = format!("{:?}", config_file);
let conf = CONFIG.call_once(|| CacheConfig::from_file(enabled, config_file));
if conf.errors.is_empty() {
if conf.enabled() {
worker::init(init_file_per_thread_logger);
}
debug!("Cache init(\"{}\"): {:#?}", conf_file_str, conf)
} else {
error!(
"Cache init(\"{}\"): errors: {:#?}",
conf_file_str, conf.errors,
)
}
&conf.errors
#[serde(skip)]
worker: Option<Worker>,
}
/// Creates a new configuration file at specified path, or default path if None is passed.
@@ -141,13 +95,10 @@ pub fn init<P: AsRef<Path> + Debug>(
pub fn create_new_config<P: AsRef<Path> + Debug>(config_file: Option<P>) -> Result<PathBuf> {
trace!("Creating new config file, path: {:?}", config_file);
let config_file = config_file
.as_ref()
.map_or_else(
|| DEFAULT_CONFIG_PATH.as_ref().map(|p| p.as_ref()),
|p| Ok(p.as_ref()),
)
.map_err(|s| anyhow!("{}", s))?;
let config_file = match config_file {
Some(path) => path.as_ref().to_path_buf(),
None => default_config_path()?,
};
if config_file.exists() {
bail!(
@@ -188,14 +139,6 @@ enabled = true
// permitted levels from: https://docs.rs/zstd/0.4.28+zstd.1.4.3/zstd/stream/write/struct.Encoder.html
const ZSTD_COMPRESSION_LEVELS: std::ops::RangeInclusive<i32> = 0..=21;
lazy_static! {
static ref PROJECT_DIRS: Option<ProjectDirs> =
ProjectDirs::from("", "BytecodeAlliance", "wasmtime");
static ref DEFAULT_CONFIG_PATH: Result<PathBuf, String> = PROJECT_DIRS
.as_ref()
.map(|proj_dirs| proj_dirs.config_dir().join("config.toml"))
.ok_or_else(|| "Config file not specified and failed to get the default".to_string());
}
// Default settings, you're welcome to tune them!
// TODO: what do we want to warn users about?
@@ -232,6 +175,17 @@ const DEFAULT_FILE_COUNT_LIMIT_PERCENT_IF_DELETING: u8 = 70;
// if changed, update cli-cache.md
const DEFAULT_FILES_TOTAL_SIZE_LIMIT_PERCENT_IF_DELETING: u8 = 70;
fn project_dirs() -> Option<ProjectDirs> {
ProjectDirs::from("", "BytecodeAlliance", "wasmtime")
}
fn default_config_path() -> Result<PathBuf> {
match project_dirs() {
Some(dirs) => Ok(dirs.config_dir().join("config.toml")),
None => bail!("config file not specified and failed to get the default"),
}
}
// Deserializers of our custom formats
// can be replaced with const generics later
macro_rules! generate_deserializer {
@@ -353,9 +307,9 @@ impl CacheConfig {
.expect(CACHE_IMPROPER_CONFIG_ERROR_MSG)
}
/// Creates a new set of configuration which represents a disabled cache
pub fn new_cache_disabled() -> Self {
Self {
errors: Vec::new(),
enabled: false,
directory: None,
worker_event_queue_size: None,
@@ -369,6 +323,7 @@ impl CacheConfig {
files_total_size_soft_limit: None,
file_count_limit_percent_if_deleting: None,
files_total_size_limit_percent_if_deleting: None,
worker: None,
}
}
@@ -378,78 +333,70 @@ impl CacheConfig {
conf
}
fn new_cache_with_errors(errors: Vec<String>) -> Self {
let mut conf = Self::new_cache_disabled();
conf.errors = errors;
conf
}
pub fn from_file<P: AsRef<Path>>(enabled: bool, config_file: Option<P>) -> Self {
if !enabled {
return Self::new_cache_disabled();
}
let mut config = match Self::load_and_parse_file(config_file) {
Ok(data) => data,
Err(err) => return Self::new_cache_with_errors(vec![err]),
};
/// Parses cache configuration from the file specified
pub fn from_file(config_file: Option<&Path>) -> Result<Self> {
let mut config = Self::load_and_parse_file(config_file)?;
// validate values and fill in defaults
config.validate_directory_or_default();
config.validate_directory_or_default()?;
config.validate_worker_event_queue_size_or_default();
config.validate_baseline_compression_level_or_default();
config.validate_optimized_compression_level_or_default();
config.validate_baseline_compression_level_or_default()?;
config.validate_optimized_compression_level_or_default()?;
config.validate_optimized_compression_usage_counter_threshold_or_default();
config.validate_cleanup_interval_or_default();
config.validate_optimizing_compression_task_timeout_or_default();
config.validate_allowed_clock_drift_for_files_from_future_or_default();
config.validate_file_count_soft_limit_or_default();
config.validate_files_total_size_soft_limit_or_default();
config.validate_file_count_limit_percent_if_deleting_or_default();
config.validate_files_total_size_limit_percent_if_deleting_or_default();
config.validate_file_count_limit_percent_if_deleting_or_default()?;
config.validate_files_total_size_limit_percent_if_deleting_or_default()?;
config.spawn_worker();
config.disable_if_any_error();
config
Ok(config)
}
fn load_and_parse_file<P: AsRef<Path>>(config_file: Option<P>) -> Result<Self, String> {
fn spawn_worker(&mut self) {
if self.enabled {
self.worker = Some(Worker::start_new(self, None));
}
}
pub(super) fn worker(&self) -> &Worker {
assert!(self.enabled);
self.worker.as_ref().unwrap()
}
fn load_and_parse_file(config_file: Option<&Path>) -> Result<Self> {
// get config file path
let (config_file, user_custom_file) = config_file.as_ref().map_or_else(
|| DEFAULT_CONFIG_PATH.as_ref().map(|p| (p.as_ref(), false)),
|p| Ok((p.as_ref(), true)),
)?;
let (config_file, user_custom_file) = match config_file {
Some(path) => (path.to_path_buf(), true),
None => (default_config_path()?, false),
};
// read config, or use default one
let entity_exists = config_file.exists();
match (entity_exists, user_custom_file) {
(false, false) => Ok(Self::new_cache_enabled_template()),
_ => match fs::read(&config_file) {
Ok(bytes) => match toml::from_slice::<Config>(&bytes[..]) {
Ok(config) => Ok(config.cache),
Err(err) => Err(format!(
"Failed to parse config file, path: {}, error: {}",
config_file.display(),
err
)),
},
Err(err) => Err(format!(
"Failed to read config file, path: {}, error: {}",
config_file.display(),
err
)),
},
_ => {
let bytes = fs::read(&config_file).context(format!(
"failed to read config file: {}",
config_file.display()
))?;
let config = toml::from_slice::<Config>(&bytes[..]).context(format!(
"failed to parse config file: {}",
config_file.display()
))?;
Ok(config.cache)
}
}
}
fn validate_directory_or_default(&mut self) {
fn validate_directory_or_default(&mut self) -> Result<()> {
if self.directory.is_none() {
match &*PROJECT_DIRS {
match project_dirs() {
Some(proj_dirs) => self.directory = Some(proj_dirs.cache_dir().to_path_buf()),
None => {
self.errors.push(
"Cache directory not specified and failed to get the default".to_string(),
);
return;
bail!("Cache directory not specified and failed to get the default");
}
}
}
@@ -461,35 +408,22 @@ impl CacheConfig {
let cache_dir = self.directory.as_ref().unwrap();
if !cache_dir.is_absolute() {
self.errors.push(format!(
bail!(
"Cache directory path has to be absolute, path: {}",
cache_dir.display(),
));
return;
);
}
match fs::create_dir_all(cache_dir) {
Ok(()) => (),
Err(err) => {
self.errors.push(format!(
"Failed to create the cache directory, path: {}, error: {}",
cache_dir.display(),
err
));
return;
}
};
match fs::canonicalize(cache_dir) {
Ok(p) => self.directory = Some(p),
Err(err) => {
self.errors.push(format!(
"Failed to canonicalize the cache directory, path: {}, error: {}",
cache_dir.display(),
err
));
}
}
fs::create_dir_all(cache_dir).context(format!(
"failed to create cache directory: {}",
cache_dir.display()
))?;
let canonical = fs::canonicalize(cache_dir).context(format!(
"failed to canonicalize cache directory: {}",
cache_dir.display()
))?;
self.directory = Some(canonical);
Ok(())
}
fn validate_worker_event_queue_size_or_default(&mut self) {
@@ -502,22 +436,23 @@ impl CacheConfig {
}
}
fn validate_baseline_compression_level_or_default(&mut self) {
fn validate_baseline_compression_level_or_default(&mut self) -> Result<()> {
if self.baseline_compression_level.is_none() {
self.baseline_compression_level = Some(DEFAULT_BASELINE_COMPRESSION_LEVEL);
}
if !ZSTD_COMPRESSION_LEVELS.contains(&self.baseline_compression_level.unwrap()) {
self.errors.push(format!(
bail!(
"Invalid baseline compression level: {} not in {:#?}",
self.baseline_compression_level.unwrap(),
ZSTD_COMPRESSION_LEVELS
));
);
}
Ok(())
}
// assumption: baseline compression level has been verified
fn validate_optimized_compression_level_or_default(&mut self) {
fn validate_optimized_compression_level_or_default(&mut self) -> Result<()> {
if self.optimized_compression_level.is_none() {
self.optimized_compression_level = Some(DEFAULT_OPTIMIZED_COMPRESSION_LEVEL);
}
@@ -526,18 +461,21 @@ impl CacheConfig {
let base_lvl = self.baseline_compression_level.unwrap();
if !ZSTD_COMPRESSION_LEVELS.contains(&opt_lvl) {
self.errors.push(format!(
bail!(
"Invalid optimized compression level: {} not in {:#?}",
opt_lvl, ZSTD_COMPRESSION_LEVELS
));
opt_lvl,
ZSTD_COMPRESSION_LEVELS
);
}
if opt_lvl < base_lvl {
self.errors.push(format!(
bail!(
"Invalid optimized compression level is lower than baseline: {} < {}",
opt_lvl, base_lvl
));
opt_lvl,
base_lvl
);
}
Ok(())
}
fn validate_optimized_compression_usage_counter_threshold_or_default(&mut self) {
@@ -579,7 +517,7 @@ impl CacheConfig {
}
}
fn validate_file_count_limit_percent_if_deleting_or_default(&mut self) {
fn validate_file_count_limit_percent_if_deleting_or_default(&mut self) -> Result<()> {
if self.file_count_limit_percent_if_deleting.is_none() {
self.file_count_limit_percent_if_deleting =
Some(DEFAULT_FILE_COUNT_LIMIT_PERCENT_IF_DELETING);
@@ -587,14 +525,15 @@ impl CacheConfig {
let percent = self.file_count_limit_percent_if_deleting.unwrap();
if percent > 100 {
self.errors.push(format!(
bail!(
"Invalid files count limit percent if deleting: {} not in range 0-100%",
percent
));
);
}
Ok(())
}
fn validate_files_total_size_limit_percent_if_deleting_or_default(&mut self) {
fn validate_files_total_size_limit_percent_if_deleting_or_default(&mut self) -> Result<()> {
if self.files_total_size_limit_percent_if_deleting.is_none() {
self.files_total_size_limit_percent_if_deleting =
Some(DEFAULT_FILES_TOTAL_SIZE_LIMIT_PERCENT_IF_DELETING);
@@ -602,19 +541,12 @@ impl CacheConfig {
let percent = self.files_total_size_limit_percent_if_deleting.unwrap();
if percent > 100 {
self.errors.push(format!(
bail!(
"Invalid files total size limit percent if deleting: {} not in range 0-100%",
percent
));
}
}
fn disable_if_any_error(&mut self) {
if !self.errors.is_empty() {
let mut conf = Self::new_cache_disabled();
mem::swap(self, &mut conf);
mem::swap(&mut self.errors, &mut conf.errors);
);
}
Ok(())
}
}

View File

@@ -24,7 +24,19 @@ macro_rules! load_config {
cache_dir = toml::to_string_pretty(&format!("{}", $cache_dir.display())).unwrap()
);
fs::write(config_path, content).expect("Failed to write test config file");
CacheConfig::from_file(true, Some(config_path))
CacheConfig::from_file(Some(config_path)).unwrap()
}};
}
macro_rules! bad_config {
($config_path:ident, $content_fmt:expr, $cache_dir:ident) => {{
let config_path = &$config_path;
let content = format!(
$content_fmt,
cache_dir = toml::to_string_pretty(&format!("{}", $cache_dir.display())).unwrap()
);
fs::write(config_path, content).expect("Failed to write test config file");
assert!(CacheConfig::from_file(Some(config_path)).is_err());
}};
}
@@ -33,25 +45,17 @@ macro_rules! load_config {
fn test_disabled() {
let dir = tempfile::tempdir().expect("Can't create temporary directory");
let config_path = dir.path().join("cache-config.toml");
let config_content = "[cache]\n\
enabled = true\n";
fs::write(&config_path, config_content).expect("Failed to write test config file");
let conf = CacheConfig::from_file(false, Some(&config_path));
assert!(!conf.enabled());
assert!(conf.errors.is_empty());
let config_content = "[cache]\n\
enabled = false\n";
fs::write(&config_path, config_content).expect("Failed to write test config file");
let conf = CacheConfig::from_file(true, Some(&config_path));
let conf = CacheConfig::from_file(Some(&config_path)).unwrap();
assert!(!conf.enabled());
assert!(conf.errors.is_empty());
}
#[test]
fn test_unrecognized_settings() {
let (_td, cd, cp) = test_prolog();
let conf = load_config!(
bad_config!(
cp,
"unrecognized-setting = 42\n\
[cache]\n\
@@ -59,10 +63,8 @@ fn test_unrecognized_settings() {
directory = {cache_dir}",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -70,8 +72,6 @@ fn test_unrecognized_settings() {
unrecognized-setting = 42",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
}
#[test]
@@ -119,9 +119,7 @@ fn test_all_settings() {
check_conf(&conf, &cd);
fn check_conf(conf: &CacheConfig, cd: &PathBuf) {
eprintln!("errors: {:#?}", conf.errors);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(
conf.directory(),
&fs::canonicalize(cd).expect("canonicalize failed")
@@ -159,11 +157,10 @@ fn test_compression_level_settings() {
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(conf.baseline_compression_level(), 1);
assert_eq!(conf.optimized_compression_level(), 21);
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -172,10 +169,8 @@ fn test_compression_level_settings() {
optimized-compression-level = 21",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -184,8 +179,6 @@ fn test_compression_level_settings() {
optimized-compression-level = 10",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
}
#[test]
@@ -202,7 +195,6 @@ fn test_si_prefix_settings() {
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(conf.worker_event_queue_size(), 42);
assert_eq!(conf.optimized_compression_usage_counter_threshold(), 4_000);
assert_eq!(conf.file_count_soft_limit(), 3_000_000);
@@ -212,14 +204,13 @@ fn test_si_prefix_settings() {
"[cache]\n\
enabled = true\n\
directory = {cache_dir}\n\
worker-event-queue-size = '2G'\n\
worker-event-queue-size = '2K'\n\
optimized-compression-usage-counter-threshold = '4444T'\n\
file-count-soft-limit = '1P'",
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(conf.worker_event_queue_size(), 2_000_000_000);
assert_eq!(conf.worker_event_queue_size(), 2_000);
assert_eq!(
conf.optimized_compression_usage_counter_threshold(),
4_444_000_000_000_000
@@ -227,7 +218,7 @@ fn test_si_prefix_settings() {
assert_eq!(conf.file_count_soft_limit(), 1_000_000_000_000_000);
// different errors
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -235,10 +226,8 @@ fn test_si_prefix_settings() {
worker-event-queue-size = '2g'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -246,10 +235,8 @@ fn test_si_prefix_settings() {
file-count-soft-limit = 1",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -257,10 +244,8 @@ fn test_si_prefix_settings() {
file-count-soft-limit = '-31337'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -268,8 +253,6 @@ fn test_si_prefix_settings() {
file-count-soft-limit = '3.14M'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
}
#[test]
@@ -284,7 +267,6 @@ fn test_disk_space_settings() {
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(conf.files_total_size_soft_limit(), 76);
let conf = load_config!(
@@ -296,7 +278,6 @@ fn test_disk_space_settings() {
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(conf.files_total_size_soft_limit(), 42 * (1u64 << 20));
let conf = load_config!(
@@ -308,7 +289,6 @@ fn test_disk_space_settings() {
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(conf.files_total_size_soft_limit(), 2 * (1u64 << 30));
let conf = load_config!(
@@ -320,7 +300,6 @@ fn test_disk_space_settings() {
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(conf.files_total_size_soft_limit(), 31337 * (1u64 << 40));
let conf = load_config!(
@@ -332,7 +311,6 @@ fn test_disk_space_settings() {
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(conf.files_total_size_soft_limit(), 7 * (1u64 << 50));
let conf = load_config!(
@@ -344,11 +322,10 @@ fn test_disk_space_settings() {
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(conf.files_total_size_soft_limit(), 7_000_000);
// different errors
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -356,10 +333,8 @@ fn test_disk_space_settings() {
files-total-size-soft-limit = '7 mi'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -367,10 +342,8 @@ fn test_disk_space_settings() {
files-total-size-soft-limit = 1",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -378,10 +351,8 @@ fn test_disk_space_settings() {
files-total-size-soft-limit = '-31337'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -389,8 +360,6 @@ fn test_disk_space_settings() {
files-total-size-soft-limit = '3.14Ki'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
}
#[test]
@@ -407,7 +376,6 @@ fn test_duration_settings() {
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(conf.cleanup_interval(), Duration::from_secs(100));
assert_eq!(
conf.optimizing_compression_task_timeout(),
@@ -428,7 +396,6 @@ fn test_duration_settings() {
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(
conf.cleanup_interval(),
Duration::from_secs(2 * 24 * 60 * 60)
@@ -439,7 +406,7 @@ fn test_duration_settings() {
);
// different errors
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -447,10 +414,8 @@ fn test_duration_settings() {
optimizing-compression-task-timeout = '333'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -458,10 +423,8 @@ fn test_duration_settings() {
optimizing-compression-task-timeout = 333",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -469,10 +432,8 @@ fn test_duration_settings() {
optimizing-compression-task-timeout = '10 M'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -480,10 +441,8 @@ fn test_duration_settings() {
optimizing-compression-task-timeout = '10 min'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -491,10 +450,8 @@ fn test_duration_settings() {
optimizing-compression-task-timeout = '-10s'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -502,8 +459,6 @@ fn test_duration_settings() {
optimizing-compression-task-timeout = '1.5m'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
}
#[test]
@@ -519,12 +474,11 @@ fn test_percent_settings() {
cd
);
assert!(conf.enabled());
assert!(conf.errors.is_empty());
assert_eq!(conf.file_count_limit_percent_if_deleting(), 62);
assert_eq!(conf.files_total_size_limit_percent_if_deleting(), 23);
// different errors
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -532,10 +486,8 @@ fn test_percent_settings() {
files-total-size-limit-percent-if-deleting = '23'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -543,10 +495,8 @@ fn test_percent_settings() {
files-total-size-limit-percent-if-deleting = '22.5%'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -554,10 +504,8 @@ fn test_percent_settings() {
files-total-size-limit-percent-if-deleting = '0.5'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -565,10 +513,8 @@ fn test_percent_settings() {
files-total-size-limit-percent-if-deleting = '-1%'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
let conf = load_config!(
bad_config!(
cp,
"[cache]\n\
enabled = true\n\
@@ -576,6 +522,4 @@ fn test_percent_settings() {
files-total-size-limit-percent-if-deleting = '101%'",
cd
);
assert!(!conf.enabled());
assert!(!conf.errors.is_empty());
}

View File

@@ -36,11 +36,9 @@ fn test_cache_init() {
);
fs::write(&config_path, config_content).expect("Failed to write test config file");
let errors = init(true, Some(&config_path), None);
assert!(errors.is_empty());
let cache_config = CacheConfig::from_file(Some(&config_path)).unwrap();
// test if we can use config
let cache_config = cache_config();
assert!(cache_config.enabled());
// assumption: config init creates cache directory and returns canonicalized path
assert_eq!(
@@ -53,8 +51,7 @@ fn test_cache_init() {
);
// test if we can use worker
let worker = worker();
worker.on_cache_update_async(config_path);
cache_config.worker().on_cache_update_async(config_path);
}
#[test]
@@ -69,7 +66,6 @@ fn test_write_read_cache() {
cache_dir
);
assert!(cache_config.enabled());
let worker = Worker::start_new(&cache_config, None);
// assumption: config load creates cache directory and returns canonicalized path
assert_eq!(
@@ -102,7 +98,6 @@ fn test_write_read_cache() {
compiler1,
false,
&cache_config,
&worker,
));
assert!(entry1.0.is_some());
assert!(entry1.get_data().is_none());
@@ -117,7 +112,6 @@ fn test_write_read_cache() {
compiler1,
false,
&cache_config,
&worker,
));
let data2 = new_module_cache_data(&mut rng);
entry2.update_data(&data2);
@@ -131,7 +125,6 @@ fn test_write_read_cache() {
compiler1,
false,
&cache_config,
&worker,
));
let data3 = new_module_cache_data(&mut rng);
entry3.update_data(&data3);
@@ -146,7 +139,6 @@ fn test_write_read_cache() {
compiler1,
false,
&cache_config,
&worker,
));
let data4 = new_module_cache_data(&mut rng);
entry4.update_data(&data4);
@@ -162,7 +154,6 @@ fn test_write_read_cache() {
compiler2,
false,
&cache_config,
&worker,
));
let data5 = new_module_cache_data(&mut rng);
entry5.update_data(&data5);

View File

@@ -5,16 +5,15 @@
//! but we guarantee eventual consistency and fault tolerancy.
//! Background tasks can be CPU intensive, but the worker thread has low priority.
use super::{cache_config, fs_write_atomic, CacheConfig};
use super::{fs_write_atomic, CacheConfig};
use log::{debug, info, trace, warn};
use serde::{Deserialize, Serialize};
use spin::Once;
use std::cmp;
use std::collections::HashMap;
use std::ffi::OsStr;
use std::fmt;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::atomic::{self, AtomicBool};
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
#[cfg(test)]
use std::sync::{Arc, Condvar, Mutex};
@@ -25,6 +24,7 @@ use std::time::SystemTime;
#[cfg(test)]
use tests::system_time_stub::SystemTimeStub as SystemTime;
#[derive(Clone)]
pub(super) struct Worker {
sender: SyncSender<CacheEvent>,
#[cfg(test)]
@@ -46,29 +46,6 @@ struct WorkerStats {
handled: u32,
}
static WORKER: Once<Worker> = Once::new();
static INIT_CALLED: AtomicBool = AtomicBool::new(false);
pub(super) fn worker() -> &'static Worker {
WORKER
.r#try()
.expect("Cache worker must be initialized before usage")
}
pub(super) fn init(init_file_per_thread_logger: Option<&'static str>) {
INIT_CALLED
.compare_exchange(
false,
true,
atomic::Ordering::SeqCst,
atomic::Ordering::SeqCst,
)
.expect("Cache worker init must be called at most once");
let worker = Worker::start_new(cache_config(), init_file_per_thread_logger);
WORKER.call_once(|| worker);
}
#[derive(Debug, Clone)]
enum CacheEvent {
OnCacheGet(PathBuf),
@@ -168,6 +145,12 @@ impl Worker {
}
}
impl fmt::Debug for Worker {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Worker").finish()
}
}
#[derive(Serialize, Deserialize)]
struct ModuleCacheStatistics {
pub usages: u64,
@@ -210,9 +193,6 @@ macro_rules! unwrap_or_warn {
impl WorkerThread {
fn run(self, init_file_per_thread_logger: Option<&'static str>) {
#[cfg(not(test))] // We want to test the worker without relying on init() being called
assert!(INIT_CALLED.load(atomic::Ordering::SeqCst));
if let Some(prefix) = init_file_per_thread_logger {
file_per_thread_logger::initialize(prefix);
}