Fix pooling tests on high-cpu-count systems (#3141)

This commit fixes an issue where `cargo test` was failing fairly
reliably on an 80-thread system: many of the pooling tests would fail
in `mmap` when reserving address space for the linear memories of the
pooling allocator. Each test wants to reserve about 6TB of address
space, and letting ~80 tests do that at once asks for far more than the
kernel can hand out (roughly 480TB requested against the ~128TiB of
user address space on a typical x86-64 Linux kernel), so `mmap` starts
returning errors.

The implementation here is a relatively simple semaphore lookalike,
built from a `Mutex<u32>` and a `Condvar`, which allows only a fixed
number of pooling tests to run concurrently.
Author:    Alex Crichton
Committed: 2021-08-04 11:55:52 -05:00 (via GitHub)
Commit:    91d24b8448 (parent 8e06b78177)
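One caller-side detail worth noting before the diff: `lock_pooling()` returns an opaque `impl Drop` guard, and the test helper keeps its concurrency slot for the whole run by binding that guard to a named variable (`_pooling_lock`). The sketch below is a hypothetical caller, not part of the commit; it shows why the name matters, since a bare `let _ = ...` pattern drops the value immediately and would release the slot before the test even starts.

// Hypothetical caller sketch, assuming `lock_pooling()` as defined in the diff below.
fn run_one_pooling_test() {
    // Held until the end of the function; the slot is released when the
    // guard is dropped.
    let _pooling_lock = lock_pooling();

    // let _ = lock_pooling();  // would drop the guard (and free the slot)
    //                          // immediately, defeating the cap

    // ... configure the pooling allocator and run the test body here ...
}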


@@ -1,4 +1,5 @@
 use std::path::Path;
+use std::sync::{Condvar, Mutex};
 use wasmtime::{
     Config, Engine, InstanceAllocationStrategy, InstanceLimits, ModuleLimits,
     PoolingAllocationStrategy, Store, Strategy,
@@ -47,7 +48,7 @@ fn run_wast(wast: &str, strategy: Strategy, pooling: bool) -> anyhow::Result<()>
         cfg.static_memory_maximum_size(0);
     }
 
-    if pooling {
+    let _pooling_lock = if pooling {
         // The limits here are crafted such that the wast tests should pass.
         // However, these limits may become insufficient in the future as the wast tests change.
         // If a wast test fails because of a limit being "exceeded" or if memory/table
@@ -69,7 +70,10 @@ fn run_wast(wast: &str, strategy: Strategy, pooling: bool) -> anyhow::Result<()>
                 ..Default::default()
             },
         });
-    }
+        Some(lock_pooling())
+    } else {
+        None
+    };
 
     let store = Store::new(&Engine::new(&cfg)?, ());
     let mut wast_context = WastContext::new(store);
@@ -77,3 +81,49 @@ fn run_wast(wast: &str, strategy: Strategy, pooling: bool) -> anyhow::Result<()>
     wast_context.run_file(wast)?;
     Ok(())
 }
+
+// The pooling tests make about 6TB of address space reservation which means
+// that we shouldn't let too many of them run concurrently at once. On
+// high-cpu-count systems (e.g. 80 threads) this leads to mmap failures because
+// presumably too much of the address space has been reserved with our limits
+// specified above. By keeping the number of active pooling-related tests to a
+// specified maximum we can put a cap on the virtual address space reservations
+// made.
+fn lock_pooling() -> impl Drop {
+    const MAX_CONCURRENT_POOLING: u32 = 8;
+
+    lazy_static::lazy_static! {
+        static ref ACTIVE: MyState = MyState::default();
+    }
+
+    #[derive(Default)]
+    struct MyState {
+        lock: Mutex<u32>,
+        waiters: Condvar,
+    }
+
+    impl MyState {
+        fn lock(&self) -> impl Drop + '_ {
+            let state = self.lock.lock().unwrap();
+            let mut state = self
+                .waiters
+                .wait_while(state, |cnt| *cnt >= MAX_CONCURRENT_POOLING)
+                .unwrap();
+            *state += 1;
+            LockGuard { state: self }
+        }
+    }
+
+    struct LockGuard<'a> {
+        state: &'a MyState,
+    }
+
+    impl Drop for LockGuard<'_> {
+        fn drop(&mut self) {
+            *self.state.lock.lock().unwrap() -= 1;
+            self.state.waiters.notify_one();
+        }
+    }
+
+    ACTIVE.lock()
+}
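For anyone who wants to experiment with the gating pattern outside the test suite, here is a minimal, self-contained distillation of the same `Mutex` + `Condvar` scheme. The names (`TestGate`, `GatePermit`) and the demo in `main` are illustrative only, not part of the commit:

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// At most `max` permits may be outstanding at any one time.
struct TestGate {
    max: u32,
    count: Mutex<u32>,
    waiters: Condvar,
}

// RAII permit: the slot is returned in `Drop`, like the commit's `LockGuard`.
struct GatePermit<'a> {
    gate: &'a TestGate,
}

impl TestGate {
    fn new(max: u32) -> Self {
        TestGate {
            max,
            count: Mutex::new(0),
            waiters: Condvar::new(),
        }
    }

    fn acquire(&self) -> GatePermit<'_> {
        let count = self.count.lock().unwrap();
        // `wait_while` re-locks and re-checks the predicate on every wakeup,
        // so spurious wakeups and racing acquirers are handled for us.
        let mut count = self
            .waiters
            .wait_while(count, |c| *c >= self.max)
            .unwrap();
        *count += 1;
        GatePermit { gate: self }
    }
}

impl Drop for GatePermit<'_> {
    fn drop(&mut self) {
        // Give the slot back and wake one parked acquirer.
        *self.gate.count.lock().unwrap() -= 1;
        self.gate.waiters.notify_one();
    }
}

fn main() {
    let gate = Arc::new(TestGate::new(2));
    let handles: Vec<_> = (0..8)
        .map(|i| {
            let gate = Arc::clone(&gate);
            thread::spawn(move || {
                let _permit = gate.acquire();
                // No more than two threads are ever in this region at once.
                println!("worker {} holding a permit", i);
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}

The standard library does not ship a counting semaphore, so a `Mutex<u32>` paired with a `Condvar` is the usual way to build one; that is presumably why the commit rolls its own small guard type rather than pulling in a dependency for what amounts to a dozen lines of test-only code.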