From eb259e8aba8ac1e116e37cc7c20d327a1500c2ee Mon Sep 17 00:00:00 2001
From: Jamey Sharp
Date: Tue, 11 Oct 2022 08:23:02 -0700
Subject: [PATCH] Some small perf improvements (#95)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Do conflict-set hash lookups once, not twice

This makes the small wasmtime bz2 benchmark 1% faster, per Hyperfine
and Sightglass. The effect disappears into the noise on larger
benchmarks. (A small standalone illustration of the single-lookup
idiom follows the diffstat below.)

* Inline PosWithPrio::key

When compiling the pulldown-cmark benchmark from Sightglass, this is
the single most frequently called function: it's invoked 2.5 million
times. Inlining it reduces instructions retired by 1.5% on that
benchmark, according to `valgrind --tool=callgrind`.

This patch is "1.01 ± 0.01 times faster" according to Hyperfine for the
bz2, pulldown-cmark, and spidermonkey benchmarks from Sightglass.
Sightglass, in turn, agrees that all three benchmarks are 1.01x faster
by instructions retired, and the first two are around 1.01x faster by
CPU cycles as well.

* Inline and simplify AdaptiveMap::expand

Previously, `get_or_insert` would iterate over the keys to find one
that matched; then, if none did, iterate over the values to check
whether any were 0; and then iterate again to remove all zero values
and compact the map.

This commit instead focuses on picking an index to use: preferably one
where the key already exists; if the key is not in the map, then an
unused index; and if there is no unused index, then an index whose
value is zero. As a result, this iterates over the two arrays at most
once each, and both iterations can stop early.

The downside is that keys whose value is zero are not removed as
aggressively. It might be worth pruning such keys in `IndexSet::set`.

Also:

- `#[inline]` both implementations of `Iterator::next`
- Replace `set_bits` with direct use of the `SetBitsIter` constructor

These changes together reduce instructions retired when compiling the
pulldown-cmark benchmark by 0.9%.
---
 src/indexset.rs            | 96 +++++++++++++-------------------------
 src/ion/data_structures.rs |  1 +
 src/ion/process.rs         |  3 +-
 3 files changed, 34 insertions(+), 66 deletions(-)
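
A note on the process.rs hunk: `HashSet::insert` returns `true` only
when the value was not already present, so the old `contains` check
followed by `insert` collapses into a single hash lookup. Below is a
minimal, self-contained sketch of that idiom; it is not code from this
crate, and the names (`dedup_push`, `seen`, `out`) are purely
illustrative:

    use std::collections::HashSet;

    /// Push `item` onto `out` the first time it is seen, using a single
    /// hash lookup instead of `contains` followed by `insert`.
    fn dedup_push(seen: &mut HashSet<u32>, out: &mut Vec<u32>, item: u32) {
        // `insert` returns true iff `item` was not already in the set.
        if seen.insert(item) {
            out.push(item);
        }
    }

    fn main() {
        let mut seen = HashSet::new();
        let mut out = Vec::new();
        for item in [3, 1, 3, 2, 1] {
            dedup_push(&mut seen, &mut out, item);
        }
        // Duplicates are pushed only once, in first-seen order.
        assert_eq!(out, vec![3, 1, 2]);
    }

The same pattern applies to any set type whose `insert` reports whether
the value was newly added.
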
diff --git a/src/indexset.rs b/src/indexset.rs
index a5c263a..8a3af2d 100644
--- a/src/indexset.rs
+++ b/src/indexset.rs
@@ -36,73 +36,41 @@ impl AdaptiveMap {
         }
     }
 
-    /// Expand into `Large` mode if we are at capacity and have no
-    /// zero-value pairs that can be trimmed.
-    #[inline(never)]
-    fn expand(&mut self) {
-        match self {
-            &mut Self::Small {
-                ref mut len,
-                ref mut keys,
-                ref mut values,
-            } => {
-                // Note: we *may* remain as `Small` if there are any
-                // zero elements. Try removing them first, before we
-                // commit to a memory allocation.
-                if values.iter().any(|v| *v == 0) {
-                    let mut out = 0;
-                    for i in 0..(*len as usize) {
-                        if values[i] == 0 {
-                            continue;
-                        }
-                        if out < i {
-                            keys[out] = keys[i];
-                            values[out] = values[i];
-                        }
-                        out += 1;
-                    }
-                    *len = out as u32;
-                } else {
-                    let mut map = FxHashMap::default();
-                    for i in 0..(*len as usize) {
-                        map.insert(keys[i], values[i]);
-                    }
-                    *self = Self::Large(map);
-                }
-            }
-            _ => {}
-        }
-    }
     #[inline(always)]
     fn get_or_insert<'a>(&'a mut self, key: u32) -> &'a mut u64 {
         // Check whether the key is present and we are in small mode;
         // if no to both, we need to expand first.
-        let (needs_expand, small_mode_idx) = match self {
-            &mut Self::Small { len, ref keys, .. } => {
+        let small_mode_idx = match self {
+            &mut Self::Small {
+                len,
+                ref mut keys,
+                ref values,
+            } => {
                 // Perform this scan but do not return right away;
                 // doing so runs into overlapping-borrow issues
                 // because the current non-lexical lifetimes
                 // implementation is not able to see that the `self`
                 // mutable borrow on return is only on the
                 // early-return path.
-                let small_mode_idx = keys.iter().take(len as usize).position(|k| *k == key);
-                let needs_expand = small_mode_idx.is_none() && len == SMALL_ELEMS as u32;
-                (needs_expand, small_mode_idx)
+                if let Some(i) = keys[..len as usize].iter().position(|&k| k == key) {
+                    Some(i)
+                } else if len != SMALL_ELEMS as u32 {
+                    debug_assert!(len < SMALL_ELEMS as u32);
+                    None
+                } else if let Some(i) = values.iter().position(|&v| v == 0) {
+                    // If an existing value is zero, reuse that slot.
+                    keys[i] = key;
+                    Some(i)
+                } else {
+                    *self = Self::Large(keys.iter().copied().zip(values.iter().copied()).collect());
+                    None
+                }
             }
-            _ => (false, None),
+            _ => None,
         };
-        if needs_expand {
-            debug_assert!(small_mode_idx.is_none());
-            self.expand();
-        }
-
         match self {
-            &mut Self::Small {
-                ref mut len,
-                ref mut keys,
-                ref mut values,
-            } => {
+            Self::Small { len, keys, values } => {
                 // If we found the key already while checking whether
                 // we need to expand above, use that index to return
                 // early.
@@ -112,15 +80,16 @@ impl AdaptiveMap {
                 // Otherwise, the key must not be present; add a new
                 // entry.
                 debug_assert!(*len < SMALL_ELEMS as u32);
-                let idx = *len;
+                let idx = *len as usize;
                 *len += 1;
-                keys[idx as usize] = key;
-                values[idx as usize] = 0;
-                &mut values[idx as usize]
+                keys[idx] = key;
+                values[idx] = 0;
+                &mut values[idx]
             }
-            &mut Self::Large(ref mut map) => map.entry(key).or_insert(0),
+            Self::Large(map) => map.entry(key).or_insert(0),
         }
     }
+
     #[inline(always)]
     fn get_mut(&mut self, key: u32) -> Option<&mut u64> {
         match self {
@@ -180,6 +149,8 @@ enum AdaptiveMapIter<'a> {
 
 impl<'a> std::iter::Iterator for AdaptiveMapIter<'a> {
     type Item = (u32, u64);
+
+    #[inline]
     fn next(&mut self) -> Option<Self::Item> {
         match self {
             &mut Self::Small(ref mut keys, ref mut values) => {
@@ -285,7 +256,7 @@ impl IndexSet {
     pub fn iter<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
         self.elems.iter().flat_map(|(word_idx, bits)| {
             let word_idx = word_idx as usize;
-            set_bits(bits).map(move |i| BITS_PER_WORD * word_idx + i)
+            SetBitsIter(bits).map(move |i| BITS_PER_WORD * word_idx + i)
         })
     }
 
@@ -299,15 +270,12 @@ impl IndexSet {
     }
 }
 
-fn set_bits(bits: u64) -> impl Iterator<Item = usize> {
-    let iter = SetBitsIter(bits);
-    iter
-}
-
 pub struct SetBitsIter(u64);
 
 impl Iterator for SetBitsIter {
     type Item = usize;
+
+    #[inline]
     fn next(&mut self) -> Option<usize> {
         // Build an `Option` so that on the nonzero path,
         // the compiler can optimize the trailing-zeroes operator
diff --git a/src/ion/data_structures.rs b/src/ion/data_structures.rs
index 0b5becd..3201326 100644
--- a/src/ion/data_structures.rs
+++ b/src/ion/data_structures.rs
@@ -630,6 +630,7 @@ pub struct PosWithPrio {
 }
 
 impl PosWithPrio {
+    #[inline]
     pub fn key(self) -> u64 {
         u64_key(self.pos.to_index(), self.prio)
     }
diff --git a/src/ion/process.rs b/src/ion/process.rs
index 4a99567..0c2161b 100644
--- a/src/ion/process.rs
+++ b/src/ion/process.rs
@@ -157,9 +157,8 @@ impl<'a, F: Function> Env<'a, F> {
                 // conflicts list.
                 let conflict_bundle = self.ranges[preg_range.index()].bundle;
                 trace!(" -> conflict bundle {:?}", conflict_bundle);
-                if !self.conflict_set.contains(&conflict_bundle) {
+                if self.conflict_set.insert(conflict_bundle) {
                     conflicts.push(conflict_bundle);
-                    self.conflict_set.insert(conflict_bundle);
                     max_conflict_weight = std::cmp::max(
                         max_conflict_weight,
                         self.bundles[conflict_bundle.index()].cached_spill_weight(),