mirror of
https://github.com/Zygo/bees.git
synced 2025-05-17 21:35:45 +02:00
counters: track pair growing time
When we find a matching block, we attempt to extend ("grow") the matched pair around the first matching block. This function takes the IO hit of reading the second extent from each duplicate extent pair. It is also very slow—too many allocations, reads that are too small and issued in the wrong order, an order of magnitude too many calls to TREE_SEARCH_V2—and it is usually among the top 3 most frequent PERFORMANCE warnings. Start tracking the running time of grows using the pairforward_ms and pairbackward_ms counters so that we can compare it against various replacements. Signed-off-by: Zygo Blaxell <bees@furryterror.org>
This commit is contained in:
parent
677da5de45
commit
38ccf5c921
@ -368,6 +368,7 @@ BeesRangePair::grow(shared_ptr<BeesContext> ctx, bool constrained)
|
||||
BEESTOOLONG("grow constrained = " << constrained << " *this = " << *this);
|
||||
BEESTRACE("grow constrained = " << constrained << " *this = " << *this);
|
||||
bool rv = false;
|
||||
Timer grow_backward_timer;
|
||||
|
||||
THROW_CHECK1(invalid_argument, first.begin(), (first.begin() & BLOCK_MASK_CLONE) == 0);
|
||||
THROW_CHECK1(invalid_argument, second.begin(), (second.begin() & BLOCK_MASK_CLONE) == 0);
|
||||
@ -496,9 +497,11 @@ BeesRangePair::grow(shared_ptr<BeesContext> ctx, bool constrained)
|
||||
BEESCOUNT(pairbackward_hit);
|
||||
}
|
||||
BEESCOUNT(pairbackward_stop);
|
||||
BEESCOUNTADD(pairbackward_ms, grow_backward_timer.age() * 1000);
|
||||
|
||||
// Look forward
|
||||
BEESTRACE("grow_forward " << *this);
|
||||
Timer grow_forward_timer;
|
||||
while (first.size() < BLOCK_SIZE_MAX_EXTENT) {
|
||||
if (second.end() >= e_second.end()) {
|
||||
if (constrained) {
|
||||
@ -612,6 +615,7 @@ BeesRangePair::grow(shared_ptr<BeesContext> ctx, bool constrained)
|
||||
}
|
||||
|
||||
BEESCOUNT(pairforward_stop);
|
||||
BEESCOUNTADD(pairforward_ms, grow_forward_timer.age() * 1000);
|
||||
return rv;
|
||||
}
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user