Mirror of https://github.com/Zygo/bees.git, synced 2025-05-18 05:45:45 +02:00

roots: run insert_new_crawl from within a Task

If we have loadavg targeting enabled, there may be no worker threads
available to respond to new subvols, so we should not bother updating
the subvols list.

Put insert_new_crawl into a Task so it only executes when a worker
is available.

Signed-off-by: Zygo Blaxell <bees@furryterror.org>
Zygo Blaxell, 2021-12-02 23:56:46 -05:00
parent 48dd2a45fe
commit d5d17cbe62
2 changed files with 23 additions and 22 deletions
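
For readers unfamiliar with the Task scheduler that bees uses, here is a minimal, self-contained sketch of the pattern the commit message describes. This is not the bees/crucible API: TaskQueue, crawl_more, and crawl_new below are illustrative stand-ins, and the real Task class (visible in the diff) additionally names tasks and supports Task::current_task(). The only point carried over is the inversion: the polling thread queues the subvol-list update instead of calling it inline, and that task re-queues the crawler, so neither runs unless a worker thread is free.

	#include <chrono>
	#include <condition_variable>
	#include <deque>
	#include <functional>
	#include <iostream>
	#include <mutex>
	#include <thread>
	#include <vector>

	// Tiny worker pool standing in for the crucible Task system.
	class TaskQueue {
	public:
		explicit TaskQueue(size_t workers) {
			for (size_t i = 0; i < workers; ++i) {
				m_threads.emplace_back([this] { worker_loop(); });
			}
		}
		~TaskQueue() {
			{
				std::lock_guard<std::mutex> lock(m_mutex);
				m_stop = true;
			}
			m_cond.notify_all();
			for (auto &t : m_threads) t.join();
		}
		// Queue a task; it runs only when a worker thread picks it up.
		void run(std::function<void()> fn) {
			{
				std::lock_guard<std::mutex> lock(m_mutex);
				m_queue.push_back(std::move(fn));
			}
			m_cond.notify_one();
		}
	private:
		void worker_loop() {
			for (;;) {
				std::function<void()> fn;
				{
					std::unique_lock<std::mutex> lock(m_mutex);
					m_cond.wait(lock, [this] { return m_stop || !m_queue.empty(); });
					if (m_queue.empty()) return;  // m_stop set and nothing left to do
					fn = std::move(m_queue.front());
					m_queue.pop_front();
				}
				fn();
			}
		}
		std::mutex m_mutex;
		std::condition_variable m_cond;
		std::deque<std::function<void()>> m_queue;
		std::vector<std::thread> m_threads;
		bool m_stop = false;
	};

	int main() {
		// Stand-in for the worker pool; with loadavg targeting it may have
		// no idle workers, so queued work simply waits instead of running inline.
		TaskQueue queue(2);

		// "crawl_more": in bees it re-queues itself while there is data to scan;
		// here it just runs once for brevity.
		auto crawl_more = [] { std::cout << "crawl_more: scanning extents\n"; };

		// "crawl_new": update the subvol list, then kick off crawl_more.
		// Both steps now happen inside a worker, never in the polling thread.
		auto crawl_new = [&queue, crawl_more] {
			std::cout << "crawl_new: insert_new_crawl equivalent\n";
			queue.run(crawl_more);
		};

		// The transid-polling loop merely queues the work when it sees a change.
		queue.run(crawl_new);

		std::this_thread::sleep_for(std::chrono::milliseconds(100));
		return 0;
	}

Usage mirrors the diff below: crawl_thread() only calls crawl_new.run() when it observes a new transid, and crawl_more re-queues itself via Task::current_task().run() as long as crawl_roots() reports more work.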


@@ -731,17 +731,23 @@ BeesRoots::crawl_thread()
 
 	// Create the Task that does the crawling
 	const auto shared_this = shared_from_this();
-	m_crawl_task = Task("crawl_more", [shared_this]() {
+	const auto crawl_task = Task("crawl_more", [shared_this]() {
 		BEESTRACE("crawl_more " << shared_this);
-		const auto run_again = shared_this->crawl_roots();
-		if (run_again) {
-			shared_this->m_crawl_task.run();
+		if (shared_this->crawl_roots()) {
+			Task::current_task().run();
 		}
 	});
+	const auto crawl_new = Task("crawl_new", [shared_this, crawl_task]() {
+		BEESTRACE("crawl_new " << shared_this);
+		catch_all([&]() {
+			shared_this->insert_new_crawl();
+		});
+		crawl_task.run();
+	});
 
 	// Monitor transid_max and wake up roots when it changes
 	BEESNOTE("tracking transid");
-	auto last_count = m_transid_re.count();
+	auto last_transid = m_transid_re.count();
 	while (!m_stop_requested) {
 		BEESTRACE("Measure current transid");
 		catch_all([&]() {
@@ -749,24 +755,20 @@ BeesRoots::crawl_thread()
 			m_transid_re.update(transid_max_nocache());
 		});
 
-		BEESTRACE("Make sure we have a full complement of crawlers");
-		catch_all([&]() {
-			BEESTRACE("calling insert_new_crawl");
-			insert_new_crawl();
-		});
-
-		// Don't hold root FDs open too long.
-		// The open FDs prevent snapshots from being deleted.
-		// cleaner_kthread just keeps skipping over the open dir and all its children.
-		// Even open files are a problem if they're big enough.
-		auto new_count = m_transid_re.count();
-		if (new_count != last_count) {
+		const auto new_transid = m_transid_re.count();
+		if (new_transid != last_transid) {
+			// Don't hold root FDs open too long.
+			// The open FDs prevent snapshots from being deleted.
+			// cleaner_kthread just keeps skipping over the open dir and all its children.
+			// Even open files are a problem if they're big enough.
+			// Always run this even if we have no worker threads.
 			clear_caches();
-		}
-		last_count = new_count;
 
-		// If crawl_more stopped running (i.e. ran out of data), start it up again
-		m_crawl_task.run();
+			// Insert new roots and restart crawl_more.
+			// Don't run this if we have no worker threads.
+			crawl_new.run();
+		}
+		last_transid = new_transid;
 
 		auto poll_time = m_transid_re.seconds_for(m_transid_factor);
 		BEESLOGDEBUG("Polling " << poll_time << "s for next " << m_transid_factor << " transid " << m_transid_re);


@@ -544,7 +544,6 @@ class BeesRoots : public enable_shared_from_this<BeesRoots> {
 	BeesThread m_writeback_thread;
 	RateEstimator m_transid_re;
 	size_t m_transid_factor = BEES_TRANSID_FACTOR;
-	Task m_crawl_task;
 	bool m_workaround_btrfs_send = false;
 	shared_ptr<BeesScanMode> m_scanner;