From 762f833ab0931faddfca2ba8a87b040d71337e19 Mon Sep 17 00:00:00 2001 From: Zygo Blaxell Date: Fri, 26 Jan 2018 21:44:39 -0500 Subject: [PATCH] roots: poll every 10 transids Restarting scans for each transid is a bit aggressive. Scan every 10 transids for a polling rate close to the former BEES_COMMIT_INTERVAL. Signed-off-by: Zygo Blaxell --- src/bees-roots.cc | 24 +++++++++++++++--------- src/bees.h | 4 ++++ 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/src/bees-roots.cc b/src/bees-roots.cc index ac52ec5..a4b8f24 100644 --- a/src/bees-roots.cc +++ b/src/bees-roots.cc @@ -311,7 +311,7 @@ BeesRoots::crawl_roots() BEESCOUNT(crawl_done); - auto want_transid = m_transid_re.count() + 1; + auto want_transid = m_transid_re.count() + m_transid_factor; auto ran_out_time = m_crawl_timer.lap(); BEESLOGINFO("Crawl master ran out of data after " << ran_out_time << "s, waiting about " << m_transid_re.seconds_until(want_transid) << "s for transid " << want_transid << "..."); BEESNOTE("idle, waiting for transid " << want_transid << ": " << m_transid_re); @@ -343,14 +343,18 @@ BeesRoots::crawl_thread() }).run(); // Monitor transid_max and wake up roots when it changes - BEESNOTE("tracking transids"); + BEESNOTE("tracking transid"); auto last_count = m_transid_re.count(); while (true) { // Measure current transid - catch_all([&]() { m_transid_re.update(transid_max_nocache()); }); + catch_all([&]() { + m_transid_re.update(transid_max_nocache()); + }); // Make sure we have a full complement of crawlers - catch_all([&]() { insert_new_crawl(); }); + catch_all([&]() { + insert_new_crawl(); + }); // Don't hold root FDs open too long. // The open FDs prevent snapshots from being deleted. @@ -362,10 +366,10 @@ BeesRoots::crawl_thread() } last_count = new_count; - BEESNOTE("waiting for next transid " << m_transid_re); - // We don't use wait_for here because somebody needs to - // be updating m_transid_re from time to time. 
- nanosleep(m_transid_re.seconds_for(1)); + auto poll_time = m_transid_re.seconds_for(m_transid_factor); + BEESLOGDEBUG("Polling " << poll_time << "s for next " << m_transid_factor << " transid " << m_transid_re); + BEESNOTE("waiting " << poll_time << "s for next " << m_transid_factor << " transid " << m_transid_re); + nanosleep(poll_time); } } @@ -474,7 +478,9 @@ BeesRoots::BeesRoots(shared_ptr ctx) : { m_crawl_thread.exec([&]() { // Measure current transid before creating any crawlers - catch_all([&]() { m_transid_re.update(transid_max_nocache()); }); + catch_all([&]() { + m_transid_re.update(transid_max_nocache()); + }); // Make sure we have a full complement of crawlers catch_all([&]() { diff --git a/src/bees.h b/src/bees.h index 029f57f..5e31cf0 100644 --- a/src/bees.h +++ b/src/bees.h @@ -103,6 +103,9 @@ const size_t BEES_MAX_CRAWL_SIZE = 1024; // Insert this many items before switching to a new subvol const size_t BEES_MAX_CRAWL_BATCH = 128; +// Wait this many transids between crawls +const size_t BEES_TRANSID_FACTOR = 10; + // If an extent has this many refs, pretend it does not exist // to avoid a crippling btrfs performance bug // The actual limit in LOGICAL_INO seems to be 2730, but let's leave a little headroom @@ -524,6 +527,7 @@ class BeesRoots : public enable_shared_from_this { BeesThread m_crawl_thread; BeesThread m_writeback_thread; RateEstimator m_transid_re; + size_t m_transid_factor = BEES_TRANSID_FACTOR; void insert_new_crawl(); void insert_root(const BeesCrawlState &bcs);