From 2f14a5a9c7b56d0627a1086dec10d63a938e61eb Mon Sep 17 00:00:00 2001 From: Zygo Blaxell Date: Tue, 12 Oct 2021 15:32:52 -0400 Subject: [PATCH] roots: reduce number of objects per TREE_SEARCH_V2, drop BEES_MAX_CRAWL_ITEMS and BEES_MAX_CRAWL_BYTES This makes better use of dynamic buffer sizing, and reduces the amount of stale data lying around. Signed-off-by: Zygo Blaxell --- src/bees-roots.cc | 10 +++++----- src/bees.h | 6 ------ 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/src/bees-roots.cc b/src/bees-roots.cc index 9f117e3..a62ead3 100644 --- a/src/bees-roots.cc +++ b/src/bees-roots.cc @@ -209,7 +209,7 @@ BeesRoots::transid_max_nocache() sk.min_objectid = sk.max_objectid = BTRFS_EXTENT_TREE_OBJECTID; while (true) { - sk.nr_items = 1024; + sk.nr_items = 4; BEESTRACE("transid_max search sk " << sk); sk.do_ioctl(m_ctx->root_fd()); @@ -632,7 +632,7 @@ BeesRoots::open_root_nocache(uint64_t rootid) BEESTRACE("sk " << sk); while (sk.min_objectid <= rootid) { - sk.nr_items = 1024; + sk.nr_items = 1; sk.do_ioctl(m_ctx->root_fd()); if (sk.m_result.empty()) { @@ -769,7 +769,7 @@ BeesRoots::next_root(uint64_t root) sk.min_objectid = root + 1; while (true) { - sk.nr_items = 1024; + sk.nr_items = 1; sk.do_ioctl(m_ctx->root_fd()); if (sk.m_result.empty()) { @@ -1005,7 +1005,7 @@ BeesCrawl::fetch_extents() Timer crawl_timer; - BtrfsIoctlSearchKey sk(BEES_MAX_CRAWL_BYTES); + BtrfsIoctlSearchKey sk; sk.tree_id = old_state.m_root; sk.min_objectid = old_state.m_objectid; sk.min_type = sk.max_type = BTRFS_EXTENT_DATA_KEY; @@ -1013,7 +1013,7 @@ BeesCrawl::fetch_extents() sk.min_transid = old_state.m_min_transid; // Don't set max_transid to m_max_transid here. See below. 
sk.max_transid = numeric_limits<uint64_t>::max(); - sk.nr_items = BEES_MAX_CRAWL_ITEMS; + sk.nr_items = 4; // Lock in the old state set_state(old_state); diff --git a/src/bees.h b/src/bees.h index 2821cf9..494d736 100644 --- a/src/bees.h +++ b/src/bees.h @@ -101,12 +101,6 @@ const double BEES_HASH_TABLE_ANALYZE_INTERVAL = BEES_STATS_INTERVAL; // Stop growing the work queue after we have this many tasks queued const size_t BEES_MAX_QUEUE_SIZE = 128; -// Read this many items at a time in SEARCHv2 -const size_t BEES_MAX_CRAWL_ITEMS = 8; - -// Read this many bytes at a time in SEARCHv2 (one maximum-sized metadata page) -const size_t BEES_MAX_CRAWL_BYTES = 64 * 1024; - // Insert this many items before switching to a new subvol const size_t BEES_MAX_CRAWL_BATCH = 128;