mirror of
https://github.com/Zygo/bees.git
synced 2025-05-17 21:35:45 +02:00
roots: reduce number of objects per TREE_SEARCH_V2, drop BEES_MAX_CRAWL_ITEMS and BEES_MAX_CRAWL_BYTES
This makes better use of dynamic buffer sizing, and reduces the amount of stale data lying around. Signed-off-by: Zygo Blaxell <bees@furryterror.org>
This commit is contained in:
parent
cf4091b352
commit
2f14a5a9c7
@ -209,7 +209,7 @@ BeesRoots::transid_max_nocache()
|
|||||||
sk.min_objectid = sk.max_objectid = BTRFS_EXTENT_TREE_OBJECTID;
|
sk.min_objectid = sk.max_objectid = BTRFS_EXTENT_TREE_OBJECTID;
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
sk.nr_items = 1024;
|
sk.nr_items = 4;
|
||||||
BEESTRACE("transid_max search sk " << sk);
|
BEESTRACE("transid_max search sk " << sk);
|
||||||
sk.do_ioctl(m_ctx->root_fd());
|
sk.do_ioctl(m_ctx->root_fd());
|
||||||
|
|
||||||
@ -632,7 +632,7 @@ BeesRoots::open_root_nocache(uint64_t rootid)
|
|||||||
|
|
||||||
BEESTRACE("sk " << sk);
|
BEESTRACE("sk " << sk);
|
||||||
while (sk.min_objectid <= rootid) {
|
while (sk.min_objectid <= rootid) {
|
||||||
sk.nr_items = 1024;
|
sk.nr_items = 1;
|
||||||
sk.do_ioctl(m_ctx->root_fd());
|
sk.do_ioctl(m_ctx->root_fd());
|
||||||
|
|
||||||
if (sk.m_result.empty()) {
|
if (sk.m_result.empty()) {
|
||||||
@ -769,7 +769,7 @@ BeesRoots::next_root(uint64_t root)
|
|||||||
sk.min_objectid = root + 1;
|
sk.min_objectid = root + 1;
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
sk.nr_items = 1024;
|
sk.nr_items = 1;
|
||||||
sk.do_ioctl(m_ctx->root_fd());
|
sk.do_ioctl(m_ctx->root_fd());
|
||||||
|
|
||||||
if (sk.m_result.empty()) {
|
if (sk.m_result.empty()) {
|
||||||
@ -1005,7 +1005,7 @@ BeesCrawl::fetch_extents()
|
|||||||
|
|
||||||
Timer crawl_timer;
|
Timer crawl_timer;
|
||||||
|
|
||||||
BtrfsIoctlSearchKey sk(BEES_MAX_CRAWL_BYTES);
|
BtrfsIoctlSearchKey sk;
|
||||||
sk.tree_id = old_state.m_root;
|
sk.tree_id = old_state.m_root;
|
||||||
sk.min_objectid = old_state.m_objectid;
|
sk.min_objectid = old_state.m_objectid;
|
||||||
sk.min_type = sk.max_type = BTRFS_EXTENT_DATA_KEY;
|
sk.min_type = sk.max_type = BTRFS_EXTENT_DATA_KEY;
|
||||||
@ -1013,7 +1013,7 @@ BeesCrawl::fetch_extents()
|
|||||||
sk.min_transid = old_state.m_min_transid;
|
sk.min_transid = old_state.m_min_transid;
|
||||||
// Don't set max_transid to m_max_transid here. See below.
|
// Don't set max_transid to m_max_transid here. See below.
|
||||||
sk.max_transid = numeric_limits<uint64_t>::max();
|
sk.max_transid = numeric_limits<uint64_t>::max();
|
||||||
sk.nr_items = BEES_MAX_CRAWL_ITEMS;
|
sk.nr_items = 4;
|
||||||
|
|
||||||
// Lock in the old state
|
// Lock in the old state
|
||||||
set_state(old_state);
|
set_state(old_state);
|
||||||
|
@ -101,12 +101,6 @@ const double BEES_HASH_TABLE_ANALYZE_INTERVAL = BEES_STATS_INTERVAL;
|
|||||||
// Stop growing the work queue after we have this many tasks queued
|
// Stop growing the work queue after we have this many tasks queued
|
||||||
const size_t BEES_MAX_QUEUE_SIZE = 128;
|
const size_t BEES_MAX_QUEUE_SIZE = 128;
|
||||||
|
|
||||||
// Read this many items at a time in SEARCHv2
|
|
||||||
const size_t BEES_MAX_CRAWL_ITEMS = 8;
|
|
||||||
|
|
||||||
// Read this many bytes at a time in SEARCHv2 (one maximum-sized metadata page)
|
|
||||||
const size_t BEES_MAX_CRAWL_BYTES = 64 * 1024;
|
|
||||||
|
|
||||||
// Insert this many items before switching to a new subvol
|
// Insert this many items before switching to a new subvol
|
||||||
const size_t BEES_MAX_CRAWL_BATCH = 128;
|
const size_t BEES_MAX_CRAWL_BATCH = 128;
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user