mirror of
https://github.com/Zygo/bees.git
synced 2025-05-18 13:55:44 +02:00
crawl: remove the unused single-threaded crawl implementation
This is a TODO from "bees: process each subvol in its own thread".

Signed-off-by: Zygo Blaxell <bees@furryterror.org>
This commit is contained in:
parent
09ab0778e8
commit
4604f5bc96
@ -194,93 +194,6 @@ BeesRoots::transid_max()
|
|||||||
return rv;
|
return rv;
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
|
||||||
BeesRoots::crawl_roots()
|
|
||||||
{
|
|
||||||
BEESNOTE("Crawling roots");
|
|
||||||
|
|
||||||
unique_lock<mutex> lock(m_mutex);
|
|
||||||
if (m_root_crawl_map.empty()) {
|
|
||||||
BEESNOTE("idle, crawl map is empty");
|
|
||||||
m_condvar.wait(lock);
|
|
||||||
// Don't count the time we were waiting as part of the crawl time
|
|
||||||
m_crawl_timer.reset();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Work from a copy because BeesCrawl might change the world under us
|
|
||||||
auto crawl_map_copy = m_root_crawl_map;
|
|
||||||
lock.unlock();
|
|
||||||
|
|
||||||
#if 0
|
|
||||||
// Scan the same inode/offset tuple in each subvol (good for snapshots)
|
|
||||||
BeesFileRange first_range;
|
|
||||||
shared_ptr<BeesCrawl> first_crawl;
|
|
||||||
for (auto i : crawl_map_copy) {
|
|
||||||
auto this_crawl = i.second;
|
|
||||||
auto this_range = this_crawl->peek_front();
|
|
||||||
if (this_range) {
|
|
||||||
if (!first_range || this_range < first_range) {
|
|
||||||
first_crawl = this_crawl;
|
|
||||||
first_range = this_range;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (first_range) {
|
|
||||||
catch_all([&]() {
|
|
||||||
// BEESINFO("scan_forward " << first_range);
|
|
||||||
m_ctx->scan_forward(first_range);
|
|
||||||
});
|
|
||||||
BEESCOUNT(crawl_scan);
|
|
||||||
m_crawl_current = first_crawl->get_state();
|
|
||||||
auto first_range_popped = first_crawl->pop_front();
|
|
||||||
THROW_CHECK2(runtime_error, first_range, first_range_popped, first_range == first_range_popped);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
#if 0
|
|
||||||
// Scan each subvol one extent at a time (good for continuous forward progress)
|
|
||||||
bool crawled = false;
|
|
||||||
for (auto i : crawl_map_copy) {
|
|
||||||
auto this_crawl = i.second;
|
|
||||||
auto this_range = this_crawl->peek_front();
|
|
||||||
if (this_range) {
|
|
||||||
catch_all([&]() {
|
|
||||||
// BEESINFO("scan_forward " << this_range);
|
|
||||||
m_ctx->scan_forward(this_range);
|
|
||||||
});
|
|
||||||
crawled = true;
|
|
||||||
BEESCOUNT(crawl_scan);
|
|
||||||
m_crawl_current = this_crawl->get_state();
|
|
||||||
auto this_range_popped = this_crawl->pop_front();
|
|
||||||
THROW_CHECK2(runtime_error, this_range, this_range_popped, this_range == this_range_popped);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (crawled) return;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
BEESLOG("Crawl ran out of data after " << m_crawl_timer.lap() << "s, waiting for more...");
|
|
||||||
BEESCOUNT(crawl_done);
|
|
||||||
BEESNOTE("idle, waiting for more data");
|
|
||||||
lock.lock();
|
|
||||||
m_condvar.wait(lock);
|
|
||||||
|
|
||||||
// Don't count the time we were waiting as part of the crawl time
|
|
||||||
m_crawl_timer.reset();
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
BeesRoots::crawl_thread()
|
|
||||||
{
|
|
||||||
BEESNOTE("crawling");
|
|
||||||
while (true) {
|
|
||||||
catch_all([&]() {
|
|
||||||
crawl_roots();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
void
|
||||||
BeesRoots::writeback_thread()
|
BeesRoots::writeback_thread()
|
||||||
{
|
{
|
||||||
@ -384,21 +297,17 @@ BeesRoots::state_load()
|
|||||||
BeesRoots::BeesRoots(shared_ptr<BeesContext> ctx) :
|
BeesRoots::BeesRoots(shared_ptr<BeesContext> ctx) :
|
||||||
m_ctx(ctx),
|
m_ctx(ctx),
|
||||||
m_crawl_state_file(ctx->home_fd(), crawl_state_filename()),
|
m_crawl_state_file(ctx->home_fd(), crawl_state_filename()),
|
||||||
m_crawl_thread("crawl"),
|
|
||||||
m_writeback_thread("crawl_writeback")
|
m_writeback_thread("crawl_writeback")
|
||||||
{
|
{
|
||||||
unsigned max_crawlers = max(1U, thread::hardware_concurrency());
|
unsigned max_crawlers = max(1U, thread::hardware_concurrency());
|
||||||
m_lock_set.max_size(max_crawlers);
|
m_lock_set.max_size(max_crawlers);
|
||||||
|
|
||||||
// m_crawl_thread.exec([&]() {
|
catch_all([&]() {
|
||||||
catch_all([&]() {
|
state_load();
|
||||||
state_load();
|
});
|
||||||
});
|
m_writeback_thread.exec([&]() {
|
||||||
m_writeback_thread.exec([&]() {
|
writeback_thread();
|
||||||
writeback_thread();
|
});
|
||||||
});
|
|
||||||
// crawl_thread();
|
|
||||||
// });
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Fd
|
Fd
|
||||||
|
@ -517,7 +517,6 @@ class BeesRoots {
|
|||||||
condition_variable m_condvar;
|
condition_variable m_condvar;
|
||||||
bool m_crawl_dirty = false;
|
bool m_crawl_dirty = false;
|
||||||
Timer m_crawl_timer;
|
Timer m_crawl_timer;
|
||||||
BeesThread m_crawl_thread;
|
|
||||||
BeesThread m_writeback_thread;
|
BeesThread m_writeback_thread;
|
||||||
LockSet<uint64_t> m_lock_set;
|
LockSet<uint64_t> m_lock_set;
|
||||||
|
|
||||||
@ -534,7 +533,6 @@ class BeesRoots {
|
|||||||
BeesCrawlState crawl_state_get(uint64_t root);
|
BeesCrawlState crawl_state_get(uint64_t root);
|
||||||
void crawl_state_set_dirty();
|
void crawl_state_set_dirty();
|
||||||
void crawl_state_erase(const BeesCrawlState &bcs);
|
void crawl_state_erase(const BeesCrawlState &bcs);
|
||||||
void crawl_thread();
|
|
||||||
void writeback_thread();
|
void writeback_thread();
|
||||||
uint64_t next_root(uint64_t root = 0);
|
uint64_t next_root(uint64_t root = 0);
|
||||||
void current_state_set(const BeesCrawlState &bcs);
|
void current_state_set(const BeesCrawlState &bcs);
|
||||||
|
Loading…
x
Reference in New Issue
Block a user