
bees: adjust concurrency model

Tune the concurrency model to work a little better with large numbers
of subvols.  This is much less than the full rewrite Bees desperately
needs, but it provides a marginal improvement until the new code is ready.

Signed-off-by: Zygo Blaxell <bees@furryterror.org>
Zygo Blaxell 2017-09-16 16:45:15 -04:00
parent 1052119a53
commit 552e74066d
3 changed files with 21 additions and 6 deletions
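All three files below tune the same primitive: a keyed lock set whose max_size() caps how many threads may hold a slot at once (m_extent_lock_set in the hash table, m_lock_set in BeesRoots, and the global bees_ioctl_lock_set). The stand-in below is not the bees implementation (bees carries its own LockSet in its support library, and the key argument visible in the diff, a subvol root id or a thread id, is dropped here); it is only a minimal sketch of the behaviour this commit relies on: max_size(n) sets the cap, make_lock() returns a guard, and lock()/unlock() wait for and give back a slot.

// Hypothetical stand-in for the lock set this commit tunes; not the real bees
// LockSet.  It only models what the diff depends on: at most m_max_size holders
// at a time, acquired and released through a guard object.
#include <condition_variable>
#include <cstddef>
#include <mutex>

class SimpleLockSet {
	std::mutex              m_mutex;
	std::condition_variable m_cond;
	size_t                  m_max_size = 1;  // old hash-table default: one holder
	size_t                  m_held = 0;
public:
	void max_size(size_t n) {
		std::unique_lock<std::mutex> guard(m_mutex);
		m_max_size = n ? n : 1;
		m_cond.notify_all();             // a larger cap may unblock waiters
	}

	class Lock {
		SimpleLockSet &m_set;
		bool           m_locked = false;
	public:
		Lock(SimpleLockSet &set, bool lock_now) : m_set(set) { if (lock_now) lock(); }
		Lock(Lock &&other) noexcept : m_set(other.m_set), m_locked(other.m_locked) { other.m_locked = false; }
		Lock(const Lock &) = delete;
		void lock() {                    // block until a slot is free, then take it
			std::unique_lock<std::mutex> guard(m_set.m_mutex);
			m_set.m_cond.wait(guard, [&] { return m_set.m_held < m_set.m_max_size; });
			++m_set.m_held;
			m_locked = true;
		}
		void unlock() {                  // give the slot back and wake one waiter
			std::unique_lock<std::mutex> guard(m_set.m_mutex);
			if (m_locked) { --m_set.m_held; m_locked = false; m_set.m_cond.notify_one(); }
		}
		~Lock() { unlock(); }
	};

	Lock make_lock(bool lock_now = true) { return Lock(*this, lock_now); }
};

Read this way, the hash-table hunk raises the startup prefetch cap from one reader to bees_worker_thread_count() readers, and the final hunk in this commit pins the ioctl lock set back down to a single holder.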


@@ -596,7 +596,9 @@ BeesHashTable::BeesHashTable(shared_ptr<BeesContext> ctx, string filename, off_t
m_stats_file(m_ctx->home_fd(), "beesstats.txt")
{
// Reduce disk thrashing at startup: one reader at a time
m_extent_lock_set.max_size(1);
// m_extent_lock_set.max_size(1);
// OK maybe a little faster
m_extent_lock_set.max_size(bees_worker_thread_count());
// Sanity checks to protect the implementation from its weaknesses
THROW_CHECK2(invalid_argument, BLOCK_SIZE_HASHTAB_BUCKET, BLOCK_SIZE_HASHTAB_EXTENT, (BLOCK_SIZE_HASHTAB_EXTENT % BLOCK_SIZE_HASHTAB_BUCKET) == 0);


@@ -302,7 +302,7 @@ BeesRoots::BeesRoots(shared_ptr<BeesContext> ctx) :
m_crawl_state_file(ctx->home_fd(), crawl_state_filename()),
m_writeback_thread("crawl_writeback")
{
// m_lock_set.max_size(bees_worker_thread_count());
m_lock_set.max_size(bees_worker_thread_count());
catch_all([&]() {
state_load();
@@ -578,17 +578,27 @@ BeesCrawl::crawl_thread()
Timer crawl_timer;
auto crawl_lock = m_ctx->roots()->lock_set().make_lock(m_state.m_root, false);
while (!m_stopped) {
#if 0
BEESNOTE("waiting for crawl thread limit " << m_state);
crawl_lock.lock();
#endif
BEESNOTE("pop_front " << m_state);
auto this_range = pop_front();
#if 0
crawl_lock.unlock();
#endif
if (this_range) {
catch_all([&]() {
#if 1
BEESNOTE("waiting for scan thread limit " << m_state);
crawl_lock.lock();
#endif
BEESNOTE("scan_forward " << this_range);
m_ctx->scan_forward(this_range);
});
BEESCOUNT(crawl_scan);
// Let another thread have a turn with the mutexes
this_thread::yield();
} else {
auto crawl_time = crawl_timer.age();
BEESLOGNOTE("Crawl ran out of data after " << crawl_time << "s, waiting for more...");
@@ -667,8 +677,10 @@ BeesCrawl::fetch_extents()
BEESTRACE("Searching crawl sk " << static_cast<btrfs_ioctl_search_key&>(sk));
bool ioctl_ok = false;
{
#if 0
BEESNOTE("waiting to search crawl sk " << static_cast<btrfs_ioctl_search_key&>(sk));
// auto lock = bees_ioctl_lock_set.make_lock(gettid());
auto lock = bees_ioctl_lock_set.make_lock(gettid());
#endif
BEESNOTE("searching crawl sk " << static_cast<btrfs_ioctl_search_key&>(sk));
BEESTOOLONG("Searching crawl sk " << static_cast<btrfs_ioctl_search_key&>(sk));

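For readers untangling the #if 0 / #if 1 blocks in crawl_thread() above: the disabled sections are the old placement of the per-root lock (taken around pop_front(), the "crawl thread limit"), and the enabled section is the new placement (taken around scan_forward(), the "scan thread limit"). A schematic restatement, with catch_all(), the BEESNOTE/BEESCOUNT instrumentation, and the eventual release of crawl_lock omitted because they fall outside the changed lines:

// Before this commit: the lock gated the dequeue itself, so the number of
// crawlers pulling work at once was limited but scanning was not.
crawl_lock.lock();
auto this_range = pop_front();
crawl_lock.unlock();
if (this_range) {
	m_ctx->scan_forward(this_range);
}

// After this commit: pop_front() runs unlocked; the lock is taken only for
// the expensive scan, and the thread then yields so another crawler gets a
// turn with the shared mutexes.
auto this_range = pop_front();
if (this_range) {
	crawl_lock.lock();
	m_ctx->scan_forward(this_range);
	this_thread::yield();
}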

@@ -580,7 +580,8 @@ BeesTempFile::make_copy(const BeesFileRange &src)
unsigned
bees_worker_thread_count()
{
return max(1U, thread::hardware_concurrency());
// Maybe # of cores * (scalar from 0.25..4)?
return max(1U, thread::hardware_concurrency() * 4);
}
int
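A concrete reading of the new formula: std::thread::hardware_concurrency() reports the number of hardware threads and is allowed to return 0, hence the max(1U, ...) floor; a machine reporting 8 hardware threads now gets 32 worker threads instead of 8. The multiplier 4 is the hard-coded pick from the "scalar from 0.25..4" range the comment speculates about. A hedged restatement with worked numbers:

// Restatement of the new sizing; the worked numbers in the comments are
// illustrative, not measured.
#include <algorithm>
#include <thread>

unsigned bees_worker_thread_count()
{
	// hardware_concurrency() may legally return 0, so clamp to at least 1.
	return std::max(1U, std::thread::hardware_concurrency() * 4);
}

// hardware_concurrency() == 8  ->  32 worker threads, so m_extent_lock_set and
// BeesRoots' m_lock_set admit up to 32 concurrent holders, while
// bees_ioctl_lock_set (set in bees_main below) stays at exactly 1.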
@@ -600,8 +601,8 @@ bees_main(int argc, const char **argv)
THROW_CHECK1(invalid_argument, argc, argc >= 0);
vector<string> args(argv + 1, argv + argc);
// Set global concurrency limits - use only half the cores for ioctls
bees_ioctl_lock_set.max_size(max(1U, bees_worker_thread_count() / 2));
// There can be only one because we measure running time with it
bees_ioctl_lock_set.max_size(1);
// Create a context and start crawlers
bool did_subscription = false;