mirror of
https://github.com/Zygo/bees.git
synced 2025-05-18 05:45:45 +02:00
hash: don't spin when writes fail
When a hash table write fails, we skip over the write throttling because we didn't report that we successfully wrote an extent. This can be bad if the filesystem is full and the allocations for writes are burning a lot of CPU time searching for free space. We also don't retry the write later on, since we assume the extent is clean after a write attempt whether or not it was successful, so the extent might not be written out later when writes are possible again. Check whether a hash extent is dirty, and always throttle after attempting the write. If a write fails, leave the extent dirty so we attempt to write it out the next time flush cycles through the hash table. During shutdown this will reattempt each failing write once; after that, the updated hash table data will be dropped. Signed-off-by: Zygo Blaxell <bees@furryterror.org>
This commit is contained in:
parent
28ee2ae1a8
commit
cbc76a7457
@ -106,12 +106,6 @@ BeesHashTable::flush_dirty_extent(uint64_t extent_index)
|
||||
BEESNOTE("flushing extent #" << extent_index << " of " << m_extents << " extents");
|
||||
|
||||
auto lock = lock_extent_by_index(extent_index);
|
||||
|
||||
// Not dirty, nothing to do
|
||||
if (!m_extent_metadata.at(extent_index).m_dirty) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool wrote_extent = false;
|
||||
|
||||
catch_all([&]() {
|
||||
@ -125,9 +119,6 @@ BeesHashTable::flush_dirty_extent(uint64_t extent_index)
|
||||
// Copy the extent because we might be stuck writing for a while
|
||||
ByteVector extent_copy(dirty_extent, dirty_extent_end);
|
||||
|
||||
// Mark extent non-dirty while we still hold the lock
|
||||
m_extent_metadata.at(extent_index).m_dirty = false;
|
||||
|
||||
// Release the lock
|
||||
lock.unlock();
|
||||
|
||||
@ -139,6 +130,10 @@ BeesHashTable::flush_dirty_extent(uint64_t extent_index)
|
||||
// const size_t dirty_extent_size = dirty_extent_end - dirty_extent;
|
||||
// bees_unreadahead(m_fd, dirty_extent_offset, dirty_extent_size);
|
||||
|
||||
// Mark extent clean if write was successful
|
||||
lock.lock();
|
||||
m_extent_metadata.at(extent_index).m_dirty = false;
|
||||
|
||||
wrote_extent = true;
|
||||
});
|
||||
|
||||
@ -152,6 +147,13 @@ BeesHashTable::flush_dirty_extents(bool slowly)
|
||||
|
||||
uint64_t wrote_extents = 0;
|
||||
for (size_t extent_index = 0; extent_index < m_extents; ++extent_index) {
|
||||
// Skip the clean ones
|
||||
auto lock = lock_extent_by_index(extent_index);
|
||||
if (!m_extent_metadata.at(extent_index).m_dirty) {
|
||||
continue;
|
||||
}
|
||||
lock.unlock();
|
||||
|
||||
if (flush_dirty_extent(extent_index)) {
|
||||
++wrote_extents;
|
||||
if (slowly) {
|
||||
|
@ -415,6 +415,7 @@ public:
|
||||
bool push_random_hash_addr(HashType hash, AddrType addr);
|
||||
void erase_hash_addr(HashType hash, AddrType addr);
|
||||
bool push_front_hash_addr(HashType hash, AddrType addr);
|
||||
bool flush_dirty_extent(uint64_t extent_index);
|
||||
|
||||
private:
|
||||
string m_filename;
|
||||
@ -474,7 +475,6 @@ private:
|
||||
void fetch_missing_extent_by_index(uint64_t extent_index);
|
||||
void set_extent_dirty_locked(uint64_t extent_index);
|
||||
size_t flush_dirty_extents(bool slowly);
|
||||
bool flush_dirty_extent(uint64_t extent_index);
|
||||
|
||||
size_t hash_to_extent_index(HashType ht);
|
||||
unique_lock<mutex> lock_extent_by_hash(HashType ht);
|
||||
|
Loading…
x
Reference in New Issue
Block a user