Commit
CR Updates (#338)
udi-speedb committed Jan 26, 2023
1 parent c64805f commit e71353d
Showing 4 changed files with 54 additions and 65 deletions.
57 changes: 19 additions & 38 deletions cache/cache.cc
@@ -129,46 +129,29 @@ Status Cache::CreateFromString(const ConfigOptions& config_options,

 // ==================================================================================================================================
 Cache::ItemOwnerId Cache::ItemOwnerIdAllocator::Allocate() {
-  auto num_free_after_me = --num_free_ids_;
-  if (num_free_after_me < 0) {
-    // This should be a rare case so locking the mutex should not have
-    // a big effect.
-    // The following could occur (T<I> == thread #I):
-    // - T1 Allocate(), on entry num_free_ids_ == 0 and free_ids_ is empty.
-    // - T1 decrements num_free_ids_ and num_free_after_me == -1
-    // - T2 Free(), puts an id on free_ids_ and sets num_free_ids_ == 1
-    // - T1 now sets num_free_ids_ below => num_free_ids_ = 1 (which is correct)
-    // => Setting num_free_ids_ to the size of the free list is the right
-    //    thing to do (rather than setting it to 0).
-    // Although eventually there is an available id to allocate, this is a
-    // corner case and doesn't seem to be worth handling to avoid returning
-    // kUnknownItemId.
-    std::lock_guard<std::mutex> lock(free_ids_mutex_);
-    num_free_ids_ = free_ids_.size();
-    return kUnknownItemId;
+  // In practice, owner-ids are allocated and freed when cf-s
+  // are created and destroyed => relatively rare => paying
+  // the price to always lock the mutex and simplify the code
+  std::lock_guard<std::mutex> lock(free_ids_mutex_);
+
+  // First allocate from the free list if possible
+  if (free_ids_.empty() == false) {
+    auto allocated_id = free_ids_.front();
+    free_ids_.pop_front();
+    return allocated_id;
   }
 
-  auto allocated_id = kUnknownItemId;
-
-  if (has_wrapped_around_ == false) {
-    allocated_id = next_item_owner_id_++;
-
-    if (allocated_id == kMaxOwnerItemId) {
-      has_wrapped_around_ = true;
-    }
-  } else {
-    std::lock_guard<std::mutex> lock(free_ids_mutex_);
-    // There must be at least 1 element on the free_ids_ list since
-    // num_free_ids_ must have been >= 1 on entry if we got here.
-    // Asserting to catch the bug during testing; however, this is a
-    // nice-to-have feature, so avoiding the exception when calling front()
-    // just in case.
-    assert(free_ids_.empty() == false);
-    if (free_ids_.empty() == false) {
-      allocated_id = free_ids_.front();
-      free_ids_.pop_front();
-    }
-  }
+  // Nothing on the free list - try to allocate from the
+  // next item counter if not yet exhausted
+  if (has_wrapped_around_) {
+    // counter exhausted, allocation not possible
+    return kUnknownItemId;
+  }
+
+  auto allocated_id = next_item_owner_id_++;
+
+  if (allocated_id == kMaxOwnerItemId) {
+    has_wrapped_around_ = true;
+  }
 
   return allocated_id;
Expand All @@ -181,8 +164,6 @@ void Cache::ItemOwnerIdAllocator::Free(ItemOwnerId* id) {
   // pay too much space to support it.
   if (free_ids_.size() < kMaxFreeItemOwnersIdListSize) {
     free_ids_.push_back(*id);
-    // Increment only once the id is actually on the list
-    ++num_free_ids_;
   }
   *id = kUnknownItemId;
 }
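Read outside the diff, the simplified Allocate()/Free() pair looks like the following self-contained sketch. The id type, the kMin/kMax/kUnknown constants, and the free-list cap are placeholder assumptions here (the real definitions live in include/rocksdb/cache.h), and the mutex guard in Free() is assumed to match the one shown for Allocate():

#include <cstddef>
#include <cstdint>
#include <list>
#include <mutex>

// Sketch only: the real types and constants come from include/rocksdb/cache.h.
using ItemOwnerId = uint64_t;
constexpr ItemOwnerId kUnknownItemId = 0;               // assumed sentinel
constexpr ItemOwnerId kMinOwnerItemId = 1;              // assumed range start
constexpr ItemOwnerId kMaxOwnerItemId = 1000000;        // assumed range end
constexpr size_t kMaxFreeItemOwnersIdListSize = 10000;  // assumed cap

class ItemOwnerIdAllocator {
 public:
  ItemOwnerId Allocate() {
    // Owner-ids are allocated/freed only when column families are created
    // or destroyed, so always taking the mutex is cheap and keeps the code
    // simple (the rationale stated in the diff above).
    std::lock_guard<std::mutex> lock(free_ids_mutex_);

    // First allocate from the free list if possible.
    if (!free_ids_.empty()) {
      auto allocated_id = free_ids_.front();
      free_ids_.pop_front();
      return allocated_id;
    }

    // Nothing on the free list; once the counter has handed out
    // kMaxOwnerItemId, no fresh id can be allocated.
    if (has_wrapped_around_) {
      return kUnknownItemId;
    }

    auto allocated_id = next_item_owner_id_++;
    if (allocated_id == kMaxOwnerItemId) {
      has_wrapped_around_ = true;
    }
    return allocated_id;
  }

  void Free(ItemOwnerId* id) {
    std::lock_guard<std::mutex> lock(free_ids_mutex_);  // assumed guard
    // Cap the free list so an id-churning workload cannot grow it without
    // bound; ids freed past the cap are simply dropped.
    if (free_ids_.size() < kMaxFreeItemOwnersIdListSize) {
      free_ids_.push_back(*id);
    }
    *id = kUnknownItemId;
  }

 private:
  ItemOwnerId next_item_owner_id_ = kMinOwnerItemId;
  bool has_wrapped_around_ = false;
  std::mutex free_ids_mutex_;
  std::list<ItemOwnerId> free_ids_;
};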
2 changes: 1 addition & 1 deletion cache/lru_cache.cc
@@ -469,7 +469,7 @@ void LRUCacheShard::Promote(LRUHandle* e) {
         e->IsHighPri() ? Cache::Priority::HIGH : Cache::Priority::LOW;
     s = Insert(e->key(), e->hash, /*value=*/nullptr, 0,
                /*deleter=*/nullptr, /*helper=*/nullptr, /*handle=*/nullptr,
-               priority, Cache::kUnknownItemId);
+               priority, e->item_owner_id);
   } else {
     e->SetInCache(true);
     e->SetIsStandalone(false);
54 changes: 32 additions & 22 deletions db/db_block_cache_test.cc
@@ -1744,42 +1744,53 @@ TEST_F(DBBlockCacheTest, ItemIdAllocation) {

   size_t max_num_ids = Cache::kMaxOwnerItemId - Cache::kMinOwnerItemId + 1;
   auto expected_num_free_ids = max_num_ids;
-  auto expected_next_id = Cache::kMinOwnerItemId;
-  auto num_ids_allocated = 0U;
-  auto num_ids_freed = 0U;
 
+  // Allocate 10 id-s
+  auto expected_next_id = Cache::kMinOwnerItemId;
   for (auto i = 0U; i < 10U; ++i) {
     ASSERT_EQ(cache->GetNextItemOwnerId(), expected_next_id);
-    auto to_discard_id = expected_next_id;
-    cache->DiscardItemOwnerId(&to_discard_id);
-    ++num_ids_allocated;
-    ++num_ids_freed;
     ++expected_next_id;
+    --expected_num_free_ids;
   }
+  --expected_next_id;
 
-  // Exhaust all of the id-s before wrap around
-  while (expected_num_free_ids > 10U) {
-    ASSERT_EQ(cache->GetNextItemOwnerId(), expected_next_id);
-    ++num_ids_allocated;
-    ++expected_next_id;
-    --expected_num_free_ids;
+  // Release all 10 allocated id-s in reverse order
+  Cache::ItemOwnerId to_discard_id = expected_next_id;
+  for (auto i = 0U; i < 10U; ++i) {
+    auto temp = to_discard_id;
+    cache->DiscardItemOwnerId(&temp);
+    ASSERT_EQ(temp, Cache::kUnknownItemId);
+
+    ASSERT_GT(to_discard_id, 0U);
+    --to_discard_id;
+    ++expected_num_free_ids;
   }
 
   // Verify allocation from the free list
-  expected_next_id = Cache::kMinOwnerItemId;
+  // Allocate 10 id-s and expect to get the id-s from the free list
+  // in the reverse order
+  ASSERT_EQ(expected_next_id, Cache::kMinOwnerItemId + 9U);
   for (auto i = 0U; i < 10U; ++i) {
     ASSERT_EQ(cache->GetNextItemOwnerId(), expected_next_id);
-    ++num_ids_allocated;
-    ++expected_next_id;
+    ASSERT_GT(expected_next_id, 0U);
+    --expected_next_id;
     --expected_num_free_ids;
   }
 
-  ASSERT_EQ(num_ids_allocated - num_ids_freed, max_num_ids);
-  ASSERT_EQ(expected_num_free_ids, 0U);
+  ASSERT_EQ(expected_num_free_ids, max_num_ids - 10U);
 
-  // Expecting next allocation to fail
-  ASSERT_EQ(cache->GetNextItemOwnerId(), Cache::kUnknownItemId);
-  // Free list should now be empty
+  // Exhaust all of the id-s before wrap around
+  expected_next_id = Cache::kMinOwnerItemId + 10U;
+  while (expected_num_free_ids > 0U) {
+    ASSERT_EQ(cache->GetNextItemOwnerId(), expected_next_id);
+    ++expected_next_id;
+    --expected_num_free_ids;
+  }
+
+  // Expecting next allocations to fail
+  for (auto i = 0U; i < 5U; ++i) {
+    ASSERT_EQ(cache->GetNextItemOwnerId(), Cache::kUnknownItemId);
+  }
 
   // Free some arbitrary id-s
   Cache::ItemOwnerId owner_id = 5000U;
@@ -1788,7 +1799,6 @@ TEST_F(DBBlockCacheTest, ItemIdAllocation) {
   cache->DiscardItemOwnerId(&owner_id);
   owner_id = 3000;
   cache->DiscardItemOwnerId(&owner_id);
-  num_ids_freed += 3;
 
   // Expect allocations to return id-s in the same order as freed
   ASSERT_EQ(cache->GetNextItemOwnerId(), 5000);
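The recycling order these assertions rely on follows from Free() appending to the back of free_ids_ while Allocate() pops from the front, i.e. FIFO: ids are re-allocated in the same order they were freed. A minimal usage sketch, reusing the placeholder ItemOwnerIdAllocator sketch from the cache.cc notes above:

#include <cassert>

int main() {
  ItemOwnerIdAllocator allocator;  // sketch class from the cache.cc notes

  ItemOwnerId a = allocator.Allocate();  // kMinOwnerItemId
  ItemOwnerId b = allocator.Allocate();  // kMinOwnerItemId + 1

  allocator.Free(&a);  // free list: [kMin];         a == kUnknownItemId
  allocator.Free(&b);  // free list: [kMin, kMin+1]; b == kUnknownItemId

  // FIFO recycling: ids come back in the same order they were freed.
  assert(allocator.Allocate() == kMinOwnerItemId);
  assert(allocator.Allocate() == kMinOwnerItemId + 1);
  return 0;
}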
6 changes: 2 additions & 4 deletions include/rocksdb/cache.h
@@ -27,7 +27,6 @@
 #include <functional>
 #include <limits>
 #include <list>
-#include <memory>
 #include <mutex>
 #include <string>

@@ -687,9 +686,8 @@ class Cache {
   void Free(ItemOwnerId* id);
 
  private:
-  std::atomic<ItemOwnerId> next_item_owner_id_ = kMinOwnerItemId;
-  std::atomic<int64_t> num_free_ids_ = kMaxOwnerItemId - kMinOwnerItemId + 1;
-  std::atomic<bool> has_wrapped_around_ = false;
+  ItemOwnerId next_item_owner_id_ = kMinOwnerItemId;
+  bool has_wrapped_around_ = false;
   std::mutex free_ids_mutex_;
   std::list<ItemOwnerId> free_ids_;
 };
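The public entry points the test exercises are Cache::GetNextItemOwnerId() and Cache::DiscardItemOwnerId(). Below is a hedged sketch of the intended call pattern around a column family's lifetime; the function name, the namespace line, and the tagging step are illustrative assumptions — only the two Cache calls come from this commit:

#include <memory>
#include "rocksdb/cache.h"

using namespace ROCKSDB_NAMESPACE;  // assumed namespace macro

void OwnerIdLifecycleSketch() {
  std::shared_ptr<Cache> cache = NewLRUCache(8 * 1024 * 1024);

  // On column-family creation: reserve an owner id for its cache items.
  Cache::ItemOwnerId owner_id = cache->GetNextItemOwnerId();
  if (owner_id == Cache::kUnknownItemId) {
    // Id space exhausted and free list empty; items stay unattributed.
  }

  // ... pass owner_id when inserting this owner's blocks into the cache ...

  // On column-family destruction: return the id (it goes on the free list
  // unless the list is at its cap) and reset it to kUnknownItemId.
  cache->DiscardItemOwnerId(&owner_id);
}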
