Report to Log the Index size per CF - LRU Cache Only (#338)
udi-speedb committed Jan 17, 2023
1 parent 8a81432 commit b09b016
Showing 37 changed files with 797 additions and 161 deletions.
70 changes: 70 additions & 0 deletions cache/cache.cc
@@ -126,4 +126,74 @@ Status Cache::CreateFromString(const ConfigOptions& config_options,
}
return status;
}

// ==================================================================================================================================
Cache::ItemOwnerId Cache::ItemOwnerIdAllocator::Allocate() {
auto num_free_after_me = --num_free_ids_;
if (num_free_after_me < 0) {
// This should be a rare case so locking the mutex should not have
// a big effect.
// The following could occur (T<I> == thread #I):
// - T1 Allocate(), on entry num_free_ids_ == 0 and free_ids_ is empty.
// - T1 decrements num_free_ids_ and num_free_after_me == -1
// - T2 Free(), puts an id on free_ids_ and sets num_free_ids_ to 1
// - T1 now sets num_free_ids_ below => num_free_ids_ = 1 (which is correct)
// => Setting num_free_ids_ to the size of the free list (rather than to 0)
//    is the right thing to do.
// Although eventually there is an available id to allocate, this is a
// corner case and doesn't seem to be worth handling to avoid returning
// kUnknownItemId.
std::lock_guard<std::mutex> lock(free_ids_mutex_);
num_free_ids_ = free_ids_.size();
return kUnknownItemId;
}

auto allocated_id = kUnknownItemId;

if (has_wrapped_around_ == false) {
allocated_id = next_item_owner_id_++;

if (allocated_id == kMaxOwnerItemId) {
has_wrapped_around_ = true;
}
} else {
std::lock_guard<std::mutex> lock(free_ids_mutex_);
// There must be at least one element on the free_ids_ list, since
// num_free_ids_ must have been >= 1 on entry for us to get here.
// Assert to catch the bug during testing; however, this is a
// nice-to-have feature, so also guard the call to front() to avoid
// undefined behavior on an empty list, just in case.
assert(free_ids_.empty() == false);
if (free_ids_.empty() == false) {
allocated_id = free_ids_.front();
free_ids_.pop_front();
}
}

return allocated_id;
}

void Cache::ItemOwnerIdAllocator::Free(ItemOwnerId* id) {
if (*id != kUnknownItemId) {
std::lock_guard<std::mutex> lock(free_ids_mutex_);
// If the free list is already full, the freed id is lost. This is a
// luxury feature, so we don't want to spend too much space supporting it.
if (free_ids_.size() < kMaxFreeItemOwnersIdListSize) {
free_ids_.push_back(*id);
// Increment only once the id is actually on the list.
++num_free_ids_;
}
*id = kUnknownItemId;
}
}

Cache::ItemOwnerId Cache::GetNextItemOwnerId() {
return owner_id_allocator_.Allocate();
}

void Cache::DiscardItemOwnerId(ItemOwnerId* item_owner_id) {
owner_id_allocator_.Free(item_owner_id);
}

} // namespace ROCKSDB_NAMESPACE
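
As an aside, GetNextItemOwnerId() and DiscardItemOwnerId() above are the only entry points into the allocator. A minimal lifecycle sketch — not part of this commit, with the column-family wiring left hypothetical — could look like the following:

// Hypothetical owner-id lifecycle for one column family (illustrative only).
#include <cassert>
#include <memory>

#include "rocksdb/cache.h"

void TrackOneColumnFamily(
    const std::shared_ptr<ROCKSDB_NAMESPACE::Cache>& cache) {
  using ROCKSDB_NAMESPACE::Cache;

  // Ask the cache for an id identifying this column family. The allocator may
  // return kUnknownItemId once the id space is exhausted and the free list is
  // empty (see Allocate() above), so callers must tolerate that value.
  Cache::ItemOwnerId owner_id = cache->GetNextItemOwnerId();
  if (owner_id == Cache::kUnknownItemId) {
    // Per-CF accounting becomes best-effort; this CF's inserts stay untracked.
  }

  // ... tag this CF's block-cache insertions with owner_id ...

  // When the CF is dropped, hand the id back so it can be recycled. Free()
  // resets the id to kUnknownItemId.
  cache->DiscardItemOwnerId(&owner_id);
  assert(owner_id == Cache::kUnknownItemId);
}
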
3 changes: 2 additions & 1 deletion cache/cache_bench_tool.cc
@@ -485,7 +485,8 @@ class CacheBench {
total_entry_count = 0;
deleters.clear();
auto fn = [&](const Slice& key, void* /*value*/, size_t charge,
Cache::DeleterFn deleter) {
Cache::DeleterFn deleter,
Cache::ItemOwnerId /* item_owner_id */) {
total_key_size += key.size();
total_charge += charge;
++total_entry_count;
15 changes: 15 additions & 0 deletions cache/cache_entry_roles.cc
@@ -101,6 +101,21 @@ std::string BlockCacheEntryStatsMapKeys::UsedPercent(CacheEntryRole role) {
return GetPrefixedCacheEntryRoleName(kPrefix, role);
}

const std::string& BlockCacheCfStatsMapKeys::CfName() {
static const std::string kCfName = "cf_name";
return kCfName;
}

const std::string& BlockCacheCfStatsMapKeys::CacheId() {
static const std::string kCacheId = "id";
return kCacheId;
}

std::string BlockCacheCfStatsMapKeys::UsedBytes(CacheEntryRole role) {
const static std::string kPrefix = "bytes.";
return GetPrefixedCacheEntryRoleName(kPrefix, role);
}

namespace {

struct Registry {
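
For orientation, the BlockCacheCfStatsMapKeys accessors added above presumably name the entries of a per-CF stats map, mirroring the existing BlockCacheEntryStatsMapKeys helpers. A hypothetical map for a single column family — values made up, and the header that declares BlockCacheCfStatsMapKeys assumed to be rocksdb/cache.h — might be populated like this:

// Hypothetical per-CF stats map keyed by the accessors above (values made up;
// the header location for BlockCacheCfStatsMapKeys is an assumption).
#include <map>
#include <string>

#include "rocksdb/cache.h"

std::map<std::string, std::string> MakeExampleCfStats() {
  using ROCKSDB_NAMESPACE::BlockCacheCfStatsMapKeys;
  using ROCKSDB_NAMESPACE::CacheEntryRole;
  return {
      {BlockCacheCfStatsMapKeys::CfName(), "default"},
      {BlockCacheCfStatsMapKeys::CacheId(), "cache-1"},
      {BlockCacheCfStatsMapKeys::UsedBytes(CacheEntryRole::kIndexBlock),
       "1048576"},
      {BlockCacheCfStatsMapKeys::UsedBytes(CacheEntryRole::kFilterBlock),
       "524288"},
  };
}
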
6 changes: 4 additions & 2 deletions cache/cache_test.cc
@@ -942,7 +942,8 @@ TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
TEST_P(CacheTest, ApplyToAllEntriesTest) {
std::vector<std::string> callback_state;
const auto callback = [&](const Slice& key, void* value, size_t charge,
Cache::DeleterFn deleter) {
Cache::DeleterFn deleter,
Cache::ItemOwnerId /* item_owner_id */) {
callback_state.push_back(std::to_string(DecodeKey(key)) + "," +
std::to_string(DecodeValue(value)) + "," +
std::to_string(charge));
@@ -986,7 +987,8 @@ TEST_P(CacheTest, ApplyToAllEntriesDuringResize) {
// For callback
int special_count = 0;
const auto callback = [&](const Slice&, void*, size_t charge,
Cache::DeleterFn) {
Cache::DeleterFn,
Cache::ItemOwnerId /* item_owner_id */) {
if (charge == static_cast<size_t>(kSpecialCharge)) {
++special_count;
}
12 changes: 8 additions & 4 deletions cache/charged_cache.cc
@@ -19,8 +19,10 @@ ChargedCache::ChargedCache(std::shared_ptr<Cache> cache,

Status ChargedCache::Insert(const Slice& key, void* value, size_t charge,
DeleterFn deleter, Handle** handle,
Priority priority) {
Status s = cache_->Insert(key, value, charge, deleter, handle, priority);
Priority priority,
Cache::ItemOwnerId item_owner_id) {
Status s = cache_->Insert(key, value, charge, deleter, handle, priority,
item_owner_id);
if (s.ok()) {
// Insert may cause the cache entry eviction if the cache is full. So we
// directly call the reservation manager to update the total memory used
@@ -34,8 +36,10 @@ Status ChargedCache::Insert(const Slice& key, void* value, size_t charge,

Status ChargedCache::Insert(const Slice& key, void* value,
const CacheItemHelper* helper, size_t charge,
Handle** handle, Priority priority) {
Status s = cache_->Insert(key, value, helper, charge, handle, priority);
Handle** handle, Priority priority,
Cache::ItemOwnerId item_owner_id) {
Status s = cache_->Insert(key, value, helper, charge, handle, priority,
item_owner_id);
if (s.ok()) {
// Insert may cause the cache entry eviction if the cache is full. So we
// directly call the reservation manager to update the total memory used
14 changes: 9 additions & 5 deletions cache/charged_cache.h
@@ -24,10 +24,13 @@ class ChargedCache : public Cache {
~ChargedCache() override = default;

Status Insert(const Slice& key, void* value, size_t charge, DeleterFn deleter,
Handle** handle, Priority priority) override;
Status Insert(const Slice& key, void* value, const CacheItemHelper* helper,
size_t charge, Handle** handle = nullptr,
Priority priority = Priority::LOW) override;
Handle** handle, Priority priority,
Cache::ItemOwnerId item_owner_id) override;
Status Insert(
const Slice& key, void* value, const CacheItemHelper* helper,
size_t charge, Handle** handle = nullptr,
Priority priority = Priority::LOW,
Cache::ItemOwnerId item_owner_id = Cache::kUnknownItemId) override;

Cache::Handle* Lookup(const Slice& key, Statistics* stats) override;
Cache::Handle* Lookup(const Slice& key, const CacheItemHelper* helper,
@@ -90,7 +93,8 @@

void ApplyToAllEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
Cache::DeleterFn deleter)>& callback,
Cache::DeleterFn deleter,
Cache::ItemOwnerId item_owner_id)>& callback,
const Cache::ApplyToAllEntriesOptions& opts) override {
cache_->ApplyToAllEntries(callback, opts);
}
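
To make the widened signatures concrete, here is a sketch — assuming the Insert and ApplyToAllEntries overloads declared above are reachable through the public Cache interface and using NewLRUCache from the existing API; the keys, charges, and tally map are illustrative — of tagging insertions with an owner id and summing the charge per owner:

// Illustrative only: insert entries tagged with an owner id, then sum the
// charge per owner via the extended ApplyToAllEntries callback.
#include <cassert>
#include <cstdio>
#include <map>
#include <memory>

#include "rocksdb/cache.h"

using namespace ROCKSDB_NAMESPACE;

void ReportBytesPerOwner() {
  std::shared_ptr<Cache> cache = NewLRUCache(8 << 20 /* capacity */);

  Cache::ItemOwnerId cf1 = cache->GetNextItemOwnerId();
  Cache::ItemOwnerId cf2 = cache->GetNextItemOwnerId();

  // Tag each insertion with the owning CF's id (values/deleters kept trivial).
  Status s = cache->Insert("cf1-index-block", /*value=*/nullptr, 1024,
                           /*deleter=*/nullptr, /*handle=*/nullptr,
                           Cache::Priority::LOW, cf1);
  assert(s.ok());
  s = cache->Insert("cf2-index-block", /*value=*/nullptr, 2048,
                    /*deleter=*/nullptr, /*handle=*/nullptr,
                    Cache::Priority::LOW, cf2);
  assert(s.ok());

  // The callback now also receives each item's owner id.
  std::map<Cache::ItemOwnerId, size_t> bytes_per_owner;
  cache->ApplyToAllEntries(
      [&](const Slice& /*key*/, void* /*value*/, size_t charge,
          Cache::DeleterFn /*deleter*/, Cache::ItemOwnerId owner) {
        bytes_per_owner[owner] += charge;
      },
      /*opts=*/{});

  for (const auto& [owner, bytes] : bytes_per_owner) {
    std::printf("owner %llu: %zu bytes\n",
                static_cast<unsigned long long>(owner), bytes);
  }

  cache->DiscardItemOwnerId(&cf1);
  cache->DiscardItemOwnerId(&cf2);
}
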
10 changes: 6 additions & 4 deletions cache/clock_cache.cc
@@ -970,7 +970,8 @@ void ClockCacheShard::EraseUnRefEntries() { table_.EraseUnRefEntries(); }

void ClockCacheShard::ApplyToSomeEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
DeleterFn deleter)>& callback,
DeleterFn deleter,
Cache::ItemOwnerId item_owner_id)>& callback,
uint32_t average_entries_per_lock, uint32_t* state) {
// The state is essentially going to be the starting hash, which works
// nicely even if we resize between calls because we use upper-most
@@ -995,7 +996,8 @@ void ClockCacheShard::ApplyToSomeEntries(

table_.ConstApplyToEntriesRange(
[callback](const ClockHandle& h) {
callback(h.KeySlice(), h.value, h.total_charge, h.deleter);
callback(h.KeySlice(), h.value, h.total_charge, h.deleter,
Cache::kUnknownItemId);
},
index_begin, index_end, false);
}
@@ -1035,8 +1037,8 @@ void ClockCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {

Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
size_t charge, Cache::DeleterFn deleter,
Cache::Handle** handle,
Cache::Priority priority) {
Cache::Handle** handle, Cache::Priority priority,
Cache::ItemOwnerId /* item_owner_id */) {
if (UNLIKELY(key.size() != kCacheKeySize)) {
return Status::NotSupported("ClockCache only supports key size " +
std::to_string(kCacheKeySize) + "B");
12 changes: 8 additions & 4 deletions cache/clock_cache.h
@@ -508,7 +508,8 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShard {

Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
Cache::DeleterFn deleter, Cache::Handle** handle,
Cache::Priority priority) override;
Cache::Priority priority,
Cache::ItemOwnerId item_owner_id) override;

Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;

@@ -531,7 +532,8 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShard {

void ApplyToSomeEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
DeleterFn deleter)>& callback,
DeleterFn deleter,
Cache::ItemOwnerId item_owner_id)>& callback,
uint32_t average_entries_per_lock, uint32_t* state) override;

void EraseUnRefEntries() override;
@@ -541,8 +543,10 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShard {
// SecondaryCache not yet supported
Status Insert(const Slice& key, uint32_t hash, void* value,
const Cache::CacheItemHelper* helper, size_t charge,
Cache::Handle** handle, Cache::Priority priority) override {
return Insert(key, hash, value, charge, helper->del_cb, handle, priority);
Cache::Handle** handle, Cache::Priority priority,
Cache::ItemOwnerId item_owner_id) override {
return Insert(key, hash, value, charge, helper->del_cb, handle, priority,
item_owner_id);
}

Cache::Handle* Lookup(const Slice& key, uint32_t hash,
8 changes: 5 additions & 3 deletions cache/fast_lru_cache.cc
@@ -210,7 +210,8 @@ void LRUCacheShard::EraseUnRefEntries() {

void LRUCacheShard::ApplyToSomeEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
DeleterFn deleter)>& callback,
DeleterFn deleter,
Cache::ItemOwnerId item_owner_id)>& callback,
uint32_t average_entries_per_lock, uint32_t* state) {
// The state is essentially going to be the starting hash, which works
// nicely even if we resize between calls because we use upper-most
@@ -238,7 +239,7 @@ void LRUCacheShard::ApplyToSomeEntries(
[callback,
metadata_charge_policy = metadata_charge_policy_](LRUHandle* h) {
callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
h->deleter);
h->deleter, Cache::kUnknownItemId);
},
index_begin, index_end);
}
@@ -323,7 +324,8 @@ void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
size_t charge, Cache::DeleterFn deleter,
Cache::Handle** handle,
Cache::Priority /*priority*/) {
Cache::Priority /*priority*/,
Cache::ItemOwnerId /* item_owner_id */) {
if (key.size() != kCacheKeySize) {
return Status::NotSupported("FastLRUCache only supports key size " +
std::to_string(kCacheKeySize) + "B");
12 changes: 8 additions & 4 deletions cache/fast_lru_cache.h
@@ -337,12 +337,15 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
// nullptr.
Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
Cache::DeleterFn deleter, Cache::Handle** handle,
Cache::Priority priority) override;
Cache::Priority priority,
Cache::ItemOwnerId item_owner_id) override;

Status Insert(const Slice& key, uint32_t hash, void* value,
const Cache::CacheItemHelper* helper, size_t charge,
Cache::Handle** handle, Cache::Priority priority) override {
return Insert(key, hash, value, charge, helper->del_cb, handle, priority);
Cache::Handle** handle, Cache::Priority priority,
Cache::ItemOwnerId item_owner_id) override {
return Insert(key, hash, value, charge, helper->del_cb, handle, priority,
item_owner_id);
}

Cache::Handle* Lookup(const Slice& key, uint32_t hash,
@@ -372,7 +375,8 @@

void ApplyToSomeEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
DeleterFn deleter)>& callback,
DeleterFn deleter,
Cache::ItemOwnerId item_owner_id)>& callback,
uint32_t average_entries_per_lock, uint32_t* state) override;

void EraseUnRefEntries() override;
11 changes: 7 additions & 4 deletions cache/lru_cache.cc
@@ -161,7 +161,8 @@ void LRUCacheShard::EraseUnRefEntries() {

void LRUCacheShard::ApplyToSomeEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
DeleterFn deleter)>& callback,
DeleterFn deleter,
Cache::ItemOwnerId item_owner_id)>& callback,
uint32_t average_entries_per_lock, uint32_t* state) {
// The state is essentially going to be the starting hash, which works
// nicely even if we resize between calls because we use upper-most
@@ -192,7 +193,7 @@ void LRUCacheShard::ApplyToSomeEntries(
? h->info_.helper->del_cb
: h->info_.deleter;
callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
deleter);
deleter, h->item_owner_id);
},
index_begin, index_end);
}
@@ -468,7 +469,7 @@ void LRUCacheShard::Promote(LRUHandle* e) {
e->IsHighPri() ? Cache::Priority::HIGH : Cache::Priority::LOW;
s = Insert(e->key(), e->hash, /*value=*/nullptr, 0,
/*deleter=*/nullptr, /*helper=*/nullptr, /*handle=*/nullptr,
priority);
priority, Cache::kUnknownItemId);
} else {
e->SetInCache(true);
e->SetIsStandalone(false);
@@ -680,7 +681,8 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key, void* value),
const Cache::CacheItemHelper* helper,
Cache::Handle** handle, Cache::Priority priority) {
Cache::Handle** handle, Cache::Priority priority,
Cache::ItemOwnerId item_owner_id) {
// Allocate the memory here outside of the mutex.
// If the cache is full, we'll have to release it.
// It shouldn't happen very often though.
@@ -707,6 +709,7 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
memcpy(e->key_data, key.data(), key.size());
e->CalcTotalCharge(charge, metadata_charge_policy_);

e->item_owner_id = item_owner_id;
return InsertItem(e, handle, /* free_handle_on_fail */ true);
}
