Report to Log the Index size per CF - LRU Cache Only (#338) #368

Merged · 1 commit · Feb 14, 2023
51 changes: 51 additions & 0 deletions cache/cache.cc
@@ -126,4 +126,55 @@ Status Cache::CreateFromString(const ConfigOptions& config_options,
}
return status;
}

// ==================================================================================================================================
Cache::ItemOwnerId Cache::ItemOwnerIdAllocator::Allocate() {
// In practice, owner ids are allocated and freed when CFs are
// created and destroyed. That is rare enough that we pay the price
// of always locking the mutex to keep the code simple.
std::lock_guard<std::mutex> lock(free_ids_mutex_);

// First allocate from the free list if possible
if (!free_ids_.empty()) {
auto allocated_id = free_ids_.front();
free_ids_.pop_front();
return allocated_id;
}

// Nothing on the free list - try to allocate from the
// next item counter if not yet exhausted
if (has_wrapped_around_) {
// counter exhausted, allocation not possible
return kUnknownItemId;
}

auto allocated_id = next_item_owner_id_++;

if (allocated_id == kMaxOwnerItemId) {
has_wrapped_around_ = true;
}

return allocated_id;
}

void Cache::ItemOwnerIdAllocator::Free(ItemOwnerId* id) {
if (*id != kUnknownItemId) {
std::lock_guard<std::mutex> lock(free_ids_mutex_);
// If the free list is already at capacity, the freed id is simply
// lost. Tracking every freed id isn't worth the extra space.
if (free_ids_.size() < kMaxFreeItemOwnersIdListSize) {
free_ids_.push_back(*id);
}
*id = kUnknownItemId;
}
}

Cache::ItemOwnerId Cache::GetNextItemOwnerId() {
return owner_id_allocator_.Allocate();
}

void Cache::DiscardItemOwnerId(ItemOwnerId* item_owner_id) {
owner_id_allocator_.Free(item_owner_id);
}

} // namespace ROCKSDB_NAMESPACE
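For orientation, here is a minimal sketch of the allocator's contract as exercised through the new public entry points. The `cache` variable and the CF-lifecycle framing are assumptions for illustration, not code from this PR:

// Hypothetical usage sketch (not part of this diff).
std::shared_ptr<Cache> cache = NewLRUCache(64 << 20 /* capacity */);

// A CF-like owner obtains an id at creation time. Ids come from the
// free list when possible, otherwise from the monotonic counter; once
// the counter is exhausted, Allocate() permanently returns kUnknownItemId.
Cache::ItemOwnerId owner_id = cache->GetNextItemOwnerId();

// ... tag this owner's block-cache insertions with owner_id ...

// At owner destruction the id is recycled (if the free list has room)
// and the caller's copy is reset to kUnknownItemId.
cache->DiscardItemOwnerId(&owner_id);
assert(owner_id == Cache::kUnknownItemId);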
3 changes: 2 additions & 1 deletion cache/cache_bench_tool.cc
@@ -485,7 +485,8 @@ class CacheBench {
total_entry_count = 0;
deleters.clear();
auto fn = [&](const Slice& key, void* /*value*/, size_t charge,
- Cache::DeleterFn deleter) {
+ Cache::DeleterFn deleter,
+ Cache::ItemOwnerId /* item_owner_id */) {
total_key_size += key.size();
total_charge += charge;
++total_entry_count;
15 changes: 15 additions & 0 deletions cache/cache_entry_roles.cc
@@ -101,6 +101,21 @@ std::string BlockCacheEntryStatsMapKeys::UsedPercent(CacheEntryRole role) {
return GetPrefixedCacheEntryRoleName(kPrefix, role);
}

const std::string& BlockCacheCfStatsMapKeys::CfName() {
static const std::string kCfName = "cf_name";
return kCfName;
}

const std::string& BlockCacheCfStatsMapKeys::CacheId() {
static const std::string kCacheId = "id";
return kCacheId;
}

std::string BlockCacheCfStatsMapKeys::UsedBytes(CacheEntryRole role) {
static const std::string kPrefix = "bytes.";
return GetPrefixedCacheEntryRoleName(kPrefix, role);
}

namespace {

struct Registry {
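To make the intent of these keys concrete, here is a hedged sketch of how a consumer might populate one per-CF stats map with them. The CF name, cache id, and byte count are illustrative values, not this PR's actual population logic:

// Hypothetical sketch (illustrative values).
std::map<std::string, std::string> cf_stats;
cf_stats[BlockCacheCfStatsMapKeys::CfName()] = "default";   // key "cf_name"
cf_stats[BlockCacheCfStatsMapKeys::CacheId()] = "cache-1";  // key "id"
// Per-role byte counts are keyed "bytes.<role name>", e.g. index blocks:
cf_stats[BlockCacheCfStatsMapKeys::UsedBytes(CacheEntryRole::kIndexBlock)] =
    std::to_string(uint64_t{12345678});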
6 changes: 4 additions & 2 deletions cache/cache_test.cc
@@ -942,7 +942,8 @@ TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
TEST_P(CacheTest, ApplyToAllEntriesTest) {
std::vector<std::string> callback_state;
const auto callback = [&](const Slice& key, void* value, size_t charge,
- Cache::DeleterFn deleter) {
+ Cache::DeleterFn deleter,
+ Cache::ItemOwnerId /* item_owner_id */) {
callback_state.push_back(std::to_string(DecodeKey(key)) + "," +
std::to_string(DecodeValue(value)) + "," +
std::to_string(charge));
@@ -986,7 +987,8 @@ TEST_P(CacheTest, ApplyToAllEntriesDuringResize) {
// For callback
int special_count = 0;
const auto callback = [&](const Slice&, void*, size_t charge,
- Cache::DeleterFn) {
+ Cache::DeleterFn,
+ Cache::ItemOwnerId /* item_owner_id */) {
if (charge == static_cast<size_t>(kSpecialCharge)) {
++special_count;
}
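The two tests above only widen the callback to the new arity. As a sketch of what the extra argument enables (assuming a populated `cache` object; not code from this PR), a caller can now bucket cache usage by owner id:

// Hypothetical sketch: aggregate per-owner charge via the new callback arity.
std::unordered_map<Cache::ItemOwnerId, size_t> usage_per_owner;
cache->ApplyToAllEntries(
    [&](const Slice& /*key*/, void* /*value*/, size_t charge,
        Cache::DeleterFn /*deleter*/, Cache::ItemOwnerId item_owner_id) {
      usage_per_owner[item_owner_id] += charge;
    },
    Cache::ApplyToAllEntriesOptions{});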
12 changes: 8 additions & 4 deletions cache/charged_cache.cc
@@ -19,8 +19,10 @@ ChargedCache::ChargedCache(std::shared_ptr<Cache> cache,

Status ChargedCache::Insert(const Slice& key, void* value, size_t charge,
DeleterFn deleter, Handle** handle,
- Priority priority) {
- Status s = cache_->Insert(key, value, charge, deleter, handle, priority);
+ Priority priority,
+ Cache::ItemOwnerId item_owner_id) {
+ Status s = cache_->Insert(key, value, charge, deleter, handle, priority,
+ item_owner_id);
if (s.ok()) {
// Insert may cause the cache entry eviction if the cache is full. So we
// directly call the reservation manager to update the total memory used
@@ -34,8 +36,10 @@ Status ChargedCache::Insert(const Slice& key, void* value, size_t charge,

Status ChargedCache::Insert(const Slice& key, void* value,
const CacheItemHelper* helper, size_t charge,
- Handle** handle, Priority priority) {
- Status s = cache_->Insert(key, value, helper, charge, handle, priority);
+ Handle** handle, Priority priority,
+ Cache::ItemOwnerId item_owner_id) {
+ Status s = cache_->Insert(key, value, helper, charge, handle, priority,
+ item_owner_id);
if (s.ok()) {
// Insert may cause the cache entry eviction if the cache is full. So we
// directly call the reservation manager to update the total memory used
14 changes: 9 additions & 5 deletions cache/charged_cache.h
@@ -24,10 +24,13 @@ class ChargedCache : public Cache {
~ChargedCache() override = default;

Status Insert(const Slice& key, void* value, size_t charge, DeleterFn deleter,
- Handle** handle, Priority priority) override;
- Status Insert(const Slice& key, void* value, const CacheItemHelper* helper,
- size_t charge, Handle** handle = nullptr,
- Priority priority = Priority::LOW) override;
+ Handle** handle, Priority priority,
+ Cache::ItemOwnerId item_owner_id) override;
+ Status Insert(
+ const Slice& key, void* value, const CacheItemHelper* helper,
+ size_t charge, Handle** handle = nullptr,
+ Priority priority = Priority::LOW,
+ Cache::ItemOwnerId item_owner_id = Cache::kUnknownItemId) override;

Cache::Handle* Lookup(const Slice& key, Statistics* stats) override;
Cache::Handle* Lookup(const Slice& key, const CacheItemHelper* helper,
@@ -90,7 +93,8 @@

void ApplyToAllEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
- Cache::DeleterFn deleter)>& callback,
+ Cache::DeleterFn deleter,
+ Cache::ItemOwnerId item_owner_id)>& callback,
const Cache::ApplyToAllEntriesOptions& opts) override {
cache_->ApplyToAllEntries(callback, opts);
}
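Because the new trailing parameter defaults to Cache::kUnknownItemId (visible on the helper overload above; the first overload's default is assumed to come from the base Cache declaration), pre-existing call sites compile unchanged. A rough sketch, assuming `cache`, `key`, `value`, `charge`, `deleter`, and an allocated `cf_owner_id`:

// Hypothetical sketch of the two call shapes.
Cache::Handle* handle = nullptr;
// Legacy call site: the owner id silently defaults to kUnknownItemId.
Status s1 = cache->Insert(key, value, charge, deleter, &handle);
// CF-aware call site: thread the allocated owner id through explicitly.
Status s2 = cache->Insert(key, value, charge, deleter, &handle,
                          Cache::Priority::LOW, cf_owner_id);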
10 changes: 6 additions & 4 deletions cache/clock_cache.cc
@@ -970,7 +970,8 @@ void ClockCacheShard::EraseUnRefEntries() { table_.EraseUnRefEntries(); }

void ClockCacheShard::ApplyToSomeEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
- DeleterFn deleter)>& callback,
+ DeleterFn deleter,
+ Cache::ItemOwnerId item_owner_id)>& callback,
uint32_t average_entries_per_lock, uint32_t* state) {
// The state is essentially going to be the starting hash, which works
// nicely even if we resize between calls because we use upper-most
@@ -995,7 +996,8 @@

table_.ConstApplyToEntriesRange(
[callback](const ClockHandle& h) {
- callback(h.KeySlice(), h.value, h.total_charge, h.deleter);
+ callback(h.KeySlice(), h.value, h.total_charge, h.deleter,
+ Cache::kUnknownItemId);
},
index_begin, index_end, false);
}
@@ -1035,8 +1037,8 @@ void ClockCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {

Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
size_t charge, Cache::DeleterFn deleter,
- Cache::Handle** handle,
- Cache::Priority priority) {
+ Cache::Handle** handle, Cache::Priority priority,
+ Cache::ItemOwnerId /* item_owner_id */) {
if (UNLIKELY(key.size() != kCacheKeySize)) {
return Status::NotSupported("ClockCache only supports key size " +
std::to_string(kCacheKeySize) + "B");
12 changes: 8 additions & 4 deletions cache/clock_cache.h
@@ -508,7 +508,8 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShard {

Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
Cache::DeleterFn deleter, Cache::Handle** handle,
- Cache::Priority priority) override;
+ Cache::Priority priority,
+ Cache::ItemOwnerId item_owner_id) override;

Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;

@@ -531,7 +532,8 @@

void ApplyToSomeEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
- DeleterFn deleter)>& callback,
+ DeleterFn deleter,
+ Cache::ItemOwnerId item_owner_id)>& callback,
uint32_t average_entries_per_lock, uint32_t* state) override;

void EraseUnRefEntries() override;
@@ -541,8 +543,10 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShard {
// SecondaryCache not yet supported
Status Insert(const Slice& key, uint32_t hash, void* value,
const Cache::CacheItemHelper* helper, size_t charge,
- Cache::Handle** handle, Cache::Priority priority) override {
- return Insert(key, hash, value, charge, helper->del_cb, handle, priority);
+ Cache::Handle** handle, Cache::Priority priority,
+ Cache::ItemOwnerId item_owner_id) override {
+ return Insert(key, hash, value, charge, helper->del_cb, handle, priority,
+ item_owner_id);
}

Cache::Handle* Lookup(const Slice& key, uint32_t hash,
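Note that ClockCacheShard neither stores nor reports owner ids: its Insert ignores the parameter and its ApplyToSomeEntries always hands back Cache::kUnknownItemId, which is why the PR title says "LRU Cache Only". A consumer should therefore treat kUnknownItemId as an "unattributed" bucket; a hedged sketch of a tolerant callback body (variable names are illustrative):

// Hypothetical sketch: tolerate shards that do not track owner ids.
size_t unattributed_bytes = 0;
std::unordered_map<Cache::ItemOwnerId, size_t> per_owner_bytes;
// ... inside the ApplyToAllEntries callback:
if (item_owner_id == Cache::kUnknownItemId) {
  unattributed_bytes += charge;              // e.g. every ClockCache entry
} else {
  per_owner_bytes[item_owner_id] += charge;  // LRU cache entries, per owner
}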
8 changes: 5 additions & 3 deletions cache/fast_lru_cache.cc
@@ -210,7 +210,8 @@ void LRUCacheShard::EraseUnRefEntries() {

void LRUCacheShard::ApplyToSomeEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
- DeleterFn deleter)>& callback,
+ DeleterFn deleter,
+ Cache::ItemOwnerId item_owner_id)>& callback,
uint32_t average_entries_per_lock, uint32_t* state) {
// The state is essentially going to be the starting hash, which works
// nicely even if we resize between calls because we use upper-most
@@ -238,7 +239,7 @@ void LRUCacheShard::ApplyToSomeEntries(
[callback,
metadata_charge_policy = metadata_charge_policy_](LRUHandle* h) {
callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
- h->deleter);
+ h->deleter, Cache::kUnknownItemId);
},
index_begin, index_end);
}
@@ -323,7 +324,8 @@ void LRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
size_t charge, Cache::DeleterFn deleter,
Cache::Handle** handle,
- Cache::Priority /*priority*/) {
+ Cache::Priority /*priority*/,
+ Cache::ItemOwnerId /* item_owner_id */) {
if (key.size() != kCacheKeySize) {
return Status::NotSupported("FastLRUCache only supports key size " +
std::to_string(kCacheKeySize) + "B");
12 changes: 8 additions & 4 deletions cache/fast_lru_cache.h
@@ -337,12 +337,15 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
// nullptr.
Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
Cache::DeleterFn deleter, Cache::Handle** handle,
- Cache::Priority priority) override;
+ Cache::Priority priority,
+ Cache::ItemOwnerId item_owner_id) override;

Status Insert(const Slice& key, uint32_t hash, void* value,
const Cache::CacheItemHelper* helper, size_t charge,
- Cache::Handle** handle, Cache::Priority priority) override {
- return Insert(key, hash, value, charge, helper->del_cb, handle, priority);
+ Cache::Handle** handle, Cache::Priority priority,
+ Cache::ItemOwnerId item_owner_id) override {
+ return Insert(key, hash, value, charge, helper->del_cb, handle, priority,
+ item_owner_id);
}

Cache::Handle* Lookup(const Slice& key, uint32_t hash,
@@ -372,7 +375,8 @@

void ApplyToSomeEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
- DeleterFn deleter)>& callback,
+ DeleterFn deleter,
+ Cache::ItemOwnerId item_owner_id)>& callback,
uint32_t average_entries_per_lock, uint32_t* state) override;

void EraseUnRefEntries() override;
11 changes: 7 additions & 4 deletions cache/lru_cache.cc
@@ -161,7 +161,8 @@ void LRUCacheShard::EraseUnRefEntries() {

void LRUCacheShard::ApplyToSomeEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
- DeleterFn deleter)>& callback,
+ DeleterFn deleter,
+ Cache::ItemOwnerId item_owner_id)>& callback,
uint32_t average_entries_per_lock, uint32_t* state) {
// The state is essentially going to be the starting hash, which works
// nicely even if we resize between calls because we use upper-most
@@ -192,7 +193,7 @@ void LRUCacheShard::ApplyToSomeEntries(
? h->info_.helper->del_cb
: h->info_.deleter;
callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
- deleter);
+ deleter, h->item_owner_id);
},
index_begin, index_end);
}
@@ -468,7 +469,7 @@ void LRUCacheShard::Promote(LRUHandle* e) {
e->IsHighPri() ? Cache::Priority::HIGH : Cache::Priority::LOW;
s = Insert(e->key(), e->hash, /*value=*/nullptr, 0,
/*deleter=*/nullptr, /*helper=*/nullptr, /*handle=*/nullptr,
- priority);
+ priority, e->item_owner_id);
} else {
e->SetInCache(true);
e->SetIsStandalone(false);
@@ -679,7 +680,8 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key, void* value),
const Cache::CacheItemHelper* helper,
- Cache::Handle** handle, Cache::Priority priority) {
+ Cache::Handle** handle, Cache::Priority priority,
+ Cache::ItemOwnerId item_owner_id) {
// Allocate the memory here outside of the mutex.
// If the cache is full, we'll have to release it.
// It shouldn't happen very often though.
@@ -706,6 +708,7 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
memcpy(e->key_data, key.data(), key.size());
e->CalcTotalCharge(charge, metadata_charge_policy_);

+ e->item_owner_id = item_owner_id;
return InsertItem(e, handle, /* free_handle_on_fail */ true);
}

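The LRU shard is the one place the id round-trips: Insert above stores it (`e->item_owner_id = item_owner_id`) and ApplyToSomeEntries reports it back. A small end-to-end sketch under that assumption, using the public Cache API with arbitrary key/capacity values:

// Hypothetical round-trip sketch: the LRU cache preserves the owner id.
std::shared_ptr<Cache> cache = NewLRUCache(1 << 20);
Cache::ItemOwnerId id = cache->GetNextItemOwnerId();
Status s = cache->Insert("key1", /*value=*/nullptr, /*charge=*/1,
                         /*deleter=*/nullptr, /*handle=*/nullptr,
                         Cache::Priority::LOW, id);
cache->ApplyToAllEntries(
    [&](const Slice& /*k*/, void* /*v*/, size_t /*charge*/,
        Cache::DeleterFn /*del*/, Cache::ItemOwnerId owner) {
      assert(owner == id);  // reported exactly as stored
    },
    Cache::ApplyToAllEntriesOptions{});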
22 changes: 14 additions & 8 deletions cache/lru_cache.h
@@ -103,6 +103,8 @@ struct LRUHandle {

uint16_t flags;

+ Cache::ItemOwnerId item_owner_id = Cache::kUnknownItemId;

#ifdef __SANITIZE_THREAD__
// TSAN can report a false data race on flags, where one thread is writing
// to one of the mutable bits and another thread is reading this immutable
@@ -359,16 +361,18 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
// Like Cache methods, but with an extra "hash" parameter.
virtual Status Insert(const Slice& key, uint32_t hash, void* value,
size_t charge, Cache::DeleterFn deleter,
- Cache::Handle** handle,
- Cache::Priority priority) override {
- return Insert(key, hash, value, charge, deleter, nullptr, handle, priority);
+ Cache::Handle** handle, Cache::Priority priority,
+ Cache::ItemOwnerId item_owner_id) override {
+ return Insert(key, hash, value, charge, deleter, nullptr, handle, priority,
+ item_owner_id);
}
virtual Status Insert(const Slice& key, uint32_t hash, void* value,
const Cache::CacheItemHelper* helper, size_t charge,
- Cache::Handle** handle,
- Cache::Priority priority) override {
+ Cache::Handle** handle, Cache::Priority priority,
+ Cache::ItemOwnerId item_owner_id) override {
assert(helper);
- return Insert(key, hash, value, charge, nullptr, helper, handle, priority);
+ return Insert(key, hash, value, charge, nullptr, helper, handle, priority,
+ item_owner_id);
}
// If helper_cb is null, the values of the following arguments don't matter.
virtual Cache::Handle* Lookup(const Slice& key, uint32_t hash,
@@ -402,7 +406,8 @@

virtual void ApplyToSomeEntries(
const std::function<void(const Slice& key, void* value, size_t charge,
- DeleterFn deleter)>& callback,
+ DeleterFn deleter,
+ Cache::ItemOwnerId item_owner_id)>& callback,
uint32_t average_entries_per_lock, uint32_t* state) override;

virtual void EraseUnRefEntries() override;
@@ -432,7 +437,8 @@
bool free_handle_on_fail);
Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
DeleterFn deleter, const Cache::CacheItemHelper* helper,
- Cache::Handle** handle, Cache::Priority priority);
+ Cache::Handle** handle, Cache::Priority priority,
+ Cache::ItemOwnerId item_owner_id);
// Promote an item looked up from the secondary cache to the LRU cache.
// The item may be still in the secondary cache.
// It is only inserted into the hash table and not the LRU list, and only