Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
c0c90d2
Run centos and debian workflows on push and PR
igchor Nov 2, 2021
dbe3fda
Adds createPutToken and switches findEviction
byrnedj Feb 4, 2023
9afcd64
Add memory usage statistics for allocation classes
igchor Jul 6, 2022
eca7d8c
Initial multi-tier support implementation
igchor Sep 28, 2021
664da8d
AC stats multi-tier
byrnedj Jan 17, 2023
3b7bb0c
Tests and fix tier sizing
byrnedj Feb 8, 2023
58e825b
This is the additional multi-tier support needed
guptask Nov 14, 2022
9fc705f
Rolling average alloc latency
guptask Jul 21, 2022
ce0e38a
Rolling average class latency
guptask Jul 21, 2022
e0a8006
MM2Q promotion iterator
byrnedj Aug 9, 2022
bcb2ae2
Multi-tier allocator patch
byrnedj Feb 7, 2023
d4cf1d4
basic multi-tier test based on numa bindings
igchor Dec 30, 2021
6d2fbef
Adding new configs to hit_ratio/graph_cache_leader_fobj
vinser52 Jan 27, 2022
5bfa1ff
Background data movement for the tiers
byrnedj Oct 21, 2022
1593291
dummy change to trigger container image rebuild
guptask Mar 28, 2023
a171f38
Updated the docker gcc version to 12 (#83)
guptask May 9, 2023
35a17e4
NUMA bindings support for private memory (#82)
vinser52 May 17, 2023
46d168c
Do not run cachelib-centos-8-5 on PRs (#85)
igchor Jun 6, 2023
7d06531
Add option to insert items to first free tier (#87)
igchor Jun 8, 2023
1521efe
Chained item movement between tiers - sync on the parent item (#84)
byrnedj Jun 28, 2023
3328e4e
edit dockerfile
byrnedj Jul 24, 2023
3c87c49
Track latency of per item eviction/promotion between memory tiers
guptask Jul 28, 2023
795f85b
Update dependencies (#95)
igchor Aug 23, 2023
96d948f
enable DTO build without memcpy changes to cachebench
byrnedj Feb 28, 2024
47d5034
Background eviction for multi-tier
byrnedj Feb 28, 2024
efea480
no online eviction option patch
byrnedj Feb 28, 2024
ebfca17
fixes cmake in latest test removal (upstream test build fails - need …
byrnedj May 20, 2024
52618b5
fixes commit for now (should drop once https://github.com/facebook/Ca…
byrnedj May 28, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Add option to insert items to first free tier (#87)
instead of always inserting to topmost tier
  • Loading branch information
igchor authored and byrnedj committed May 20, 2024
commit 7d065316ea6d9b11fa3430072a1c82cd438611cb
32 changes: 26 additions & 6 deletions cachelib/allocator/CacheAllocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -1528,13 +1528,19 @@ class CacheAllocator : public CacheBase {
// For description see allocateInternal.
//
// @param tid id a memory tier
// @param fromBgThread whether this function was called from a bg
// thread - this is used to decide whether the bg thread should
// be woken in case there is no free memory
// @param evict whether to evict an item from tier tid in case there
// is not enough memory
WriteHandle allocateInternalTier(TierId tid,
PoolId id,
Key key,
uint32_t size,
uint32_t creationTime,
uint32_t expiryTime,
bool fromBgThread);
bool fromBgThread,
bool evict);

// Allocate a chained item
//
Expand Down Expand Up @@ -2977,7 +2983,8 @@ CacheAllocator<CacheTrait>::allocateInternalTier(TierId tid,
uint32_t size,
uint32_t creationTime,
uint32_t expiryTime,
bool fromBgThread) {
bool fromBgThread,
bool evict) {
util::LatencyTracker tracker{stats().allocateLatency_};

SCOPE_FAIL { stats_.invalidAllocs.inc(); };
Expand All @@ -3002,6 +3009,9 @@ CacheAllocator<CacheTrait>::allocateInternalTier(TierId tid,
}

if (memory == nullptr) {
if (!evict) {
return {};
}
memory = findEviction(tid, pid, cid);
}

Expand Down Expand Up @@ -3051,7 +3061,9 @@ CacheAllocator<CacheTrait>::allocateInternal(PoolId pid,
bool fromBgThread) {
auto tid = 0; /* TODO: consult admission policy */
for(TierId tid = 0; tid < getNumTiers(); ++tid) {
auto handle = allocateInternalTier(tid, pid, key, size, creationTime, expiryTime, fromBgThread);
bool evict = !config_.insertToFirstFreeTier || tid == getNumTiers() - 1;
auto handle = allocateInternalTier(tid, pid, key, size, creationTime,
expiryTime, fromBgThread, evict);
if (handle) return handle;
}
return {};
Expand Down Expand Up @@ -4220,13 +4232,16 @@ CacheAllocator<CacheTrait>::tryEvictToNextMemoryTier(

TierId nextTier = tid; // TODO - calculate this based on some admission policy
while (++nextTier < getNumTiers()) { // try to evict down to the next memory tiers
// always evict item from the nextTier to make room for new item
bool evict = true;
// allocateInternal might trigger another eviction
auto newItemHdl = allocateInternalTier(nextTier, pid,
item.getKey(),
item.getSize(),
item.getCreationTime(),
item.getExpiryTime(),
fromBgThread);
fromBgThread,
evict);

if (newItemHdl) {

Expand Down Expand Up @@ -4263,13 +4278,16 @@ CacheAllocator<CacheTrait>::tryPromoteToNextMemoryTier(
auto toPromoteTier = nextTier - 1;
--nextTier;

// always evict item from the toPromoteTier to make room for new item
bool evict = true;
// allocateInternal might trigger another eviction
auto newItemHdl = allocateInternalTier(toPromoteTier, pid,
item.getKey(),
item.getSize(),
item.getCreationTime(),
item.getExpiryTime(),
fromBgThread);
fromBgThread,
true);

if (newItemHdl) {
XDCHECK_EQ(newItemHdl->getSize(), item.getSize());
Expand Down Expand Up @@ -5608,6 +5626,7 @@ CacheAllocator<CacheTrait>::allocateNewItemForOldItem(const Item& oldItem) {
const auto tid = getTierId(oldItem);
const auto allocInfo =
allocator_[tid]->getAllocInfo(static_cast<const void*>(&oldItem));
bool evict = !config_.insertToFirstFreeTier || tid == getNumTiers() - 1;

// Set up the destination for the move. Since oldItem would have the moving
// bit set, it won't be picked for eviction.
Expand All @@ -5617,7 +5636,8 @@ CacheAllocator<CacheTrait>::allocateNewItemForOldItem(const Item& oldItem) {
oldItem.getSize(),
oldItem.getCreationTime(),
oldItem.getExpiryTime(),
false);
false,
evict);
if (!newItemHdl) {
return {};
}
Expand Down
15 changes: 15 additions & 0 deletions cachelib/allocator/CacheAllocatorConfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -313,6 +313,9 @@ class CacheAllocatorConfig {
// Library team if you find yourself customizing this.
CacheAllocatorConfig& setThrottlerConfig(util::Throttler::Config config);

// Insert items to first free memory tier
CacheAllocatorConfig& enableInsertToFirstFreeTier();

// Passes in a callback to initialize an event tracker when the allocator
// starts
CacheAllocatorConfig& setEventTracker(EventTrackerSharedPtr&&);
Expand Down Expand Up @@ -539,6 +542,11 @@ class CacheAllocatorConfig {
// ABOVE are the config for various cache workers
//

// if turned off, always insert new elements into the topmost memory tier.
// if turned on, insert a new element into the first memory tier with free
// space, or evict from the bottom tier if the memory cache is full
bool insertToFirstFreeTier = false;

// the number of tries to search for an item to evict
// 0 means it's infinite
unsigned int evictionSearchTries{50};
Expand Down Expand Up @@ -673,6 +681,12 @@ class CacheAllocatorConfig {
{MemoryTierCacheConfig::fromShm().setRatio(1)}};
};

template <typename T>
CacheAllocatorConfig<T>& CacheAllocatorConfig<T>::enableInsertToFirstFreeTier() {
insertToFirstFreeTier = true;
return *this;
}

template <typename T>
CacheAllocatorConfig<T>& CacheAllocatorConfig<T>::setCacheName(
const std::string& _cacheName) {
Expand Down Expand Up @@ -1254,6 +1268,7 @@ std::map<std::string, std::string> CacheAllocatorConfig<T>::serialize() const {
configMap["nvmAdmissionMinTTL"] = std::to_string(nvmAdmissionMinTTL);
configMap["delayCacheWorkersStart"] =
delayCacheWorkersStart ? "true" : "false";
configMap["insertToFirstFreeTier"] = std::to_string(insertToFirstFreeTier);
mergeWithPrefix(configMap, throttleConfig.serialize(), "throttleConfig");
mergeWithPrefix(configMap,
chainedItemAccessConfig.serialize(),
Expand Down
2 changes: 2 additions & 0 deletions cachelib/cachebench/cache/Cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -578,6 +578,8 @@ Cache<Allocator>::Cache(const CacheConfig& config,
allocatorConfig_.configureMemoryTiers(config_.memoryTierConfigs);
}

allocatorConfig_.insertToFirstFreeTier = config_.insertToFirstFreeTier;

auto cleanupGuard = folly::makeGuard([&] {
if (!nvmCacheFilePath_.empty()) {
util::removePath(nvmCacheFilePath_);
Expand Down
2 changes: 2 additions & 0 deletions cachelib/cachebench/util/CacheConfig.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,8 @@ CacheConfig::CacheConfig(const folly::dynamic& configJson) {
JSONSetVal(configJson, tryLockUpdate);
JSONSetVal(configJson, lruIpSpec);
JSONSetVal(configJson, useCombinedLockForIterators);

JSONSetVal(configJson, insertToFirstFreeTier);

JSONSetVal(configJson, lru2qHotPct);
JSONSetVal(configJson, lru2qColdPct);
Expand Down
2 changes: 2 additions & 0 deletions cachelib/cachebench/util/CacheConfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,8 @@ struct CacheConfig : public JSONConfig {
bool lruUpdateOnRead{true};
bool tryLockUpdate{false};
bool useCombinedLockForIterators{true};

bool insertToFirstFreeTier{false};

// LRU param
uint64_t lruIpSpec{0};
Expand Down