Skip to content
Draft
Changes from 1 commit
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
a93f01f
Run centos and debian workflows on push and PR
igchor Nov 2, 2021
2a8fa60
Adds createPutToken and switches findEviction
byrnedj Feb 4, 2023
c3a4db9
Add memory usage statistics for allocation classes
igchor Jul 6, 2022
2529f0a
Initial multi-tier support implementation (rebased with NUMA and cs p…
igchor Sep 28, 2021
3cc41bd
AC stats multi-tier
byrnedj Jan 17, 2023
bf4c244
This commit contains the additional memory tiers tests
byrnedj Feb 8, 2023
c432df6
This is the additional multi-tier support needed
guptask Nov 14, 2022
4cefc44
added per pool class rolling average latency (upstream PR version)
guptask Jul 21, 2022
1f62a63
added per tier pool class rolling average latency (based on upstream PR)
guptask Jul 21, 2022
489ef20
MM2Q promotion iterators (#1)
byrnedj Aug 9, 2022
048c809
CS Patch Part 2 for multi-tier cachelib:
byrnedj Feb 7, 2023
ed7b70f
basic multi-tier test based on numa bindings
igchor Dec 30, 2021
94c4974
Adding new configs to hit_ratio/graph_cache_leader_fobj
vinser52 Jan 27, 2022
afd1456
Do not block reader if a child item is moving
igchor Dec 19, 2022
4f8f425
Background data movement (#20)
byrnedj Oct 21, 2022
6203a95
fix race in moveRegularItemWith sync where insertOrReplace can cause …
byrnedj Feb 16, 2023
6abb498
Fix race in acquire (#68)
igchor Mar 16, 2023
add2e5f
Per tier pool stats (#70)
byrnedj Mar 23, 2023
aedaf97
dummy change to trigger container image rebuild
guptask Mar 28, 2023
1f21fce
Fix token creation and stats (#79)
igchor Apr 27, 2023
9e27d35
Updated the docker gcc version to 12 (#83)
guptask May 9, 2023
da7a6bb
NUMA bindings support for private memory (#82)
vinser52 May 17, 2023
b5ac462
Do not run cachelib-centos-8-5 on PRs (#85)
igchor Jun 6, 2023
50d3ae5
correct handling for expired items in eviction (#86)
byrnedj Jun 6, 2023
5632d18
Add option to insert items to first free tier (#87)
igchor Jun 8, 2023
09d7bab
Chained item movement between tiers - sync on the parent item (#84)
byrnedj Jun 28, 2023
08d8f33
edit dockerfile
byrnedj Jul 24, 2023
316133c
these submodules work
byrnedj Jul 25, 2023
8d2c390
Track latency of per item eviction/promotion between memory tiers
guptask Jul 28, 2023
b99f2b3
Merge pull request #91 from guptask/tier_eviction_latency
guptask Jul 31, 2023
a14f058
modified the cachebench output to make it friendly for parsing
guptask Aug 7, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Do not block reader if a child item is moving
This would lead to deadlock (e.g. in forEachChainedItem)
if the child is moving (e.g. marked by the Slab Release thread).

Instead, use the moving bit only to prevent the item from being freed,
and perform all synchronization on the parent item.
  • Loading branch information
igchor authored and byrnedj committed Jul 23, 2023
commit afd145650bfdae30b600c414488b0b181ef1b722
93 changes: 58 additions & 35 deletions cachelib/allocator/CacheAllocator-inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -987,7 +987,8 @@ CacheAllocator<CacheTrait>::acquire(Item* it) {

SCOPE_FAIL { stats_.numRefcountOverflow.inc(); };

auto failIfMoving = getNumTiers() > 1;
// TODO: do not block incRef for child items to avoid deadlock
auto failIfMoving = getNumTiers() > 1 && !it->isChainedItem();
auto incRes = incRef(*it, failIfMoving);
if (LIKELY(incRes == RefcountWithFlags::incResult::incOk)) {
return WriteHandle{it, *this};
Expand Down Expand Up @@ -3051,7 +3052,8 @@ bool CacheAllocator<CacheTrait>::tryMovingForSlabRelease(
// a regular item or chained item is synchronized with any potential
// user-side mutation.
std::unique_ptr<SyncObj> syncObj;
if (config_.movingSync) {
if (config_.movingSync && getNumTiers() == 1) {
// TODO: use moving-bit synchronization for single tier as well
if (!oldItem.isChainedItem()) {
syncObj = config_.movingSync(oldItem.getKey());
} else {
Expand Down Expand Up @@ -3149,47 +3151,51 @@ void CacheAllocator<CacheTrait>::evictForSlabRelease(
Item* evicted;
if (item.isChainedItem()) {
auto& expectedParent = item.asChainedItem().getParentItem(compressor_);
const std::string parentKey = expectedParent.getKey().str();
auto l = chainedItemLocks_.lockExclusive(parentKey);

// check if the child is still in mmContainer and the expected parent is
// valid under the chained item lock.
if (expectedParent.getKey() != parentKey || !item.isInMMContainer() ||
item.isOnlyMoving() ||
&expectedParent != &item.asChainedItem().getParentItem(compressor_) ||
!expectedParent.isAccessible() || !expectedParent.hasChainedItem()) {
continue;
}

// search if the child is present in the chain
{
auto parentHandle = findInternal(parentKey);
if (!parentHandle || parentHandle != &expectedParent) {
if (getNumTiers() == 1) {
// TODO: unify this with multi-tier implementation
// right now, taking a chained item lock here would lead to deadlock
const std::string parentKey = expectedParent.getKey().str();
auto l = chainedItemLocks_.lockExclusive(parentKey);

// check if the child is still in mmContainer and the expected parent is
// valid under the chained item lock.
if (expectedParent.getKey() != parentKey || !item.isInMMContainer() ||
item.isOnlyMoving() ||
&expectedParent != &item.asChainedItem().getParentItem(compressor_) ||
!expectedParent.isAccessible() || !expectedParent.hasChainedItem()) {
continue;
}

ChainedItem* head = nullptr;
{ // scope for the handle
auto headHandle = findChainedItem(expectedParent);
head = headHandle ? &headHandle->asChainedItem() : nullptr;
}
// search if the child is present in the chain
{
auto parentHandle = findInternal(parentKey);
if (!parentHandle || parentHandle != &expectedParent) {
continue;
}

bool found = false;
while (head) {
if (head == &item) {
found = true;
break;
ChainedItem* head = nullptr;
{ // scope for the handle
auto headHandle = findChainedItem(expectedParent);
head = headHandle ? &headHandle->asChainedItem() : nullptr;
}
head = head->getNext(compressor_);
}

if (!found) {
continue;
bool found = false;
while (head) {
if (head == &item) {
found = true;
break;
}
head = head->getNext(compressor_);
}

if (!found) {
continue;
}
}
}

evicted = &expectedParent;

token = createPutToken(*evicted);
if (evicted->markForEviction()) {
// unmark the child so it will be freed
Expand All @@ -3200,6 +3206,9 @@ void CacheAllocator<CacheTrait>::evictForSlabRelease(
// no other reader can be added to the waiters list
wakeUpWaiters(*evicted, {});
} else {
// TODO: potential deadlock with markUseful for parent item
// for now, we do not block any reader on child items but this
// should probably be fixed
continue;
}
} else {
Expand Down Expand Up @@ -3231,7 +3240,17 @@ void CacheAllocator<CacheTrait>::evictForSlabRelease(
XDCHECK(evicted->getRefCount() == 0);
const auto res =
releaseBackToAllocator(*evicted, RemoveContext::kEviction, false);
XDCHECK(res == ReleaseRes::kReleased);

if (getNumTiers() == 1) {
XDCHECK(res == ReleaseRes::kReleased);
} else {
const bool isAlreadyFreed =
!markMovingForSlabRelease(ctx, &item, throttler);
if (!isAlreadyFreed) {
continue;
}
}

return;
}
}
Expand Down Expand Up @@ -3279,11 +3298,15 @@ bool CacheAllocator<CacheTrait>::markMovingForSlabRelease(
bool itemFreed = true;
bool markedMoving = false;
TierId tid = getTierId(alloc);
const auto fn = [&markedMoving, &itemFreed](void* memory) {
auto numTiers = getNumTiers();
const auto fn = [&markedMoving, &itemFreed, numTiers](void* memory) {
// Since this callback is executed, the item is not yet freed
itemFreed = false;
Item* item = static_cast<Item*>(memory);
if (item->markMoving(false)) {
// TODO: for chained items, moving bit is only used to avoid
// freeing the item prematurely
auto failIfRefNotZero = numTiers > 1 && !item->isChainedItem();
if (item->markMoving(failIfRefNotZero)) {
markedMoving = true;
}
};
Expand Down