Changes from 1 commit
31 commits
a93f01f
Run centos and debian workflows on push and PR
igchor Nov 2, 2021
2a8fa60
Adds createPutToken and switches findEviction
byrnedj Feb 4, 2023
c3a4db9
Add memory usage statistics for allocation classes
igchor Jul 6, 2022
2529f0a
Initial multi-tier support implementation (rebased with NUMA and cs p…
igchor Sep 28, 2021
3cc41bd
AC stats multi-tier
byrnedj Jan 17, 2023
bf4c244
This commit contains the additional memory tiers tests
byrnedj Feb 8, 2023
c432df6
This is the additional multi-tier support needed
guptask Nov 14, 2022
4cefc44
added per pool class rolling average latency (upstream PR version)
guptask Jul 21, 2022
1f62a63
added per tier pool class rolling average latency (based on upstream PR)
guptask Jul 21, 2022
489ef20
MM2Q promotion iterators (#1)
byrnedj Aug 9, 2022
048c809
CS Patch Part 2 for mulit-tier cachelib:
byrnedj Feb 7, 2023
ed7b70f
basic multi-tier test based on numa bindings
igchor Dec 30, 2021
94c4974
Aadding new configs to hit_ratio/graph_cache_leader_fobj
vinser52 Jan 27, 2022
afd1456
Do not block reader if a child item is moving
igchor Dec 19, 2022
4f8f425
Background data movement (#20)
byrnedj Oct 21, 2022
6203a95
fix race in moveRegularItemWith sync where insertOrReplace can cause …
byrnedj Feb 16, 2023
6abb498
Fix race in acquire (#68)
igchor Mar 16, 2023
add2e5f
Per tier pool stats (#70)
byrnedj Mar 23, 2023
aedaf97
dummy change to trigger container image rebuild
guptask Mar 28, 2023
1f21fce
Fix token creation and stats (#79)
igchor Apr 27, 2023
9e27d35
Updated the docker gcc version to 12 (#83)
guptask May 9, 2023
da7a6bb
NUMA bindigs support for private memory (#82)
vinser52 May 17, 2023
b5ac462
Do not run cachelib-centos-8-5 on PRs (#85)
igchor Jun 6, 2023
50d3ae5
correct handling for expired items in eviction (#86)
byrnedj Jun 6, 2023
5632d18
Add option to insert items to first free tier (#87)
igchor Jun 8, 2023
09d7bab
Chained item movement between tiers - sync on the parent item (#84)
byrnedj Jun 28, 2023
08d8f33
edit dockerfile
byrnedj Jul 24, 2023
316133c
these submodules work
byrnedj Jul 25, 2023
8d2c390
Track latency of per item eviction/promotion between memory tiers
guptask Jul 28, 2023
b99f2b3
Merge pull request #91 from guptask/tier_eviction_latency
guptask Jul 31, 2023
a14f058
modified the cachebench output to make it friendly for parsing
guptask Aug 7, 2023
Chained item movement between tiers - sync on the parent item (#84)
* Chained item movement between tiers - currently we sync on the parent
item for moving.
 - updated tests accordingly; note that we can no longer swap the
   parent item if a chained item is being moved for slab release.

* added some debug checks around the chained item check
* fixed slab release behavior when no move callback (movecb) is set
byrnedj committed Jul 23, 2023
commit 09d7bab3ac06a561e683a099b120fa4494127acd
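At a high level, the parent-synchronized move works as follows: the mover holds the chained item lock for the parent's key and the parent's moving mark, allocates a destination chained item on the target tier, copies the payload, and swaps the old item out of the chain. The sketch below is a simplified illustration of that flow, not the code in CacheAllocator-inl.h; it reuses the helper declarations added in this commit, and copyPayload is a hypothetical stand-in for the user-supplied move callback.

// Simplified sketch of moving one chained item between tiers while
// synchronizing on the parent item. Assumes the caller already holds the
// chained item lock for the parent's key and has marked the parent moving.
// copyPayload is a hypothetical placeholder for the configured move callback.
bool moveChainedItemBetweenTiersSketch(ChainedItem& oldItem,
                                       Item& parentItem,
                                       TierId destTier) {
  auto newItemHdl =
      allocateChainedItemInternalTier(parentItem, oldItem.getSize(), destTier);
  if (!newItemHdl) {
    return false; // destination tier could not satisfy the allocation
  }
  copyPayload(oldItem, *newItemHdl); // copy the chained item's payload
  // Swap the old chained item for the new one without going through the
  // handle destructor, since the old item is held via the moving mark.
  replaceInChainLocked(oldItem, newItemHdl, parentItem, /* fromMove */ true);
  return true;
}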
868 changes: 438 additions & 430 deletions cachelib/allocator/CacheAllocator-inl.h

Large diffs are not rendered by default.

79 changes: 49 additions & 30 deletions cachelib/allocator/CacheAllocator.h
@@ -1394,7 +1394,7 @@ class CacheAllocator : public CacheBase {

private:
// wrapper around Item's refcount and active handle tracking
FOLLY_ALWAYS_INLINE RefcountWithFlags::incResult incRef(Item& it, bool failIfMoving);
FOLLY_ALWAYS_INLINE RefcountWithFlags::incResult incRef(Item& it);
FOLLY_ALWAYS_INLINE RefcountWithFlags::Value decRef(Item& it);

// drops the refcount and if needed, frees the allocation back to the memory
@@ -1552,6 +1552,26 @@ class CacheAllocator : public CacheBase {
WriteHandle allocateChainedItemInternal(const ReadHandle& parent,
uint32_t size);

// Allocate a chained item to a specific tier
//
// The resulting chained item does not have a parent item yet
// and if we fail to link it to the chain for any reason
// the chained item will be freed once the handle is dropped.
//
// The parent item parameter here is mainly used to find the
// correct pool to allocate memory for this chained item.
//
// @param parent parent item
// @param size the size for the chained allocation
// @param tid the tier to allocate on
//
// @return handle to the chained allocation
// @throw std::invalid_argument if the size requested is invalid or
// if the item is invalid
WriteHandle allocateChainedItemInternalTier(const Item& parent,
uint32_t size,
TierId tid);
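A hedged sketch of how a caller might use this overload during a cross-tier move: the parent only determines the pool and allocation class for the new chained item. getTierId and nextTier are assumed helper names, not part of this diff.

// Sketch: allocate the destination chained item on the next memory tier.
// Only allocateChainedItemInternalTier comes from this diff; the tier
// selection helpers are assumptions.
TierId destTier = nextTier(getTierId(oldChainedItem));
auto newItemHdl = allocateChainedItemInternalTier(
    parentItem, oldChainedItem.getSize(), destTier);
if (!newItemHdl) {
  // The destination tier could not satisfy the allocation; the move is
  // abandoned and the chained item stays on its current tier.
}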

// Given an item and its parentKey, validate that the parentKey
// corresponds to an item that's the parent of the supplied item.
//
@@ -1632,19 +1652,17 @@
//
// @return true If the move was completed, and the containers were updated
// successfully.
bool moveRegularItemWithSync(Item& oldItem, WriteHandle& newItemHdl);
bool moveRegularItem(Item& oldItem, WriteHandle& newItemHdl);

// Moves a regular item to a different slab. This should only be used during
// slab release after the item's exclusive bit has been set. The user supplied
// callback is responsible for copying the contents and fixing the semantics
// of chained item.
// Moves a chained item to a different memory tier.
//
// @param oldItem item being moved
// @param oldItem Reference to the item being moved
// @param newItemHdl Reference to the handle of the new item being moved into
// @param parentItem Reference to the parent item
//
// @return true If the move was completed, and the containers were updated
// successfully.
bool moveRegularItem(Item& oldItem, WriteHandle& newItemHdl);
bool moveChainedItem(ChainedItem& oldItem, WriteHandle& newItemHdl, Item& parentItem);

// template class for viewAsChainedAllocs that takes either ReadHandle or
// WriteHandle
@@ -1657,29 +1675,12 @@
template <typename Handle>
folly::IOBuf convertToIOBufT(Handle& handle);

// Moves a chained item to a different slab. This should only be used during
// slab release after the item's exclusive bit has been set. The user supplied
// callback is responsible for copying the contents and fixing the semantics
// of chained item.
//
// Note: If we have successfully moved the old item into the new, the
// newItemHdl is reset and no longer usable by the caller.
//
// @param oldItem Reference to the item being moved
// @param newItemHdl Reference to the handle of the new item being
// moved into
//
// @return true If the move was completed, and the containers were updated
// successfully.
bool moveChainedItem(ChainedItem& oldItem, WriteHandle& newItemHdl);

// Transfers the chain ownership from parent to newParent. Parent
// will be unmarked as having chained allocations. Parent will not be null
// after calling this API.
//
// Parent and NewParent must be valid handles to items with same key and
// parent must have chained items and parent handle must be the only
// outstanding handle for parent. New parent must be without any chained item
// NewParent must be a valid handle to an item with the same key as Parent,
// and Parent must have chained items. The new parent must not have any
// outstanding chained item handles.
//
// Chained item lock for the parent's key needs to be held in exclusive mode.
@@ -1688,7 +1689,7 @@
// @param newParent the new parent for the chain
//
// @throw if any of the conditions for parent or newParent are not met.
void transferChainLocked(WriteHandle& parent, WriteHandle& newParent);
void transferChainLocked(Item& parent, WriteHandle& newParent);
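For context, a hedged sketch of a call site under the new signature: the parent is passed as a plain Item& because during a move the caller holds it via the moving mark rather than an outstanding handle, while the chained item lock for the key must still be held in exclusive mode. The lock helper shown is assumed from CacheLib's per-key chained item locking, not part of this diff.

// Sketch of transferring a chain to a new parent with the new signature.
{
  auto l = chainedItemLocks_.lockExclusive(parentItem.getKey());
  // parentItem: the current parent (held as moving by the caller);
  // newParentHdl: handle to an item with the same key and no chained
  // item handles outstanding.
  transferChainLocked(parentItem, newParentHdl);
}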

// replace a chained item in the existing chain. This needs to be called
// with the chained item lock held exclusive
@@ -1702,6 +1703,24 @@
WriteHandle newItemHdl,
const Item& parent);

//
// Performs the actual in-place replace - it is called from
// moveChainedItem and replaceChainedItemLocked.
// The chained item lock must be held.
//
// @param oldItem    the item we are replacing in the chain
// @param newItemHdl handle to the item we are replacing it with
// @param parent     the parent for the chain
// @param fromMove   used to determine if the replace was called from
//                   moveChainedItem - we avoid the handle destructor
//                   in this case.
void replaceInChainLocked(Item& oldItem,
WriteHandle& newItemHdl,
const Item& parent,
bool fromMove);
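A hedged illustration of the two call paths the fromMove flag distinguishes; the surrounding caller shapes are assumptions, only replaceInChainLocked itself comes from this diff.

// From replaceChainedItemLocked: the caller took a reference to the old
// item and hands it back, so the normal handle destructor path applies.
replaceInChainLocked(*oldItemHdl, newItemHdl, parent, /* fromMove */ false);

// From moveChainedItem: the old item is held via the moving mark and no
// extra reference was taken, so the handle destructor is skipped to avoid
// dropping a refcount that was never acquired.
replaceInChainLocked(oldItem, newItemHdl, parent, /* fromMove */ true);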

// Insert an item into MM container. The caller must hold a valid handle for
// the item.
//
@@ -2016,7 +2035,7 @@ auto& mmContainer = getMMContainer(tid, pid, cid);
throw std::runtime_error("Not supported for chained items");
}

if (candidate->markMoving(true)) {
if (candidate->markMoving()) {
mmContainer.remove(itr);
candidates.push_back(candidate);
} else {
@@ -2089,7 +2108,7 @@ auto& mmContainer = getMMContainer(tid, pid, cid);

// TODO: only allow it for read-only items?
// or implement mvcc
if (candidate->markMoving(true)) {
if (candidate->markMoving()) {
// promotions should rarely fail since we already marked moving
mmContainer.remove(itr);
candidates.push_back(candidate);
4 changes: 2 additions & 2 deletions cachelib/allocator/CacheItem-inl.h
@@ -238,8 +238,8 @@ bool CacheItem<CacheTrait>::markForEvictionWhenMoving() {
}

template <typename CacheTrait>
bool CacheItem<CacheTrait>::markMoving(bool failIfRefNotZero) {
return ref_.markMoving(failIfRefNotZero);
bool CacheItem<CacheTrait>::markMoving() {
return ref_.markMoving();
}

template <typename CacheTrait>
6 changes: 3 additions & 3 deletions cachelib/allocator/CacheItem.h
@@ -312,9 +312,9 @@ class CACHELIB_PACKED_ATTR CacheItem {
//
// @return incOk on success; incFailedMoving or incFailedEviction if the
// item is marked as exclusive
// @throw exception::RefcountOverflow on ref count overflow
FOLLY_ALWAYS_INLINE RefcountWithFlags::incResult incRef(bool failIfMoving) {
FOLLY_ALWAYS_INLINE RefcountWithFlags::incResult incRef() {
try {
return ref_.incRef(failIfMoving);
return ref_.incRef();
} catch (exception::RefcountOverflow& e) {
throw exception::RefcountOverflow(
folly::sformat("{} item: {}", e.what(), toString()));
@@ -381,7 +381,7 @@ class CACHELIB_PACKED_ATTR CacheItem {
* Unmarking moving will also return the refcount at the moment of
* unmarking.
*/
bool markMoving(bool failIfRefNotZero);
bool markMoving();
RefcountWithFlags::Value unmarkMoving() noexcept;
bool isMoving() const noexcept;
bool isOnlyMoving() const noexcept;
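Since incRef no longer takes a failIfMoving flag, acquiring a reference now always fails while an item is marked moving, and the incResult tells the caller why. A minimal sketch of branching on that result follows; the retry policy is an assumption, not code from this commit.

auto res = it.incRef();
if (res == RefcountWithFlags::incOk) {
  // reference taken; build and return a handle to the caller
} else if (res == RefcountWithFlags::incFailedMoving) {
  // the item is marked moving (tier move or slab release in progress);
  // callers typically retry or treat the lookup as a miss
} else { // RefcountWithFlags::incFailedEviction
  // the item is marked for eviction with no readers left; treat as a miss
}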
6 changes: 6 additions & 0 deletions cachelib/allocator/MM2Q-inl.h
@@ -247,6 +247,12 @@ MM2Q::Container<T, HookPtr>::getEvictionIterator() const noexcept {
return LockedIterator{std::move(l), lru_.rbegin()};
}

template <typename T, MM2Q::Hook<T> T::*HookPtr>
template <typename F>
void MM2Q::Container<T, HookPtr>::withContainerLock(F&& fun) {
lruMutex_->lock_combine([this, &fun]() { fun(); });
}
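A hedged usage sketch for the new hook: withContainerLock runs an arbitrary caller-supplied function under the same mutex that guards the LRU state, so a check can be made atomic with respect to concurrent eviction-iterator users. The caller shape is illustrative only. MM2Q and MMLru route this through lruMutex_->lock_combine (flat combining), while MMTinyLFU below takes its plain mutex with a LockHolder, matching how each container already implements withEvictionIterator.

// Sketch: observe container membership atomically with other MM container
// operations. mmContainer is an MM2Q::Container<T, HookPtr>& and node is a
// T& (the cache item type carrying the hook).
bool inContainer = false;
mmContainer.withContainerLock([&]() {
  inContainer = node.isInMMContainer(); // read under the container lock
});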

template <typename T, MM2Q::Hook<T> T::*HookPtr>
template <typename F>
void MM2Q::Container<T, HookPtr>::withEvictionIterator(F&& fun) {
4 changes: 4 additions & 0 deletions cachelib/allocator/MM2Q.h
@@ -503,6 +503,10 @@ class MM2Q {
template <typename F>
void withEvictionIterator(F&& f);

// Execute provided function under container lock.
template <typename F>
void withContainerLock(F&& f);

// Execute provided function under container lock. Function gets
// iterator passed as parameter.
template <typename F>
6 changes: 6 additions & 0 deletions cachelib/allocator/MMLru-inl.h
@@ -218,6 +218,12 @@ MMLru::Container<T, HookPtr>::getEvictionIterator() const noexcept {
return LockedIterator{std::move(l), lru_.rbegin()};
}

template <typename T, MMLru::Hook<T> T::*HookPtr>
template <typename F>
void MMLru::Container<T, HookPtr>::withContainerLock(F&& fun) {
lruMutex_->lock_combine([this, &fun]() { fun(); });
}

template <typename T, MMLru::Hook<T> T::*HookPtr>
template <typename F>
void MMLru::Container<T, HookPtr>::withEvictionIterator(F&& fun) {
4 changes: 4 additions & 0 deletions cachelib/allocator/MMLru.h
@@ -377,6 +377,10 @@ class MMLru {
template <typename F>
void withEvictionIterator(F&& f);

// Execute provided function under container lock.
template <typename F>
void withContainerLock(F&& f);

template <typename F>
void withPromotionIterator(F&& f);

7 changes: 7 additions & 0 deletions cachelib/allocator/MMTinyLFU-inl.h
@@ -220,6 +220,13 @@ MMTinyLFU::Container<T, HookPtr>::getEvictionIterator() const noexcept {
return LockedIterator{std::move(l), *this};
}

template <typename T, MMTinyLFU::Hook<T> T::*HookPtr>
template <typename F>
void MMTinyLFU::Container<T, HookPtr>::withContainerLock(F&& fun) {
LockHolder l(lruMutex_);
fun();
}

template <typename T, MMTinyLFU::Hook<T> T::*HookPtr>
template <typename F>
void MMTinyLFU::Container<T, HookPtr>::withEvictionIterator(F&& fun) {
4 changes: 4 additions & 0 deletions cachelib/allocator/MMTinyLFU.h
@@ -497,6 +497,10 @@ class MMTinyLFU {
template <typename F>
void withEvictionIterator(F&& f);

// Execute provided function under container lock.
template <typename F>
void withContainerLock(F&& f);

template <typename F>
void withPromotionIterator(F&& f);

16 changes: 10 additions & 6 deletions cachelib/allocator/Refcount.h
@@ -140,9 +140,9 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
// @return incOk if the refcount is bumped; incFailedMoving or
// incFailedEviction otherwise (if the item is marked exclusive)
// @throw exception::RefcountOverflow if new count would be greater than
// maxCount
FOLLY_ALWAYS_INLINE incResult incRef(bool failIfMoving) {
FOLLY_ALWAYS_INLINE incResult incRef() {
incResult res = incOk;
auto predicate = [failIfMoving, &res](const Value curValue) {
auto predicate = [&res](const Value curValue) {
Value bitMask = getAdminRef<kExclusive>();

const bool exlusiveBitIsSet = curValue & bitMask;
@@ -151,7 +151,7 @@ class FOLLY_PACK_ATTR RefcountWithFlags {
} else if (exlusiveBitIsSet && (curValue & kAccessRefMask) == 0) {
res = incFailedEviction;
return false;
} else if (exlusiveBitIsSet && failIfMoving) {
} else if (exlusiveBitIsSet) {
res = incFailedMoving;
return false;
}
@@ -330,14 +330,18 @@
* Unmarking clears the `kExclusive` bit and decreases the internal refCount by 1.
* `unmarkMoving` does not depend on `isInMMContainer`
*/
bool markMoving(bool failIfRefNotZero) {
bool markMoving() {
Value linkedBitMask = getAdminRef<kLinked>();
Value exclusiveBitMask = getAdminRef<kExclusive>();
Value isChainedItemFlag = getFlag<kIsChainedItem>();

auto predicate = [failIfRefNotZero, linkedBitMask, exclusiveBitMask](const Value curValue) {
auto predicate = [linkedBitMask, exclusiveBitMask, isChainedItemFlag](const Value curValue) {
const bool unlinked = !(curValue & linkedBitMask);
const bool alreadyExclusive = curValue & exclusiveBitMask;
if (failIfRefNotZero && (curValue & kAccessRefMask) != 0) {
const bool isChained = curValue & isChainedItemFlag;

// a chained item can have a refcount of 1; this just means it is linked in the chain
if ((curValue & kAccessRefMask) > (isChained ? 1 : 0)) {
return false;
}
if (unlinked || alreadyExclusive) {
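To make the new markMoving contract concrete: a regular item can only be marked moving when it has no outstanding references, while a chained item tolerates exactly one reference, which simply reflects that it is linked into its chain. A hedged illustration:

// Regular item: markMoving succeeds only when the access refcount is 0.
if (regularItem.markMoving()) { /* safe to evict, move, or slab-release */ }

// Chained item linked in a chain: chain membership accounts for one
// reference, so a refcount of 1 still allows marking it moving for a
// tier move.
if (chainedItem.markMoving()) { /* safe to move between tiers */ }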
4 changes: 2 additions & 2 deletions cachelib/allocator/tests/AllocatorTypeTest.cpp
@@ -288,8 +288,8 @@ TYPED_TEST(BaseAllocatorTest, AddChainedItemMultiThreadWithMovingAndSync) {
this->testAddChainedItemMultithreadWithMovingAndSync();
}

TYPED_TEST(BaseAllocatorTest, TransferChainWhileMoving) {
this->testTransferChainWhileMoving();
TYPED_TEST(BaseAllocatorTest, TransferChainAfterMoving) {
this->testTransferChainAfterMoving();
}

TYPED_TEST(BaseAllocatorTest, AddAndPopChainedItemMultithread) {