Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
c0c90d2
Run centos and debian workflows on push and PR
igchor Nov 2, 2021
dbe3fda
Adds createPutToken and switches findEviction
byrnedj Feb 4, 2023
9afcd64
Add memory usage statistics for allocation classes
igchor Jul 6, 2022
eca7d8c
Initial multi-tier support implementation
igchor Sep 28, 2021
664da8d
AC stats multi-tier
byrnedj Jan 17, 2023
3b7bb0c
Tests and fix tier sizing
byrnedj Feb 8, 2023
58e825b
This is the additional multi-tier support needed
guptask Nov 14, 2022
9fc705f
Rolling average alloc latency
guptask Jul 21, 2022
ce0e38a
Rolling average class latency
guptask Jul 21, 2022
e0a8006
MM2Q promotion iterator
byrnedj Aug 9, 2022
bcb2ae2
Multi-tier allocator patch
byrnedj Feb 7, 2023
d4cf1d4
basic multi-tier test based on numa bindings
igchor Dec 30, 2021
6d2fbef
Adding new configs to hit_ratio/graph_cache_leader_fobj
vinser52 Jan 27, 2022
5bfa1ff
Background data movement for the tiers
byrnedj Oct 21, 2022
1593291
dummy change to trigger container image rebuild
guptask Mar 28, 2023
a171f38
Updated the docker gcc version to 12 (#83)
guptask May 9, 2023
35a17e4
NUMA bindings support for private memory (#82)
vinser52 May 17, 2023
46d168c
Do not run cachelib-centos-8-5 on PRs (#85)
igchor Jun 6, 2023
7d06531
Add option to insert items to first free tier (#87)
igchor Jun 8, 2023
1521efe
Chained item movement between tiers - sync on the parent item (#84)
byrnedj Jun 28, 2023
3328e4e
edit dockerfile
byrnedj Jul 24, 2023
3c87c49
Track latency of per item eviction/promotion between memory tiers
guptask Jul 28, 2023
795f85b
Update dependencies (#95)
igchor Aug 23, 2023
96d948f
enable DTO build without memcpy changes to cachebench
byrnedj Feb 28, 2024
47d5034
Background eviction for multi-tier
byrnedj Feb 28, 2024
efea480
no online eviction option patch
byrnedj Feb 28, 2024
ebfca17
fixes cmake in latest test removal (upstream test build fails - need …
byrnedj May 20, 2024
52618b5
fixes commit for now (should drop once https://github.com/facebook/Ca…
byrnedj May 28, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
This is the additional multi-tier support needed
for the compressed ptr changes that were introduced
upstream.
 - Includes later cosmetic changes added by sounak
9cb5c29
  • Loading branch information
guptask authored and byrnedj committed May 20, 2024
commit 58e825b37aa7a0e9d784c428f68168a0ba420595
3 changes: 2 additions & 1 deletion cachelib/allocator/CacheAllocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -1333,6 +1333,7 @@ class CacheAllocator : public CacheBase {
sizeof(typename RefcountWithFlags::Value) + sizeof(uint32_t) +
sizeof(uint32_t) + sizeof(KAllocation)) == sizeof(Item),
"vtable overhead");
// Check for CompressedPtr single/multi tier support
static_assert(32 == sizeof(Item), "item overhead is 32 bytes");

// make sure there is no overhead in ChainedItem on top of a regular Item
Expand Down Expand Up @@ -1988,7 +1989,7 @@ class CacheAllocator : public CacheBase {
}

// Create a pointer compressor for Item pointers.
// The compressor is handed the whole allocator container (one allocator
// per memory tier) so it can resolve which tier owns a raw pointer and
// encode/decode the tier id alongside the slab/alloc indices.
// NOTE(review): the scraped diff fused the removed single-tier call
// (`allocator_[0 /* TODO */]->createPtrCompressor<Item>()`) with the
// added multi-tier construction; only the added version is kept here.
typename Item::PtrCompressor createPtrCompressor() const {
  return typename Item::PtrCompressor(allocator_);
}

// helper utility to throttle and optionally log.
Expand Down
11 changes: 6 additions & 5 deletions cachelib/allocator/memory/AllocationClass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ AllocationClass::AllocationClass(ClassId classId,
poolId_(poolId),
allocationSize_(allocSize),
slabAlloc_(s),
freedAllocations_{slabAlloc_.createPtrCompressor<FreeAlloc>()} {
freedAllocations_{slabAlloc_.createSingleTierPtrCompressor<FreeAlloc>()} {
checkState();
}

Expand Down Expand Up @@ -102,7 +102,7 @@ AllocationClass::AllocationClass(
currSlab_(s.getSlabForIdx(*object.currSlabIdx())),
slabAlloc_(s),
freedAllocations_(*object.freedAllocationsObject(),
slabAlloc_.createPtrCompressor<FreeAlloc>()),
slabAlloc_.createSingleTierPtrCompressor<FreeAlloc>()),
canAllocate_(*object.canAllocate()) {
if (!slabAlloc_.isRestorable()) {
throw std::logic_error("The allocation class cannot be restored.");
Expand Down Expand Up @@ -356,9 +356,10 @@ std::pair<bool, std::vector<void*>> AllocationClass::pruneFreeAllocs(
// allocated slab, release any freed allocations belonging to this slab.
// Set the bit to true if the corresponding allocation is freed, false
// otherwise.
FreeList freeAllocs{slabAlloc_.createPtrCompressor<FreeAlloc>()};
FreeList notInSlab{slabAlloc_.createPtrCompressor<FreeAlloc>()};
FreeList inSlab{slabAlloc_.createPtrCompressor<FreeAlloc>()};
FreeList freeAllocs{slabAlloc_.createSingleTierPtrCompressor<FreeAlloc>()};
FreeList notInSlab{slabAlloc_.createSingleTierPtrCompressor<FreeAlloc>()};
FreeList inSlab{slabAlloc_.createSingleTierPtrCompressor<FreeAlloc>()};


lock_->lock_combine([&]() {
// Take the allocation class free list offline
Expand Down
2 changes: 1 addition & 1 deletion cachelib/allocator/memory/AllocationClass.h
Original file line number Diff line number Diff line change
Expand Up @@ -445,7 +445,7 @@ class AllocationClass {
// Intrusive singly-linked-list node representing one freed allocation.
// The node lives inside the freed memory itself, hence the packed
// attribute to guarantee it fits in the smallest allocation class.
// NOTE(review): the scraped diff fused the removed alias
// (`facebook::cachelib::PtrCompressor<FreeAlloc, SlabAllocator>`) with
// the added one, making the struct ill-formed; the added single-tier
// alias is kept, matching the createSingleTierPtrCompressor() call
// sites in AllocationClass.cpp.
struct CACHELIB_PACKED_ATTR FreeAlloc {
  using CompressedPtr = facebook::cachelib::CompressedPtr;
  // Free-list entries are always local to one SlabAllocator, so the
  // single-tier compressor (no tier-id lookup) is sufficient here.
  using PtrCompressor =
      facebook::cachelib::SingleTierPtrCompressor<FreeAlloc, SlabAllocator>;
  SListHook<FreeAlloc> hook_{};
};

Expand Down
65 changes: 59 additions & 6 deletions cachelib/allocator/memory/CompressedPtr.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,9 +27,12 @@ namespace cachelib {

class SlabAllocator;

template <typename PtrType, typename AllocatorContainer>
class PtrCompressor;

// This CompressedPtr makes decompression fast by staying away from division and
// modulo arithmetic and doing those during the compression time. We most often
// decompress a CompressedPtr than compress a pointer while creating one. This
// modulo arithmetic and doing those during the compression time. We most often
// decompress a CompressedPtr than compress a pointer while creating one. This
// is used for pointer compression by the memory allocator.

// We compress pointers by storing the tier index, slab index and alloc index of
Expand Down Expand Up @@ -173,12 +176,14 @@ class CACHELIB_PACKED_ATTR CompressedPtr {
}

friend SlabAllocator;
template <typename CPtrType, typename AllocatorContainer>
friend class PtrCompressor;
};

template <typename PtrType, typename AllocatorT>
class PtrCompressor {
class SingleTierPtrCompressor {
public:
explicit PtrCompressor(const AllocatorT& allocator) noexcept
explicit SingleTierPtrCompressor(const AllocatorT& allocator) noexcept
: allocator_(allocator) {}

const CompressedPtr compress(const PtrType* uncompressed) const {
Expand All @@ -190,17 +195,65 @@ class PtrCompressor {
allocator_.unCompress(compressed, false /* isMultiTiered */));
}

bool operator==(const PtrCompressor& rhs) const noexcept {
bool operator==(const SingleTierPtrCompressor& rhs) const noexcept {
return &allocator_ == &rhs.allocator_;
}

bool operator!=(const PtrCompressor& rhs) const noexcept {
bool operator!=(const SingleTierPtrCompressor& rhs) const noexcept {
return !(*this == rhs);
}

private:
// memory allocator that does the pointer compression.
const AllocatorT& allocator_;
};

// Multi-tier pointer compressor. Unlike SingleTierPtrCompressor, it is
// constructed over a *container* of allocators (one per memory tier):
// compress() locates the owning tier by scanning the container, and the
// tier index is stored in the compressed pointer when more than one tier
// is configured.
template <typename PtrType, typename AllocatorContainer>
class PtrCompressor {
public:
// Holds a reference to the allocator container; the container must
// outlive this compressor.
explicit PtrCompressor(const AllocatorContainer& allocators) noexcept
: allocators_(allocators) {}

// Compress a raw pointer into a CompressedPtr. nullptr maps to the
// null CompressedPtr.
const CompressedPtr compress(const PtrType* uncompressed) const {
if (uncompressed == nullptr)
return CompressedPtr{};

// Find which tier's allocator owns this address.
// NOTE(review): if the pointer belongs to no tier, the loop exits with
// tid == allocators_.size() and the indexing below is out of bounds —
// callers must only pass pointers obtained from one of these
// allocators; consider asserting this. TODO confirm upstream intent.
TierId tid;
for (tid = 0; tid < allocators_.size(); tid++) {
if (allocators_[tid]->isMemoryInAllocator(
static_cast<const void*>(uncompressed)))
break;
}

// Only embed the tier id when more than one tier exists, so the
// single-tier layout keeps its full index width.
bool isMultiTiered = allocators_.size() > 1;
auto cptr = allocators_[tid]->compress(uncompressed, isMultiTiered);
if (isMultiTiered) { // config has multiple tiers
cptr.setTierId(tid);
}
return cptr;
}

// Decompress back to a raw pointer. The null CompressedPtr maps to
// nullptr.
PtrType* unCompress(const CompressedPtr compressed) const {
if (compressed.isNull()) {
return nullptr;
}
// Route the decompression through the allocator of the encoded tier
// (tier id is 0 in single-tier configs).
bool isMultiTiered = allocators_.size() > 1;
auto& allocator = *allocators_[compressed.getTierId(isMultiTiered)];
return static_cast<PtrType*>(
allocator.unCompress(compressed, isMultiTiered));
}

// Two compressors are equal iff they wrap the same allocator container
// instance (identity, not element-wise, comparison).
bool operator==(const PtrCompressor& rhs) const noexcept {
return &allocators_ == &rhs.allocators_;
}

bool operator!=(const PtrCompressor& rhs) const noexcept {
return !(*this == rhs);
}

private:
// memory allocator that does the pointer compression.
const AllocatorContainer& allocators_;
};
} // namespace cachelib
} // namespace facebook
11 changes: 6 additions & 5 deletions cachelib/allocator/memory/MemoryAllocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -516,12 +516,13 @@ class MemoryAllocator {
using CompressedPtr = facebook::cachelib::CompressedPtr;
template <typename PtrType>
using PtrCompressor =
facebook::cachelib::PtrCompressor<PtrType, SlabAllocator>;

facebook::cachelib::PtrCompressor<PtrType,
std::vector<std::unique_ptr<MemoryAllocator>>>;

template <typename PtrType>
PtrCompressor<PtrType> createPtrCompressor() {
return slabAllocator_.createPtrCompressor<PtrType>();
}
using SingleTierPtrCompressor =
facebook::cachelib::PtrCompressor<PtrType,
SlabAllocator>;

// compress a given pointer to a valid allocation made out of this allocator
// through an allocate() or nullptr. Calling this otherwise with invalid
Expand Down
4 changes: 2 additions & 2 deletions cachelib/allocator/memory/SlabAllocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -318,8 +318,8 @@ class SlabAllocator {
}

template <typename PtrType>
PtrCompressor<PtrType, SlabAllocator> createPtrCompressor() const {
return PtrCompressor<PtrType, SlabAllocator>(*this);
SingleTierPtrCompressor<PtrType, SlabAllocator> createSingleTierPtrCompressor() const {
return SingleTierPtrCompressor<PtrType, SlabAllocator>(*this);
}

// returns starting address of memory we own.
Expand Down
1 change: 1 addition & 0 deletions run_tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

# Newline separated list of tests to ignore
BLACKLIST="allocator-test-NavySetupTest
allocator-test-NvmCacheTests
shm-test-test_page_size"

if [ "$1" == "long" ]; then
Expand Down