Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
c0c90d2
Run centos and debian workflows on push and PR
igchor Nov 2, 2021
dbe3fda
Adds createPutToken and switches findEviction
byrnedj Feb 4, 2023
9afcd64
Add memory usage statistics for allocation classes
igchor Jul 6, 2022
eca7d8c
Initial multi-tier support implementation
igchor Sep 28, 2021
664da8d
AC stats multi-tier
byrnedj Jan 17, 2023
3b7bb0c
Tests and fix tier sizing
byrnedj Feb 8, 2023
58e825b
This is the additional multi-tier support needed
guptask Nov 14, 2022
9fc705f
Rolling average alloc latency
guptask Jul 21, 2022
ce0e38a
Rolling average class latency
guptask Jul 21, 2022
e0a8006
MM2Q promotion iterator
byrnedj Aug 9, 2022
bcb2ae2
Multi-tier allocator patch
byrnedj Feb 7, 2023
d4cf1d4
basic multi-tier test based on numa bindings
igchor Dec 30, 2021
6d2fbef
Adding new configs to hit_ratio/graph_cache_leader_fobj
vinser52 Jan 27, 2022
5bfa1ff
Background data movement for the tiers
byrnedj Oct 21, 2022
1593291
dummy change to trigger container image rebuild
guptask Mar 28, 2023
a171f38
Updated the docker gcc version to 12 (#83)
guptask May 9, 2023
35a17e4
NUMA bindings support for private memory (#82)
vinser52 May 17, 2023
46d168c
Do not run cachelib-centos-8-5 on PRs (#85)
igchor Jun 6, 2023
7d06531
Add option to insert items to first free tier (#87)
igchor Jun 8, 2023
1521efe
Chained item movement between tiers - sync on the parent item (#84)
byrnedj Jun 28, 2023
3328e4e
edit dockerfile
byrnedj Jul 24, 2023
3c87c49
Track latency of per item eviction/promotion between memory tiers
guptask Jul 28, 2023
795f85b
Update dependencies (#95)
igchor Aug 23, 2023
96d948f
enable DTO build without memcpy changes to cachebench
byrnedj Feb 28, 2024
47d5034
Background eviction for multi-tier
byrnedj Feb 28, 2024
efea480
no online eviction option patch
byrnedj Feb 28, 2024
ebfca17
fixes cmake in latest test removal (upstream test build fails - need …
byrnedj May 20, 2024
52618b5
fixes commit for now (should drop once https://github.com/facebook/Ca…
byrnedj May 28, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Tests and fix tier sizing
-------------------------
There are two parts to this commit and we can split them up.

Part 1)

This commit contains the additional memory tiers tests
for different pool sizes. We also use getPoolSize(pid),
to get total size from all pools across allocators.

Part 2)

This part can be merged with the initial multi-tier
part 1. It fixes the tier sizing (pulling in changes from
the rebased issue75 commit that did not make it into the
upstream commits).
  • Loading branch information
byrnedj committed May 20, 2024
commit 3b7bb0c698053029c71331f06df29111b272aff3
38 changes: 32 additions & 6 deletions cachelib/allocator/CacheAllocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -2210,6 +2210,8 @@ class CacheAllocator : public CacheBase {
return config_.memoryTierConfigs.size();
}

size_t memoryTierSize(TierId tid) const;

// Whether the memory allocator for this cache allocator was created on shared
// memory. The hash table, chained item hash table etc is also created on
// shared memory except for temporary shared memory mode when they're created
Expand Down Expand Up @@ -2496,6 +2498,16 @@ ShmSegmentOpts CacheAllocator<CacheTrait>::createShmCacheOpts(TierId tid) {
return opts;
}

template <typename CacheTrait>
size_t CacheAllocator<CacheTrait>::memoryTierSize(TierId tid) const {
  // Total up the ratio weights of every configured tier; each tier is then
  // granted its proportional share of the overall configured cache size.
  size_t totalRatio = 0;
  for (const auto& tierConfig : config_.memoryTierConfigs) {
    totalRatio += tierConfig.getRatio();
  }

  return config_.memoryTierConfigs[tid].calculateTierSize(
      config_.getCacheSize(), totalRatio);
}

template <typename CacheTrait>
std::vector<std::unique_ptr<MemoryAllocator>>
CacheAllocator<CacheTrait>::createPrivateAllocator() {
Expand All @@ -2518,14 +2530,15 @@ CacheAllocator<CacheTrait>::createPrivateAllocator() {
template <typename CacheTrait>
std::unique_ptr<MemoryAllocator>
CacheAllocator<CacheTrait>::createNewMemoryAllocator(TierId tid) {
  // Size this tier's shm segment by its configured ratio share
  // (memoryTierSize), not by the whole configured cache size — each tier
  // owns only its slice of the total.
  size_t tierSize = memoryTierSize(tid);
  return std::make_unique<MemoryAllocator>(
      getAllocatorConfig(config_),
      shmManager_
          ->createShm(detail::kShmCacheName + std::to_string(tid),
                      tierSize, config_.slabMemoryBaseAddr,
                      createShmCacheOpts(tid))
          .addr,
      tierSize);
}

template <typename CacheTrait>
Expand All @@ -2536,7 +2549,7 @@ CacheAllocator<CacheTrait>::restoreMemoryAllocator(TierId tid) {
shmManager_
->attachShm(detail::kShmCacheName + std::to_string(tid),
config_.slabMemoryBaseAddr, createShmCacheOpts(tid)).addr,
config_.getCacheSize(),
memoryTierSize(tid),
config_.disableFullCoredump);
}

Expand Down Expand Up @@ -4830,6 +4843,16 @@ const std::string CacheAllocator<CacheTrait>::getCacheName() const {
return config_.cacheName;
}

template <typename CacheTrait>
size_t CacheAllocator<CacheTrait>::getPoolSize(PoolId poolId) const {
  // A pool spans every memory tier; report the sum of its per-tier sizes.
  size_t totalSize = 0;
  for (size_t tid = 0; tid < allocator_.size(); ++tid) {
    totalSize += allocator_[tid]->getPool(poolId).getPoolSize();
  }
  return totalSize;
}

template <typename CacheTrait>
PoolStats CacheAllocator<CacheTrait>::getPoolStats(PoolId poolId) const {
const auto& pool = allocator_[0]->getPool(poolId);
Expand Down Expand Up @@ -5777,9 +5800,12 @@ GlobalCacheStats CacheAllocator<CacheTrait>::getGlobalCacheStats() const {

template <typename CacheTrait>
CacheMemoryStats CacheAllocator<CacheTrait>::getCacheMemoryStats() const {
const auto totalCacheSize = allocator_[0]->getMemorySize();
const auto configuredTotalCacheSize = allocator_[0]->getMemorySizeInclAdvised();

size_t totalCacheSize = 0;
size_t configuredTotalCacheSize = 0;
for(auto& allocator: allocator_) {
totalCacheSize += allocator->getMemorySize();
configuredTotalCacheSize += allocator->getMemorySizeInclAdvised();
}
auto addSize = [this](size_t a, PoolId pid) {
return a + allocator_[0]->getPool(pid).getPoolSize();
};
Expand Down
6 changes: 3 additions & 3 deletions cachelib/allocator/tests/AllocatorMemoryTiersTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,9 @@ namespace tests {
using LruAllocatorMemoryTiersTest = AllocatorMemoryTiersTest<LruAllocator>;

// TODO(MEMORY_TIER): add more tests with different eviction policies
TEST_F(LruAllocatorMemoryTiersTest, MultiTiersValid1) {
this->testMultiTiersValid1();
}
TEST_F(LruAllocatorMemoryTiersTest, MultiTiersInvalid) { this->testMultiTiersInvalid(); }
TEST_F(LruAllocatorMemoryTiersTest, MultiTiersValid) { this->testMultiTiersValid(); }
TEST_F(LruAllocatorMemoryTiersTest, MultiTiersValidMixed) { this->testMultiTiersValidMixed(); }

} // end of namespace tests
} // end of namespace cachelib
Expand Down
40 changes: 39 additions & 1 deletion cachelib/allocator/tests/AllocatorMemoryTiersTest.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ namespace tests {
template <typename AllocatorT>
class AllocatorMemoryTiersTest : public AllocatorTest<AllocatorT> {
public:
void testMultiTiersValid1() {
void testMultiTiersInvalid() {
typename AllocatorT::Config config;
config.setCacheSize(100 * Slab::kSize);
ASSERT_NO_THROW(config.configureMemoryTiers(
Expand All @@ -36,6 +36,44 @@ class AllocatorMemoryTiersTest : public AllocatorTest<AllocatorT> {
MemoryTierCacheConfig::fromShm().setRatio(1).setMemBind(
std::string("0"))}));
}

void testMultiTiersValid() {
typename AllocatorT::Config config;
config.setCacheSize(100 * Slab::kSize);
config.enableCachePersistence("/tmp");
ASSERT_NO_THROW(config.configureMemoryTiers(
{MemoryTierCacheConfig::fromShm().setRatio(1).setMemBind(
std::string("0")),
MemoryTierCacheConfig::fromShm().setRatio(1).setMemBind(
std::string("0"))}));

auto alloc = std::make_unique<AllocatorT>(AllocatorT::SharedMemNew, config);
ASSERT(alloc != nullptr);

auto pool = alloc->addPool("default", alloc->getCacheMemoryStats().ramCacheSize);
auto handle = alloc->allocate(pool, "key", std::string("value").size());
ASSERT(handle != nullptr);
ASSERT_NO_THROW(alloc->insertOrReplace(handle));
}

// Smoke test for a multi-tier cache: construct, add a pool sized to the
// full RAM cache, allocate one item and insert it.
// NOTE(review): this body is identical to testMultiTiersValid — both tiers
// use fromShm() with memBind "0". Presumably "Mixed" was meant to mix tier
// backings or NUMA bindings; confirm the intended configuration.
void testMultiTiersValidMixed() {
typename AllocatorT::Config config;
config.setCacheSize(100 * Slab::kSize);
config.enableCachePersistence("/tmp");
ASSERT_NO_THROW(config.configureMemoryTiers(
{MemoryTierCacheConfig::fromShm().setRatio(1).setMemBind(
std::string("0")),
MemoryTierCacheConfig::fromShm().setRatio(1).setMemBind(
std::string("0"))}));

auto alloc = std::make_unique<AllocatorT>(AllocatorT::SharedMemNew, config);
ASSERT(alloc != nullptr);

// Pool sized to the whole RAM cache; a single small allocation must fit.
auto pool = alloc->addPool("default", alloc->getCacheMemoryStats().ramCacheSize);
auto handle = alloc->allocate(pool, "key", std::string("value").size());
ASSERT(handle != nullptr);
ASSERT_NO_THROW(alloc->insertOrReplace(handle));
}
};
} // namespace tests
} // namespace cachelib
Expand Down
86 changes: 82 additions & 4 deletions cachelib/allocator/tests/MemoryTiersTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ constexpr size_t MB = 1024ULL * 1024ULL;
constexpr size_t GB = MB * 1024ULL;

const size_t defaultTotalCacheSize{1 * GB};
const std::string defaultCacheDir{"/var/metadataDir"};
const std::string defaultCacheDir{"/tmp/metadataDir"};

template <typename Allocator>
class MemoryTiersTest : public AllocatorTest<Allocator> {
Expand Down Expand Up @@ -109,7 +109,7 @@ class MemoryTiersTest : public AllocatorTest<Allocator> {
// Asserts that the pool's total size — summed across all tiers via the
// multi-tier aware getPoolSize() — matches the expected size.
void validatePoolSize(PoolId poolId,
                      std::unique_ptr<LruAllocator>& allocator,
                      size_t expectedSize) {
  size_t actualSize = allocator->getPoolSize(poolId);
  EXPECT_EQ(actualSize, expectedSize);
}

Expand All @@ -119,9 +119,9 @@ class MemoryTiersTest : public AllocatorTest<Allocator> {
size_t numTiers = 2) {
if (isSizeValid) {
auto pool = alloc->addPool("validPoolSize", poolSize);
EXPECT_LE(alloc->getPool(pool).getPoolSize(), poolSize);
EXPECT_LE(alloc->getPoolSize(pool), poolSize);
if (poolSize >= numTiers * Slab::kSize)
EXPECT_GE(alloc->getPool(pool).getPoolSize(),
EXPECT_GE(alloc->getPoolSize(pool),
poolSize - numTiers * Slab::kSize);
} else {
EXPECT_THROW(alloc->addPool("invalidPoolSize", poolSize),
Expand Down Expand Up @@ -172,6 +172,84 @@ TEST_F(LruMemoryTiersTest, TestInvalid2TierConfigRatioNotSet) {
TEST_F(LruMemoryTiersTest, TestInvalid2TierConfigSizesNeCacheSize) {
EXPECT_THROW(createTestCacheConfig({0, 0}), std::invalid_argument);
}

// For a spread of total cache sizes (two fixed sizes plus a few randomly
// slab-padded variants) and random tier-ratio pairs, build a two-tier
// shared-memory cache and verify that a randomly sized pool (at least one
// slab, at most the RAM cache size) can be added successfully.
TEST_F(LruMemoryTiersTest, TestPoolAllocations) {
std::vector<size_t> totalCacheSizes = {8 * GB, 2 * GB};

static const size_t numExtraSizes = 4;
static const size_t numExtraSlabs = 20;

// Extend the size list with sizes offset by up to numExtraSlabs slabs to
// exercise non-slab-aligned totals.
for (size_t i = 0; i < numExtraSizes; i++) {
totalCacheSizes.push_back(totalCacheSizes.back() +
(folly::Random::rand64() % numExtraSlabs) *
Slab::kSize);
}

size_t min_ratio = 1;
size_t max_ratio = 111;

static const size_t numCombinations = 10;

for (auto totalCacheSize : totalCacheSizes) {
for (size_t k = 0; k < numCombinations; k++) {
// Random per-tier ratio weights in [min_ratio, max_ratio + min_ratio).
const size_t i = folly::Random::rand32() % max_ratio + min_ratio;
const size_t j = folly::Random::rand32() % max_ratio + min_ratio;
LruAllocatorConfig cfg =
createTestCacheConfig({i, j},
/* usePoisx */ true, totalCacheSize);
basicCheck(cfg, totalCacheSize);

std::unique_ptr<LruAllocator> alloc = std::unique_ptr<LruAllocator>(
new LruAllocator(LruAllocator::SharedMemNew, cfg));

// Pool size: at least one slab, strictly below the RAM cache size.
size_t size = (folly::Random::rand64() %
(alloc->getCacheMemoryStats().ramCacheSize - Slab::kSize)) +
Slab::kSize;
testAddPool(alloc, size, true);
}
}
}

// For a spread of total cache sizes and random tier-ratio pairs, verify
// that adding a pool larger than the RAM cache size is rejected. Cache
// construction itself may only throw when the ratio split leaves a tier
// with too few slabs; that invariant is checked in the catch block.
TEST_F(LruMemoryTiersTest, TestPoolInvalidAllocations) {
  std::vector<size_t> totalCacheSizes = {48 * MB, 51 * MB, 256 * MB,
                                         1 * GB, 5 * GB, 8 * GB};
  const size_t min_ratio = 1;
  const size_t max_ratio = 111;

  static const size_t numCombinations = 10;

  for (auto totalCacheSize : totalCacheSizes) {
    for (size_t k = 0; k < numCombinations; k++) {
      // Random per-tier ratio weights in [min_ratio, max_ratio + min_ratio).
      const size_t i = folly::Random::rand32() % max_ratio + min_ratio;
      const size_t j = folly::Random::rand32() % max_ratio + min_ratio;
      LruAllocatorConfig cfg =
          createTestCacheConfig({i, j},
                                /* usePoisx */ true, totalCacheSize);

      std::unique_ptr<LruAllocator> alloc = nullptr;
      try {
        alloc = std::make_unique<LruAllocator>(LruAllocator::SharedMemNew, cfg);
      } catch (...) {
        // Exception is acceptable only when the cache is too small: after
        // the ratio split at least one tier ends up with <= 2 slabs.
        // NOTE: lambda accumulator renamed to avoid shadowing the outer
        // ratio variable `i`.
        size_t sum_ratios = std::accumulate(
            cfg.getMemoryTierConfigs().begin(), cfg.getMemoryTierConfigs().end(), 0UL,
            [](const size_t acc, const MemoryTierCacheConfig& config) {
              return acc + config.getRatio();
            });
        auto tier1slabs = cfg.getMemoryTierConfigs()[0].calculateTierSize(cfg.getCacheSize(), sum_ratios) / Slab::kSize;
        auto tier2slabs = cfg.getMemoryTierConfigs()[1].calculateTierSize(cfg.getCacheSize(), sum_ratios) / Slab::kSize;
        EXPECT_TRUE(tier1slabs <= 2 || tier2slabs <= 2);

        continue;
      }

      // Request a pool strictly larger than the available RAM cache size;
      // addPool must reject it.
      size_t size = (folly::Random::rand64() % (100 * GB)) +
                    alloc->getCacheMemoryStats().ramCacheSize;
      testAddPool(alloc, size, false);
    }
  }
}
} // namespace tests
} // namespace cachelib
} // namespace facebook