28 commits
c0c90d2
Run centos and debian workflows on push and PR
igchor Nov 2, 2021
dbe3fda
Adds createPutToken and switches findEviction
byrnedj Feb 4, 2023
9afcd64
Add memory usage statistics for allocation classes
igchor Jul 6, 2022
eca7d8c
Initial multi-tier support implementation
igchor Sep 28, 2021
664da8d
AC stats multi-tier
byrnedj Jan 17, 2023
3b7bb0c
Tests and fix tier sizing
byrnedj Feb 8, 2023
58e825b
This is the additional multi-tier support needed
guptask Nov 14, 2022
9fc705f
Rolling average alloc latency
guptask Jul 21, 2022
ce0e38a
Rolling average class latency
guptask Jul 21, 2022
e0a8006
MM2Q promotion iterator
byrnedj Aug 9, 2022
bcb2ae2
Multi-tier allocator patch
byrnedj Feb 7, 2023
d4cf1d4
basic multi-tier test based on numa bindings
igchor Dec 30, 2021
6d2fbef
Adding new configs to hit_ratio/graph_cache_leader_fobj
vinser52 Jan 27, 2022
5bfa1ff
Background data movement for the tiers
byrnedj Oct 21, 2022
1593291
dummy change to trigger container image rebuild
guptask Mar 28, 2023
a171f38
Updated the docker gcc version to 12 (#83)
guptask May 9, 2023
35a17e4
NUMA bindings support for private memory (#82)
vinser52 May 17, 2023
46d168c
Do not run cachelib-centos-8-5 on PRs (#85)
igchor Jun 6, 2023
7d06531
Add option to insert items to first free tier (#87)
igchor Jun 8, 2023
1521efe
Chained item movement between tiers - sync on the parent item (#84)
byrnedj Jun 28, 2023
3328e4e
edit dockerfile
byrnedj Jul 24, 2023
3c87c49
Track latency of per item eviction/promotion between memory tiers
guptask Jul 28, 2023
795f85b
Update dependencies (#95)
igchor Aug 23, 2023
96d948f
enable DTO build without memcpy changes to cachebench
byrnedj Feb 28, 2024
47d5034
Background eviction for multi-tier
byrnedj Feb 28, 2024
efea480
no online eviction option patch
byrnedj Feb 28, 2024
ebfca17
fixes cmake in latest test removal (upstream test build fails - need …
byrnedj May 20, 2024
52618b5
fixes commit for now (should drop once https://github.com/facebook/Ca…
byrnedj May 28, 2024
NUMA bindings support for private memory (#82)
vinser52 authored and byrnedj committed May 20, 2024
commit 35a17e4dd297b256caa731e4504978a18c7e8a57
1 change: 1 addition & 0 deletions cachelib/allocator/CMakeLists.txt
@@ -55,6 +55,7 @@ add_library (cachelib_allocator
PoolOptimizeStrategy.cpp
PoolRebalancer.cpp
PoolResizer.cpp
PrivateMemoryManager.cpp
RebalanceStrategy.cpp
SlabReleaseStats.cpp
TempShmMapping.cpp
49 changes: 37 additions & 12 deletions cachelib/allocator/CacheAllocator.h
@@ -61,6 +61,7 @@
#include "cachelib/allocator/PoolOptimizer.h"
#include "cachelib/allocator/PoolRebalancer.h"
#include "cachelib/allocator/PoolResizer.h"
#include "cachelib/allocator/PrivateMemoryManager.h"
#include "cachelib/allocator/ReadOnlySharedCacheView.h"
#include "cachelib/allocator/Reaper.h"
#include "cachelib/allocator/RebalanceStrategy.h"
@@ -2185,6 +2186,8 @@ class CacheAllocator : public CacheBase {
std::chrono::seconds timeout = std::chrono::seconds{0});

ShmSegmentOpts createShmCacheOpts(TierId tid);
PrivateSegmentOpts createPrivateSegmentOpts(TierId tid);
std::unique_ptr<MemoryAllocator> createPrivateAllocator(TierId tid);
std::unique_ptr<MemoryAllocator> createNewMemoryAllocator(TierId tid);
std::unique_ptr<MemoryAllocator> restoreMemoryAllocator(TierId tid);
std::unique_ptr<CCacheManager> restoreCCacheManager(TierId tid);
@@ -2234,7 +2237,7 @@ class CacheAllocator : public CacheBase {
// @throw std::runtime_error if type is invalid
std::vector<std::unique_ptr<MemoryAllocator>> initAllocator(InitMemType type);

std::vector<std::unique_ptr<MemoryAllocator>> createPrivateAllocator();
std::vector<std::unique_ptr<MemoryAllocator>> createPrivateAllocators();
std::vector<std::unique_ptr<MemoryAllocator>> createAllocators();
std::vector<std::unique_ptr<MemoryAllocator>> restoreAllocators();

@@ -2400,6 +2403,8 @@ class CacheAllocator : public CacheBase {
// is not persisted when cache process exits.
std::unique_ptr<TempShmMapping> tempShm_;

std::unique_ptr<PrivateMemoryManager> privMemManager_;

std::unique_ptr<ShmManager> shmManager_;

// Deserialize data to restore cache allocator. Used only while attaching to
@@ -2612,6 +2617,9 @@ CacheAllocator<CacheTrait>::CacheAllocator(
tempShm_(type == InitMemType::kNone && isOnShm_
? std::make_unique<TempShmMapping>(config_.getCacheSize())
: nullptr),
privMemManager_(type == InitMemType::kNone && !isOnShm_
? std::make_unique<PrivateMemoryManager>()
: nullptr),
shmManager_(type != InitMemType::kNone
? std::make_unique<ShmManager>(config_.cacheDir,
config_.isUsingPosixShm())
@@ -2674,6 +2682,16 @@ ShmSegmentOpts CacheAllocator<CacheTrait>::createShmCacheOpts(TierId tid) {
return opts;
}

template <typename CacheTrait>
PrivateSegmentOpts CacheAllocator<CacheTrait>::createPrivateSegmentOpts(TierId tid) {
PrivateSegmentOpts opts;
opts.alignment = sizeof(Slab);
auto memoryTierConfigs = config_.getMemoryTierConfigs();
opts.memBindNumaNodes = memoryTierConfigs[tid].getMemBind();

return opts;
}

template <typename CacheTrait>
size_t CacheAllocator<CacheTrait>::memoryTierSize(TierId tid) const {
auto partitions = std::accumulate(config_.memoryTierConfigs.begin(), config_.memoryTierConfigs.end(), 0UL,
@@ -2685,22 +2703,19 @@ size_t CacheAllocator<CacheTrait>::memoryTierSize(TierId tid) const {
}

template <typename CacheTrait>
std::vector<std::unique_ptr<MemoryAllocator>>
CacheAllocator<CacheTrait>::createPrivateAllocator() {
std::vector<std::unique_ptr<MemoryAllocator>> allocators;

std::unique_ptr<MemoryAllocator>
CacheAllocator<CacheTrait>::createPrivateAllocator(TierId tid) {
if (isOnShm_) {
allocators.emplace_back(std::make_unique<MemoryAllocator>(
return std::make_unique<MemoryAllocator>(
getAllocatorConfig(config_),
tempShm_->getAddr(),
config_.getCacheSize()));
memoryTierSize(tid));
} else {
allocators.emplace_back(std::make_unique<MemoryAllocator>(
return std::make_unique<MemoryAllocator>(
getAllocatorConfig(config_),
config_.getCacheSize()));
privMemManager_->createMapping(config_.size, createPrivateSegmentOpts(tid)),
memoryTierSize(tid));
}

return allocators;
}

template <typename CacheTrait>
@@ -2729,6 +2744,16 @@ CacheAllocator<CacheTrait>::restoreMemoryAllocator(TierId tid) {
config_.disableFullCoredump);
}

template <typename CacheTrait>
std::vector<std::unique_ptr<MemoryAllocator>>
CacheAllocator<CacheTrait>::createPrivateAllocators() {
std::vector<std::unique_ptr<MemoryAllocator>> allocators;
for (int tid = 0; tid < getNumTiers(); tid++) {
allocators.emplace_back(createPrivateAllocator(tid));
}
return allocators;
}

template <typename CacheTrait>
std::vector<std::unique_ptr<MemoryAllocator>>
CacheAllocator<CacheTrait>::createAllocators() {
@@ -2862,7 +2887,7 @@ std::vector<std::unique_ptr<MemoryAllocator>>
CacheAllocator<CacheTrait>::initAllocator(
InitMemType type) {
if (type == InitMemType::kNone) {
return createPrivateAllocator();
return createPrivateAllocators();
} else if (type == InitMemType::kMemNew) {
return createAllocators();
} else if (type == InitMemType::kMemAttach) {
9 changes: 6 additions & 3 deletions cachelib/allocator/MemoryTierCacheConfig.h
@@ -16,11 +16,14 @@

#pragma once

#include "cachelib/common/Utils.h"
#include "cachelib/shm/ShmCommon.h"

namespace facebook {
namespace cachelib {
class MemoryTierCacheConfig {
using bitmask_type = util::NumaBitMask;

public:
// Creates instance of MemoryTierCacheConfig for Posix/SysV Shared memory.
static MemoryTierCacheConfig fromShm() { return MemoryTierCacheConfig(); }
@@ -39,12 +42,12 @@ class MemoryTierCacheConfig {
size_t getRatio() const noexcept { return ratio; }

// Allocate memory only from specified NUMA nodes
MemoryTierCacheConfig& setMemBind(const NumaBitMask& _numaNodes) {
MemoryTierCacheConfig& setMemBind(const bitmask_type& _numaNodes) {
numaNodes = _numaNodes;
return *this;
}

const NumaBitMask& getMemBind() const noexcept { return numaNodes; }
const bitmask_type& getMemBind() const noexcept { return numaNodes; }

size_t calculateTierSize(size_t totalCacheSize, size_t partitionNum) const {
// TODO: Call this method when tiers are enabled in allocator
@@ -71,7 +74,7 @@ class MemoryTierCacheConfig {
size_t ratio{1};

// Numa node(s) to bind the tier
NumaBitMask numaNodes;
bitmask_type numaNodes;

// TODO: introduce a container for tier settings when adding support for
// file-mapped memory
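For context, a minimal usage sketch (not part of the diff) of building NUMA-bound tier configs with this API. It assumes util::NumaBitMask can be constructed from a node-list string such as "0", as the cachebench call site later in this commit suggests; the two-tier layout and node numbers are purely illustrative.

```cpp
#include <vector>

#include "cachelib/allocator/MemoryTierCacheConfig.h"
#include "cachelib/common/Utils.h"

using facebook::cachelib::MemoryTierCacheConfig;
namespace util = facebook::cachelib::util;

// Two equally weighted tiers, each bound to a different NUMA node.
std::vector<MemoryTierCacheConfig> makeTierConfigs() {
  auto tier0 = MemoryTierCacheConfig::fromShm().setMemBind(util::NumaBitMask("0"));
  tier0.setRatio(1); // ratio controls this tier's share of the total cache size
  auto tier1 = MemoryTierCacheConfig::fromShm().setMemBind(util::NumaBitMask("1"));
  tier1.setRatio(1);
  return {tier0, tier1};
}
```

Each tier's mask is later copied into PrivateSegmentOpts::memBindNumaNodes by createPrivateSegmentOpts(), so private (non-shared) mappings now honor the same binding that shared-memory segments already do.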
50 changes: 50 additions & 0 deletions cachelib/allocator/PrivateMemoryManager.cpp
@@ -0,0 +1,50 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "cachelib/allocator/PrivateMemoryManager.h"

#include <folly/ScopeGuard.h>

namespace facebook {
namespace cachelib {

PrivateMemoryManager::~PrivateMemoryManager() {
for (auto& entry : mappings) {
util::munmapMemory(entry.first, entry.second);
}
}

void* PrivateMemoryManager::createMapping(size_t size,
PrivateSegmentOpts opts) {
void* addr = util::mmapAlignedZeroedMemory(opts.alignment, size);
auto guard = folly::makeGuard([&]() {
util::munmapMemory(addr, size);
mappings.erase(addr);
});

XDCHECK_EQ(reinterpret_cast<uint64_t>(addr) & (opts.alignment - 1), 0ULL);

if (!opts.memBindNumaNodes.empty()) {
util::mbindMemory(addr, size, MPOL_BIND, opts.memBindNumaNodes, 0);
}

mappings.emplace(addr, size);

guard.dismiss();
return addr;
}
} // namespace cachelib
} // namespace facebook
44 changes: 44 additions & 0 deletions cachelib/allocator/PrivateMemoryManager.h
@@ -0,0 +1,44 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <cstddef>
#include <unordered_map>

#include "cachelib/common/Utils.h"

namespace facebook {
namespace cachelib {

struct PrivateSegmentOpts {
size_t alignment{1}; // alignment for mapping.
util::NumaBitMask memBindNumaNodes;
};

class PrivateMemoryManager {
public:
PrivateMemoryManager() {}
~PrivateMemoryManager();

void* createMapping(size_t size, PrivateSegmentOpts opts);

private:
std::unordered_map<void*, size_t> mappings;
};

} // namespace cachelib
} // namespace facebook
2 changes: 1 addition & 1 deletion cachelib/cachebench/util/CacheConfig.h
@@ -52,7 +52,7 @@ struct MemoryTierConfig : public JSONConfig {
MemoryTierCacheConfig getMemoryTierCacheConfig() {
MemoryTierCacheConfig config = MemoryTierCacheConfig::fromShm();
config.setRatio(ratio);
config.setMemBind(NumaBitMask(memBindNodes));
config.setMemBind(util::NumaBitMask(memBindNodes));
return config;
}

1 change: 1 addition & 0 deletions cachelib/common/CMakeLists.txt
@@ -40,6 +40,7 @@ target_link_libraries(cachelib_common PUBLIC
Folly::folly_exception_tracer
Folly::folly_exception_tracer_base
Folly::folly_exception_counter
numa
)

install(TARGETS cachelib_common
17 changes: 17 additions & 0 deletions cachelib/common/Utils.cpp
@@ -16,6 +16,7 @@

#include <dirent.h>
#include <folly/experimental/exception_tracer/ExceptionTracer.h>
#include <numaif.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/shm.h>
@@ -181,6 +182,22 @@ void* mmapAlignedZeroedMemory(size_t alignment,
throw std::system_error(errno, std::system_category(), "Cannot mmap");
}

void munmapMemory(void* addr, size_t size) { munmap(addr, size); }

void mbindMemory(void* addr,
unsigned long len,
int mode,
const NumaBitMask& mask,
unsigned int flags) {
auto nodesMask = mask.getNativeBitmask();

long ret = mbind(addr, len, mode, nodesMask->maskp, nodesMask->size, flags);
if (ret != 0) {
util::throwSystemError(
errno, folly::sformat("mbind() failed: {}", std::strerror(errno)));
}
}

void setMaxLockMemory(uint64_t bytes) {
struct rlimit rlim {
bytes, bytes