#include <folly/memory/JemallocHugePageAllocator.h>

#include <glog/logging.h>

#include <cerrno>
#include <sstream>
#include <utility>

#include <strings.h>
#include <sys/mman.h>

#include <folly/memory/Malloc.h>

#if defined(MADV_HUGEPAGE) && defined(FOLLY_USE_JEMALLOC) && !FOLLY_SANITIZE
#include <jemalloc/jemalloc.h>
#if (JEMALLOC_VERSION_MAJOR >= 5)
#define FOLLY_JEMALLOC_HUGE_PAGE_ALLOCATOR_SUPPORTED 1
bool folly::JemallocHugePageAllocator::hugePagesSupported{true};
#endif
#endif // defined(MADV_HUGEPAGE) && defined(FOLLY_USE_JEMALLOC) && !FOLLY_SANITIZE

#ifndef FOLLY_JEMALLOC_HUGE_PAGE_ALLOCATOR_SUPPORTED
// Mock the pieces of the jemalloc 5 extent API used below when jemalloc is
// absent, too old, or a sanitizer is enabled, so this file still compiles
// and init() simply reports that huge pages are unsupported.
#undef MALLOCX_ARENA
#undef MALLOCX_TCACHE_NONE
#undef MADV_HUGEPAGE
#define MALLOCX_ARENA(x) 0
#define MALLOCX_TCACHE_NONE 0
#define MADV_HUGEPAGE 0
typedef struct extent_hooks_s extent_hooks_t;
typedef void*(extent_alloc_t)(
    extent_hooks_t*, void*, size_t, size_t, bool*, bool*, unsigned);
struct extent_hooks_s {
  extent_alloc_t* alloc;
};
bool folly::JemallocHugePageAllocator::hugePagesSupported{false};
#endif // FOLLY_JEMALLOC_HUGE_PAGE_ALLOCATOR_SUPPORTED

namespace folly {
namespace {

// A bump allocator carving extents out of one contiguous
// madvise(MADV_HUGEPAGE) mapping, wired into jemalloc via extent hooks.
class HugePageArena {
 public:
  // Returns the mallocx() flags to use for this arena, or 0 on failure.
  int init(int nr_pages);

  void* reserve(size_t size, size_t alignment);
  bool addressInArena(void* address) {
    uintptr_t addr = reinterpret_cast<uintptr_t>(address);
    return addr >= start_ && addr < end_;
  }

  size_t freeSpace() { return end_ - freePtr_; }
 private:
  static void* allocHook(
      extent_hooks_t* extent,
      void* new_addr,
      size_t size,
      size_t alignment,
      bool* zero,
      bool* commit,
      unsigned arena_ind);

  uintptr_t start_{0};
  uintptr_t end_{0};
  uintptr_t freePtr_{0};
  extent_alloc_t* originalAlloc_{nullptr};
  extent_hooks_t extentHooks_;
};
constexpr size_t kHugePageSize = 2 * 1024 * 1024;

// Singleton arena instance
static HugePageArena arena;
template <typename T, typename U>
static inline T align_up(T val, U alignment) {
  DCHECK((alignment & (alignment - 1)) == 0);
  return (val + alignment - 1) & ~(alignment - 1);
}
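// Example (not in the original source): align_up(uintptr_t{0x201000},
// kHugePageSize) yields 0x400000, the next 2 MiB boundary; values that are
// already aligned are returned unchanged.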
// mmap enough memory to hold the aligned huge pages, then use madvise to ask
// for huge pages. This is only a hint and is not guaranteed to be honoured;
// check /proc/<pid>/smaps to verify.
static uintptr_t map_pages(size_t nr_pages) {
  // Initial mmapped area is large enough to contain the aligned huge pages
  size_t alloc_size = nr_pages * kHugePageSize;
  void* p = mmap(
      nullptr,
      alloc_size + kHugePageSize,
      PROT_READ | PROT_WRITE,
      MAP_PRIVATE | MAP_ANONYMOUS,
      -1,
      0);
  if (p == MAP_FAILED) {
    return 0;
  }

  // Aligned start address
  uintptr_t first_page = align_up((uintptr_t)p, kHugePageSize);

  // Unmap the unaligned slack at either end of the mapping
  munmap(p, first_page - (uintptr_t)p);
  munmap(
      (void*)(first_page + alloc_size),
      kHugePageSize - (first_page - (uintptr_t)p));

  madvise((void*)first_page, kHugePageSize * nr_pages, MADV_HUGEPAGE);
  LOG(INFO) << nr_pages << " huge pages at " << (void*)first_page;
  return first_page;
}
void* HugePageArena::allocHook(
    extent_hooks_t* extent,
    void* new_addr,
    size_t size,
    size_t alignment,
    bool* zero,
    bool* commit,
    unsigned arena_ind) {
  DCHECK((size & (size - 1)) == 0);
  void* res = nullptr;
  if (new_addr == nullptr) {
    res = arena.reserve(size, alignment);
  }
  LOG(INFO) << "Extent request of size " << size << " alignment " << alignment
            << " = " << res << " (" << arena.freeSpace() << " bytes free)";
  if (res == nullptr) {
    LOG_IF(WARNING, new_addr != nullptr) << "Explicit address not supported";
    // Fall back to the extent allocator jemalloc originally installed
    res = arena.originalAlloc_(
        extent, new_addr, size, alignment, zero, commit, arena_ind);
  } else {
    if (*zero) {
      bzero(res, size);
    }
    *commit = true;
  }
  return res;
}
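// Note on the hook protocol above: a null new_addr means jemalloc has no
// fixed-address requirement, so the extent can be carved from the pre-mapped
// huge-page range; once that range is exhausted (or a fixed address is
// requested) the saved original hook keeps the arena functional, just with
// ordinary pages.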
// Log a mallctl error via PLOG without clobbering the caller's errno.
static void print_error(int err, const char* msg) {
  int cur_errno = std::exchange(errno, err);
  PLOG(ERROR) << msg;
  errno = cur_errno;
}

int HugePageArena::init(int nr_pages) {
  DCHECK(start_ == 0);
  DCHECK(usingJEMalloc());

  unsigned arena_index;
  size_t len = sizeof(arena_index);
  if (auto ret = mallctl("arenas.create", &arena_index, &len, nullptr, 0)) {
    print_error(ret, "Unable to create arena");
    return 0;
  }
  // Set the retain-grow limit to stop jemalloc from forever increasing the
  // requested size after failed allocations. Normally jemalloc asks for maps
  // of increasing size to avoid hitting the per-process mmap limit; since
  // this arena is backed by a single mmap, that is not a concern here.
  size_t mib[3];
  size_t miblen = sizeof(mib) / sizeof(size_t);
  std::ostringstream rtl_key;
  rtl_key << "arena." << arena_index << ".retain_grow_limit";
  if (auto ret = mallctlnametomib(rtl_key.str().c_str(), mib, &miblen)) {
    print_error(ret, "Unable to read growth limit");
    return 0;
  }
  size_t grow_retained_limit = kHugePageSize;
  mib[1] = arena_index;
  if (auto ret = mallctlbymib(
          mib,
          miblen,
          nullptr,
          nullptr,
          &grow_retained_limit,
          sizeof(grow_retained_limit))) {
    print_error(ret, "Unable to set growth limit");
    return 0;
  }
  // Install the custom extent alloc hook, keeping the original so that
  // allocHook can fall back to it.
  std::ostringstream hooks_key;
  hooks_key << "arena." << arena_index << ".extent_hooks";
  extent_hooks_t* hooks;
  len = sizeof(hooks);
  if (auto ret = mallctl(hooks_key.str().c_str(), &hooks, &len, nullptr, 0)) {
    print_error(ret, "Unable to get the hooks");
    return 0;
  }
  originalAlloc_ = hooks->alloc;

  extentHooks_ = *hooks;
  extentHooks_.alloc = &allocHook;
  extent_hooks_t* new_hooks = &extentHooks_;
  if (auto ret = mallctl(
          hooks_key.str().c_str(),
          nullptr,
          nullptr,
          &new_hooks,
          sizeof(new_hooks))) {
    print_error(ret, "Unable to set the hooks");
    return 0;
  }

  uintptr_t start = map_pages(nr_pages);
  if (start == 0) {
    return 0;
  }
  start_ = freePtr_ = start;
  end_ = start_ + (nr_pages * kHugePageSize);
  // mallocx() flags that route allocations to this arena, bypassing the
  // thread cache so every allocation goes through the extent hooks.
  return MALLOCX_ARENA(arena_index) | MALLOCX_TCACHE_NONE;
}
// Bump-pointer allocation from the pre-mapped huge-page range.
void* HugePageArena::reserve(size_t size, size_t alignment) {
  VLOG(1) << "Reserve: " << size << " alignment " << alignment;
  uintptr_t res = align_up(freePtr_, alignment);
  uintptr_t newFreePtr = res + size;
  if (newFreePtr > end_) {
    LOG(WARNING) << "Request of size " << size << " denied: " << freeSpace()
                 << " bytes available - not backed by huge pages";
    return nullptr;
  }
  freePtr_ = newFreePtr;
  return reinterpret_cast<void*>(res);
}
} // namespace

int JemallocHugePageAllocator::flags_{0};
bool JemallocHugePageAllocator::init(int nr_pages) {
  if (!usingJEMalloc()) {
    LOG(ERROR) << "Not linked with jemalloc?";
    hugePagesSupported = false;
  }
  if (hugePagesSupported) {
    if (flags_ == 0) {
      flags_ = arena.init(nr_pages);
    } else {
      LOG(WARNING) << "Already initialized";
    }
  } else {
    LOG(WARNING) << "Huge Page Allocator not supported";
  }
  return flags_ != 0;
}
size_t JemallocHugePageAllocator::freeSpace() {
  return arena.freeSpace();
}

bool JemallocHugePageAllocator::addressInArena(void* address) {
  return arena.addressInArena(address);
}

} // namespace folly
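// ----------------------------------------------------------------------------
// Usage sketch (illustration only, not part of the original file): how a
// caller might drive this allocator, assuming the static init()/allocate()/
// deallocate() interface declared in JemallocHugePageAllocator.h.
//
//   // Back the arena with 16 x 2 MiB huge pages up front.
//   if (folly::JemallocHugePageAllocator::init(16)) {
//     void* p = folly::JemallocHugePageAllocator::allocate(4096);
//     CHECK(folly::JemallocHugePageAllocator::addressInArena(p));
//     folly::JemallocHugePageAllocator::deallocate(p);
//   } // else: jemalloc or huge pages unavailable; init() returned false.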