JemallocHugePageAllocator.cpp
/*
 * Copyright 2018-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/memory/JemallocHugePageAllocator.h>

#include <folly/memory/Malloc.h>
#include <folly/portability/String.h>
#include <folly/portability/SysMman.h>

#include <glog/logging.h>

#include <sstream>
#include <utility>

#if defined(MADV_HUGEPAGE) && defined(FOLLY_USE_JEMALLOC) && !FOLLY_SANITIZE
#include <jemalloc/jemalloc.h>
#if (JEMALLOC_VERSION_MAJOR >= 5)
#define FOLLY_JEMALLOC_HUGE_PAGE_ALLOCATOR_SUPPORTED 1
bool folly::JemallocHugePageAllocator::hugePagesSupported{true};
#endif

#endif // defined(MADV_HUGEPAGE) && defined(FOLLY_USE_JEMALLOC) && !FOLLY_SANITIZE

#ifndef FOLLY_JEMALLOC_HUGE_PAGE_ALLOCATOR_SUPPORTED
// Some mocks when jemalloc.h is not included, the jemalloc version is too
// old, or the system does not support the MADV_HUGEPAGE madvise flag
#undef MALLOCX_ARENA
#undef MALLOCX_TCACHE_NONE
#undef MADV_HUGEPAGE
#define MALLOCX_ARENA(x) 0
#define MALLOCX_TCACHE_NONE 0
#define MADV_HUGEPAGE 0
typedef struct extent_hooks_s extent_hooks_t;
typedef void*(extent_alloc_t)(
    extent_hooks_t*,
    void*,
    size_t,
    size_t,
    bool*,
    bool*,
    unsigned);
struct extent_hooks_s {
  extent_alloc_t* alloc;
};
bool folly::JemallocHugePageAllocator::hugePagesSupported{false};
#endif // FOLLY_JEMALLOC_HUGE_PAGE_ALLOCATOR_SUPPORTED

namespace folly {
namespace {

// Report a mallctl-style error code via PLOG without clobbering errno.
static void print_error(int err, const char* msg) {
  int cur_errno = std::exchange(errno, err);
  PLOG(ERROR) << msg;
  errno = cur_errno;
}

// Arena that carves allocations out of a single pre-mapped huge page region.
class HugePageArena {
 public:
  int init(int nr_pages);
  void* reserve(size_t size, size_t alignment);

  bool addressInArena(void* address) {
    uintptr_t addr = reinterpret_cast<uintptr_t>(address);
    return addr >= start_ && addr < end_;
  }

  size_t freeSpace() {
    return end_ - freePtr_;
  }

 private:
  static void* allocHook(
      extent_hooks_t* extent,
      void* new_addr,
      size_t size,
      size_t alignment,
      bool* zero,
      bool* commit,
      unsigned arena_ind);

  uintptr_t start_{0};
  uintptr_t end_{0};
  uintptr_t freePtr_{0};
  extent_alloc_t* originalAlloc_{nullptr};
  extent_hooks_t extentHooks_;
};

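// 2MB is the default huge page size on x86_64 (cf. Hugepagesize in
// /proc/meminfo).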
constexpr size_t kHugePageSize = 2 * 1024 * 1024;

// Singleton arena instance
static HugePageArena arena;

template <typename T, typename U>
static inline T align_up(T val, U alignment) {
  DCHECK((alignment & (alignment - 1)) == 0);
  return (val + alignment - 1) & ~(alignment - 1);
}
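// align_up() rounds val up to the next multiple of a power-of-two alignment,
// e.g. align_up(3, 4) == 4, align_up(8, 4) == 8,
// align_up(0x1001, 0x1000) == 0x2000.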

// mmap enough memory to hold the aligned huge pages, then use madvise
// to get huge pages. Note that this is only a hint and is not guaranteed
// to be honoured. Check /proc/<pid>/smaps to verify!
static uintptr_t map_pages(size_t nr_pages) {
  // Initial mmapped area is large enough to contain the aligned huge pages
  size_t alloc_size = nr_pages * kHugePageSize;
  void* p = mmap(
      nullptr,
      alloc_size + kHugePageSize,
      PROT_READ | PROT_WRITE,
      MAP_PRIVATE | MAP_ANONYMOUS,
      -1,
      0);

  if (p == MAP_FAILED) {
    return 0;
  }

  // Aligned start address
  uintptr_t first_page = align_up((uintptr_t)p, kHugePageSize);

  // Unmap left-over 4k pages
  munmap(p, first_page - (uintptr_t)p);
  munmap(
      (void*)(first_page + alloc_size),
      kHugePageSize - (first_page - (uintptr_t)p));

  // Tell the kernel to please give us huge pages for this range
  madvise((void*)first_page, kHugePageSize * nr_pages, MADV_HUGEPAGE);
  LOG(INFO) << nr_pages << " huge pages at " << (void*)first_page;
  return first_page;
}

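// jemalloc extent allocation hook for the huge page arena: requests are
// served from the pre-mapped huge page range when possible; otherwise (out of
// space, or jemalloc asked for a specific address) the original hook is used
// as a fallback.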
void* HugePageArena::allocHook(
    extent_hooks_t* extent,
    void* new_addr,
    size_t size,
    size_t alignment,
    bool* zero,
    bool* commit,
    unsigned arena_ind) {
  DCHECK((size & (size - 1)) == 0);
  void* res = nullptr;
  if (new_addr == nullptr) {
    res = arena.reserve(size, alignment);
  }
  LOG(INFO) << "Extent request of size " << size << " alignment " << alignment
            << " = " << res << " (" << arena.freeSpace() << " bytes free)";
  if (res == nullptr) {
    LOG_IF(WARNING, new_addr != nullptr) << "Explicit address not supported";
    res = arena.originalAlloc_(
        extent, new_addr, size, alignment, zero, commit, arena_ind);
  } else {
    if (*zero) {
      bzero(res, size);
    }
    *commit = true;
  }
  return res;
}

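// Creates a dedicated jemalloc arena, caps its growth, installs allocHook as
// the arena's extent allocation hook, and maps the huge page range. Returns
// the mallocx() flags to use with this arena (arena index + no tcache), or 0
// on failure.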
int HugePageArena::init(int nr_pages) {
  DCHECK(start_ == 0);
  DCHECK(usingJEMalloc());

  unsigned arena_index;
  size_t len = sizeof(arena_index);
  if (auto ret = mallctl("arenas.create", &arena_index, &len, nullptr, 0)) {
    print_error(ret, "Unable to create arena");
    return 0;
  }

  // Set the grow retained limit to stop jemalloc from forever increasing
  // the requested size after failed allocations.
  // Normally jemalloc asks for maps of increasing size in order to avoid
  // hitting the limit of allowed mmaps per process.
  // Since this arena is backed by a single mmap and is using huge pages,
  // this is not a concern here.
  // TODO: Support growth of the huge page arena.
  size_t mib[3];
  size_t miblen = sizeof(mib) / sizeof(size_t);
  std::ostringstream rtl_key;
  rtl_key << "arena." << arena_index << ".retain_grow_limit";
  if (auto ret = mallctlnametomib(rtl_key.str().c_str(), mib, &miblen)) {
    print_error(ret, "Unable to read growth limit");
    return 0;
  }
  size_t grow_retained_limit = kHugePageSize;
  mib[1] = arena_index;
  if (auto ret = mallctlbymib(
          mib,
          miblen,
          nullptr,
          nullptr,
          &grow_retained_limit,
          sizeof(grow_retained_limit))) {
    print_error(ret, "Unable to set growth limit");
    return 0;
  }

  std::ostringstream hooks_key;
  hooks_key << "arena." << arena_index << ".extent_hooks";
  extent_hooks_t* hooks;
  len = sizeof(hooks);
  // Read the existing hooks
  if (auto ret = mallctl(hooks_key.str().c_str(), &hooks, &len, nullptr, 0)) {
    print_error(ret, "Unable to get the hooks");
    return 0;
  }
  originalAlloc_ = hooks->alloc;

  // Set the custom hook
  extentHooks_ = *hooks;
  extentHooks_.alloc = &allocHook;
  extent_hooks_t* new_hooks = &extentHooks_;
  if (auto ret = mallctl(
          hooks_key.str().c_str(),
          nullptr,
          nullptr,
          &new_hooks,
          sizeof(new_hooks))) {
    print_error(ret, "Unable to set the hooks");
    return 0;
  }

  start_ = freePtr_ = map_pages(nr_pages);
  if (start_ == 0) {
    return 0;
  }
  end_ = start_ + (nr_pages * kHugePageSize);
  return MALLOCX_ARENA(arena_index) | MALLOCX_TCACHE_NONE;
}

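// Simple bump-pointer allocation from the pre-mapped huge page range;
// freePtr_ only moves forward, so the arena itself never reclaims space.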
void* HugePageArena::reserve(size_t size, size_t alignment) {
  VLOG(1) << "Reserve: " << size << " alignment " << alignment;
  uintptr_t res = align_up(freePtr_, alignment);
  uintptr_t newFreePtr = res + size;
  if (newFreePtr > end_) {
    LOG(WARNING) << "Request of size " << size << " denied: " << freeSpace()
                 << " bytes available - not backed by huge pages";
    return nullptr;
  }
  freePtr_ = newFreePtr;
  return reinterpret_cast<void*>(res);
}

} // namespace

int JemallocHugePageAllocator::flags_{0};

bool JemallocHugePageAllocator::init(int nr_pages) {
  if (!usingJEMalloc()) {
    LOG(ERROR) << "Not linked with jemalloc?";
    hugePagesSupported = false;
  }
  if (hugePagesSupported) {
    if (flags_ == 0) {
      flags_ = arena.init(nr_pages);
    } else {
      LOG(WARNING) << "Already initialized";
    }
  } else {
    LOG(WARNING) << "Huge Page Allocator not supported";
  }
  return flags_ != 0;
}

size_t JemallocHugePageAllocator::freeSpace() {
  return arena.freeSpace();
}

bool JemallocHugePageAllocator::addressInArena(void* address) {
  return arena.addressInArena(address);
}

} // namespace folly
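
For context, a minimal usage sketch follows. Only init(), freeSpace() and addressInArena() are defined in this file; the allocate()/deallocate() wrappers are assumed to be provided by the accompanying JemallocHugePageAllocator.h (built on mallocx()/dallocx() with flags_), so treat this as an illustration rather than the exact public API.

#include <folly/memory/JemallocHugePageAllocator.h>

#include <glog/logging.h>

int main() {
  // Reserve 16 huge pages (32MB with 2MB pages) up front. Returns false if
  // jemalloc 5+ is not linked or huge pages are otherwise unsupported.
  if (!folly::JemallocHugePageAllocator::init(16)) {
    LOG(WARNING) << "Huge page arena unavailable; falling back to malloc";
  }

  // allocate()/deallocate() are assumed header-provided wrappers (see above).
  void* p = folly::JemallocHugePageAllocator::allocate(1 << 20);
  LOG(INFO) << "backed by huge pages: "
            << folly::JemallocHugePageAllocator::addressInArena(p)
            << ", bytes left in arena: "
            << folly::JemallocHugePageAllocator::freeSpace();
  folly::JemallocHugePageAllocator::deallocate(p);
  return 0;
}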