GuardPageAllocator.cpp
/*
 * Copyright 2015-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <folly/fibers/GuardPageAllocator.h>

#ifndef _WIN32
#include <dlfcn.h>
#endif
#include <signal.h>

#include <cstring>
#include <iostream>
#include <mutex>
#include <unordered_set>

#include <folly/Singleton.h>
#include <folly/SpinLock.h>
#include <folly/Synchronized.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>

#include <glog/logging.h>

namespace folly {
namespace fibers {

/*
 * Number of guarded stacks per allocator instance.
 *
 * Each guarded stack keeps its own mprotect()ed guard page, so the total
 * number of such mappings is bounded by kNumGuarded * kMaxInUse.
 */
constexpr size_t kNumGuarded = 100;

/*
 * Maximum number of allocator instances with guarded stacks enabled.
 */
constexpr size_t kMaxInUse = 100;

/*
 * A cache of up to kNumGuarded guard-protected stacks of a single size.
 *
 * Thread safe.
 */
class StackCache {
 public:
  explicit StackCache(size_t stackSize) : allocSize_(allocSize(stackSize)) {
    auto p = ::mmap(
        nullptr,
        allocSize_ * kNumGuarded,
        PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS,
        -1,
        0);
    PCHECK(p != (void*)(-1));
    storage_ = reinterpret_cast<unsigned char*>(p);

    /* The bottommost page of each stack slot serves as its guard page;
       protection is applied lazily on first borrow() */
    for (size_t i = 0; i < kNumGuarded; ++i) {
      auto allocBegin = storage_ + allocSize_ * i;
      freeList_.emplace_back(allocBegin, /* protected= */ false);
    }
  }

  unsigned char* borrow(size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    if (as != allocSize_ || freeList_.empty()) {
      return nullptr;
    }

    auto p = freeList_.back().first;
    if (!freeList_.back().second) {
      PCHECK(0 == ::mprotect(p, pagesize(), PROT_NONE));
      protectedPages().wlock()->insert(reinterpret_cast<intptr_t>(p));
    }
    freeList_.pop_back();

    /* We allocate the minimum number of pages required, plus a guard page.
       Since we use this for stack storage, the requested allocation is aligned
       at the top of the allocated pages, while the guard page is at the bottom.

               -- increasing addresses -->
             Guard page     Normal pages
            |xxxxxxxxxx|..........|..........|
            <- allocSize_ ------------------->
         p -^                <- size -------->
                             limit -^
    */
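    /* Worked example (illustrative; a 4 KiB page size is assumed, the real
       value comes from pagesize()): for size = 16384, allocSize_ is five
       pages (20480 bytes), so
         limit = p + 20480 - 16384 = p + 4096,
       i.e. the usable stack occupies everything above the single guard page
       at p, and a stack growing down past limit faults on that guard page. */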
    auto limit = p + allocSize_ - size;
    assert(limit >= p + pagesize());
    return limit;
  }

  bool giveBack(unsigned char* limit, size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    if (std::less_equal<void*>{}(limit, storage_) ||
        std::less_equal<void*>{}(storage_ + allocSize_ * kNumGuarded, limit)) {
      /* not mine */
      return false;
    }

    auto p = limit + size - as;
    assert(as == allocSize_);
    assert((p - storage_) % allocSize_ == 0);
    freeList_.emplace_back(p, /* protected= */ true);
    return true;
  }

  ~StackCache() {
    assert(storage_);
    protectedPages().withWLock([&](auto& pages) {
      for (const auto& item : freeList_) {
        pages.erase(reinterpret_cast<intptr_t>(item.first));
      }
    });
    PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
  }

  static bool isProtected(intptr_t addr) {
    // Use a read lock for reading.
    return protectedPages().withRLock([&](auto const& pages) {
      for (const auto& page : pages) {
        intptr_t pageEnd = intptr_t(page + pagesize());
        if (page <= addr && addr < pageEnd) {
          return true;
        }
      }
      return false;
    });
  }

 private:
  folly::SpinLock lock_;
  unsigned char* storage_{nullptr};
  size_t allocSize_{0};

  /*
   * LIFO free list of stack slots. Each entry pairs the slot's base pointer
   * with a flag recording whether its guard page is already mprotect()ed.
   */
  std::vector<std::pair<unsigned char*, bool>> freeList_;

  static size_t pagesize() {
    static const size_t pagesize = size_t(sysconf(_SC_PAGESIZE));
    return pagesize;
  }

  /* Returns a multiple of pagesize() enough to store size + one guard page */
  static size_t allocSize(size_t size) {
    return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
  }
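  /* Worked example (illustrative; assumes a 4 KiB page reported by
     pagesize()): allocSize(10000) = 4096 * ((10000 + 4095) / 4096 + 1)
     = 4096 * (3 + 1) = 16384, i.e. three pages for the requested stack,
     rounded up, plus one guard page. */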

  /* Guard pages that are currently mprotect()ed, consulted by isProtected() */
  static folly::Synchronized<std::unordered_set<intptr_t>>& protectedPages() {
    static auto instance =
        new folly::Synchronized<std::unordered_set<intptr_t>>();
    return *instance;
  }
};

#ifndef _WIN32

namespace {

struct sigaction oldSigsegvAction;

void sigsegvSignalHandler(int signum, siginfo_t* info, void*) {
  if (signum != SIGSEGV) {
    std::cerr << "GuardPageAllocator signal handler called for signal: "
              << signum;
    return;
  }

  if (info &&
      StackCache::isProtected(reinterpret_cast<intptr_t>(info->si_addr))) {
    std::cerr << "folly::fibers Fiber stack overflow detected." << std::endl;
  }

  // Restore old signal handler and let it handle the signal.
  sigaction(signum, &oldSigsegvAction, nullptr);
  raise(signum);
}

bool isInJVM() {
  auto getCreated = dlsym(RTLD_DEFAULT, "JNI_GetCreatedJavaVMs");
  return getCreated;
}

void installSignalHandler() {
  static std::once_flag onceFlag;
  std::call_once(onceFlag, []() {
    if (isInJVM()) {
      // Don't install the signal handler, since the JVM's internal signal
      // handler doesn't work with SA_ONSTACK.
      return;
    }

    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sigemptyset(&sa.sa_mask);
    // By default signal handlers are run on the signaled thread's stack.
    // In case of stack overflow running the SIGSEGV signal handler on
    // the same stack leads to another SIGSEGV and crashes the program.
    // Use SA_ONSTACK, so an alternate stack is used (only if configured via
    // sigaltstack).
    sa.sa_flags |= SA_SIGINFO | SA_ONSTACK;
    sa.sa_sigaction = &sigsegvSignalHandler;
    sigaction(SIGSEGV, &sa, &oldSigsegvAction);
  });
}
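/* SA_ONSTACK only takes effect on threads that have configured an alternate
   signal stack; without one, the handler would run on the fiber stack that
   just overflowed. A minimal sketch of such a per-thread setup (illustrative;
   setupAltSignalStack is a hypothetical helper and the 64 KiB size is an
   arbitrary choice of at least SIGSTKSZ):

     void setupAltSignalStack() {
       static thread_local char altStack[64 * 1024];
       stack_t ss{};
       ss.ss_sp = altStack;
       ss.ss_size = sizeof(altStack);
       ss.ss_flags = 0;
       PCHECK(0 == ::sigaltstack(&ss, nullptr));
     }

   A thread that runs fibers would call this once before any overflow can
   happen. */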
} // namespace

#endif

/* Hands out StackCacheEntry objects, limiting the number in use to kMaxInUse */
class CacheManager {
 public:
  static CacheManager& instance() {
    static auto inst = new CacheManager();
    return *inst;
  }

  std::unique_ptr<StackCacheEntry> getStackCache(size_t stackSize) {
    std::lock_guard<folly::SpinLock> lg(lock_);
    if (inUse_ < kMaxInUse) {
      ++inUse_;
      return std::make_unique<StackCacheEntry>(stackSize);
    }

    return nullptr;
  }

 private:
  folly::SpinLock lock_;
  size_t inUse_{0};

  friend class StackCacheEntry;

  void giveBack(std::unique_ptr<StackCache> /* stackCache_ */) {
    assert(inUse_ > 0);
    --inUse_;
    /* Note: we can add a free list for each size bucket
       if stack re-use is important.
       In this case this needs to be a folly::Singleton
       to make sure the free list is cleaned up on fork.

       TODO(t7351705): fix Singleton destruction order
    */
  }
};

/*
 * RAII Wrapper around a StackCache that calls
 * CacheManager::giveBack() on destruction.
 */
class StackCacheEntry {
 public:
  explicit StackCacheEntry(size_t stackSize)
      : stackCache_(std::make_unique<StackCache>(stackSize)) {}

  StackCache& cache() const noexcept {
    return *stackCache_;
  }

  ~StackCacheEntry() {
    CacheManager::instance().giveBack(std::move(stackCache_));
  }

 private:
  std::unique_ptr<StackCache> stackCache_;
};

GuardPageAllocator::GuardPageAllocator(bool useGuardPages)
    : useGuardPages_(useGuardPages) {
#ifndef _WIN32
  installSignalHandler();
#endif
}

GuardPageAllocator::~GuardPageAllocator() = default;

unsigned char* GuardPageAllocator::allocate(size_t size) {
  if (useGuardPages_ && !stackCache_) {
    stackCache_ = CacheManager::instance().getStackCache(size);
  }

  if (stackCache_) {
    auto p = stackCache_->cache().borrow(size);
    if (p != nullptr) {
      return p;
    }
  }
  return fallbackAllocator_.allocate(size);
}

void GuardPageAllocator::deallocate(unsigned char* limit, size_t size) {
  if (!(stackCache_ && stackCache_->cache().giveBack(limit, size))) {
    fallbackAllocator_.deallocate(limit, size);
  }
}
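
/* Usage sketch (illustrative; everything except GuardPageAllocator's own
   allocate()/deallocate() signatures is an assumption): the pointer returned
   by allocate() is the lowest usable address of the stack, and the same
   pointer/size pair must be handed back to deallocate():

     GuardPageAllocator stackAllocator(true);
     constexpr size_t kStackSize = 16 * 1024;
     unsigned char* stackBottom = stackAllocator.allocate(kStackSize);
     // ... run a fiber whose stack spans [stackBottom, stackBottom + kStackSize) ...
     stackAllocator.deallocate(stackBottom, kStackSize);

   If the guarded cache is exhausted, or guard pages are disabled, both calls
   fall through to fallbackAllocator_. */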
} // namespace fibers
} // namespace folly