ThreadLocalTest.cpp
/*
 * Copyright 2011-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/ThreadLocal.h>

#ifndef _WIN32
#include <dlfcn.h>
#include <sys/wait.h>
#endif

#include <sys/types.h>

#include <array>
#include <atomic>
#include <chrono>
#include <climits>
#include <condition_variable>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <thread>
#include <unordered_map>

#include <glog/logging.h>

#include <folly/Memory.h>
#include <folly/experimental/io/FsUtil.h>
#include <folly/portability/GTest.h>
#include <folly/portability/Unistd.h>
#include <folly/synchronization/Baton.h>
#include <folly/synchronization/detail/ThreadCachedInts.h>
#include <folly/system/ThreadId.h>

using namespace folly;

struct Widget {
  static int totalVal_;
  int val_;
  ~Widget() {
    totalVal_ += val_;
  }

  static void customDeleter(Widget* w, TLPDestructionMode mode) {
    totalVal_ += (mode == TLPDestructionMode::ALL_THREADS) ? 1000 : 1;
    delete w;
  }
};
int Widget::totalVal_ = 0;
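
// Note on the expected totals in the tests below: a plain delete runs
// ~Widget and adds val_ to totalVal_, while customDeleter first adds 1
// (THIS_THREAD) or 1000 (ALL_THREADS) and then deletes the Widget, which
// adds val_ on top via ~Widget().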

struct MultiWidget {
  int val_{0};
  MultiWidget() = default;
  ~MultiWidget() {
    // force a reallocation in the destructor by
    // allocating more than elementsCapacity

    using TL = ThreadLocal<size_t>;
    using TLMeta = threadlocal_detail::static_meta_of<TL>::type;
    auto const numElements = TLMeta::instance().elementsCapacity() + 1;
    std::vector<ThreadLocal<size_t>> elems(numElements);
    for (auto& t : elems) {
      *t += 1;
    }
  }
};
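
// Destroying a MultiWidget at thread exit allocates more ThreadLocal slots
// than the per-thread element array currently holds, which forces that array
// to grow while the thread is shutting down; ReallocDestructor below
// exercises exactly that path.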

TEST(ThreadLocalPtr, BasicDestructor) {
  Widget::totalVal_ = 0;
  ThreadLocalPtr<Widget> w;
  std::thread([&w]() {
    w.reset(new Widget());
    w.get()->val_ += 10;
  }).join();
  EXPECT_EQ(10, Widget::totalVal_);
}

TEST(ThreadLocalPtr, CustomDeleter1) {
  Widget::totalVal_ = 0;
  {
    ThreadLocalPtr<Widget> w;
    std::thread([&w]() {
      w.reset(new Widget(), Widget::customDeleter);
      w.get()->val_ += 10;
    }).join();
    // customDeleter adds 1 (THIS_THREAD), then ~Widget adds val_ (10)
    EXPECT_EQ(11, Widget::totalVal_);
  }
  // Destroying the ThreadLocalPtr after the thread exited changes nothing
  EXPECT_EQ(11, Widget::totalVal_);
}

TEST(ThreadLocalPtr, CustomDeleterOwnershipTransfer) {
  Widget::totalVal_ = 0;
  {
    ThreadLocalPtr<Widget> w;
    auto deleter = [](Widget* ptr) {
      Widget::customDeleter(ptr, TLPDestructionMode::THIS_THREAD);
    };
    std::unique_ptr<Widget, decltype(deleter)> source(new Widget(), deleter);
    std::thread([&w, &source]() {
      w.reset(std::move(source));
      w.get()->val_ += 10;
    }).join();
    EXPECT_EQ(11, Widget::totalVal_);
  }
  EXPECT_EQ(11, Widget::totalVal_);
}

TEST(ThreadLocalPtr, DefaultDeleterOwnershipTransfer) {
  Widget::totalVal_ = 0;
  {
    ThreadLocalPtr<Widget> w;
    auto source = std::make_unique<Widget>();
    std::thread([&w, &source]() {
      w.reset(std::move(source));
      w.get()->val_ += 10;
    }).join();
    EXPECT_EQ(10, Widget::totalVal_);
  }
  EXPECT_EQ(10, Widget::totalVal_);
}

TEST(ThreadLocalPtr, resetNull) {
  ThreadLocalPtr<int> tl;
  EXPECT_FALSE(tl);
  tl.reset(new int(4));
  EXPECT_TRUE(static_cast<bool>(tl));
  EXPECT_EQ(*tl.get(), 4);
  tl.reset();
  EXPECT_FALSE(tl);
}

TEST(ThreadLocalPtr, TestRelease) {
  Widget::totalVal_ = 0;
  ThreadLocalPtr<Widget> w;
  std::unique_ptr<Widget> wPtr;
  std::thread([&w, &wPtr]() {
    w.reset(new Widget());
    w.get()->val_ += 10;

    wPtr.reset(w.release());
  }).join();
  // release() transferred ownership, so thread exit destroyed nothing
  EXPECT_EQ(0, Widget::totalVal_);
  wPtr.reset();
  EXPECT_EQ(10, Widget::totalVal_);
}

TEST(ThreadLocalPtr, CreateOnThreadExit) {
  Widget::totalVal_ = 0;
  ThreadLocal<Widget> w;
  ThreadLocalPtr<int> tl;

  std::thread([&] {
    tl.reset(new int(1), [&](int* ptr, TLPDestructionMode /* mode */) {
      delete ptr;
      // This test ensures Widgets allocated here are not leaked.
      ++w.get()->val_;
      ThreadLocal<Widget> wl;
      ++wl.get()->val_;
    });
  }).join();
  EXPECT_EQ(2, Widget::totalVal_);
}

// Test deleting the ThreadLocalPtr object
TEST(ThreadLocalPtr, CustomDeleter2) {
  Widget::totalVal_ = 0;
  std::thread t;
  std::mutex mutex;
  std::condition_variable cv;
  enum class State {
    START,
    DONE,
    EXIT,
  };
  State state = State::START;
  {
    ThreadLocalPtr<Widget> w;
    t = std::thread([&]() {
      w.reset(new Widget(), Widget::customDeleter);
      w.get()->val_ += 10;

      // Notify main thread that we're done
      {
        std::unique_lock<std::mutex> lock(mutex);
        state = State::DONE;
        cv.notify_all();
      }

      // Wait for main thread to allow us to exit
      {
        std::unique_lock<std::mutex> lock(mutex);
        while (state != State::EXIT) {
          cv.wait(lock);
        }
      }
    });

    // Wait for the new thread to start (and set w.get()->val_)
    {
      std::unique_lock<std::mutex> lock(mutex);
      while (state != State::DONE) {
        cv.wait(lock);
      }
    }

    // Thread started but hasn't exited yet
    EXPECT_EQ(0, Widget::totalVal_);

    // Destroy ThreadLocalPtr<Widget> (by letting it go out of scope)
  }

  // customDeleter (ALL_THREADS) adds 1000, then ~Widget adds 10
  EXPECT_EQ(1010, Widget::totalVal_);

  // Allow thread to exit
  {
    std::unique_lock<std::mutex> lock(mutex);
    state = State::EXIT;
    cv.notify_all();
  }
  t.join();

  EXPECT_EQ(1010, Widget::totalVal_);
}

TEST(ThreadLocal, BasicDestructor) {
  Widget::totalVal_ = 0;
  ThreadLocal<Widget> w;
  std::thread([&w]() { w->val_ += 10; }).join();
  EXPECT_EQ(10, Widget::totalVal_);
}

// this should force a realloc of the ElementWrapper array
TEST(ThreadLocal, ReallocDestructor) {
  ThreadLocal<MultiWidget> w;
  std::thread([&w]() { w->val_ += 10; }).join();
}

TEST(ThreadLocal, SimpleRepeatDestructor) {
  Widget::totalVal_ = 0;
  {
    ThreadLocal<Widget> w;
    w->val_ += 10;
  }
  {
    ThreadLocal<Widget> w;
    w->val_ += 10;
  }
  EXPECT_EQ(20, Widget::totalVal_);
}

TEST(ThreadLocal, InterleavedDestructors) {
  Widget::totalVal_ = 0;
  std::unique_ptr<ThreadLocal<Widget>> w;
  int wVersion = 0;
  const int wVersionMax = 2;
  int thIter = 0;
  std::mutex lock;
  auto th = std::thread([&]() {
    int wVersionPrev = 0;
    while (true) {
      while (true) {
        std::lock_guard<std::mutex> g(lock);
        if (wVersion > wVersionMax) {
          return;
        }
        if (wVersion > wVersionPrev) {
          // We have a new version of w, so it should be initialized to zero
          EXPECT_EQ((*w)->val_, 0);
          break;
        }
      }
      std::lock_guard<std::mutex> g(lock);
      wVersionPrev = wVersion;
      (*w)->val_ += 10;
      ++thIter;
    }
  });
  FOR_EACH_RANGE (i, 0, wVersionMax) {
    int thIterPrev = 0;
    {
      std::lock_guard<std::mutex> g(lock);
      thIterPrev = thIter;
      w = std::make_unique<ThreadLocal<Widget>>();
      ++wVersion;
    }
    while (true) {
      std::lock_guard<std::mutex> g(lock);
      if (thIter > thIterPrev) {
        break;
      }
    }
  }
  {
    std::lock_guard<std::mutex> g(lock);
    wVersion = wVersionMax + 1;
  }
  th.join();
  EXPECT_EQ(wVersionMax * 10, Widget::totalVal_);
}

class SimpleThreadCachedInt {
  class NewTag;
  ThreadLocal<int, NewTag> val_;

 public:
  void add(int val) {
    *val_ += val;
  }

  int read() {
    int ret = 0;
    for (const auto& i : val_.accessAllThreads()) {
      ret += i;
    }
    return ret;
  }
};
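
// NewTag gives val_ its own accessAllThreads() registry: folly requires a
// non-default tag to use accessAllThreads(), and a private tag keeps this
// counter's iteration separate from unrelated ThreadLocal<int> instances.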

TEST(ThreadLocalPtr, AccessAllThreadsCounter) {
  const int kNumThreads = 256;
  SimpleThreadCachedInt stci[kNumThreads + 1];
  std::atomic<bool> run(true);
  std::atomic<int> totalAtomic{0};
  std::vector<std::thread> threads;
  // thread i will increment all the thread locals
  // in the range 0..i
  for (int i = 0; i < kNumThreads; ++i) {
    threads.push_back(std::thread([i, // i needs to be captured by value
                                   &stci,
                                   &run,
                                   &totalAtomic]() {
      for (int j = 0; j <= i; j++) {
        stci[j].add(1);
      }

      totalAtomic.fetch_add(1);
      while (run.load()) {
        usleep(100);
      }
    }));
  }
  while (totalAtomic.load() != kNumThreads) {
    usleep(100);
  }
  for (int i = 0; i <= kNumThreads; i++) {
    EXPECT_EQ(kNumThreads - i, stci[i].read());
  }
  run.store(false);
  for (auto& t : threads) {
    t.join();
  }
}

TEST(ThreadLocal, resetNull) {
  ThreadLocal<int> tl;
  tl.reset(new int(4));
  EXPECT_EQ(*tl.get(), 4);
  tl.reset();
  EXPECT_EQ(*tl.get(), 0);
  tl.reset(new int(5));
  EXPECT_EQ(*tl.get(), 5);
}

namespace {
struct Tag {};

struct Foo {
  folly::ThreadLocal<int, Tag> tl;
};
} // namespace

TEST(ThreadLocal, Movable1) {
  Foo a;
  Foo b;
  EXPECT_TRUE(a.tl.get() != b.tl.get());

  a = Foo();
  b = Foo();
  EXPECT_TRUE(a.tl.get() != b.tl.get());
}

TEST(ThreadLocal, Movable2) {
  std::map<int, Foo> map;

  map[42];
  map[10];
  map[23];
  map[100];

  std::set<void*> tls;
  for (auto& m : map) {
    tls.insert(m.second.tl.get());
  }

  // Make sure that we have 4 different instances of *tl
  EXPECT_EQ(4, tls.size());
}

namespace {
class ThreadCachedIntWidget {
 public:
  ThreadCachedIntWidget() {}

  ~ThreadCachedIntWidget() {
    if (ints_) {
      ints_->increment(0);
    }
  }

  void set(detail::ThreadCachedInts<void>* ints) {
    ints_ = ints;
  }

 private:
  detail::ThreadCachedInts<void>* ints_{nullptr};
};
} // namespace

TEST(ThreadLocal, TCICreateOnThreadExit) {
  detail::ThreadCachedInts<void> ints;
  ThreadLocal<ThreadCachedIntWidget> w;

  std::thread([&] {
    // make sure the ints object is created
    ints.increment(1);
    // now the widget
    w->set(&ints);
  }).join();
}

namespace {

constexpr size_t kFillObjectSize = 300;

std::atomic<uint64_t> gDestroyed;

/**
 * Fill a chunk of memory with a pattern derived from the object index and
 * the current thread id, and verify that the pattern is still intact both
 * on explicit check() calls and when the object is destroyed.
 */
class FillObject {
 public:
  explicit FillObject(uint64_t idx) : idx_(idx) {
    uint64_t v = val();
    for (size_t i = 0; i < kFillObjectSize; ++i) {
      data_[i] = v;
    }
  }

  void check() {
    uint64_t v = val();
    for (size_t i = 0; i < kFillObjectSize; ++i) {
      CHECK_EQ(v, data_[i]);
    }
  }

  ~FillObject() {
    ++gDestroyed;
  }

 private:
  uint64_t val() const {
    return (idx_ << 40) | folly::getCurrentThreadID();
  }

  uint64_t idx_;
  uint64_t data_[kFillObjectSize];
};

} // namespace

TEST(ThreadLocal, Stress) {
  static constexpr size_t numFillObjects = 250;
  std::array<ThreadLocalPtr<FillObject>, numFillObjects> objects;

  static constexpr size_t numThreads = 32;
  static constexpr size_t numReps = 20;

  std::vector<std::thread> threads;
  threads.reserve(numThreads);

  for (size_t k = 0; k < numThreads; ++k) {
    threads.emplace_back([&objects] {
      for (size_t rep = 0; rep < numReps; ++rep) {
        for (size_t i = 0; i < objects.size(); ++i) {
          objects[i].reset(new FillObject(rep * objects.size() + i));
          std::this_thread::sleep_for(std::chrono::microseconds(100));
        }
        for (size_t i = 0; i < objects.size(); ++i) {
          objects[i]->check();
        }
      }
    });
  }

  for (auto& t : threads) {
    t.join();
  }

  EXPECT_EQ(numFillObjects * numThreads * numReps, gDestroyed);
}

// Yes, threads and fork don't mix
// (http://cppwisdom.quora.com/Why-threads-and-fork-dont-mix) but if you're
// stupid or desperate enough to try, we shouldn't stand in your way.
namespace {
class HoldsOne {
 public:
  HoldsOne() : value_(1) {}
  // Do an actual access to catch the buggy case where this == nullptr
  int value() const {
    return value_;
  }

 private:
  int value_;
};

struct HoldsOneTag {};

ThreadLocal<HoldsOne, HoldsOneTag> ptr;

int totalValue() {
  int value = 0;
  for (auto& p : ptr.accessAllThreads()) {
    value += p.value();
  }
  return value;
}

} // namespace
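
// Every thread that touches ptr contributes one HoldsOne worth 1, so
// totalValue() effectively counts the threads known to this tag; the fork
// tests below use it to check which threads survive a fork().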

#ifdef FOLLY_HAVE_PTHREAD_ATFORK
TEST(ThreadLocal, Fork) {
  EXPECT_EQ(1, ptr->value()); // ensure created
  EXPECT_EQ(1, totalValue());
  // Spawn a new thread

  std::mutex mutex;
  bool started = false;
  std::condition_variable startedCond;
  bool stopped = false;
  std::condition_variable stoppedCond;

  std::thread t([&]() {
    EXPECT_EQ(1, ptr->value()); // ensure created
    {
      std::unique_lock<std::mutex> lock(mutex);
      started = true;
      startedCond.notify_all();
    }
    {
      std::unique_lock<std::mutex> lock(mutex);
      while (!stopped) {
        stoppedCond.wait(lock);
      }
    }
  });

  {
    std::unique_lock<std::mutex> lock(mutex);
    while (!started) {
      startedCond.wait(lock);
    }
  }

  EXPECT_EQ(2, totalValue());

  pid_t pid = fork();
  if (pid == 0) {
    // in child
    int v = totalValue();

    // exit successfully if v == 1 (one thread)
    // diagnostic error code otherwise :)
    switch (v) {
      case 1:
        _exit(0);
      case 0:
        _exit(1);
    }
    _exit(2);
  } else if (pid > 0) {
    // in parent
    int status;
    EXPECT_EQ(pid, waitpid(pid, &status, 0));
    EXPECT_TRUE(WIFEXITED(status));
    EXPECT_EQ(0, WEXITSTATUS(status));
  } else {
    ADD_FAILURE() << "fork failed";
  }

  EXPECT_EQ(2, totalValue());

  {
    std::unique_lock<std::mutex> lock(mutex);
    stopped = true;
    stoppedCond.notify_all();
  }

  t.join();

  EXPECT_EQ(1, totalValue());
}
#endif

#ifndef _WIN32
struct HoldsOneTag2 {};

TEST(ThreadLocal, Fork2) {
  // A thread-local tag that was used in the parent from a *different* thread
  // (but not the forking thread) would cause the child to hang in a
  // ThreadLocalPtr's object destructor. Yeah.
  ThreadLocal<HoldsOne, HoldsOneTag2> p;
  {
    // use tag in different thread
    std::thread t([&p] { p.get(); });
    t.join();
  }
  pid_t pid = fork();
  if (pid == 0) {
    {
      ThreadLocal<HoldsOne, HoldsOneTag2> q;
      q.get();
    }
    _exit(0);
  } else if (pid > 0) {
    int status;
    EXPECT_EQ(pid, waitpid(pid, &status, 0));
    EXPECT_TRUE(WIFEXITED(status));
    EXPECT_EQ(0, WEXITSTATUS(status));
  } else {
    ADD_FAILURE() << "fork failed";
  }
}

// Disable the SharedLibrary test when using any sanitizer. Otherwise, the
// dlopen'ed code would end up running without e.g., ASAN-initialized data
// structures and failing right away.
//
// We also cannot run this test unless folly was compiled with PIC support,
// since we cannot build thread_local_test_lib.so without PIC.
#if defined FOLLY_SANITIZE_ADDRESS || defined FOLLY_SANITIZE_THREAD || \
    !defined FOLLY_SUPPORT_SHARED_LIBRARY
#define SHARED_LIBRARY_TEST_NAME DISABLED_SharedLibrary
#else
#define SHARED_LIBRARY_TEST_NAME SharedLibrary
#endif
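
// gtest skips any test whose name starts with DISABLED_, so expanding the
// macro to DISABLED_SharedLibrary keeps the test compiled but not run under
// sanitizers or non-PIC builds.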

TEST(ThreadLocal, SHARED_LIBRARY_TEST_NAME) {
  auto exe = fs::executable_path();
  auto lib = exe.parent_path() / "thread_local_test_lib.so";
  auto handle = dlopen(lib.string().c_str(), RTLD_LAZY);
  ASSERT_NE(nullptr, handle)
      << "unable to load " << lib.string() << ": " << dlerror();

  typedef void (*useA_t)();
  dlerror();
  useA_t useA = (useA_t)dlsym(handle, "useA");

  const char* dlsym_error = dlerror();
  EXPECT_EQ(nullptr, dlsym_error);
  ASSERT_NE(nullptr, useA);

  useA();

  folly::Baton<> b11, b12, b21, b22;

  std::thread t1([&]() {
    useA();
    b11.post();
    b12.wait();
  });

  std::thread t2([&]() {
    useA();
    b21.post();
    b22.wait();
  });

  b11.wait();
  b21.wait();

  dlclose(handle);

  b12.post();
  b22.post();

  t1.join();
  t2.join();
}

#endif

namespace folly {
namespace threadlocal_detail {
struct PthreadKeyUnregisterTester {
  PthreadKeyUnregister p;
  constexpr PthreadKeyUnregisterTester() = default;
};
} // namespace threadlocal_detail
} // namespace folly

TEST(ThreadLocal, UnregisterClassHasConstExprCtor) {
  folly::threadlocal_detail::PthreadKeyUnregisterTester x;
  (void)x;
  // yep!
  SUCCEED();
}