proxygen
SynchronizedTestLib-inl.h
Go to the documentation of this file.
1 /*
2  * Copyright 2012-present Facebook, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #pragma once
18 
#include <folly/Random.h>
#include <folly/Synchronized.h>
#include <folly/portability/GTest.h>
#include <glog/logging.h>

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <map>
#include <mutex>
#include <random>
#include <thread>
#include <vector>
31 
32 namespace folly {
33 namespace sync_tests {
34 
35 inline std::mt19937& getRNG() {
36  static const auto seed = folly::randomNumberSeed();
37  static std::mt19937 rng(seed);
38  return rng;
39 }
40 
41 void randomSleep(std::chrono::milliseconds min, std::chrono::milliseconds max) {
42  std::uniform_int_distribution<> range(min.count(), max.count());
43  std::chrono::milliseconds duration(range(getRNG()));
44  /* sleep override */
45  std::this_thread::sleep_for(duration);
46 }
47 
48 /*
49  * Run a functon simultaneously in a number of different threads.
50  *
51  * The function will be passed the index number of the thread it is running in.
52  * This function makes an attempt to synchronize the start of the threads as
53  * best as possible. It waits for all threads to be allocated and started
54  * before invoking the function.
55  */
56 template <class Function>
57 void runParallel(size_t numThreads, const Function& function) {
58  std::vector<std::thread> threads;
59  threads.reserve(numThreads);
60 
61  // Variables used to synchronize all threads to try and start them
62  // as close to the same time as possible
64  std::condition_variable readyCV;
66  std::condition_variable goCV;
67 
68  auto worker = [&](size_t threadIndex) {
69  // Signal that we are ready
70  ++(*threadsReady.lock());
71  readyCV.notify_one();
72 
73  // Wait until we are given the signal to start
74  // The purpose of this is to try and make sure all threads start
75  // as close to the same time as possible.
76  {
77  auto lockedGo = go.lock();
78  goCV.wait(lockedGo.getUniqueLock(), [&] { return *lockedGo; });
79  }
80 
81  function(threadIndex);
82  };
83 
84  // Start all of the threads
85  for (size_t threadIndex = 0; threadIndex < numThreads; ++threadIndex) {
86  threads.emplace_back([threadIndex, &worker]() { worker(threadIndex); });
87  }
88 
89  // Wait for all threads to become ready
90  {
91  auto readyLocked = threadsReady.lock();
92  readyCV.wait(readyLocked.getUniqueLock(), [&] {
93  return *readyLocked == numThreads;
94  });
95  }
96  // Now signal the threads that they can go
97  go = true;
98  goCV.notify_all();
99 
100  // Wait for all threads to finish
101  for (auto& thread : threads) {
102  thread.join();
103  }
104 }
105 
106 // testBasic() version for shared lock types
107 template <class Mutex>
108 typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type
111  const auto& constObj = obj;
112 
113  obj.wlock()->resize(1000);
114 
115  folly::Synchronized<std::vector<int>, Mutex> obj2{*obj.wlock()};
116  EXPECT_EQ(1000, obj2.rlock()->size());
117 
118  {
119  auto lockedObj = obj.wlock();
120  lockedObj->push_back(10);
121  EXPECT_EQ(1001, lockedObj->size());
122  EXPECT_EQ(10, lockedObj->back());
123  EXPECT_EQ(1000, obj2.wlock()->size());
124  EXPECT_EQ(1000, obj2.rlock()->size());
125 
126  {
127  auto unlocker = lockedObj.scopedUnlock();
128  EXPECT_EQ(1001, obj.wlock()->size());
129  }
130  }
131 
132  {
133  auto lockedObj = obj.rlock();
134  EXPECT_EQ(1001, lockedObj->size());
135  EXPECT_EQ(1001, obj.rlock()->size());
136  {
137  auto unlocker = lockedObj.scopedUnlock();
138  EXPECT_EQ(1001, obj.wlock()->size());
139  }
140  }
141 
142  obj.wlock()->front() = 2;
143 
144  {
145  // contextualLock() on a const reference should grab a shared lock
146  auto lockedObj = constObj.contextualLock();
147  EXPECT_EQ(2, lockedObj->front());
148  EXPECT_EQ(2, constObj.rlock()->front());
149  EXPECT_EQ(2, obj.rlock()->front());
150  }
151 
152  EXPECT_EQ(1001, obj.rlock()->size());
153  EXPECT_EQ(2, obj.rlock()->front());
154  EXPECT_EQ(10, obj.rlock()->back());
155  EXPECT_EQ(1000, obj2.rlock()->size());
156 }
157 
158 // testBasic() version for non-shared lock types
159 template <class Mutex>
160 typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type
163  const auto& constObj = obj;
164 
165  obj.lock()->resize(1000);
166 
167  folly::Synchronized<std::vector<int>, Mutex> obj2{*obj.lock()};
168  EXPECT_EQ(1000, obj2.lock()->size());
169 
170  {
171  auto lockedObj = obj.lock();
172  lockedObj->push_back(10);
173  EXPECT_EQ(1001, lockedObj->size());
174  EXPECT_EQ(10, lockedObj->back());
175  EXPECT_EQ(1000, obj2.lock()->size());
176 
177  {
178  auto unlocker = lockedObj.scopedUnlock();
179  EXPECT_EQ(1001, obj.lock()->size());
180  }
181  }
182  {
183  auto lockedObj = constObj.lock();
184  EXPECT_EQ(1001, lockedObj->size());
185  EXPECT_EQ(10, lockedObj->back());
186  EXPECT_EQ(1000, obj2.lock()->size());
187  }
188 
189  obj.lock()->front() = 2;
190 
191  EXPECT_EQ(1001, obj.lock()->size());
192  EXPECT_EQ(2, obj.lock()->front());
193  EXPECT_EQ(2, obj.contextualLock()->front());
194  EXPECT_EQ(10, obj.lock()->back());
195  EXPECT_EQ(1000, obj2.lock()->size());
196 }
197 
// Entry point for the basic tests: dispatches to the shared- or
// non-shared-mutex overload of testBasicImpl(), selected via SFINAE on
// folly::LockTraits<Mutex>::is_shared.
template <class Mutex>
void testBasic() {
  testBasicImpl<Mutex>();
}
202 
203 // testWithLock() version for shared lock types
204 template <class Mutex>
205 typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type
208  const auto& constObj = obj;
209 
210  // Test withWLock() and withRLock()
211  obj.withWLock([](std::vector<int>& lockedObj) {
212  lockedObj.resize(1000);
213  lockedObj.push_back(10);
214  lockedObj.push_back(11);
215  });
216  obj.withWLock([](const std::vector<int>& lockedObj) {
217  EXPECT_EQ(1002, lockedObj.size());
218  });
219  obj.withRLock([](const std::vector<int>& lockedObj) {
220  EXPECT_EQ(1002, lockedObj.size());
221  EXPECT_EQ(11, lockedObj.back());
222  });
223  constObj.withRLock([](const std::vector<int>& lockedObj) {
224  EXPECT_EQ(1002, lockedObj.size());
225  });
226 
227 #if __cpp_generic_lambdas >= 201304
228  obj.withWLock([](auto& lockedObj) { lockedObj.push_back(12); });
229  obj.withWLock(
230  [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
231  obj.withRLock([](const auto& lockedObj) {
232  EXPECT_EQ(1003, lockedObj.size());
233  EXPECT_EQ(12, lockedObj.back());
234  });
235  constObj.withRLock(
236  [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
237  obj.withWLock([](auto& lockedObj) { lockedObj.pop_back(); });
238 #endif
239 
240  // Test withWLockPtr() and withRLockPtr()
241  using SynchType = folly::Synchronized<std::vector<int>, Mutex>;
242 #if __cpp_generic_lambdas >= 201304
243  obj.withWLockPtr([](auto&& lockedObj) { lockedObj->push_back(13); });
244  obj.withRLockPtr([](auto&& lockedObj) {
245  EXPECT_EQ(1003, lockedObj->size());
246  EXPECT_EQ(13, lockedObj->back());
247  });
248  constObj.withRLockPtr([](auto&& lockedObj) {
249  EXPECT_EQ(1003, lockedObj->size());
250  EXPECT_EQ(13, lockedObj->back());
251  });
252  obj.withWLockPtr([&](auto&& lockedObj) {
253  lockedObj->push_back(14);
254  {
255  auto unlocker = lockedObj.scopedUnlock();
256  obj.wlock()->push_back(15);
257  }
258  EXPECT_EQ(15, lockedObj->back());
259  });
260 #else
261  obj.withWLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
262  lockedObj->push_back(13);
263  lockedObj->push_back(14);
264  lockedObj->push_back(15);
265  });
266 #endif
267 
268  obj.withWLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
269  lockedObj->push_back(16);
270  EXPECT_EQ(1006, lockedObj->size());
271  });
272  obj.withRLockPtr([](typename SynchType::ConstLockedPtr&& lockedObj) {
273  EXPECT_EQ(1006, lockedObj->size());
274  EXPECT_EQ(16, lockedObj->back());
275  });
276  constObj.withRLockPtr([](typename SynchType::ConstLockedPtr&& lockedObj) {
277  EXPECT_EQ(1006, lockedObj->size());
278  EXPECT_EQ(16, lockedObj->back());
279  });
280 }
281 
282 // testWithLock() version for non-shared lock types
283 template <class Mutex>
284 typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type
287 
288  // Test withLock()
289  obj.withLock([](std::vector<int>& lockedObj) {
290  lockedObj.resize(1000);
291  lockedObj.push_back(10);
292  lockedObj.push_back(11);
293  });
294  obj.withLock([](const std::vector<int>& lockedObj) {
295  EXPECT_EQ(1002, lockedObj.size());
296  });
297 
298 #if __cpp_generic_lambdas >= 201304
299  obj.withLock([](auto& lockedObj) { lockedObj.push_back(12); });
300  obj.withLock(
301  [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
302  obj.withLock([](auto& lockedObj) { lockedObj.pop_back(); });
303 #endif
304 
305  // Test withLockPtr()
306  using SynchType = folly::Synchronized<std::vector<int>, Mutex>;
307 #if __cpp_generic_lambdas >= 201304
308  obj.withLockPtr([](auto&& lockedObj) { lockedObj->push_back(13); });
309  obj.withLockPtr([](auto&& lockedObj) {
310  EXPECT_EQ(1003, lockedObj->size());
311  EXPECT_EQ(13, lockedObj->back());
312  });
313  obj.withLockPtr([&](auto&& lockedObj) {
314  lockedObj->push_back(14);
315  {
316  auto unlocker = lockedObj.scopedUnlock();
317  obj.lock()->push_back(15);
318  }
319  EXPECT_EQ(1005, lockedObj->size());
320  EXPECT_EQ(15, lockedObj->back());
321  });
322 #else
323  obj.withLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
324  lockedObj->push_back(13);
325  lockedObj->push_back(14);
326  lockedObj->push_back(15);
327  });
328 #endif
329 
330  obj.withLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
331  lockedObj->push_back(16);
332  EXPECT_EQ(1006, lockedObj->size());
333  });
334  const auto& constObj = obj;
335  constObj.withLockPtr([](typename SynchType::ConstLockedPtr&& lockedObj) {
336  EXPECT_EQ(1006, lockedObj->size());
337  EXPECT_EQ(16, lockedObj->back());
338  });
339 }
340 
341 template <class Mutex>
344  const auto& cv = value;
345 
346  {
347  auto lv = value.contextualLock();
348  EXPECT_EQ(7, *lv);
349  *lv = 5;
350  lv.unlock();
351  EXPECT_TRUE(lv.isNull());
352  EXPECT_FALSE(lv);
353 
354  auto rlv = cv.contextualLock();
355  EXPECT_EQ(5, *rlv);
356  rlv.unlock();
357  EXPECT_TRUE(rlv.isNull());
358  EXPECT_FALSE(rlv);
359 
360  auto rlv2 = cv.contextualRLock();
361  EXPECT_EQ(5, *rlv2);
362  rlv2.unlock();
363 
364  lv = value.contextualLock();
365  EXPECT_EQ(5, *lv);
366  *lv = 9;
367  }
368 
369  EXPECT_EQ(9, *value.contextualRLock());
370 }
371 
372 // testUnlock() version for shared lock types
373 template <class Mutex>
374 typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type
377  {
378  auto lv = value.wlock();
379  EXPECT_EQ(10, *lv);
380  *lv = 5;
381  lv.unlock();
382  EXPECT_FALSE(lv);
383  EXPECT_TRUE(lv.isNull());
384 
385  auto rlv = value.rlock();
386  EXPECT_EQ(5, *rlv);
387  rlv.unlock();
388  EXPECT_FALSE(rlv);
389  EXPECT_TRUE(rlv.isNull());
390 
391  auto lv2 = value.wlock();
392  EXPECT_EQ(5, *lv2);
393  *lv2 = 7;
394 
395  lv = std::move(lv2);
396  EXPECT_FALSE(lv2);
397  EXPECT_TRUE(lv2.isNull());
398  EXPECT_FALSE(lv.isNull());
399  EXPECT_EQ(7, *lv);
400  }
401 
402  testUnlockCommon<Mutex>();
403 }
404 
405 // testUnlock() version for non-shared lock types
406 template <class Mutex>
407 typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type
410  {
411  auto lv = value.lock();
412  EXPECT_EQ(10, *lv);
413  *lv = 5;
414  lv.unlock();
415  EXPECT_TRUE(lv.isNull());
416  EXPECT_FALSE(lv);
417 
418  auto lv2 = value.lock();
419  EXPECT_EQ(5, *lv2);
420  *lv2 = 6;
421  lv2.unlock();
422  EXPECT_TRUE(lv2.isNull());
423  EXPECT_FALSE(lv2);
424 
425  lv = value.lock();
426  EXPECT_EQ(6, *lv);
427  *lv = 7;
428 
429  lv2 = std::move(lv);
430  EXPECT_TRUE(lv.isNull());
431  EXPECT_FALSE(lv);
432  EXPECT_FALSE(lv2.isNull());
433  EXPECT_EQ(7, *lv2);
434  }
435 
436  testUnlockCommon<Mutex>();
437 }
438 
439 // Testing the deprecated SYNCHRONIZED and SYNCHRONIZED_CONST APIs
440 template <class Mutex>
443 
444  obj->resize(1000);
445 
446  auto obj2 = obj;
447  EXPECT_EQ(1000, obj2->size());
448 
449  SYNCHRONIZED(obj) {
450  obj.push_back(10);
451  EXPECT_EQ(1001, obj.size());
452  EXPECT_EQ(10, obj.back());
453  EXPECT_EQ(1000, obj2->size());
454  }
455 
456  SYNCHRONIZED_CONST(obj) {
457  EXPECT_EQ(1001, obj.size());
458  }
459 
460  SYNCHRONIZED(lockedObj, *&obj) {
461  lockedObj.front() = 2;
462  }
463 
464  EXPECT_EQ(1001, obj->size());
465  EXPECT_EQ(10, obj->back());
466  EXPECT_EQ(1000, obj2->size());
467 
468  EXPECT_EQ(FB_ARG_2_OR_1(1, 2), 2);
469  EXPECT_EQ(FB_ARG_2_OR_1(1), 1);
470 }
471 
472 template <class Mutex>
475  static const size_t numThreads = 100;
476  // Note: I initially tried using itersPerThread = 1000,
477  // which works fine for most lock types, but std::shared_timed_mutex
478  // appears to be extraordinarily slow. It could take around 30 seconds
479  // to run this test with 1000 iterations per thread using shared_timed_mutex.
480  static const size_t itersPerThread = 100;
481 
482  auto pushNumbers = [&](size_t threadIdx) {
483  // Test lock()
484  for (size_t n = 0; n < itersPerThread; ++n) {
485  v.contextualLock()->push_back((itersPerThread * threadIdx) + n);
487  }
488  };
489  runParallel(numThreads, pushNumbers);
490 
491  std::vector<int> result;
492  v.swap(result);
493 
494  EXPECT_EQ(numThreads * itersPerThread, result.size());
495  sort(result.begin(), result.end());
496 
497  for (size_t i = 0; i < itersPerThread * numThreads; ++i) {
498  EXPECT_EQ(i, result[i]);
499  }
500 }
501 
502 template <class Mutex>
506 
507  auto dualLockWorker = [&](size_t threadIdx) {
508  // Note: this will be less awkward with C++ 17's structured
509  // binding functionality, which will make it easier to use the returned
510  // std::tuple.
511  if (threadIdx & 1) {
512  auto ret = acquireLocked(v, m);
513  std::get<0>(ret)->push_back(threadIdx);
514  (*std::get<1>(ret))[threadIdx] = threadIdx + 1;
515  } else {
516  auto ret = acquireLocked(m, v);
517  std::get<1>(ret)->push_back(threadIdx);
518  (*std::get<0>(ret))[threadIdx] = threadIdx + 1;
519  }
520  };
521  static const size_t numThreads = 100;
522  runParallel(numThreads, dualLockWorker);
523 
524  std::vector<int> result;
525  v.swap(result);
526 
527  EXPECT_EQ(numThreads, result.size());
528  sort(result.begin(), result.end());
529 
530  for (size_t i = 0; i < numThreads; ++i) {
531  EXPECT_EQ(i, result[i]);
532  }
533 }
534 
535 template <class Mutex>
539 
540  auto dualLockWorker = [&](size_t threadIdx) {
541  const auto& cm = m;
542  if (threadIdx & 1) {
543  auto ret = acquireLocked(v, cm);
544  (void)std::get<1>(ret)->size();
545  std::get<0>(ret)->push_back(threadIdx);
546  } else {
547  auto ret = acquireLocked(cm, v);
548  (void)std::get<0>(ret)->size();
549  std::get<1>(ret)->push_back(threadIdx);
550  }
551  };
552  static const size_t numThreads = 100;
553  runParallel(numThreads, dualLockWorker);
554 
555  std::vector<int> result;
556  v.swap(result);
557 
558  EXPECT_EQ(numThreads, result.size());
559  sort(result.begin(), result.end());
560 
561  for (size_t i = 0; i < numThreads; ++i) {
562  EXPECT_EQ(i, result[i]);
563  }
564 }
565 
566 // Testing the deprecated SYNCHRONIZED_DUAL API
567 template <class Mutex>
571 
572  auto dualLockWorker = [&](size_t threadIdx) {
573  if (threadIdx & 1) {
574  SYNCHRONIZED_DUAL(lv, v, lm, m) {
575  lv.push_back(threadIdx);
576  lm[threadIdx] = threadIdx + 1;
577  }
578  } else {
579  SYNCHRONIZED_DUAL(lm, m, lv, v) {
580  lv.push_back(threadIdx);
581  lm[threadIdx] = threadIdx + 1;
582  }
583  }
584  };
585  static const size_t numThreads = 100;
586  runParallel(numThreads, dualLockWorker);
587 
588  std::vector<int> result;
589  v.swap(result);
590 
591  EXPECT_EQ(numThreads, result.size());
592  sort(result.begin(), result.end());
593 
594  for (size_t i = 0; i < numThreads; ++i) {
595  EXPECT_EQ(i, result[i]);
596  }
597 }
598 
599 // Testing the deprecated SYNCHRONIZED_DUAL API
600 template <class Mutex>
604 
605  auto dualLockWorker = [&](size_t threadIdx) {
606  const auto& cm = m;
607  if (threadIdx & 1) {
608  SYNCHRONIZED_DUAL(lv, v, lm, cm) {
609  (void)lm.size();
610  lv.push_back(threadIdx);
611  }
612  } else {
613  SYNCHRONIZED_DUAL(lm, cm, lv, v) {
614  (void)lm.size();
615  lv.push_back(threadIdx);
616  }
617  }
618  };
619  static const size_t numThreads = 100;
620  runParallel(numThreads, dualLockWorker);
621 
622  std::vector<int> result;
623  v.swap(result);
624 
625  EXPECT_EQ(numThreads, result.size());
626  sort(result.begin(), result.end());
627 
628  for (size_t i = 0; i < numThreads; ++i) {
629  EXPECT_EQ(i, result[i]);
630  }
631 }
632 
633 template <class Mutex>
634 void testTimed() {
637 
638  auto worker = [&](size_t threadIdx) {
639  // Test directly using operator-> on the lock result
640  v.contextualLock()->push_back(2 * threadIdx);
641 
642  // Test using lock with a timeout
643  for (;;) {
644  auto lv = v.contextualLock(std::chrono::milliseconds(5));
645  if (!lv) {
646  ++(*numTimeouts.contextualLock());
647  continue;
648  }
649 
650  // Sleep for a random time to ensure we trigger timeouts
651  // in other threads
652  randomSleep(std::chrono::milliseconds(5), std::chrono::milliseconds(15));
653  lv->push_back(2 * threadIdx + 1);
654  break;
655  }
656  };
657 
658  static const size_t numThreads = 100;
659  runParallel(numThreads, worker);
660 
661  std::vector<int> result;
662  v.swap(result);
663 
664  EXPECT_EQ(2 * numThreads, result.size());
665  sort(result.begin(), result.end());
666 
667  for (size_t i = 0; i < 2 * numThreads; ++i) {
668  EXPECT_EQ(i, result[i]);
669  }
670  // We generally expect a large number of number timeouts here.
671  // I'm not adding a check for it since it's theoretically possible that
672  // we might get 0 timeouts depending on the CPU scheduling if our threads
673  // don't get to run very often.
674  LOG(INFO) << "testTimed: " << *numTimeouts.contextualRLock() << " timeouts";
675 
676  // Make sure we can lock with various timeout duration units
677  {
678  auto lv = v.contextualLock(std::chrono::milliseconds(5));
679  EXPECT_TRUE(bool(lv));
680  EXPECT_FALSE(lv.isNull());
681  auto lv2 = v.contextualLock(std::chrono::microseconds(5));
682  // We may or may not acquire lv2 successfully, depending on whether
683  // or not this is a recursive mutex type.
684  }
685  {
686  auto lv = v.contextualLock(std::chrono::seconds(1));
687  EXPECT_TRUE(bool(lv));
688  }
689 }
690 
691 template <class Mutex>
695 
696  auto worker = [&](size_t threadIdx) {
697  // Test directly using operator-> on the lock result
698  v.wlock()->push_back(threadIdx);
699 
700  // Test lock() with a timeout
701  for (;;) {
702  auto lv = v.rlock(std::chrono::milliseconds(10));
703  if (!lv) {
704  ++(*numTimeouts.contextualLock());
705  continue;
706  }
707 
708  // Sleep while holding the lock.
709  //
710  // This will block other threads from acquiring the write lock to add
711  // their thread index to v, but it won't block threads that have entered
712  // the for loop and are trying to acquire a read lock.
713  //
714  // For lock types that give preference to readers rather than writers,
715  // this will tend to serialize all threads on the wlock() above.
716  randomSleep(std::chrono::milliseconds(5), std::chrono::milliseconds(15));
717  auto found = std::find(lv->begin(), lv->end(), threadIdx);
718  CHECK(found != lv->end());
719  break;
720  }
721  };
722 
723  static const size_t numThreads = 100;
724  runParallel(numThreads, worker);
725 
726  std::vector<int> result;
727  v.swap(result);
728 
729  EXPECT_EQ(numThreads, result.size());
730  sort(result.begin(), result.end());
731 
732  for (size_t i = 0; i < numThreads; ++i) {
733  EXPECT_EQ(i, result[i]);
734  }
735  // We generally expect a small number of timeouts here.
736  // For locks that give readers preference over writers this should usually
737  // be 0. With locks that give writers preference we do see a small-ish
738  // number of read timeouts.
739  LOG(INFO) << "testTimedShared: " << *numTimeouts.contextualRLock()
740  << " timeouts";
741 }
742 
743 // Testing the deprecated TIMED_SYNCHRONIZED API
744 template <class Mutex>
748 
749  auto worker = [&](size_t threadIdx) {
750  // Test operator->
751  v->push_back(2 * threadIdx);
752 
753  // Aaand test the TIMED_SYNCHRONIZED macro
754  for (;;) {
755  TIMED_SYNCHRONIZED(5, lv, v) {
756  if (lv) {
757  // Sleep for a random time to ensure we trigger timeouts
758  // in other threads
759  randomSleep(
760  std::chrono::milliseconds(5), std::chrono::milliseconds(15));
761  lv->push_back(2 * threadIdx + 1);
762  return;
763  }
764 
765  ++(*numTimeouts.contextualLock());
766  }
767  }
768  };
769 
770  static const size_t numThreads = 100;
771  runParallel(numThreads, worker);
772 
773  std::vector<int> result;
774  v.swap(result);
775 
776  EXPECT_EQ(2 * numThreads, result.size());
777  sort(result.begin(), result.end());
778 
779  for (size_t i = 0; i < 2 * numThreads; ++i) {
780  EXPECT_EQ(i, result[i]);
781  }
782  // We generally expect a large number of number timeouts here.
783  // I'm not adding a check for it since it's theoretically possible that
784  // we might get 0 timeouts depending on the CPU scheduling if our threads
785  // don't get to run very often.
786  LOG(INFO) << "testTimedSynchronized: " << *numTimeouts.contextualRLock()
787  << " timeouts";
788 }
789 
790 // Testing the deprecated TIMED_SYNCHRONIZED_CONST API
791 template <class Mutex>
795 
796  auto worker = [&](size_t threadIdx) {
797  // Test operator->
798  v->push_back(threadIdx);
799 
800  // Test TIMED_SYNCHRONIZED_CONST
801  for (;;) {
802  TIMED_SYNCHRONIZED_CONST(10, lv, v) {
803  if (lv) {
804  // Sleep while holding the lock.
805  //
806  // This will block other threads from acquiring the write lock to add
807  // their thread index to v, but it won't block threads that have
808  // entered the for loop and are trying to acquire a read lock.
809  //
810  // For lock types that give preference to readers rather than writers,
811  // this will tend to serialize all threads on the wlock() above.
812  randomSleep(
813  std::chrono::milliseconds(5), std::chrono::milliseconds(15));
814  auto found = std::find(lv->begin(), lv->end(), threadIdx);
815  CHECK(found != lv->end());
816  return;
817  } else {
818  ++(*numTimeouts.contextualLock());
819  }
820  }
821  }
822  };
823 
824  static const size_t numThreads = 100;
825  runParallel(numThreads, worker);
826 
827  std::vector<int> result;
828  v.swap(result);
829 
830  EXPECT_EQ(numThreads, result.size());
831  sort(result.begin(), result.end());
832 
833  for (size_t i = 0; i < numThreads; ++i) {
834  EXPECT_EQ(i, result[i]);
835  }
836  // We generally expect a small number of timeouts here.
837  // For locks that give readers preference over writers this should usually
838  // be 0. With locks that give writers preference we do see a small-ish
839  // number of read timeouts.
840  LOG(INFO) << "testTimedSynchronizedWithConst: "
841  << *numTimeouts.contextualRLock() << " timeouts";
842 }
843 
844 template <class Mutex>
846  std::vector<int> input = {1, 2, 3};
848 
849  std::vector<int> result;
850 
851  v.copy(&result);
852  EXPECT_EQ(input, result);
853 
854  result = v.copy();
855  EXPECT_EQ(input, result);
856 }
857 
// A type that can only be constructed in place: all copy and move special
// members are deleted.  Used by testInPlaceConstruction() to verify
// folly::Synchronized's folly::in_place constructor.
// (Struct header and deleted members reconstructed -- lost in the garbled
// source; the deleted copy-assignment survives in the extraction residue.)
struct NotCopiableNotMovable {
  NotCopiableNotMovable(int, const char*) {}
  NotCopiableNotMovable(const NotCopiableNotMovable&) = delete;
  NotCopiableNotMovable& operator=(const NotCopiableNotMovable&) = delete;
  NotCopiableNotMovable(NotCopiableNotMovable&&) = delete;
  NotCopiableNotMovable& operator=(NotCopiableNotMovable&&) = delete;
};
865 
866 template <class Mutex>
868  // This won't compile without in_place
870 }
871 
872 template <class Mutex>
873 void testExchange() {
874  std::vector<int> input = {1, 2, 3};
876  std::vector<int> next = {4, 5, 6};
877  auto prev = v.exchange(std::move(next));
878  EXPECT_EQ((std::vector<int>{{1, 2, 3}}), prev);
879  EXPECT_EQ((std::vector<int>{{4, 5, 6}}), v.copy());
880 }
881 } // namespace sync_tests
882 } // namespace folly
#define SYNCHRONIZED_CONST(...)
auto v
void randomSleep(std::chrono::milliseconds min, std::chrono::milliseconds max)
#define FB_ARG_2_OR_1(...)
Definition: Preprocessor.h:54
std::enable_if< folly::LockTraits< Mutex >::is_shared >::type testBasicImpl()
LogLevel max
Definition: LogLevel.cpp:31
void swap(Synchronized &rhs)
Definition: Synchronized.h:684
#define SYNCHRONIZED_DUAL(n1, e1, n2, e2)
PskType type
static const int seed
#define EXPECT_EQ(val1, val2)
Definition: gtest.h:1922
constexpr detail::Map< Move > move
Definition: Base-inl.h:2567
LockedPtr contextualLock()
Definition: Synchronized.h:601
NotCopiableNotMovable & operator=(const NotCopiableNotMovable &)=delete
#define Mutex
ConstLockedPtr contextualRLock() const
Definition: Synchronized.h:623
—— Concurrent Priority Queue Implementation ——
Definition: AtomicBitSet.h:29
in_place_tag in_place(in_place_tag={})
Definition: Utility.h:235
void runParallel(size_t numThreads, const Function &function)
std::enable_if< folly::LockTraits< Mutex >::is_shared >::type testWithLock()
std::vector< std::thread::id > threads
void BENCHFUN() push_back(size_t iters, size_t arg)
auto rng
Definition: CollectTest.cpp:31
std::tuple< detail::LockedPtrType< Sync1 >, detail::LockedPtrType< Sync2 > > acquireLocked(Sync1 &l1, Sync2 &l2)
LogLevel min
Definition: LogLevel.cpp:30
constexpr Range< Iter > range(Iter first, Iter last)
Definition: Range.h:1114
static map< string, int > m
char a
#define TIMED_SYNCHRONIZED(timeout,...)
#define EXPECT_TRUE(condition)
Definition: gtest.h:1859
#define SYNCHRONIZED(...)
uint64_t value(const typename LockFreeRingBuffer< T, Atom >::Cursor &rbcursor)
#define EXPECT_FALSE(condition)
Definition: gtest.h:1862
uint32_t randomNumberSeed()
Definition: Random.h:367
std::enable_if< folly::LockTraits< Mutex >::is_shared >::type testUnlock()
void copy(T *target) const
Definition: Synchronized.h:721
#define TIMED_SYNCHRONIZED_CONST(timeout,...)
def next(obj)
Definition: ast.py:58