FunctionSchedulerTest.cpp
/*
 * Copyright 2015-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <atomic>
#include <cassert>
#include <random>
#include <thread>

#include <boost/thread.hpp>

#include <folly/Random.h>
#include <folly/experimental/FunctionScheduler.h>
#include <folly/portability/GTest.h>
#include <folly/portability/Unistd.h>
#include <folly/synchronization/Baton.h>

#if defined(__linux__)
#include <dlfcn.h>
#endif

using namespace folly;
using std::atomic;
using std::chrono::duration_cast;
using std::chrono::microseconds;
using std::chrono::milliseconds;
using std::chrono::steady_clock;

namespace {

/*
 * Helper functions for controlling how long this test takes.
 *
 * Using larger intervals here will make the tests less flaky when run on
 * heavily loaded systems. However, this will also make the tests take longer
 * to run.
 */
static const auto timeFactor = std::chrono::milliseconds(400);
std::chrono::milliseconds testInterval(int n) {
  return n * timeFactor;
}
int getTicksWithinRange(int n, int min, int max) {
  assert(min <= max);
  n = std::max(min, n);
  n = std::min(max, n);
  return n;
}
void delay(float n) {
  microseconds usec(static_cast<microseconds::rep>(
      duration_cast<microseconds>(timeFactor).count() * n));
  usleep(usec.count());
}

} // namespace

TEST(FunctionScheduler, StartAndShutdown) {
  FunctionScheduler fs;
  EXPECT_TRUE(fs.start());
  EXPECT_FALSE(fs.start());
  EXPECT_TRUE(fs.shutdown());
  EXPECT_FALSE(fs.shutdown());
  // start again
  EXPECT_TRUE(fs.start());
  EXPECT_FALSE(fs.start());
  EXPECT_TRUE(fs.shutdown());
  EXPECT_FALSE(fs.shutdown());
}

TEST(FunctionScheduler, SimpleAdd) {
  atomic<int> total{0};
  FunctionScheduler fs;
  fs.addFunction([&] { total += 2; }, testInterval(2), "add2");
  fs.start();
  delay(1);
  EXPECT_EQ(2, total);
  fs.shutdown();
  delay(2);
  EXPECT_EQ(2, total);
}

TEST(FunctionScheduler, AddCancel) {
  atomic<int> total{0};
  FunctionScheduler fs;
  fs.addFunction([&] { total += 2; }, testInterval(2), "add2");
  fs.start();
  delay(1);
  EXPECT_EQ(2, total);
  delay(2);
  EXPECT_EQ(4, total);
  EXPECT_TRUE(fs.cancelFunction("add2"));
  EXPECT_FALSE(fs.cancelFunction("NO SUCH FUNC"));
  delay(2);
  EXPECT_EQ(4, total);
  fs.addFunction([&] { total += 1; }, testInterval(2), "add2");
  delay(1);
  EXPECT_EQ(5, total);
  delay(2);
  EXPECT_EQ(6, total);
  fs.shutdown();
}

TEST(FunctionScheduler, AddCancel2) {
  atomic<int> total{0};
  FunctionScheduler fs;

  // Test adds and cancels while the scheduler is stopped
  EXPECT_FALSE(fs.cancelFunction("add2"));
  fs.addFunction([&] { total += 1; }, testInterval(2), "add2");
  EXPECT_TRUE(fs.cancelFunction("add2"));
  EXPECT_FALSE(fs.cancelFunction("add2"));
  fs.addFunction([&] { total += 2; }, testInterval(2), "add2");
  fs.addFunction([&] { total += 3; }, testInterval(3), "add3");

  EXPECT_EQ(0, total);
  fs.start();
  delay(1);
  EXPECT_EQ(5, total);

  // Cancel add2 while the scheduler is running
  EXPECT_TRUE(fs.cancelFunction("add2"));
  EXPECT_FALSE(fs.cancelFunction("add2"));
  EXPECT_FALSE(fs.cancelFunction("bogus"));

  delay(3);
  EXPECT_EQ(8, total);
  EXPECT_TRUE(fs.cancelFunction("add3"));

  // Test a function that cancels itself
  atomic<int> selfCancelCount{0};
  fs.addFunction(
      [&] {
        ++selfCancelCount;
        if (selfCancelCount > 2) {
          fs.cancelFunction("selfCancel");
        }
      },
      testInterval(1),
      "selfCancel",
      testInterval(1));
  delay(4);
  EXPECT_EQ(3, selfCancelCount);
  EXPECT_FALSE(fs.cancelFunction("selfCancel"));

  // Test a function that schedules another function
  atomic<int> adderCount{0};
  int fn2Count = 0;
  auto fn2 = [&] { ++fn2Count; };
  auto fnAdder = [&] {
    ++adderCount;
    if (adderCount == 2) {
      fs.addFunction(fn2, testInterval(3), "fn2", testInterval(2));
    }
  };
  fs.addFunction(fnAdder, testInterval(4), "adder");
  // t0: adder fires
  delay(1); // t1
  EXPECT_EQ(1, adderCount);
  EXPECT_EQ(0, fn2Count);
  // t4: adder fires, schedules fn2
  delay(4); // t5
  EXPECT_EQ(2, adderCount);
  EXPECT_EQ(0, fn2Count);
  // t6: fn2 fires
  delay(2); // t7
  EXPECT_EQ(2, adderCount);
  EXPECT_EQ(1, fn2Count);
  // t8: adder fires
  // t9: fn2 fires
  delay(3); // t10
  EXPECT_EQ(3, adderCount);
  EXPECT_EQ(2, fn2Count);
  EXPECT_TRUE(fs.cancelFunction("fn2"));
  EXPECT_TRUE(fs.cancelFunction("adder"));
  delay(5); // t15
  EXPECT_EQ(3, adderCount);
  EXPECT_EQ(2, fn2Count);

  EXPECT_EQ(8, total);
  EXPECT_EQ(3, selfCancelCount);
}

TEST(FunctionScheduler, AddMultiple) {
  atomic<int> total{0};
  FunctionScheduler fs;
  fs.addFunction([&] { total += 2; }, testInterval(2), "add2");
  fs.addFunction([&] { total += 3; }, testInterval(3), "add3");
  EXPECT_THROW(
      fs.addFunction([&] { total += 2; }, testInterval(2), "add2"),
      std::invalid_argument); // function name already exists

  fs.start();
  delay(1);
  EXPECT_EQ(5, total);
  delay(4);
  EXPECT_EQ(12, total);
  EXPECT_TRUE(fs.cancelFunction("add2"));
  delay(2);
  EXPECT_EQ(15, total);
  fs.shutdown();
  delay(3);
  EXPECT_EQ(15, total);
  fs.shutdown();
}

TEST(FunctionScheduler, AddAfterStart) {
  atomic<int> total{0};
  FunctionScheduler fs;
  fs.addFunction([&] { total += 2; }, testInterval(2), "add2");
  fs.addFunction([&] { total += 3; }, testInterval(2), "add3");
  fs.start();
  delay(3);
  EXPECT_EQ(10, total);
  fs.addFunction([&] { total += 2; }, testInterval(3), "add22");
  delay(2);
  EXPECT_EQ(17, total);
}

TEST(FunctionScheduler, ShutdownStart) {
  atomic<int> total{0};
  FunctionScheduler fs;
  fs.addFunction([&] { total += 2; }, testInterval(2), "add2");
  fs.start();
  delay(1);
  fs.shutdown();
  fs.start();
  delay(1);
  EXPECT_EQ(4, total);
  EXPECT_FALSE(fs.cancelFunction("add3")); // non-existent function
  delay(2);
  EXPECT_EQ(6, total);
}

TEST(FunctionScheduler, ResetFunc) {
  atomic<int> total{0};
  FunctionScheduler fs;
  fs.addFunction([&] { total += 2; }, testInterval(3), "add2");
  fs.addFunction([&] { total += 3; }, testInterval(3), "add3");
  fs.start();
  delay(1);
  EXPECT_EQ(5, total);
  EXPECT_FALSE(fs.resetFunctionTimer("NON_EXISTING"));
  EXPECT_TRUE(fs.resetFunctionTimer("add2"));
  delay(1);
  // t2: after the reset, add2 should have been invoked immediately
  EXPECT_EQ(7, total);
  delay(1.5);
  // t3.5: add3 should have been invoked. add2 should not have been.
  EXPECT_EQ(10, total);
  delay(1);
  // t4.5: add2 should have been invoked once more (it was reset at t1)
  EXPECT_EQ(12, total);
}

TEST(FunctionScheduler, ResetFunc2) {
  atomic<int> total{0};
  FunctionScheduler fs;
  fs.addFunctionOnce([&] { total += 2; }, "add2", testInterval(1));
  fs.addFunctionOnce([&] { total += 3; }, "add3", testInterval(1));
  fs.start();
  delay(2);
  fs.addFunctionOnce([&] { total += 3; }, "add4", testInterval(2));
  EXPECT_TRUE(fs.resetFunctionTimer("add4"));
  fs.addFunctionOnce([&] { total += 3; }, "add6", testInterval(2));
  delay(1);
  EXPECT_TRUE(fs.resetFunctionTimer("add4"));
  delay(3);
  EXPECT_FALSE(fs.resetFunctionTimer("add3"));
  fs.addFunctionOnce([&] { total += 3; }, "add4", testInterval(1));
}

TEST(FunctionScheduler, ResetFuncWhileRunning) {
  struct State {
    boost::barrier barrier_a{2};
    boost::barrier barrier_b{2};
    boost::barrier barrier_c{2};
    boost::barrier barrier_d{2};
    bool set = false;
    size_t count = 0;
  };

  State state; // held by ref
  auto mv = std::make_shared<size_t>(); // gets moved

  FunctionScheduler fs;
  fs.addFunction(
      [&, mv /* ref + shared_ptr fit in in-situ storage */] {
        if (!state.set) { // first invocation
          state.barrier_a.wait();
          // ensure that resetFunctionTimer is called in this critical section
          state.barrier_b.wait();
          ++state.count;
          EXPECT_TRUE(bool(mv)) << "bug repro: mv was moved-out";
          state.barrier_c.wait();
          // main thread checks count here
          state.barrier_d.wait();
        } else { // subsequent invocations
          ++state.count;
        }
      },
      testInterval(3),
      "nada");
  fs.start();

  state.barrier_a.wait();
  state.set = true;
  fs.resetFunctionTimer("nada");
  EXPECT_EQ(0, state.count) << "sanity check";
  state.barrier_b.wait();
  // fn thread increments count and checks mv here
  state.barrier_c.wait();
  EXPECT_EQ(1, state.count) << "sanity check";
  state.barrier_d.wait();
  delay(1);
  EXPECT_EQ(2, state.count) << "sanity check";
}

TEST(FunctionScheduler, AddInvalid) {
  atomic<int> total{0};
  FunctionScheduler fs;
  // interval may not be negative
  EXPECT_THROW(
      fs.addFunction([&] { total += 2; }, testInterval(-1), "add2"),
      std::invalid_argument);

  EXPECT_FALSE(fs.cancelFunction("addNoFunc"));
}

TEST(FunctionScheduler, NoFunctions) {
  FunctionScheduler fs;
  EXPECT_TRUE(fs.start());
  fs.shutdown();
  FunctionScheduler fs2;
  fs2.shutdown();
}

TEST(FunctionScheduler, AddWhileRunning) {
  atomic<int> total{0};
  FunctionScheduler fs;
  fs.start();
  delay(1);
  fs.addFunction([&] { total += 2; }, testInterval(2), "add2");
  // The function should be invoked nearly immediately when we add it
  // and the FunctionScheduler is already running
  delay(0.5);
  auto t = total.load();
  EXPECT_EQ(2, t);
  delay(2);
  t = total.load();
  EXPECT_EQ(4, t);
}

TEST(FunctionScheduler, NoShutdown) {
  atomic<int> total{0};
  {
    FunctionScheduler fs;
    fs.addFunction([&] { total += 2; }, testInterval(1), "add2");
    fs.start();
    delay(0.5);
    EXPECT_EQ(2, total);
  }
  // Destroyed the FunctionScheduler without calling shutdown.
  // Everything should have been cleaned up, and the function will no longer
  // get called.
  delay(2);
  EXPECT_EQ(2, total);
}

TEST(FunctionScheduler, StartDelay) {
  atomic<int> total{0};
  FunctionScheduler fs;
  fs.addFunction([&] { total += 2; }, testInterval(2), "add2", testInterval(2));
  fs.addFunction([&] { total += 3; }, testInterval(3), "add3", testInterval(2));
  EXPECT_THROW(
      fs.addFunction(
          [&] { total += 2; }, testInterval(3), "addX", testInterval(-1)),
      std::invalid_argument);
  fs.start();
  delay(1); // t1
  EXPECT_EQ(0, total);
  // t2 : add2 total=2
  // t2 : add3 total=5
  delay(2); // t3
  EXPECT_EQ(5, total);
  // t4 : add2: total=7
  // t5 : add3: total=10
  // t6 : add2: total=12
  delay(4); // t7
  EXPECT_EQ(12, total);
  fs.cancelFunction("add2");
  // t8 : add3: total=15
  delay(2); // t9
  EXPECT_EQ(15, total);
  fs.shutdown();
  delay(3);
  EXPECT_EQ(15, total);
  fs.shutdown();
}

TEST(FunctionScheduler, NoSteadyCatchup) {
  std::atomic<int> ticks(0);
  FunctionScheduler fs;
  // fs.setSteady(false); is the default
  fs.addFunction(
      [&ticks] {
        if (++ticks == 2) {
          std::this_thread::sleep_for(std::chrono::milliseconds(200));
        }
      },
      milliseconds(5));
  fs.start();
  std::this_thread::sleep_for(std::chrono::milliseconds(500));

  // Without steady catch-up we tick once during the 200ms sleep, then about
  // 300ms / 5ms = 60 more times in the remaining window.
  EXPECT_LE(ticks.load(), 61);
}

TEST(FunctionScheduler, SteadyCatchup) {
  std::atomic<int> ticks(0);
  FunctionScheduler fs;
  fs.setSteady(true);
  fs.addFunction(
      [&ticks] {
        if (++ticks == 2) {
          std::this_thread::sleep_for(std::chrono::milliseconds(200));
        }
      },
      milliseconds(5));
  fs.start();

  std::this_thread::sleep_for(std::chrono::milliseconds(500));

  // Tick every 5ms. Although tick 2 is slow, later ticks should be fast
  // enough to catch back up to the schedule.
  EXPECT_NEAR(100, ticks.load(), 10);
}

TEST(FunctionScheduler, UniformDistribution) {
  atomic<int> total{0};
  const int kTicks = 2;
  std::chrono::milliseconds minInterval =
      testInterval(kTicks) - (timeFactor / 5);
  std::chrono::milliseconds maxInterval =
      testInterval(kTicks) + (timeFactor / 5);
  FunctionScheduler fs;
  fs.addFunctionUniformDistribution(
      [&] { total += 2; },
      minInterval,
      maxInterval,
      "UniformDistribution",
      std::chrono::milliseconds(0));
  fs.start();
  delay(1);
  EXPECT_EQ(2, total);
  delay(kTicks);
  EXPECT_EQ(4, total);
  delay(kTicks);
  EXPECT_EQ(6, total);
  fs.shutdown();
  delay(2);
  EXPECT_EQ(6, total);
}

TEST(FunctionScheduler, ConsistentDelay) {
  std::atomic<int> ticks(0);
  FunctionScheduler fs;

  std::atomic<long long> epoch(0);
  epoch = duration_cast<milliseconds>(steady_clock::now().time_since_epoch())
              .count();

  // We should have runs at t = 0, 600, 800, 1200, or 4 total.
  // If at a constant interval, it would be t = 0, 600, 1000, or 3 total.
  fs.addFunctionConsistentDelay(
      [&ticks, &epoch] {
        auto now =
            duration_cast<milliseconds>(steady_clock::now().time_since_epoch())
                .count();
        int t = ++ticks;
        if (t != 2) {
          // Sensitive to delays above 100ms.
          EXPECT_NEAR((now - epoch) - (t - 1) * 400, 0, 100);
        }
        if (t == 1) {
          /* sleep override */
          std::this_thread::sleep_for(std::chrono::milliseconds(600));
        }
      },
      milliseconds(400),
      "ConsistentDelay");

  fs.start();

  /* sleep override */
  std::this_thread::sleep_for(std::chrono::milliseconds(1300));
  EXPECT_EQ(ticks.load(), 4);
}

TEST(FunctionScheduler, ExponentialBackoff) {
  atomic<int> total{0};
  atomic<int> expectedInterval{0};
  atomic<int> nextInterval{2};
  FunctionScheduler fs;
  fs.addFunctionGenericDistribution(
      [&] { total += 2; },
      [&expectedInterval, &nextInterval]() mutable {
        auto interval = nextInterval.load();
        expectedInterval = interval;
        nextInterval = interval * interval;
        return testInterval(interval);
      },
      "ExponentialBackoff",
      "2^n * 100ms",
      std::chrono::milliseconds(0));
  fs.start();
  delay(1);
  EXPECT_EQ(2, total);
  delay(expectedInterval);
  EXPECT_EQ(4, total);
  delay(expectedInterval);
  EXPECT_EQ(6, total);
  fs.shutdown();
  delay(2);
  EXPECT_EQ(6, total);
}

TEST(FunctionScheduler, GammaIntervalDistribution) {
  atomic<int> total{0};
  atomic<int> expectedInterval{0};
  FunctionScheduler fs;
  std::default_random_engine generator(folly::Random::rand32());
  // The alpha and beta arguments are selected, somewhat arbitrarily, to be 2.0.
  // These values do not matter much in this test, as we are not testing the
  // std::gamma_distribution itself.
  std::gamma_distribution<double> gamma(2.0, 2.0);
  fs.addFunctionGenericDistribution(
      [&] { total += 2; },
      [&expectedInterval, generator, gamma]() mutable {
        expectedInterval =
            getTicksWithinRange(static_cast<int>(gamma(generator)), 2, 10);
        return testInterval(expectedInterval);
      },
      "GammaDistribution",
      "gamma(2.0,2.0)*100ms",
      std::chrono::milliseconds(0));
  fs.start();
  delay(1);
  EXPECT_EQ(2, total);
  delay(expectedInterval);
  EXPECT_EQ(4, total);
  delay(expectedInterval);
  EXPECT_EQ(6, total);
  fs.shutdown();
  delay(2);
  EXPECT_EQ(6, total);
}

TEST(FunctionScheduler, AddWithRunOnce) {
  atomic<int> total{0};
  FunctionScheduler fs;
  fs.addFunctionOnce([&] { total += 2; }, "add2");
  fs.start();
  delay(1);
  EXPECT_EQ(2, total);
  delay(2);
  EXPECT_EQ(2, total);

  fs.addFunctionOnce([&] { total += 2; }, "add2");
  delay(1);
  EXPECT_EQ(4, total);
  delay(2);
  EXPECT_EQ(4, total);

  fs.shutdown();
}

TEST(FunctionScheduler, cancelFunctionAndWait) {
  atomic<int> total{0};
  FunctionScheduler fs;
  fs.addFunction(
      [&] {
        delay(5);
        total += 2;
      },
      testInterval(100),
      "add2");

  fs.start();
  delay(1);
  EXPECT_EQ(0, total); // add2 is still sleeping

  fs.cancelFunctionAndWait("add2");
  EXPECT_EQ(2, total); // add2 should have completed

  EXPECT_FALSE(fs.cancelFunction("add2")); // add2 has been canceled
  fs.shutdown();
}

#if defined(__linux__)
namespace {
// Helper whose existence makes the pthread_create() stub below fail, so the
// StartThrows test can exercise FunctionScheduler's error handling when its
// worker thread cannot be created.
class PThreadCreateFailure {
 public:
  PThreadCreateFailure() {
    ++forceFailure_;
  }
  ~PThreadCreateFailure() {
    --forceFailure_;
  }

  static bool shouldFail() {
    return forceFailure_ > 0;
  }

 private:
  static std::atomic<int> forceFailure_;
};

std::atomic<int> PThreadCreateFailure::forceFailure_{0};
} // namespace

// Replace the system pthread_create() function with our own stub, so we can
// trigger failures in the StartThrows() test.
extern "C" int pthread_create(
    pthread_t* thread,
    const pthread_attr_t* attr,
    void* (*start_routine)(void*),
    void* arg) {
  static const auto realFunction = reinterpret_cast<decltype(&pthread_create)>(
      dlsym(RTLD_NEXT, "pthread_create"));
  // For sanity, make sure we didn't find ourselves,
  // since that would cause infinite recursion.
  CHECK_NE(realFunction, pthread_create);

  if (PThreadCreateFailure::shouldFail()) {
    errno = EINVAL;
    return -1;
  }
  return realFunction(thread, attr, start_routine, arg);
}

TEST(FunctionScheduler, StartThrows) {
  FunctionScheduler fs;
  PThreadCreateFailure fail;
  EXPECT_ANY_THROW(fs.start());
  EXPECT_NO_THROW(fs.shutdown());
}
#endif

TEST(FunctionScheduler, cancelAllFunctionsAndWait) {
  atomic<int> total{0};
  FunctionScheduler fs;

  fs.addFunction(
      [&] {
        delay(5);
        total += 2;
      },
      testInterval(100),
      "add2");

  fs.start();
  delay(1);
  EXPECT_EQ(0, total); // add2 is still sleeping

  fs.cancelAllFunctionsAndWait();
  EXPECT_EQ(2, total);

  EXPECT_FALSE(fs.cancelFunction("add2")); // add2 has been canceled
  fs.shutdown();
}

TEST(FunctionScheduler, CancelAndWaitOnRunningFunc) {
  folly::Baton<> baton;
  std::thread th([&baton]() {
    FunctionScheduler fs;
    fs.addFunction([] { delay(10); }, testInterval(2), "func");
    fs.start();
    delay(1);
    fs.cancelFunctionAndWait("func");
    baton.post();
  });

  ASSERT_TRUE(baton.try_wait_for(testInterval(15)));
  th.join();
}

TEST(FunctionScheduler, CancelAllAndWaitWithRunningFunc) {
  folly::Baton<> baton;
  std::thread th([&baton]() {
    FunctionScheduler fs;
    fs.addFunction([] { delay(10); }, testInterval(2), "func");
    fs.start();
    delay(1);
    fs.cancelAllFunctionsAndWait();
    baton.post();
  });

  ASSERT_TRUE(baton.try_wait_for(testInterval(15)));
  th.join();
}

TEST(FunctionScheduler, CancelAllAndWaitWithOneRunningAndOneWaiting) {
  folly::Baton<> baton;
  std::thread th([&baton]() {
    std::atomic<int> nExecuted(0);
    FunctionScheduler fs;
    fs.addFunction(
        [&nExecuted] {
          nExecuted++;
          delay(10);
        },
        testInterval(2),
        "func0");
    fs.addFunction(
        [&nExecuted] {
          nExecuted++;
          delay(10);
        },
        testInterval(2),
        "func1",
        testInterval(5));
    fs.start();
    delay(1);
    fs.cancelAllFunctionsAndWait();
    EXPECT_EQ(nExecuted, 1);
    baton.post();
  });

  ASSERT_TRUE(baton.try_wait_for(testInterval(15)));
  th.join();
}

TEST(FunctionScheduler, ConcurrentCancelFunctionAndWait) {
  FunctionScheduler fs;
  fs.addFunction([] { delay(10); }, testInterval(2), "func");

  fs.start();
  delay(1);
  std::thread th1([&fs] { EXPECT_TRUE(fs.cancelFunctionAndWait("func")); });
  delay(1);
  std::thread th2([&fs] { EXPECT_FALSE(fs.cancelFunctionAndWait("func")); });
  th1.join();
  th2.join();
}