using namespace folly;
std::vector<std::thread> threads{FLAGS_threads};
for (auto& t : threads) {
  t = std::thread([&]() {
    for (auto i = 0u; i < iters; i++) {
      // ...
    }
  });
}
// ...
for (auto& t : threads) {
  t.join();
}
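The excerpt above survives only in fragments: a vector of FLAGS_threads workers, each looping iters times, then a join loop. A minimal, self-contained sketch of that pattern follows, assuming a futex wake with no waiters present; the benchmark name and the exact futexWake() arguments are assumptions, while Futex, futexWake(), and DEFINE_uint64(threads, ...) come from the reference list at the end of this file.

#include <thread>
#include <vector>

#include <folly/Benchmark.h>
#include <folly/detail/Futex.h>
#include <folly/portability/GFlags.h>

DEFINE_uint64(threads, 32, "Number of threads for benchmark");

// Sketch: every worker calls futexWake() on a futex nobody waits on,
// so the benchmark measures the cost of an uncontended wake.
BENCHMARK(FutexNoWaitersWakeSketch, iters) {
  folly::detail::Futex<> fu{0};
  std::vector<std::thread> threads{FLAGS_threads};
  for (auto& t : threads) {
    t = std::thread([&]() {
      for (auto i = 0u; i < iters; i++) {
        folly::detail::futexWake(&fu, 1); // no waiter is ever parked on fu
      }
    });
  }
  for (auto& t : threads) {
    t.join();
  }
}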
std::vector<std::thread> threads{FLAGS_threads};
for (auto& t : threads) {
  t = std::thread([&]() {
    for (auto i = 0u; i < iters; i++) {
      // ...
    }
  });
}
// ...
for (auto& t : threads) {
  t.join();
}
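The second excerpt has the same shape; given the BENCHMARK_RELATIVE(ParkingLotNoWaitersWake, iters) entry in the reference list, it is presumably the ParkingLot counterpart. A hedged sketch, assuming a global ParkingLot<> named lot and a dummy key (both names are assumptions, not from the file):

#include <thread>
#include <vector>

#include <folly/Benchmark.h>
#include <folly/portability/GFlags.h>
#include <folly/synchronization/ParkingLot.h>

DECLARE_uint64(threads); // flag defined in the baseline sketch above

namespace {
folly::ParkingLot<> lot; // assumed global lot
int dummyKey = 0;        // assumed key; nothing ever parks on it
} // namespace

// Sketch: unpark a key that has no waiters; the callback never runs,
// so this measures the cost of an uncontended unpark.
BENCHMARK_RELATIVE(ParkingLotNoWaitersWakeSketch, iters) {
  std::vector<std::thread> threads{FLAGS_threads};
  for (auto& t : threads) {
    t = std::thread([&]() {
      for (auto i = 0u; i < iters; i++) {
        lot.unpark(&dummyKey, [](folly::Unit) {
          return folly::UnparkControl::RemoveContinue;
        });
      }
    });
  }
  for (auto& t : threads) {
    t.join();
  }
}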
std::vector<std::thread> threads{FLAGS_threads};
for (auto& t : threads) {
  t = std::thread([&]() {
    // ...
    if (fu.load(std::memory_order_relaxed)) {
      // ...
    }
    // ...
  });
}
// ...
for (auto i = 0u; i < iters; i++) {
  // ...
}
// ...
for (auto& t : threads) {
  t.join();
}
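This excerpt additionally checks fu.load(...) inside the worker, which suggests the threads wait on the futex and exit once its value changes, while the benchmark thread wakes them one at a time. A self-contained sketch of that wake-one idea, assuming the futexWait()/futexWake() signatures from the reference list; the shutdown protocol (store 1, then wake all) is an assumption:

#include <atomic>
#include <limits>
#include <thread>
#include <vector>

#include <folly/Benchmark.h>
#include <folly/detail/Futex.h>
#include <folly/portability/GFlags.h>

DECLARE_uint64(threads);

// Sketch: workers block on the futex while it reads 0; the benchmark
// thread issues one wake per iteration, then flips the value and wakes
// everyone so the workers can exit.
BENCHMARK(FutexWakeOneSketch, iters) {
  folly::detail::Futex<> fu{0};
  std::vector<std::thread> threads{FLAGS_threads};
  for (auto& t : threads) {
    t = std::thread([&]() {
      while (true) {
        folly::detail::futexWait(&fu, /* expected */ 0);
        if (fu.load(std::memory_order_relaxed)) {
          return; // shutdown value observed
        }
      }
    });
  }
  for (auto i = 0u; i < iters; i++) {
    folly::detail::futexWake(&fu, 1); // wake at most one waiter
  }
  fu.store(1, std::memory_order_relaxed);
  folly::detail::futexWake(&fu, std::numeric_limits<int>::max());
  for (auto& t : threads) {
    t.join();
  }
}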
std::atomic<bool> done{false};
std::vector<std::thread> threads{FLAGS_threads};
for (auto& t : threads) {
  t = std::thread([&]() {
    // ... ParkingLot::park() call; its ToPark wait condition is
    //   [&] { return done.load(std::memory_order_relaxed) == 0; },
    if (done.load(std::memory_order_relaxed)) {
      // ...
    }
    // ...
  });
}
// ...
for (auto i = 0u; i < iters; i++) {
  // ...
}
// ...
for (auto& t : threads) {
  t.join();
}
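The lambda visible in this fragment, [&] { return done.load(std::memory_order_relaxed) == 0; }, is the ToPark argument of park(). The sketch below wires it into the full park()/unpark() signatures from the reference list; the key choice, the Unit data argument, and the shutdown sequence are assumptions:

#include <atomic>
#include <thread>
#include <vector>

#include <folly/Benchmark.h>
#include <folly/portability/GFlags.h>
#include <folly/synchronization/ParkingLot.h>

DECLARE_uint64(threads);

// Sketch: workers park on a shared key until `done` is set; the
// benchmark thread unparks one waiter per iteration, then sets `done`
// and unparks everyone.
BENCHMARK(ParkingLotWakeOneSketch, iters) {
  folly::ParkingLot<> lot;
  std::atomic<bool> done{false};
  std::vector<std::thread> threads{FLAGS_threads};
  for (auto& t : threads) {
    t = std::thread([&]() {
      while (true) {
        lot.park(
            /* key */ &done,
            /* data */ folly::Unit{},
            /* toPark */
            [&] { return done.load(std::memory_order_relaxed) == 0; },
            /* preWait */ [] {});
        if (done.load(std::memory_order_relaxed)) {
          return;
        }
      }
    });
  }
  for (auto i = 0u; i < iters; i++) {
    lot.unpark(&done, [](folly::Unit) {
      return folly::UnparkControl::RemoveBreak; // wake at most one waiter
    });
  }
  done.store(true, std::memory_order_relaxed);
  lot.unpark(&done, [](folly::Unit) {
    return folly::UnparkControl::RemoveContinue; // wake everyone so they exit
  });
  for (auto& t : threads) {
    t.join();
  }
}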
std::atomic<bool> done{false};
std::vector<std::thread> threads{FLAGS_threads};
for (auto& t : threads) {
  t = std::thread([&]() {
    // ...
    if (done.load(std::memory_order_relaxed)) {
      // ...
    }
    // ...
  });
}
// ...
for (auto i = 0u; i < iters; i++) {
  // ...
}
// ...
for (auto& t : threads) {
  t.join();
}
std::atomic<bool> done{false};
std::vector<std::thread> threads{FLAGS_threads};
for (auto& t : threads) {
  t = std::thread([&]() {
    // ... ParkingLot::park() call; its ToPark wait condition is
    //   [&] { return done.load(std::memory_order_relaxed) == 0; },
    if (done.load(std::memory_order_relaxed)) {
      // ...
    }
    // ...
  });
}
// ...
for (auto i = 0u; i < iters; i++) {
  // ...
}
// ...
for (auto& t : threads) {
  t.join();
}
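The last two excerpts repeat the same structure; the detail worth calling out is that a single call can release every waiter at once. Two hypothetical helpers illustrating that, using only APIs from the reference list (the helper names are not from the file):

#include <limits>

#include <folly/detail/Futex.h>
#include <folly/synchronization/ParkingLot.h>

// Release every thread blocked on the futex in one call.
void wakeAllFutexWaiters(folly::detail::Futex<>& fu) {
  folly::detail::futexWake(&fu, std::numeric_limits<int>::max());
}

// Release every thread parked on `key` in one call.
void wakeAllLotWaiters(folly::ParkingLot<>& lot, const void* key) {
  lot.unpark(key, [](folly::Unit) {
    return folly::UnparkControl::RemoveContinue; // keep visiting waiters
  });
}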
gflags::ParseCommandLineFlags(&argc, &argv, true);
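The final fragment is the usual folly benchmark driver. A minimal sketch of such a main(), with the runBenchmarks() call assumed rather than recovered from the extract:

#include <folly/Benchmark.h>
#include <folly/portability/GFlags.h>

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  folly::runBenchmarks(); // runs every BENCHMARK registered above
  return 0;
}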
void unpark(const Key key, Unparker &&func)
Atom<std::uint32_t> Futex
ParkResult park(const Key key, D &&data, ToPark &&toPark, PreWait &&preWait)
int main(int argc, char **argv)
FutexResult futexWait(const Futex *futex, uint32_t expected, uint32_t waitMask)
BENCHMARK_RELATIVE(ParkingLotNoWaitersWake, iters)
std::vector<std::thread::id> threads
BENCHMARK(fbFollyGlobalBenchmarkBaseline)
DEFINE_uint64(threads, 32, "Number of threads for benchmark")
int futexWake(const Futex *futex, int count, uint32_t wakeMask)