proxygen
SharedMutexTest.cpp File Reference
#include <folly/SharedMutex.h>
#include <stdlib.h>
#include <thread>
#include <vector>
#include <boost/optional.hpp>
#include <boost/thread/shared_mutex.hpp>
#include <folly/Benchmark.h>
#include <folly/MPMCQueue.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
#include <folly/synchronization/RWSpinLock.h>
#include <folly/test/DeterministicSchedule.h>
#include <folly/test/TestUtils.h>


Classes

struct  TokenLocker
 
struct  Locker
 
struct  EnterLocker
 
struct  PosixRWLock
 
struct  PosixMutex
 

Macros

#define BENCH_BASE(...)   FB_VA_GLUE(BENCHMARK_NAMED_PARAM, (__VA_ARGS__))
 
#define BENCH_REL(...)   FB_VA_GLUE(BENCHMARK_RELATIVE_NAMED_PARAM, (__VA_ARGS__))
 

Typedefs

typedef DeterministicSchedule DSched
 
typedef SharedMutexImpl< true, void, DeterministicAtomic, true > DSharedMutexReadPriority
 
typedef SharedMutexImpl< false, void, DeterministicAtomic, true > DSharedMutexWritePriority
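
DSched, DSharedMutexReadPriority, and DSharedMutexWritePriority substitute folly's DeterministicAtomic for std::atomic inside SharedMutexImpl so the deterministic tests can replay thread interleavings under DeterministicSchedule. A minimal sketch of that combination follows; the seed and the lambdas are illustrative assumptions, not code from this file.

#include <folly/SharedMutex.h>
#include <folly/test/DeterministicSchedule.h>

using folly::test::DeterministicAtomic;
using folly::test::DeterministicSchedule;

typedef DeterministicSchedule DSched;
typedef folly::SharedMutexImpl<true, void, DeterministicAtomic, true>
    DSharedMutexReadPriority;

int main() {
  // Pick a reproducible interleaving; 12345 is an arbitrary example seed.
  DSched sched(DSched::uniform(12345));
  DSharedMutexReadPriority mu;
  auto reader = DSched::thread([&] {
    mu.lock_shared();
    mu.unlock_shared();
  });
  auto writer = DSched::thread([&] {
    mu.lock();
    mu.unlock();
  });
  DSched::join(reader);
  DSched::join(writer);
  return 0;
}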
 

Functions

template<typename Lock >
void runBasicTest ()
 
 TEST (SharedMutex, basic)
 
template<typename Lock >
void runBasicHoldersTest ()
 
 TEST (SharedMutex, basic_holders)
 
template<typename Lock >
void runManyReadLocksTestWithTokens ()
 
 TEST (SharedMutex, many_read_locks_with_tokens)
 
template<typename Lock >
void runManyReadLocksTestWithoutTokens ()
 
 TEST (SharedMutex, many_read_locks_without_tokens)
 
template<typename Lock >
void runTimeoutInPastTest ()
 
 TEST (SharedMutex, timeout_in_past)
 
template<class Func >
bool funcHasDuration (milliseconds expectedDuration, Func func)
 
template<typename Lock >
void runFailingTryTimeoutTest ()
 
 TEST (SharedMutex, failing_try_timeout)
 
template<typename Lock >
void runBasicUpgradeTest ()
 
 TEST (SharedMutex, basic_upgrade_tests)
 
 TEST (SharedMutex, read_has_prio)
 
 TEST (SharedMutex, write_has_prio)
 
template<template< typename > class Atom, typename Lock , typename Locker >
static void runContendedReaders (size_t numOps, size_t numThreads, bool useSeparateLocks)
 
static void folly_rwspin_reads (uint32_t numOps, size_t numThreads, bool useSeparateLocks)
 
static void shmtx_wr_pri_reads (uint32_t numOps, size_t numThreads, bool useSeparateLocks)
 
static void shmtx_w_bare_reads (uint32_t numOps, size_t numThreads, bool useSeparateLocks)
 
static void shmtx_rd_pri_reads (uint32_t numOps, size_t numThreads, bool useSeparateLocks)
 
static void shmtx_r_bare_reads (uint32_t numOps, size_t numThreads, bool useSeparateLocks)
 
static void folly_ticket_reads (uint32_t numOps, size_t numThreads, bool useSeparateLocks)
 
static void boost_shared_reads (uint32_t numOps, size_t numThreads, bool useSeparateLocks)
 
static void pthrd_rwlock_reads (uint32_t numOps, size_t numThreads, bool useSeparateLocks)
 
template<template< typename > class Atom, typename Lock , typename Locker >
static void runMixed (size_t numOps, size_t numThreads, double writeFraction, bool useSeparateLocks)
 
static void folly_rwspin (size_t numOps, size_t numThreads, double writeFraction, bool useSeparateLocks)
 
static void shmtx_wr_pri (uint32_t numOps, size_t numThreads, double writeFraction, bool useSeparateLocks)
 
static void shmtx_w_bare (uint32_t numOps, size_t numThreads, double writeFraction, bool useSeparateLocks)
 
static void shmtx_rd_pri (uint32_t numOps, size_t numThreads, double writeFraction, bool useSeparateLocks)
 
static void shmtx_r_bare (uint32_t numOps, size_t numThreads, double writeFraction, bool useSeparateLocks)
 
static void folly_ticket (size_t numOps, size_t numThreads, double writeFraction, bool useSeparateLocks)
 
static void boost_shared (size_t numOps, size_t numThreads, double writeFraction, bool useSeparateLocks)
 
static void pthrd_rwlock (size_t numOps, size_t numThreads, double writeFraction, bool useSeparateLocks)
 
static void pthrd_mutex_ (size_t numOps, size_t numThreads, double writeFraction, bool useSeparateLocks)
 
template<typename Lock , template< typename > class Atom>
static void runAllAndValidate (size_t numOps, size_t numThreads)
 
 TEST (SharedMutex, deterministic_concurrent_readers_of_one_lock_read_prio)
 
 TEST (SharedMutex, deterministic_concurrent_readers_of_one_lock_write_prio)
 
 TEST (SharedMutex, concurrent_readers_of_one_lock_read_prio)
 
 TEST (SharedMutex, concurrent_readers_of_one_lock_write_prio)
 
 TEST (SharedMutex, deterministic_readers_of_concurrent_locks_read_prio)
 
 TEST (SharedMutex, deterministic_readers_of_concurrent_locks_write_prio)
 
 TEST (SharedMutex, readers_of_concurrent_locks_read_prio)
 
 TEST (SharedMutex, readers_of_concurrent_locks_write_prio)
 
 TEST (SharedMutex, deterministic_mixed_mostly_read_read_prio)
 
 TEST (SharedMutex, deterministic_mixed_mostly_read_write_prio)
 
 TEST (SharedMutex, mixed_mostly_read_read_prio)
 
 TEST (SharedMutex, mixed_mostly_read_write_prio)
 
 TEST (SharedMutex, deterministic_mixed_mostly_write_read_prio)
 
 TEST (SharedMutex, deterministic_mixed_mostly_write_write_prio)
 
 TEST (SharedMutex, deterministic_lost_wakeup_write_prio)
 
static std::size_t adjustReps (std::size_t reps)
 
 TEST (SharedMutex, mixed_mostly_write_read_prio)
 
 TEST (SharedMutex, mixed_mostly_write_write_prio)
 
 TEST (SharedMutex, deterministic_all_ops_read_prio)
 
 TEST (SharedMutex, deterministic_all_ops_write_prio)
 
 TEST (SharedMutex, all_ops_read_prio)
 
 TEST (SharedMutex, all_ops_write_prio)
 
 FOLLY_ASSUME_FBVECTOR_COMPATIBLE (boost::optional< boost::optional< SharedMutexToken >>)
 
template<typename Lock , template< typename > class Atom>
static void runRemoteUnlock (size_t numOps, double preWriteFraction, double preUpgradeFraction, size_t numSendingThreads, size_t numReceivingThreads)
 
 TEST (SharedMutex, deterministic_remote_write_prio)
 
 TEST (SharedMutex, deterministic_remote_read_prio)
 
 TEST (SharedMutex, remote_write_prio)
 
 TEST (SharedMutex, remote_read_prio)
 
static void burn (size_t n)
 
template<typename Lock , template< typename > class Atom = atomic>
static void runPingPong (size_t numRounds, size_t burnCount)
 
static void folly_rwspin_ping_pong (size_t n, size_t scale, size_t burnCount)
 
static void shmtx_w_bare_ping_pong (size_t n, size_t scale, size_t burnCount)
 
static void shmtx_r_bare_ping_pong (size_t n, size_t scale, size_t burnCount)
 
static void folly_ticket_ping_pong (size_t n, size_t scale, size_t burnCount)
 
static void boost_shared_ping_pong (size_t n, size_t scale, size_t burnCount)
 
static void pthrd_rwlock_ping_pong (size_t n, size_t scale, size_t burnCount)
 
 TEST (SharedMutex, deterministic_ping_pong_write_prio)
 
 TEST (SharedMutex, deterministic_ping_pong_read_prio)
 
 TEST (SharedMutex, ping_pong_write_prio)
 
 TEST (SharedMutex, ping_pong_read_prio)
 
 BENCHMARK (single_thread_lock_shared_unlock_shared, iters)
 
 BENCHMARK (single_thread_lock_unlock, iters)
 
 BENCHMARK_DRAW_LINE ()
 
Benchmark registrations via FB_VA_GLUE (BENCHMARK_NAMED_PARAM / BENCHMARK_RELATIVE_NAMED_PARAM):
 
Contended reads, at 1, 2, 4, 8, 16, 32, and 64 threads:
 folly_rwspin_reads (base), shmtx_w_bare_reads, shmtx_r_bare_reads, boost_shared_reads
 
Mixed workload, all writes (write fraction 1.0), at 1, 2, 4, 8, 16, 32, and 64 threads, each group followed by BENCHMARK_DRAW_LINE():
 folly_rwspin (base), shmtx_rd_pri, boost_shared, pthrd_mutex_
 
Mixed workload, 10% writes (0.10), at 1, 2, 4, 8, 16, 32, and 64 threads:
 folly_rwspin (base), shmtx_rd_pri, boost_shared
 
Mixed workload, 1% writes (0.01), at 1, 2, 4, 8, 16, 32, and 64 threads:
 folly_rwspin (base), shmtx_w_bare, shmtx_r_bare, boost_shared
 
Mixed workload with one lock per thread (useSeparateLocks = true), at write fractions 0.50, 0.10, and 0.01:
 folly_rwspin (base) at 2thr_2lock / 8thr_8lock / 32thr_32lock, shmtx_rd_pri at 2thr_2lock / 8thr_8lock / 32thr_32lock, shmtx_wr_pri at 4thr_4lock / 16thr_16lock / 64thr_64lock
 
Ping-pong, with burn counts burn0 (1, 0), burn100k (100, 100000), burn300k (100, 300000), and burn1M (1000, 1000000):
 folly_rwspin_ping_pong (base), shmtx_r_bare_ping_pong, boost_shared_ping_pong
 
 
int main (int argc, char **argv)
 

Variables

 shmtx_wr_pri_reads
 
 shmtx_rd_pri_reads
 
 folly_ticket_reads
 
 pthrd_rwlock_reads
 
 shmtx_wr_pri
 
 folly_ticket
 
 pthrd_rwlock
 
 shmtx_rd_pri
 
 folly_rwspin
 
 shmtx_w_bare_ping_pong
 
 burn0
 
 folly_ticket_ping_pong
 
 pthrd_rwlock_ping_pong
 
 burn100k
 
 burn300k
 
 burn1M
 

Macro Definition Documentation

#define BENCH_BASE (   ...)    FB_VA_GLUE(BENCHMARK_NAMED_PARAM, (__VA_ARGS__))

Definition at line 1435 of file SharedMutexTest.cpp.

#define BENCH_REL (   ...)    FB_VA_GLUE(BENCHMARK_RELATIVE_NAMED_PARAM, (__VA_ARGS__))

Definition at line 1436 of file SharedMutexTest.cpp.
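
BENCH_BASE and BENCH_REL forward to folly's BENCHMARK_NAMED_PARAM and BENCHMARK_RELATIVE_NAMED_PARAM, which register one baseline row and follow-up rows reported as a percentage of that baseline. The sketch below shows the registration pattern; the workload functions and parameters are placeholders, not the ones used in this file.

#include <folly/Benchmark.h>
#include <folly/portability/GFlags.h>

#define BENCH_BASE(...) FB_VA_GLUE(BENCHMARK_NAMED_PARAM, (__VA_ARGS__))
#define BENCH_REL(...) FB_VA_GLUE(BENCHMARK_RELATIVE_NAMED_PARAM, (__VA_ARGS__))

// Placeholder workloads; the real file passes folly_rwspin_reads,
// shmtx_wr_pri_reads, boost_shared_reads, etc. The first parameter
// receives the iteration count chosen by the benchmark framework.
static void baseline_reads(uint32_t numOps, size_t numThreads, bool useSeparateLocks) {
  (void)numOps; (void)numThreads; (void)useSeparateLocks;
}
static void candidate_reads(uint32_t numOps, size_t numThreads, bool useSeparateLocks) {
  (void)numOps; (void)numThreads; (void)useSeparateLocks;
}

// Baseline row, shown as 100%:
BENCH_BASE(baseline_reads, 4thread, 4, false)
// Relative row, reported as a percentage of the preceding baseline:
BENCH_REL(candidate_reads, 4thread, 4, false)

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  folly::runBenchmarks();
  return 0;
}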

Typedef Documentation

typedef DeterministicSchedule DSched
 
Definition at line 38 of file SharedMutexTest.cpp.
 
typedef SharedMutexImpl< true, void, DeterministicAtomic, true > DSharedMutexReadPriority
 
Definition at line 40 of file SharedMutexTest.cpp.
 
typedef SharedMutexImpl< false, void, DeterministicAtomic, true > DSharedMutexWritePriority
 
Definition at line 42 of file SharedMutexTest.cpp.

Function Documentation

static std::size_t adjustReps ( std::size_t  reps)
static

Definition at line 1090 of file SharedMutexTest.cpp.

References folly::kIsSanitizeThread.

Referenced by TEST().

1090  {
1091  if (folly::kIsSanitizeThread) {
1092  return reps / 10;
1093  }
1094  return reps;
1095 }
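
adjustReps cuts the repetition count by 10x when folly::kIsSanitizeThread is set, keeping the stress tests tractable under ThreadSanitizer. A small usage sketch follows; the counts are illustrative.

#include <cstddef>
#include <folly/Portability.h>

static std::size_t adjustReps(std::size_t reps) {
  if (folly::kIsSanitizeThread) {
    return reps / 10; // TSAN builds run far slower, so shrink the workload
  }
  return reps;
}

int main() {
  // 50000 normally, 5000 under a ThreadSanitizer build.
  const std::size_t numOps = adjustReps(50000);
  return numOps == 0 ? 1 : 0;
}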
BENCHMARK ( single_thread_lock_shared_unlock_shared  ,
iters   
)

Definition at line 1416 of file SharedMutexTest.cpp.

References folly::doNotOptimizeAway(), folly::lock(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::lock_shared(), and folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::unlock_shared().

1416  {
1417  SharedMutex lock;
1418  for (size_t n = 0; n < iters; ++n) {
1419  SharedMutex::Token token;
1420  lock.lock_shared(token);
1422  lock.unlock_shared(token);
1423  }
1424 }
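
The benchmark above measures the token form of lock_shared/unlock_shared, where the token records how the reader registered itself so unlock_shared does not have to search for it. Both forms are sketched below; the critical sections are placeholders.

#include <folly/SharedMutex.h>

int main() {
  folly::SharedMutex mu;

  // Token form: cheaper unlock under heavy reader traffic because the
  // token remembers the slot this reader used.
  {
    folly::SharedMutex::Token token;
    mu.lock_shared(token);
    // ... read shared state ...
    mu.unlock_shared(token);
  }

  // Token-free form: simpler call sites, a bit more work on unlock.
  mu.lock_shared();
  // ... read shared state ...
  mu.unlock_shared();

  return 0;
}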
BENCHMARK ( single_thread_lock_unlock  ,
iters   
)

Definition at line 1426 of file SharedMutexTest.cpp.

References folly::doNotOptimizeAway(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::lock(), folly::lock(), and folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::unlock().

1426  {
1427  SharedMutex lock;
1428  for (size_t n = 0; n < iters; ++n) {
1429  lock.lock();
1431  lock.unlock();
1432  }
1433 }
BENCHMARK_DRAW_LINE ( )
static void boost_shared ( size_t  numOps,
size_t  numThreads,
double  writeFraction,
bool  useSeparateLocks 
)
static

Definition at line 726 of file SharedMutexTest.cpp.

730  {
731  runMixed<atomic, boost::shared_mutex, Locker>(
732  numOps, numThreads, writeFraction, useSeparateLocks);
733 }
static void boost_shared_ping_pong ( size_t  n,
size_t  scale,
size_t  burnCount 
)
static

Definition at line 1373 of file SharedMutexTest.cpp.

1373  {
1374  runPingPong<boost::shared_mutex>(n / scale, burnCount);
1375 }
static void boost_shared_reads ( uint32_t  numOps,
size_t  numThreads,
bool  useSeparateLocks 
)
static

Definition at line 606 of file SharedMutexTest.cpp.

606  {
607  runContendedReaders<atomic, boost::shared_mutex, Locker>(
608  numOps, numThreads, useSeparateLocks);
609 }
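
boost_shared_reads, folly_rwspin_reads, and the other *_reads wrappers all funnel into runContendedReaders, whose body is not reproduced on this page. The sketch below shows the general shape such a contended-reader loop takes; it is an assumption about structure, not the file's actual template, which is also parameterized over the Atom type and supports useSeparateLocks.

#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>
#include <folly/SharedMutex.h>

// Rough shape of a contended-readers run: every thread takes and releases
// shared locks on the same mutex until the combined operation budget is
// spent.
template <typename Lock>
static void contendedReadersSketch(std::size_t numOps, std::size_t numThreads) {
  Lock sharedLock;
  std::atomic<bool> go(false);
  std::vector<std::thread> threads;
  for (std::size_t t = 0; t < numThreads; ++t) {
    threads.emplace_back([&, t] {
      while (!go.load()) {
        std::this_thread::yield();
      }
      for (std::size_t op = t; op < numOps; op += numThreads) {
        sharedLock.lock_shared();
        // reader critical section intentionally left empty
        sharedLock.unlock_shared();
      }
    });
  }
  go.store(true);
  for (auto& thr : threads) {
    thr.join();
  }
}

int main() {
  contendedReadersSketch<folly::SharedMutex>(100000, 4);
  return 0;
}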
static void burn ( size_t  n)
static

Definition at line 1293 of file SharedMutexTest.cpp.

References Atom, folly::doNotOptimizeAway(), and i.

Referenced by runPingPong().

1293  {
1294  for (size_t i = 0; i < n; ++i) {
1295  folly::doNotOptimizeAway(i);
1296  }
1297 }
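
burn is the spin-work helper runPingPong uses to simulate time spent holding the lock. runPingPong itself is not shown on this page; the sketch below gives only the rough flavor of a two-thread hand-off benchmark built on burn-style work. Its structure is an assumption; the real template is parameterized over the Atom type and coordinates the threads more strictly.

#include <cstddef>
#include <thread>
#include <folly/Benchmark.h>
#include <folly/SharedMutex.h>

// Spin loop standing in for lock-holder work, in the spirit of burn() above.
static void burnWork(std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    folly::doNotOptimizeAway(i);
  }
}

// Rough sketch only: two threads repeatedly take the same lock and burn
// CPU while holding it.
template <typename Lock>
static void pingPongSketch(std::size_t numRounds, std::size_t burnCount) {
  Lock mu;
  std::thread other([&] {
    for (std::size_t i = 0; i < numRounds; ++i) {
      mu.lock();
      burnWork(burnCount);
      mu.unlock();
    }
  });
  for (std::size_t i = 0; i < numRounds; ++i) {
    mu.lock();
    burnWork(burnCount);
    mu.unlock();
  }
  other.join();
}

int main() {
  pingPongSketch<folly::SharedMutex>(1000, 100);
  return 0;
}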
FB_VA_GLUE ( BENCHMARK_NAMED_PARAM, (...) ) and FB_VA_GLUE ( BENCHMARK_RELATIVE_NAMED_PARAM, (...) )
 
These file-scope macro invocations register the benchmark parameterizations listed above:
 
Contended reads (folly_rwspin_reads as base; shmtx_w_bare_reads, shmtx_r_bare_reads, boost_shared_reads relative), at 1, 2, 4, 8, 16, 32, and 64 threads.
 
Mixed workload, write fraction 1.0 (folly_rwspin as base; shmtx_rd_pri, boost_shared, pthrd_mutex_ relative), at 1, 2, 4, 8, 16, 32, and 64 threads (1thread_all_write through 64thread_all_write).
 
Mixed workload, write fraction 0.10 (folly_rwspin as base; shmtx_rd_pri, boost_shared relative), at 1, 2, 4, 8, 16, 32, and 64 threads (1thread_10pct_write through 64thread_10pct_write).
 
Mixed workload, write fraction 0.01 (folly_rwspin as base; shmtx_w_bare, shmtx_r_bare, boost_shared relative), at 1, 2, 4, 8, 16, 32, and 64 threads (1thread_1pct_write through 64thread_1pct_write).
 
Mixed workload with separate locks, useSeparateLocks = true (folly_rwspin as base at 2, 8, and 32 threads; shmtx_rd_pri relative at 2, 8, and 32 threads; shmtx_wr_pri relative at 4, 16, and 64 threads), in 50pct_write, 10pct_write, and 1pct_write variants.
 
Ping-pong (folly_rwspin_ping_pong as base; shmtx_r_bare_ping_pong, boost_shared_ping_pong relative), with burn counts burn0 (1, 0), burn100k (100, 100000), burn300k (100, 300000), and burn1M (1000, 1000000).
 
FOLLY_ASSUME_FBVECTOR_COMPATIBLE ( boost::optional< boost::optional< SharedMutexToken >>  )

Referenced by TEST().

static void folly_rwspin ( size_t  numOps,
size_t  numThreads,
double  writeFraction,
bool  useSeparateLocks 
)
static

Definition at line 672 of file SharedMutexTest.cpp.

676  {
677  runMixed<atomic, RWSpinLock, Locker>(
678  numOps, numThreads, writeFraction, useSeparateLocks);
679 }
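
folly_rwspin and its siblings instantiate runMixed with a lock type and a Locker policy (see the Classes list above), which gives the benchmark templates one calling convention across RWSpinLock, boost::shared_mutex, and the pthread wrappers. The definitions of Locker and PosixRWLock are not reproduced on this page, so the adapter below is only an illustration of the pattern; the names and members are assumptions.

#include <pthread.h>

// Illustrative adapter in the spirit of PosixRWLock: gives a pthread
// rwlock the lock()/lock_shared() surface the benchmark templates expect.
struct PosixRWLockSketch {
  pthread_rwlock_t lock_;
  PosixRWLockSketch() { pthread_rwlock_init(&lock_, nullptr); }
  ~PosixRWLockSketch() { pthread_rwlock_destroy(&lock_); }
  void lock() { pthread_rwlock_wrlock(&lock_); }
  void unlock() { pthread_rwlock_unlock(&lock_); }
  void lock_shared() { pthread_rwlock_rdlock(&lock_); }
  void unlock_shared() { pthread_rwlock_unlock(&lock_); }
};

// A Locker-style policy: static helpers the benchmark templates can call
// regardless of which lock type they were instantiated with.
struct LockerSketch {
  template <typename Lock>
  static void lock(Lock& l) { l.lock(); }
  template <typename Lock>
  static void unlock(Lock& l) { l.unlock(); }
  template <typename Lock>
  static void lock_shared(Lock& l) { l.lock_shared(); }
  template <typename Lock>
  static void unlock_shared(Lock& l) { l.unlock_shared(); }
};

int main() {
  PosixRWLockSketch rw;
  LockerSketch::lock_shared(rw);
  LockerSketch::unlock_shared(rw);
  LockerSketch::lock(rw);
  LockerSketch::unlock(rw);
  return 0;
}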
static void folly_rwspin_ping_pong ( size_t  n,
size_t  scale,
size_t  burnCount 
)
static

Definition at line 1357 of file SharedMutexTest.cpp.

1357  {
1358  runPingPong<RWSpinLock>(n / scale, burnCount);
1359 }
static void folly_rwspin_reads ( uint32_t  numOps,
size_t  numThreads,
bool  useSeparateLocks 
)
static

Definition at line 570 of file SharedMutexTest.cpp.

570  {
571  runContendedReaders<atomic, RWSpinLock, Locker>(
572  numOps, numThreads, useSeparateLocks);
573 }
static void folly_ticket ( size_t  numOps,
size_t  numThreads,
double  writeFraction,
bool  useSeparateLocks 
)
static

Definition at line 717 of file SharedMutexTest.cpp.

721  {
722  runMixed<atomic, RWTicketSpinLock64, Locker>(
723  numOps, numThreads, writeFraction, useSeparateLocks);
724 }
static void folly_ticket_ping_pong ( size_t  n,
size_t  scale,
size_t  burnCount 
)
static

Definition at line 1369 of file SharedMutexTest.cpp.

1369  {
1370  runPingPong<RWTicketSpinLock64>(n / scale, burnCount);
1371 }
static void folly_ticket_reads ( uint32_t  numOps,
size_t  numThreads,
bool  useSeparateLocks 
)
static

Definition at line 600 of file SharedMutexTest.cpp.

600  {
601  runContendedReaders<atomic, RWTicketSpinLock64, Locker>(
602  numOps, numThreads, useSeparateLocks);
603 }
template<class Func >
bool funcHasDuration ( milliseconds  expectedDuration,
Func  func 
)

Definition at line 232 of file SharedMutexTest.cpp.

References now(), and start.

Referenced by runFailingTryTimeoutTest().

232  {
233  // elapsed time should eventually fall within expectedDuration +- 25%
234  for (int tries = 0; tries < 100; ++tries) {
235  auto start = steady_clock::now();
236  func();
237  auto elapsed = steady_clock::now() - start;
238  if (elapsed > expectedDuration - expectedDuration / 4 &&
239  elapsed < expectedDuration + expectedDuration / 4) {
240  return true;
241  }
242  }
243  return false;
244 }
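
funcHasDuration retries func up to 100 times and succeeds once a run lands within plus or minus 25% of expectedDuration, which keeps timing assertions stable on loaded machines. A usage sketch in the spirit of runFailingTryTimeoutTest follows; the 10ms timeout and test name are illustrative.

#include <chrono>
#include <folly/SharedMutex.h>
#include <folly/portability/GTest.h>

using namespace std::chrono;

// funcHasDuration as defined above (line 232 of SharedMutexTest.cpp) is
// assumed to be in scope.
TEST(SharedMutexExample, try_lock_for_times_out) {
  folly::SharedMutex mu;
  mu.lock(); // hold exclusively so the timed try below must wait out its timeout
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(mu.try_lock_for(milliseconds(10)));
  }));
  mu.unlock();
}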
int main ( int  argc,
char **  argv 
)

Definition at line 2125 of file SharedMutexTest.cpp.

References testing::InitGoogleTest(), RUN_ALL_TESTS(), and folly::runBenchmarksOnFlag().

2125  {
2126  (void)folly_rwspin_reads;
2127  (void)shmtx_wr_pri_reads;
2128  (void)shmtx_w_bare_reads;
2129  (void)shmtx_rd_pri_reads;
2130  (void)shmtx_r_bare_reads;
2131  (void)folly_ticket_reads;
2132  (void)boost_shared_reads;
2133  (void)pthrd_rwlock_reads;
2134  (void)folly_rwspin;
2135  (void)shmtx_wr_pri;
2136  (void)shmtx_w_bare;
2137  (void)shmtx_rd_pri;
2138  (void)shmtx_r_bare;
2139  (void)folly_ticket;
2140  (void)boost_shared;
2141  (void)pthrd_rwlock;
2142  (void)pthrd_mutex_;
2143  (void)folly_rwspin_ping_pong;
2144  (void)shmtx_w_bare_ping_pong;
2145  (void)shmtx_r_bare_ping_pong;
2146  (void)folly_ticket_ping_pong;
2147  (void)boost_shared_ping_pong;
2148  (void)pthrd_rwlock_ping_pong;
2149 
2150  testing::InitGoogleTest(&argc, argv);
2151  gflags::ParseCommandLineFlags(&argc, &argv, true);
2152  int rv = RUN_ALL_TESTS();
2153  folly::runBenchmarksOnFlag();
2154  return rv;
2155 }
static void pthrd_mutex_ ( size_t  numOps,
size_t  numThreads,
double  writeFraction,
bool  useSeparateLocks 
)
static

Definition at line 744 of file SharedMutexTest.cpp.

References Atom.

748  {
749  runMixed<atomic, PosixMutex, Locker>(
750  numOps, numThreads, writeFraction, useSeparateLocks);
751 }
static void pthrd_rwlock ( size_t  numOps,
size_t  numThreads,
double  writeFraction,
bool  useSeparateLocks 
)
static

Definition at line 735 of file SharedMutexTest.cpp.

739  {
740  runMixed<atomic, PosixRWLock, Locker>(
741  numOps, numThreads, writeFraction, useSeparateLocks);
742 }
static void pthrd_rwlock_ping_pong ( size_t  n,
size_t  scale,
size_t  burnCount 
)
static

Definition at line 1377 of file SharedMutexTest.cpp.

1377  {
1378  runPingPong<PosixRWLock>(n / scale, burnCount);
1379 }
static void pthrd_rwlock_reads ( uint32_t  numOps,
size_t  numThreads,
bool  useSeparateLocks 
)
static

Definition at line 612 of file SharedMutexTest.cpp.

References Atom.

612  {
613  runContendedReaders<atomic, PosixRWLock, Locker>(
614  numOps, numThreads, useSeparateLocks);
615 }
template<typename Lock , template< typename > class Atom>
static void runAllAndValidate ( size_t  numOps,
size_t  numThreads 
)
static

Definition at line 754 of file SharedMutexTest.cpp.

References BENCHMARK_SUSPEND, EXPECT_EQ, EXPECT_FALSE, EXPECT_TRUE, i, folly::test::DeterministicSchedule::join(), now(), folly::pushmi::detail::t, folly::test::DeterministicSchedule::thread(), threads, uint64_t, and folly::fibers::yield().

754  {
755  Lock globalLock;
756  Atom<int> globalExclusiveCount(0);
757  Atom<int> globalUpgradeCount(0);
758  Atom<int> globalSharedCount(0);
759 
760  Atom<bool> go(false);
761 
762  // clang crashes on access to Atom<> captured by ref in closure
763  Atom<int>* globalExclusiveCountPtr = &globalExclusiveCount;
764  Atom<int>* globalUpgradeCountPtr = &globalUpgradeCount;
765  Atom<int>* globalSharedCountPtr = &globalSharedCount;
766  Atom<bool>* goPtr = &go;
767 
768  vector<thread> threads(numThreads);
769 
770  BENCHMARK_SUSPEND {
771  for (size_t t = 0; t < numThreads; ++t) {
772  threads[t] = DSched::thread([&, t, numThreads] {
773  struct drand48_data buffer;
774  srand48_r(t, &buffer);
775 
776  bool exclusive = false;
777  bool upgrade = false;
778  bool shared = false;
779  bool ourGlobalTokenUsed = false;
780  SharedMutexToken ourGlobalToken;
781 
782  Lock privateLock;
783  vector<SharedMutexToken> privateTokens;
784 
785  while (!goPtr->load()) {
787  }
788  for (size_t op = t; op < numOps; op += numThreads) {
789  // randVal in [0,1000)
790  long randVal;
791  lrand48_r(&buffer, &randVal);
792  randVal = (long)((randVal * (uint64_t)1000) / 0x7fffffff);
793 
794  // make as many assertions as possible about the global state
795  if (exclusive) {
796  EXPECT_EQ(1, globalExclusiveCountPtr->load(memory_order_acquire));
797  EXPECT_EQ(0, globalUpgradeCountPtr->load(memory_order_acquire));
798  EXPECT_EQ(0, globalSharedCountPtr->load(memory_order_acquire));
799  }
800  if (upgrade) {
801  EXPECT_EQ(0, globalExclusiveCountPtr->load(memory_order_acquire));
802  EXPECT_EQ(1, globalUpgradeCountPtr->load(memory_order_acquire));
803  }
804  if (shared) {
805  EXPECT_EQ(0, globalExclusiveCountPtr->load(memory_order_acquire));
806  EXPECT_TRUE(globalSharedCountPtr->load(memory_order_acquire) > 0);
807  } else {
808  EXPECT_FALSE(ourGlobalTokenUsed);
809  }
810 
811  // independent 20% chance we do something to the private lock
812  if (randVal < 200) {
813  // it's okay to take multiple private shared locks because
814  // we never take an exclusive lock, so reader versus writer
815  // priority doesn't cause deadlocks
816  if (randVal < 100 && privateTokens.size() > 0) {
817  auto i = randVal % privateTokens.size();
818  privateLock.unlock_shared(privateTokens[i]);
819  privateTokens.erase(privateTokens.begin() + i);
820  } else {
821  SharedMutexToken token;
822  privateLock.lock_shared(token);
823  privateTokens.push_back(token);
824  }
825  continue;
826  }
827 
828  // if we've got a lock, the only thing we can do is release it
829  // or transform it into a different kind of lock
830  if (exclusive) {
831  exclusive = false;
832  --*globalExclusiveCountPtr;
833  if (randVal < 500) {
834  globalLock.unlock();
835  } else if (randVal < 700) {
836  globalLock.unlock_and_lock_shared();
837  ++*globalSharedCountPtr;
838  shared = true;
839  } else if (randVal < 900) {
840  globalLock.unlock_and_lock_shared(ourGlobalToken);
841  ++*globalSharedCountPtr;
842  shared = true;
843  ourGlobalTokenUsed = true;
844  } else {
845  globalLock.unlock_and_lock_upgrade();
846  ++*globalUpgradeCountPtr;
847  upgrade = true;
848  }
849  } else if (upgrade) {
850  upgrade = false;
851  --*globalUpgradeCountPtr;
852  if (randVal < 500) {
853  globalLock.unlock_upgrade();
854  } else if (randVal < 700) {
855  globalLock.unlock_upgrade_and_lock_shared();
856  ++*globalSharedCountPtr;
857  shared = true;
858  } else if (randVal < 900) {
859  globalLock.unlock_upgrade_and_lock_shared(ourGlobalToken);
860  ++*globalSharedCountPtr;
861  shared = true;
862  ourGlobalTokenUsed = true;
863  } else {
864  globalLock.unlock_upgrade_and_lock();
865  ++*globalExclusiveCountPtr;
866  exclusive = true;
867  }
868  } else if (shared) {
869  shared = false;
870  --*globalSharedCountPtr;
871  if (ourGlobalTokenUsed) {
872  globalLock.unlock_shared(ourGlobalToken);
873  ourGlobalTokenUsed = false;
874  } else {
875  globalLock.unlock_shared();
876  }
877  } else if (randVal < 400) {
878  // 40% chance of shared lock with token, 5 ways to get it
879 
880  // delta t goes from -1 millis to 7 millis
881  auto dt = microseconds(10 * (randVal - 100));
882 
883  if (randVal < 400) {
884  globalLock.lock_shared(ourGlobalToken);
885  shared = true;
886  } else if (randVal < 500) {
887  shared = globalLock.try_lock_shared(ourGlobalToken);
888  } else if (randVal < 600) {
889  shared = globalLock.try_lock_shared_for(dt, ourGlobalToken);
890  } else if (randVal < 800) {
891  shared = globalLock.try_lock_shared_until(
892  system_clock::now() + dt, ourGlobalToken);
893  }
894  if (shared) {
895  ourGlobalTokenUsed = true;
896  ++*globalSharedCountPtr;
897  }
898  } else if (randVal < 800) {
899  // 40% chance of shared lock without token
900  auto dt = microseconds(10 * (randVal - 100));
901  if (randVal < 400) {
902  globalLock.lock_shared();
903  shared = true;
904  } else if (randVal < 500) {
905  shared = globalLock.try_lock_shared();
906  } else if (randVal < 600) {
907  shared = globalLock.try_lock_shared_for(dt);
908  } else if (randVal < 800) {
909  shared =
910  globalLock.try_lock_shared_until(system_clock::now() + dt);
911  }
912  if (shared) {
913  ++*globalSharedCountPtr;
914  }
915  } else if (randVal < 900) {
916  // 10% chance of upgrade lock
917  globalLock.lock_upgrade();
918  upgrade = true;
919  ++*globalUpgradeCountPtr;
920  } else {
921  // 10% chance of exclusive lock, 5 ways to get it
922 
923  // delta t goes from -1 millis to 9 millis
924  auto dt = microseconds(100 * (randVal - 910));
925 
926  if (randVal < 400) {
927  globalLock.lock();
928  exclusive = true;
929  } else if (randVal < 500) {
930  exclusive = globalLock.try_lock();
931  } else if (randVal < 600) {
932  exclusive = globalLock.try_lock_for(dt);
933  } else if (randVal < 700) {
934  exclusive = globalLock.try_lock_until(steady_clock::now() + dt);
935  } else {
936  exclusive = globalLock.try_lock_until(system_clock::now() + dt);
937  }
938  if (exclusive) {
939  ++*globalExclusiveCountPtr;
940  }
941  }
942  }
943 
944  if (exclusive) {
945  --*globalExclusiveCountPtr;
946  globalLock.unlock();
947  }
948  if (upgrade) {
949  --*globalUpgradeCountPtr;
950  globalLock.unlock_upgrade();
951  }
952  if (shared) {
953  --*globalSharedCountPtr;
954  if (ourGlobalTokenUsed) {
955  globalLock.unlock_shared(ourGlobalToken);
956  ourGlobalTokenUsed = false;
957  } else {
958  globalLock.unlock_shared();
959  }
960  }
961  for (auto& token : privateTokens) {
962  privateLock.unlock_shared(token);
963  }
964  });
965  }
966  }
967 
968  go.store(true);
969  for (auto& thr : threads) {
970  DSched::join(thr);
971  }
972 }
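For orientation, the listing above (runAllAndValidate) repeatedly moves each thread between the exclusive, upgrade, and shared states and asserts that the global counters never contradict the state the thread believes it holds. A minimal sketch, not part of this file, of the transition calls it exercises on a plain folly::SharedMutex (the function name transitionSketch is illustrative only):

    #include <folly/SharedMutex.h>

    void transitionSketch() {
      folly::SharedMutex mu;
      folly::SharedMutexToken token;

      mu.lock();                                 // exclusive
      mu.unlock_and_lock_upgrade();              // exclusive -> upgrade
      mu.unlock_upgrade_and_lock_shared(token);  // upgrade -> shared, remembered via a token
      mu.unlock_shared(token);                   // release with the matching token
    }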
template<typename Lock >
void runBasicHoldersTest ( )

Definition at line 86 of file SharedMutexTest.cpp.

References EXPECT_FALSE, EXPECT_TRUE, folly::lock(), and folly::gen::move.

86  {
87  Lock lock;
88  SharedMutexToken token;
89 
90  {
91  // create an exclusive write lock via holder
92  typename Lock::WriteHolder holder(lock);
93  EXPECT_FALSE(lock.try_lock());
94  EXPECT_FALSE(lock.try_lock_shared(token));
95 
96  // move ownership to another write holder via move constructor
97  typename Lock::WriteHolder holder2(std::move(holder));
98  EXPECT_FALSE(lock.try_lock());
99  EXPECT_FALSE(lock.try_lock_shared(token));
100 
101  // move ownership to another write holder via assign operator
102  typename Lock::WriteHolder holder3(nullptr);
103  holder3 = std::move(holder2);
104  EXPECT_FALSE(lock.try_lock());
105  EXPECT_FALSE(lock.try_lock_shared(token));
106 
107  // downgrade from exclusive to upgrade lock via move constructor
108  typename Lock::UpgradeHolder holder4(std::move(holder3));
109 
110  // ensure we can lock from a shared source
111  EXPECT_FALSE(lock.try_lock());
112  EXPECT_TRUE(lock.try_lock_shared(token));
113  lock.unlock_shared(token);
114 
115  // promote from upgrade to exclusive lock via move constructor
116  typename Lock::WriteHolder holder5(std::move(holder4));
117  EXPECT_FALSE(lock.try_lock());
118  EXPECT_FALSE(lock.try_lock_shared(token));
119 
120  // downgrade exclusive to shared lock via move constructor
121  typename Lock::ReadHolder holder6(std::move(holder5));
122 
123  // ensure we can lock from another shared source
124  EXPECT_FALSE(lock.try_lock());
125  EXPECT_TRUE(lock.try_lock_shared(token));
126  lock.unlock_shared(token);
127  }
128 
129  {
130  typename Lock::WriteHolder holder(lock);
131  EXPECT_FALSE(lock.try_lock());
132  }
133 
134  {
135  typename Lock::ReadHolder holder(lock);
136  typename Lock::ReadHolder holder2(lock);
137  typename Lock::UpgradeHolder holder3(lock);
138  }
139 
140  {
141  typename Lock::UpgradeHolder holder(lock);
142  typename Lock::ReadHolder holder2(lock);
143  typename Lock::ReadHolder holder3(std::move(holder));
144  }
145 }
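The WriteHolder/UpgradeHolder/ReadHolder types used above are RAII wrappers whose move constructors perform the corresponding lock transitions. A minimal sketch, assuming the default folly::SharedMutex (names below are illustrative):

    #include <folly/SharedMutex.h>
    #include <utility>

    void holderSketch() {
      folly::SharedMutex mu;
      folly::SharedMutex::WriteHolder w(mu);              // exclusive, released by a destructor
      folly::SharedMutex::UpgradeHolder u(std::move(w));  // downgrade: exclusive -> upgrade
      folly::SharedMutex::ReadHolder r(std::move(u));     // downgrade: upgrade -> shared
    }                                                     // r releases the shared lock here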
template<typename Lock >
void runBasicTest ( )

Definition at line 45 of file SharedMutexTest.cpp.

References EXPECT_FALSE, EXPECT_TRUE, and folly::lock().

45  {
46  Lock lock;
47  SharedMutexToken token1;
48  SharedMutexToken token2;
49  SharedMutexToken token3;
50 
51  EXPECT_TRUE(lock.try_lock());
52  EXPECT_FALSE(lock.try_lock());
53  EXPECT_FALSE(lock.try_lock_shared(token1));
54  lock.unlock();
55 
56  EXPECT_TRUE(lock.try_lock_shared(token1));
57  EXPECT_FALSE(lock.try_lock());
58  EXPECT_TRUE(lock.try_lock_shared(token2));
59  lock.lock_shared(token3);
60  lock.unlock_shared(token3);
61  lock.unlock_shared(token2);
62  lock.unlock_shared(token1);
63 
64  lock.lock();
65  lock.unlock();
66 
67  lock.lock_shared(token1);
68  lock.lock_shared(token2);
69  lock.unlock_shared(token1);
70  lock.unlock_shared(token2);
71 
72  lock.lock();
73  lock.unlock_and_lock_shared(token1);
74  lock.lock_shared(token2);
75  lock.unlock_shared(token2);
76  lock.unlock_shared(token1);
77 }
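The token passed to lock_shared()/unlock_shared() above lets the mutex remember how a particular shared acquisition was recorded, so the matching release can find it cheaply. A minimal usage sketch, not part of this file:

    #include <folly/SharedMutex.h>

    void tokenSketch() {
      folly::SharedMutex mu;
      folly::SharedMutexToken t1, t2;
      mu.lock_shared(t1);    // first shared acquisition
      mu.lock_shared(t2);    // concurrent shared acquisition
      mu.unlock_shared(t2);  // each release uses its matching token
      mu.unlock_shared(t1);
    }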
template<typename Lock >
void runBasicUpgradeTest ( )

Definition at line 319 of file SharedMutexTest.cpp.

References EXPECT_FALSE, EXPECT_TRUE, and folly::lock().

319  {
320  Lock lock;
321  typename Lock::Token token1;
322  typename Lock::Token token2;
323 
324  lock.lock_upgrade();
325  EXPECT_FALSE(lock.try_lock());
326  EXPECT_TRUE(lock.try_lock_shared(token1));
327  lock.unlock_shared(token1);
328  lock.unlock_upgrade();
329 
330  lock.lock_upgrade();
331  lock.unlock_upgrade_and_lock();
332  EXPECT_FALSE(lock.try_lock_shared(token1));
333  lock.unlock();
334 
335  lock.lock_upgrade();
336  lock.unlock_upgrade_and_lock_shared(token1);
337  lock.lock_upgrade();
338  lock.unlock_upgrade_and_lock_shared(token2);
339  lock.unlock_shared(token1);
340  lock.unlock_shared(token2);
341 
342  lock.lock();
343  lock.unlock_and_lock_upgrade();
344  EXPECT_TRUE(lock.try_lock_shared(token1));
345  lock.unlock_upgrade();
346  lock.unlock_shared(token1);
347 }
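As the assertions above show, the upgrade state excludes writers and other upgraders while still admitting readers, and it can be promoted to an exclusive lock atomically. A compact sketch of the same invariants (the assert usage is illustrative):

    #include <folly/SharedMutex.h>
    #include <cassert>

    void upgradeSketch() {
      folly::SharedMutex mu;
      mu.lock_upgrade();              // holds the upgrade slot
      assert(!mu.try_lock());         // writers are excluded...
      assert(mu.try_lock_shared());   // ...but readers still get in
      mu.unlock_shared();
      mu.unlock_upgrade_and_lock();   // atomically promote to exclusive
      mu.unlock();
    }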
template<template< typename > class Atom, typename Lock , typename Locker >
static void runContendedReaders ( size_t  numOps,
size_t  numThreads,
bool  useSeparateLocks 
)
static

Definition at line 530 of file SharedMutexTest.cpp.

References BENCHMARK_SUSPEND, folly::copy(), folly::doNotOptimizeAway(), folly::test::DeterministicSchedule::join(), folly::lock(), Locker::lock_shared(), folly::pushmi::detail::t, folly::test::DeterministicSchedule::thread(), threads, Locker::unlock_shared(), and folly::fibers::yield().

530  {
531  char padding1[64];
532  (void)padding1;
533  Lock globalLock;
534  int valueProtectedByLock = 10;
535  char padding2[64];
536  (void)padding2;
537  Atom<bool> go(false);
538  Atom<bool>* goPtr = &go; // workaround for clang bug
539  vector<thread> threads(numThreads);
540 
541  BENCHMARK_SUSPEND {
542  for (size_t t = 0; t < numThreads; ++t) {
543  threads[t] = DSched::thread([&, t, numThreads] {
544  Lock privateLock;
545  Lock* lock = useSeparateLocks ? &privateLock : &globalLock;
546  Locker locker;
547  while (!goPtr->load()) {
549  }
550  for (size_t op = t; op < numOps; op += numThreads) {
551  locker.lock_shared(lock);
552  // note: folly::doNotOptimizeAway reads and writes to its arg,
553  // so the following two lines are very different than a call
554  // to folly::doNotOptimizeAway(valueProtectedByLock);
555  auto copy = valueProtectedByLock;
556  folly::doNotOptimizeAway(copy);
557  locker.unlock_shared(lock);
558  }
559  });
560  }
561  }
562 
563  go.store(true);
564  for (auto& thr : threads) {
565  DSched::join(thr);
566  }
567 }
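runContendedReaders() is parameterized on a Locker adapter (Locker and TokenLocker in the class list above) that forwards to the lock through a uniform pointer-taking interface. A hypothetical adapter of the expected shape, shown only for illustration; the real structs are defined earlier in this file:

    // Hypothetical PlainLocker, not the struct used by the benchmarks.
    struct PlainLocker {
      template <typename T> void lock(T* lockable) { lockable->lock(); }
      template <typename T> void unlock(T* lockable) { lockable->unlock(); }
      template <typename T> void lock_shared(T* lockable) { lockable->lock_shared(); }
      template <typename T> void unlock_shared(T* lockable) { lockable->unlock_shared(); }
    };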
template<typename Lock >
void runFailingTryTimeoutTest ( )

Definition at line 247 of file SharedMutexTest.cpp.

References EXPECT_FALSE, EXPECT_TRUE, funcHasDuration(), folly::lock(), and now().

247  {
248  Lock lock;
249  lock.lock();
250  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
251  EXPECT_FALSE(lock.try_lock_for(milliseconds(10)));
252  }));
253  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
254  typename Lock::Token token;
255  EXPECT_FALSE(lock.try_lock_shared_for(milliseconds(10), token));
256  }));
257  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
258  EXPECT_FALSE(lock.try_lock_upgrade_for(milliseconds(10)));
259  }));
260  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
261  EXPECT_FALSE(lock.try_lock_until(steady_clock::now() + milliseconds(10)));
262  }));
263  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
264  typename Lock::Token token;
265  EXPECT_FALSE(lock.try_lock_shared_until(
266  steady_clock::now() + milliseconds(10), token));
267  }));
268  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
269  EXPECT_FALSE(
270  lock.try_lock_upgrade_until(steady_clock::now() + milliseconds(10)));
271  }));
272  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
273  EXPECT_FALSE(lock.try_lock_until(system_clock::now() + milliseconds(10)));
274  }));
275  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
276  typename Lock::Token token;
277  EXPECT_FALSE(lock.try_lock_shared_until(
278  system_clock::now() + milliseconds(10), token));
279  }));
280  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
281  EXPECT_FALSE(
282  lock.try_lock_upgrade_until(system_clock::now() + milliseconds(10)));
283  }));
284  lock.unlock();
285 
286  lock.lock_shared();
287  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
288  EXPECT_FALSE(lock.try_lock_for(milliseconds(10)));
289  }));
290  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
291  EXPECT_FALSE(lock.try_lock_until(steady_clock::now() + milliseconds(10)));
292  }));
293  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
294  EXPECT_FALSE(lock.try_lock_until(system_clock::now() + milliseconds(10)));
295  }));
296  lock.unlock_shared();
297 
298  lock.lock();
299  for (int p = 0; p < 8; ++p) {
300  EXPECT_FALSE(lock.try_lock_for(nanoseconds(1 << p)));
301  }
302  lock.unlock();
303 
304  for (int p = 0; p < 8; ++p) {
305  typename Lock::ReadHolder holder1(lock);
306  typename Lock::ReadHolder holder2(lock);
307  typename Lock::ReadHolder holder3(lock);
308  EXPECT_FALSE(lock.try_lock_for(nanoseconds(1 << p)));
309  }
310 }
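The loops above pin down that every timed acquisition variant (relative timeout, steady_clock deadline, system_clock deadline) waits out roughly its full timeout when the lock is already held. A minimal sketch of the calls involved, with the 10 ms value chosen arbitrarily:

    #include <folly/SharedMutex.h>
    #include <chrono>

    bool timedAcquireSketch(folly::SharedMutex& mu) {
      using namespace std::chrono;
      if (mu.try_lock_for(milliseconds(10))) {                          // relative timeout
        mu.unlock();
        return true;
      }
      if (mu.try_lock_until(steady_clock::now() + milliseconds(10))) {  // absolute deadline
        mu.unlock();
        return true;
      }
      return false;
    }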
template<typename Lock >
void runManyReadLocksTestWithoutTokens ( )

Definition at line 179 of file SharedMutexTest.cpp.

References EXPECT_TRUE, i, and folly::lock().

179  {
180  Lock lock;
181 
182  for (int i = 0; i < 1000; ++i) {
183  EXPECT_TRUE(lock.try_lock_shared());
184  }
185  for (int i = 0; i < 1000; ++i) {
186  lock.unlock_shared();
187  }
188  EXPECT_TRUE(lock.try_lock());
189  lock.unlock();
190 }
template<typename Lock >
void runManyReadLocksTestWithTokens ( )

Definition at line 154 of file SharedMutexTest.cpp.

References EXPECT_TRUE, i, folly::lock(), and tokens.

154  {
155  Lock lock;
156 
157  vector<SharedMutexToken> tokens;
158  for (int i = 0; i < 1000; ++i) {
159  tokens.emplace_back();
160  EXPECT_TRUE(lock.try_lock_shared(tokens.back()));
161  }
162  for (auto& token : tokens) {
163  lock.unlock_shared(token);
164  }
165  EXPECT_TRUE(lock.try_lock());
166  lock.unlock();
167 }
template<template< typename > class Atom, typename Lock , typename Locker >
static void runMixed ( size_t  numOps,
size_t  numThreads,
double  writeFraction,
bool  useSeparateLocks 
)
static

Definition at line 618 of file SharedMutexTest.cpp.

References BENCHMARK_SUSPEND, folly::doNotOptimizeAway(), folly::test::DeterministicSchedule::join(), Locker::lock(), folly::lock(), Locker::lock_shared(), folly::pushmi::detail::t, folly::test::DeterministicSchedule::thread(), threads, Locker::unlock(), Locker::unlock_shared(), and folly::fibers::yield().

622  {
623  char padding1[64];
624  (void)padding1;
625  Lock globalLock;
626  int valueProtectedByLock = 0;
627  char padding2[64];
628  (void)padding2;
629  Atom<bool> go(false);
630  Atom<bool>* goPtr = &go; // workaround for clang bug
631  vector<thread> threads(numThreads);
632 
633  BENCHMARK_SUSPEND {
634  for (size_t t = 0; t < numThreads; ++t) {
635  threads[t] = DSched::thread([&, t, numThreads] {
636  struct drand48_data buffer;
637  srand48_r(t, &buffer);
638  long writeThreshold = writeFraction * 0x7fffffff;
639  Lock privateLock;
640  Lock* lock = useSeparateLocks ? &privateLock : &globalLock;
641  Locker locker;
642  while (!goPtr->load()) {
644  }
645  for (size_t op = t; op < numOps; op += numThreads) {
646  long randVal;
647  lrand48_r(&buffer, &randVal);
648  bool writeOp = randVal < writeThreshold;
649  if (writeOp) {
650  locker.lock(lock);
651  if (!useSeparateLocks) {
652  ++valueProtectedByLock;
653  }
654  locker.unlock(lock);
655  } else {
656  locker.lock_shared(lock);
657  auto v = valueProtectedByLock;
658  folly::doNotOptimizeAway(v);
659  locker.unlock_shared(lock);
660  }
661  }
662  });
663  }
664  }
665 
666  go.store(true);
667  for (auto& thr : threads) {
668  DSched::join(thr);
669  }
670 }
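writeFraction is converted to a threshold over lrand48_r()'s [0, 2^31) range, so 0.1 yields roughly one exclusive acquisition per ten operations. A hypothetical invocation mirroring the benchmark wrappers below (the parameter values are illustrative):

    // ~10% writes, 8 threads, all contending on one lock.
    runMixed<std::atomic, folly::SharedMutex, TokenLocker>(100000, 8, 0.1, false);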
template<typename Lock , template< typename > class Atom = atomic>
static void runPingPong ( size_t  numRounds,
size_t  burnCount 
)
static

Definition at line 1302 of file SharedMutexTest.cpp.

References BENCHMARK_SUSPEND, burn(), folly::gen::first, i, folly::test::DeterministicSchedule::join(), folly::ssl::detail::locks(), folly::test::DeterministicSchedule::thread(), threads, and folly::fibers::yield().

1302  {
1303  char padding1[56];
1304  (void)padding1;
1305  pair<Lock, char[56]> locks[3];
1306  char padding2[56];
1307  (void)padding2;
1308 
1309  Atom<int> avail(0);
1310  auto availPtr = &avail; // workaround for clang crash
1311  Atom<bool> go(false);
1312  auto goPtr = &go; // workaround for clang crash
1313  vector<thread> threads(2);
1314 
1315  locks[0].first.lock();
1316  locks[1].first.lock();
1317  locks[2].first.lock_shared();
1318 
1319  BENCHMARK_SUSPEND {
1320  threads[0] = DSched::thread([&] {
1321  ++*availPtr;
1322  while (!goPtr->load()) {
1324  }
1325  for (size_t i = 0; i < numRounds; ++i) {
1326  locks[i % 3].first.unlock();
1327  locks[(i + 2) % 3].first.lock();
1328  burn(burnCount);
1329  }
1330  });
1331  threads[1] = DSched::thread([&] {
1332  ++*availPtr;
1333  while (!goPtr->load()) {
1335  }
1336  for (size_t i = 0; i < numRounds; ++i) {
1337  locks[i % 3].first.lock_shared();
1338  burn(burnCount);
1339  locks[(i + 2) % 3].first.unlock_shared();
1340  }
1341  });
1342 
1343  while (avail.load() < 2) {
1345  }
1346  }
1347 
1348  go.store(true);
1349  for (auto& thr : threads) {
1350  DSched::join(thr);
1351  }
1352  locks[numRounds % 3].first.unlock();
1353  locks[(numRounds + 1) % 3].first.unlock();
1354  locks[(numRounds + 2) % 3].first.unlock_shared();
1355 }
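The three lock slots above implement a strict hand-off: in round i the writer releases slot i % 3, letting the reader take it shared, then blocks acquiring slot (i + 2) % 3 until the reader drops its shared hold on it. A stripped-down sketch of the same rotation using plain std::thread and the default folly::SharedMutex, without the Atom/DeterministicSchedule scaffolding (illustrative only; as in the benchmark itself, some shared locks are released on a different thread than the one that acquired them):

    #include <folly/SharedMutex.h>
    #include <cstddef>
    #include <thread>

    void pingPongSketch(std::size_t rounds) {
      folly::SharedMutex locks[3];
      locks[0].lock();
      locks[1].lock();
      locks[2].lock_shared();

      std::thread writer([&] {
        for (std::size_t i = 0; i < rounds; ++i) {
          locks[i % 3].unlock();      // let the reader advance
          locks[(i + 2) % 3].lock();  // wait for the reader's shared release
        }
      });
      std::thread reader([&] {
        for (std::size_t i = 0; i < rounds; ++i) {
          locks[i % 3].lock_shared();
          locks[(i + 2) % 3].unlock_shared();
        }
      });
      writer.join();
      reader.join();
      locks[rounds % 3].unlock();
      locks[(rounds + 1) % 3].unlock();
      locks[(rounds + 2) % 3].unlock_shared();
    }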
template<typename Lock , template< typename > class Atom >
static void runRemoteUnlock ( size_t  numOps,
double  preWriteFraction,
double  preUpgradeFraction,
size_t  numSendingThreads,
size_t  numReceivingThreads 
)

Definition at line 1149 of file SharedMutexTest.cpp.

References Atom, BENCHMARK_SUSPEND, folly::test::DeterministicSchedule::join(), folly::gen::move, folly::none, folly::pushmi::detail::t, folly::test::DeterministicSchedule::thread(), threads, and folly::fibers::yield().

1154  {
1155  Lock globalLock;
1157  auto queuePtr = &queue; // workaround for clang crash
1158 
1159  Atom<bool> go(false);
1160  auto goPtr = &go; // workaround for clang crash
1161  Atom<int> pendingSenders(numSendingThreads);
1162  auto pendingSendersPtr = &pendingSenders; // workaround for clang crash
1163  vector<thread> threads(numSendingThreads + numReceivingThreads);
1164 
1165  BENCHMARK_SUSPEND {
1166  for (size_t t = 0; t < threads.size(); ++t) {
1167  threads[t] = DSched::thread([&, t, numSendingThreads] {
1168  if (t >= numSendingThreads) {
1169  // we're a receiver
1170  typename decltype(queue)::value_type elem;
1171  while (true) {
1172  queuePtr->blockingRead(elem);
1173  if (!elem) {
1174  // EOF, pass the EOF token
1175  queuePtr->blockingWrite(std::move(elem));
1176  break;
1177  }
1178  if (*elem) {
1179  globalLock.unlock_shared(**elem);
1180  } else {
1181  globalLock.unlock_shared();
1182  }
1183  }
1184  return;
1185  }
1186  // else we're a sender
1187 
1188  struct drand48_data buffer;
1189  srand48_r(t, &buffer);
1190 
1191  while (!goPtr->load()) {
1193  }
1194  for (size_t op = t; op < numOps; op += numSendingThreads) {
1195  long unscaledRandVal;
1196  lrand48_r(&buffer, &unscaledRandVal);
1197 
1198  // randVal in [0,1]
1199  double randVal = ((double)unscaledRandVal) / 0x7fffffff;
1200 
1201  // extract a bit and rescale
1202  bool useToken = randVal >= 0.5;
1203  randVal = (randVal - (useToken ? 0.5 : 0.0)) * 2;
1204 
1205  boost::optional<SharedMutexToken> maybeToken;
1206 
1207  if (useToken) {
1208  SharedMutexToken token;
1209  if (randVal < preWriteFraction) {
1210  globalLock.lock();
1211  globalLock.unlock_and_lock_shared(token);
1212  } else if (randVal < preWriteFraction + preUpgradeFraction / 2) {
1213  globalLock.lock_upgrade();
1214  globalLock.unlock_upgrade_and_lock_shared(token);
1215  } else if (randVal < preWriteFraction + preUpgradeFraction) {
1216  globalLock.lock_upgrade();
1217  globalLock.unlock_upgrade_and_lock();
1218  globalLock.unlock_and_lock_shared(token);
1219  } else {
1220  globalLock.lock_shared(token);
1221  }
1222  maybeToken = token;
1223  } else {
1224  if (randVal < preWriteFraction) {
1225  globalLock.lock();
1226  globalLock.unlock_and_lock_shared();
1227  } else if (randVal < preWriteFraction + preUpgradeFraction / 2) {
1228  globalLock.lock_upgrade();
1229  globalLock.unlock_upgrade_and_lock_shared();
1230  } else if (randVal < preWriteFraction + preUpgradeFraction) {
1231  globalLock.lock_upgrade();
1232  globalLock.unlock_upgrade_and_lock();
1233  globalLock.unlock_and_lock_shared();
1234  } else {
1235  globalLock.lock_shared();
1236  }
1237  }
1238 
1239  // blockingWrite is emplace-like, so this automatically adds
1240  // another level of wrapping
1241  queuePtr->blockingWrite(maybeToken);
1242  }
1243  if (--*pendingSendersPtr == 0) {
1244  queuePtr->blockingWrite(boost::none);
1245  }
1246  });
1247  }
1248  }
1249 
1250  go.store(true);
1251  for (auto& thr : threads) {
1252  DSched::join(thr);
1253  }
1254 }
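The essential trick being benchmarked above is that a SharedMutexToken can travel to another thread and be used there to release the shared lock on behalf of the acquirer. A minimal sketch with a folly::MPMCQueue carrying the token (queue capacity and names are illustrative):

    #include <folly/MPMCQueue.h>
    #include <folly/SharedMutex.h>
    #include <thread>

    void remoteUnlockSketch() {
      folly::SharedMutex mu;
      folly::MPMCQueue<folly::SharedMutexToken> queue(16);

      std::thread sender([&] {
        folly::SharedMutexToken token;
        mu.lock_shared(token);       // acquire here...
        queue.blockingWrite(token);  // ...and hand the token off
      });
      std::thread receiver([&] {
        folly::SharedMutexToken token;
        queue.blockingRead(token);
        mu.unlock_shared(token);     // release on behalf of the sender
      });
      sender.join();
      receiver.join();
    }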
template<typename Lock >
void runTimeoutInPastTest ( )

Definition at line 202 of file SharedMutexTest.cpp.

References EXPECT_TRUE, folly::lock(), and now().

202  {
203  Lock lock;
204 
205  EXPECT_TRUE(lock.try_lock_for(milliseconds(0)));
206  lock.unlock();
207  EXPECT_TRUE(lock.try_lock_for(milliseconds(-1)));
208  lock.unlock();
209  EXPECT_TRUE(lock.try_lock_shared_for(milliseconds(0)));
210  lock.unlock_shared();
211  EXPECT_TRUE(lock.try_lock_shared_for(milliseconds(-1)));
212  lock.unlock_shared();
213  EXPECT_TRUE(lock.try_lock_until(system_clock::now() - milliseconds(1)));
214  lock.unlock();
215  EXPECT_TRUE(
216  lock.try_lock_shared_until(system_clock::now() - milliseconds(1)));
217  lock.unlock_shared();
218  EXPECT_TRUE(lock.try_lock_until(steady_clock::now() - milliseconds(1)));
219  lock.unlock();
220  EXPECT_TRUE(
221  lock.try_lock_shared_until(steady_clock::now() - milliseconds(1)));
222  lock.unlock_shared();
223 }
static void shmtx_r_bare ( uint32_t  numOps,
size_t  numThreads,
double  writeFraction,
bool  useSeparateLocks 
)
static

Definition at line 708 of file SharedMutexTest.cpp.

712  {
713  runMixed<atomic, SharedMutexReadPriority, Locker>(
714  numOps, numThreads, writeFraction, useSeparateLocks);
715 }
static void shmtx_r_bare_ping_pong ( size_t  n,
size_t  scale,
size_t  burnCount 
)
static

Definition at line 1365 of file SharedMutexTest.cpp.

1365  {
1366  runPingPong<SharedMutexReadPriority>(n / scale, burnCount);
1367 }
static void shmtx_r_bare_reads ( uint32_t  numOps,
size_t  numThreads,
bool  useSeparateLocks 
)
static

Definition at line 594 of file SharedMutexTest.cpp.

594  {
595  runContendedReaders<atomic, SharedMutexReadPriority, Locker>(
596  numOps, numThreads, useSeparateLocks);
597 }
static void shmtx_rd_pri ( uint32_t  numOps,
size_t  numThreads,
double  writeFraction,
bool  useSeparateLocks 
)
static

Definition at line 699 of file SharedMutexTest.cpp.

703  {
704  runMixed<atomic, SharedMutexReadPriority, TokenLocker>(
705  numOps, numThreads, writeFraction, useSeparateLocks);
706 }
static void shmtx_rd_pri_reads ( uint32_t  numOps,
size_t  numThreads,
bool  useSeparateLocks 
)
static

Definition at line 588 of file SharedMutexTest.cpp.

588  {
589  runContendedReaders<atomic, SharedMutexReadPriority, TokenLocker>(
590  numOps, numThreads, useSeparateLocks);
591 }
static void shmtx_w_bare ( uint32_t  numOps,
size_t  numThreads,
double  writeFraction,
bool  useSeparateLocks 
)
static

Definition at line 690 of file SharedMutexTest.cpp.

694  {
695  runMixed<atomic, SharedMutexWritePriority, Locker>(
696  numOps, numThreads, writeFraction, useSeparateLocks);
697 }
static void shmtx_w_bare_ping_pong ( size_t  n,
size_t  scale,
size_t  burnCount 
)
static

Definition at line 1361 of file SharedMutexTest.cpp.

1361  {
1362  runPingPong<SharedMutexWritePriority>(n / scale, burnCount);
1363 }
static void shmtx_w_bare_reads ( uint32_t  numOps,
size_t  numThreads,
bool  useSeparateLocks 
)
static

Definition at line 582 of file SharedMutexTest.cpp.

582  {
583  runContendedReaders<atomic, SharedMutexWritePriority, Locker>(
584  numOps, numThreads, useSeparateLocks);
585 }
static void shmtx_wr_pri ( uint32_t  numOps,
size_t  numThreads,
double  writeFraction,
bool  useSeparateLocks 
)
static

Definition at line 681 of file SharedMutexTest.cpp.

685  {
686  runMixed<atomic, SharedMutexWritePriority, TokenLocker>(
687  numOps, numThreads, writeFraction, useSeparateLocks);
688 }
static void shmtx_wr_pri_reads ( uint32_t  numOps,
size_t  numThreads,
bool  useSeparateLocks 
)
static

Definition at line 576 of file SharedMutexTest.cpp.

576  {
577  runContendedReaders<atomic, SharedMutexWritePriority, TokenLocker>(
578  numOps, numThreads, useSeparateLocks);
579 }
TEST ( SharedMutex  ,
basic   
)

Definition at line 79 of file SharedMutexTest.cpp.

79  {
80  runBasicTest<SharedMutexReadPriority>();
81  runBasicTest<SharedMutexWritePriority>();
82  runBasicTest<SharedMutexSuppressTSAN>();
83 }
TEST ( SharedMutex  ,
basic_holders   
)

Definition at line 147 of file SharedMutexTest.cpp.

147  {
148  runBasicHoldersTest<SharedMutexReadPriority>();
149  runBasicHoldersTest<SharedMutexWritePriority>();
150  runBasicHoldersTest<SharedMutexSuppressTSAN>();
151 }
TEST ( SharedMutex  ,
many_read_locks_with_tokens   
)

Definition at line 169 of file SharedMutexTest.cpp.

References folly::kIsSanitizeThread, and SKIP_IF.

169  {
170  // This test fails in an assertion in the TSAN library because there are too
171  // many mutexes
172  SKIP_IF(folly::kIsSanitizeThread);
173  runManyReadLocksTestWithTokens<SharedMutexReadPriority>();
174  runManyReadLocksTestWithTokens<SharedMutexWritePriority>();
175  runManyReadLocksTestWithTokens<SharedMutexSuppressTSAN>();
176 }
TEST ( SharedMutex  ,
many_read_locks_without_tokens   
)

Definition at line 192 of file SharedMutexTest.cpp.

References folly::kIsSanitizeThread, and SKIP_IF.

192  {
193  // This test fails in an assertion in the TSAN library because there are too
194  // many mutexes
195  SKIP_IF(folly::kIsSanitizeThread);
196  runManyReadLocksTestWithoutTokens<SharedMutexReadPriority>();
197  runManyReadLocksTestWithoutTokens<SharedMutexWritePriority>();
198  runManyReadLocksTestWithoutTokens<SharedMutexSuppressTSAN>();
199 }
TEST ( SharedMutex  ,
timeout_in_past   
)

Definition at line 225 of file SharedMutexTest.cpp.

225  {
226  runTimeoutInPastTest<SharedMutexReadPriority>();
227  runTimeoutInPastTest<SharedMutexWritePriority>();
228  runTimeoutInPastTest<SharedMutexSuppressTSAN>();
229 }
TEST ( SharedMutex  ,
failing_try_timeout   
)

Definition at line 312 of file SharedMutexTest.cpp.

312  {
313  runFailingTryTimeoutTest<SharedMutexReadPriority>();
314  runFailingTryTimeoutTest<SharedMutexWritePriority>();
315  runFailingTryTimeoutTest<SharedMutexSuppressTSAN>();
316 }
TEST ( SharedMutex  ,
basic_upgrade_tests   
)

Definition at line 349 of file SharedMutexTest.cpp.

349  {
350  runBasicUpgradeTest<SharedMutexReadPriority>();
351  runBasicUpgradeTest<SharedMutexWritePriority>();
352  runBasicUpgradeTest<SharedMutexSuppressTSAN>();
353 }
TEST ( SharedMutex  ,
read_has_prio   
)

Definition at line 355 of file SharedMutexTest.cpp.

References EXPECT_FALSE, EXPECT_TRUE, folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::lock(), folly::lock(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::lock_shared(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::try_lock_shared(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::try_lock_upgrade(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::unlock(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::unlock_shared(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::unlock_upgrade(), and folly::fibers::yield().

355  {
356  SharedMutexReadPriority lock;
357  SharedMutexToken token1;
358  SharedMutexToken token2;
359  lock.lock_shared(token1);
360  bool exclusiveAcquired = false;
361  auto writer = thread([&] {
362  lock.lock();
363  exclusiveAcquired = true;
364  lock.unlock();
365  });
366 
367  // lock() can't complete until we unlock token1, but it should stake
368  // its claim with regards to other exclusive or upgrade locks. We can
369  // use try_lock_upgrade to poll for that eventuality.
370  while (lock.try_lock_upgrade()) {
371  lock.unlock_upgrade();
373  }
374  EXPECT_FALSE(exclusiveAcquired);
375 
376  // Even though lock() is stuck we should be able to get token2
377  EXPECT_TRUE(lock.try_lock_shared(token2));
378  lock.unlock_shared(token1);
379  lock.unlock_shared(token2);
380  writer.join();
381  EXPECT_TRUE(exclusiveAcquired);
382 }
TEST ( SharedMutex  ,
write_has_prio   
)

Definition at line 384 of file SharedMutexTest.cpp.

References folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::lock(), folly::lock(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::lock_shared(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::try_lock_shared(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::unlock(), folly::SharedMutexImpl< ReaderPriority, Tag_, Atom, BlockImmediately, AnnotateForThreadSanitizer >::unlock_shared(), and folly::fibers::yield().

384  {
385  SharedMutexWritePriority lock;
386  SharedMutexToken token1;
387  SharedMutexToken token2;
388  lock.lock_shared(token1);
389  auto writer = thread([&] {
390  lock.lock();
391  lock.unlock();
392  });
393 
394  // eventually lock() should block readers
395  while (lock.try_lock_shared(token2)) {
396  lock.unlock_shared(token2);
398  }
399 
400  lock.unlock_shared(token1);
401  writer.join();
402 }
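Together, read_has_prio and write_has_prio pin down the difference between the two flavors while a writer is queued: the read-priority mutex keeps admitting new shared locks, while the write-priority one eventually turns them away. Selecting a flavor is just a matter of the type name; for reference, folly::SharedMutex itself is the write-priority flavor:

    #include <folly/SharedMutex.h>

    folly::SharedMutexReadPriority readPrio;    // new readers admitted even while a writer waits
    folly::SharedMutexWritePriority writePrio;  // a waiting writer blocks new readers
    folly::SharedMutex defaultFlavor;           // alias for the write-priority flavor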
TEST ( SharedMutex  ,
deterministic_concurrent_readers_of_one_lock_read_prio   
)

Definition at line 974 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniform().

974  {
975  for (int pass = 0; pass < 3; ++pass) {
976  DSched sched(DSched::uniform(pass));
977  runContendedReaders<DeterministicAtomic, DSharedMutexReadPriority, Locker>(
978  1000, 3, false);
979  }
980 }
TEST ( SharedMutex  ,
deterministic_concurrent_readers_of_one_lock_write_prio   
)

Definition at line 982 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniform().

982  {
983  for (int pass = 0; pass < 3; ++pass) {
984  DSched sched(DSched::uniform(pass));
985  runContendedReaders<DeterministicAtomic, DSharedMutexWritePriority, Locker>(
986  1000, 3, false);
987  }
988 }
TEST ( SharedMutex  ,
concurrent_readers_of_one_lock_read_prio   
)

Definition at line 990 of file SharedMutexTest.cpp.

990  {
991  for (int pass = 0; pass < 10; ++pass) {
992  runContendedReaders<atomic, SharedMutexReadPriority, Locker>(
993  100000, 32, false);
994  }
995 }
TEST ( SharedMutex  ,
concurrent_readers_of_one_lock_write_prio   
)

Definition at line 997 of file SharedMutexTest.cpp.

997  {
998  for (int pass = 0; pass < 10; ++pass) {
999  runContendedReaders<atomic, SharedMutexWritePriority, Locker>(
1000  100000, 32, false);
1001  }
1002 }
TEST ( SharedMutex  ,
deterministic_readers_of_concurrent_locks_read_prio   
)

Definition at line 1004 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniform().

1004  {
1005  for (int pass = 0; pass < 3; ++pass) {
1006  DSched sched(DSched::uniform(pass));
1007  runContendedReaders<DeterministicAtomic, DSharedMutexReadPriority, Locker>(
1008  1000, 3, true);
1009  }
1010 }
TEST ( SharedMutex  ,
deterministic_readers_of_concurrent_locks_write_prio   
)

Definition at line 1012 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniform().

1012  {
1013  for (int pass = 0; pass < 3; ++pass) {
1014  DSched sched(DSched::uniform(pass));
1015  runContendedReaders<DeterministicAtomic, DSharedMutexWritePriority, Locker>(
1016  1000, 3, true);
1017  }
1018 }
TEST ( SharedMutex  ,
readers_of_concurrent_locks_read_prio   
)

Definition at line 1020 of file SharedMutexTest.cpp.

1020  {
1021  for (int pass = 0; pass < 10; ++pass) {
1022  runContendedReaders<atomic, SharedMutexReadPriority, TokenLocker>(
1023  100000, 32, true);
1024  }
1025 }
TEST ( SharedMutex  ,
readers_of_concurrent_locks_write_prio   
)

Definition at line 1027 of file SharedMutexTest.cpp.

1027  {
1028  for (int pass = 0; pass < 10; ++pass) {
1029  runContendedReaders<atomic, SharedMutexWritePriority, TokenLocker>(
1030  100000, 32, true);
1031  }
1032 }
TEST ( SharedMutex  ,
deterministic_mixed_mostly_read_read_prio   
)

Definition at line 1034 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniform().

1034  {
1035  for (int pass = 0; pass < 3; ++pass) {
1036  DSched sched(DSched::uniform(pass));
1037  runMixed<DeterministicAtomic, DSharedMutexReadPriority, Locker>(
1038  1000, 3, 0.1, false);
1039  }
1040 }
TEST ( SharedMutex  ,
deterministic_mixed_mostly_read_write_prio   
)

Definition at line 1042 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniform().

1042  {
1043  for (int pass = 0; pass < 3; ++pass) {
1044  DSched sched(DSched::uniform(pass));
1045  runMixed<DeterministicAtomic, DSharedMutexWritePriority, Locker>(
1046  1000, 3, 0.1, false);
1047  }
1048 }
TEST ( SharedMutex  ,
mixed_mostly_read_read_prio   
)

Definition at line 1050 of file SharedMutexTest.cpp.

1050  {
1051  for (int pass = 0; pass < 5; ++pass) {
1052  runMixed<atomic, SharedMutexReadPriority, TokenLocker>(
1053  10000, 32, 0.1, false);
1054  }
1055 }
TEST ( SharedMutex  ,
mixed_mostly_read_write_prio   
)

Definition at line 1057 of file SharedMutexTest.cpp.

1057  {
1058  for (int pass = 0; pass < 5; ++pass) {
1059  runMixed<atomic, SharedMutexWritePriority, TokenLocker>(
1060  10000, 32, 0.1, false);
1061  }
1062 }
TEST ( SharedMutex  ,
deterministic_mixed_mostly_write_read_prio   
)

Definition at line 1064 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniform().

1064  {
1065  for (int pass = 0; pass < 1; ++pass) {
1066  DSched sched(DSched::uniform(pass));
1067  runMixed<DeterministicAtomic, DSharedMutexReadPriority, TokenLocker>(
1068  1000, 10, 0.9, false);
1069  }
1070 }
TEST ( SharedMutex  ,
deterministic_mixed_mostly_write_write_prio   
)

Definition at line 1072 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniform().

1072  {
1073  for (int pass = 0; pass < 1; ++pass) {
1074  DSched sched(DSched::uniform(pass));
1075  runMixed<DeterministicAtomic, DSharedMutexWritePriority, TokenLocker>(
1076  1000, 10, 0.9, false);
1077  }
1078 }
TEST ( SharedMutex  ,
deterministic_lost_wakeup_write_prio   
)

Definition at line 1080 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniformSubset().

1080  {
1081  for (int pass = 0; pass < 10; ++pass) {
1082  DSched sched(DSched::uniformSubset(pass, 2, 200));
1083  runMixed<DeterministicAtomic, DSharedMutexWritePriority, TokenLocker>(
1084  1000, 3, 1.0, false);
1085  }
1086 }
TEST ( SharedMutex  ,
mixed_mostly_write_read_prio   
)

Definition at line 1097 of file SharedMutexTest.cpp.

References adjustReps(), and folly::kIsSanitizeAddress.

1097  {
1098  for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 5); ++pass) {
1099  runMixed<atomic, SharedMutexReadPriority, TokenLocker>(
1100  adjustReps(50000), adjustReps(300), 0.9, false);
1101  }
1102 }
TEST ( SharedMutex  ,
mixed_mostly_write_write_prio   
)

Definition at line 1104 of file SharedMutexTest.cpp.

References adjustReps(), and folly::kIsSanitizeAddress.

1104  {
1105  for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 5); ++pass) {
1106  runMixed<atomic, SharedMutexWritePriority, TokenLocker>(
1107  adjustReps(50000), adjustReps(300), 0.9, false);
1108  }
1109 }
TEST ( SharedMutex  ,
deterministic_all_ops_read_prio   
)

Definition at line 1111 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniform().

1111  {
1112  for (int pass = 0; pass < 5; ++pass) {
1113  DSched sched(DSched::uniform(pass));
1114  runAllAndValidate<DSharedMutexReadPriority, DeterministicAtomic>(1000, 8);
1115  }
1116 }
TEST ( SharedMutex  ,
deterministic_all_ops_write_prio   
)

Definition at line 1118 of file SharedMutexTest.cpp.

References folly::kIsSanitizeThread, SKIP_IF, and folly::test::DeterministicSchedule::uniform().

1118  {
1119  // This test fails in TSAN because of noisy lock ordering inversions.
1120  SKIP_IF(folly::kIsSanitizeThread);
1121  for (int pass = 0; pass < 5; ++pass) {
1122  DSched sched(DSched::uniform(pass));
1123  runAllAndValidate<DSharedMutexWritePriority, DeterministicAtomic>(1000, 8);
1124  }
1125 }
TEST ( SharedMutex  ,
all_ops_read_prio   
)

Definition at line 1127 of file SharedMutexTest.cpp.

1127  {
1128  for (int pass = 0; pass < 5; ++pass) {
1129  runAllAndValidate<SharedMutexReadPriority, atomic>(100000, 32);
1130  }
1131 }
TEST ( SharedMutex  ,
all_ops_write_prio   
)

Definition at line 1133 of file SharedMutexTest.cpp.

References Atom, FOLLY_ASSUME_FBVECTOR_COMPATIBLE(), folly::kIsSanitizeThread, and SKIP_IF.

1133  {
1134  // This test fails in TSAN because of noisy lock ordering inversions.
1135  SKIP_IF(folly::kIsSanitizeThread);
1136  for (int pass = 0; pass < 5; ++pass) {
1137  runAllAndValidate<SharedMutexWritePriority, atomic>(100000, 32);
1138  }
1139 }
TEST ( SharedMutex  ,
deterministic_remote_write_prio   
)

Definition at line 1256 of file SharedMutexTest.cpp.

References folly::kIsSanitizeThread, SKIP_IF, and folly::test::DeterministicSchedule::uniform().

1256  {
1257  // This test fails in an assertion in the TSAN library because there are too
1258  // many mutexes
1259  SKIP_IF(folly::kIsSanitizeThread);
1260  for (int pass = 0; pass < 1; ++pass) {
1261  DSched sched(DSched::uniform(pass));
1262  runRemoteUnlock<DSharedMutexWritePriority, DeterministicAtomic>(
1263  500, 0.1, 0.1, 5, 5);
1264  }
1265 }
TEST ( SharedMutex  ,
deterministic_remote_read_prio   
)

Definition at line 1267 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniform().

1267  {
1268  for (int pass = 0; pass < 1; ++pass) {
1269  DSched sched(DSched::uniform(pass));
1270  runRemoteUnlock<DSharedMutexReadPriority, DeterministicAtomic>(
1271  500, 0.1, 0.1, 5, 5);
1272  }
1273 }
TEST ( SharedMutex  ,
remote_write_prio   
)

Definition at line 1275 of file SharedMutexTest.cpp.

References folly::kIsSanitizeThread, and SKIP_IF.

1275  {
1276  // This test fails in an assertion in the TSAN library because there are too
1277  // many mutexes
1278  SKIP_IF(folly::kIsSanitizeThread);
1279  for (int pass = 0; pass < 10; ++pass) {
1280  runRemoteUnlock<SharedMutexWritePriority, atomic>(100000, 0.1, 0.1, 5, 5);
1281  }
1282 }
TEST ( SharedMutex  ,
remote_read_prio   
)

Definition at line 1284 of file SharedMutexTest.cpp.

References folly::kIsSanitizeAddress, folly::kIsSanitizeThread, and SKIP_IF.

1284  {
1285  // This test fails in an assertion in the TSAN library because there are too
1286  // many mutexes
1287  SKIP_IF(folly::kIsSanitizeThread);
1288  for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 100); ++pass) {
1289  runRemoteUnlock<SharedMutexReadPriority, atomic>(100000, 0.1, 0.1, 5, 5);
1290  }
1291 }
TEST ( SharedMutex  ,
deterministic_ping_pong_write_prio   
)

Definition at line 1381 of file SharedMutexTest.cpp.

References folly::kIsSanitizeThread, SKIP_IF, and folly::test::DeterministicSchedule::uniform().

1381  {
1382  // This test fails in TSAN because some mutexes are lock_shared() in one
1383  // thread and unlock_shared() in a different thread.
1384  SKIP_IF(folly::kIsSanitizeThread);
1385  for (int pass = 0; pass < 1; ++pass) {
1386  DSched sched(DSched::uniform(pass));
1387  runPingPong<DSharedMutexWritePriority, DeterministicAtomic>(500, 0);
1388  }
1389 }
TEST ( SharedMutex  ,
deterministic_ping_pong_read_prio   
)

Definition at line 1391 of file SharedMutexTest.cpp.

References folly::test::DeterministicSchedule::uniform().

1391  {
1392  for (int pass = 0; pass < 1; ++pass) {
1393  DSched sched(DSched::uniform(pass));
1394  runPingPong<DSharedMutexReadPriority, DeterministicAtomic>(500, 0);
1395  }
1396 }
TEST ( SharedMutex  ,
ping_pong_write_prio   
)

Definition at line 1398 of file SharedMutexTest.cpp.

References folly::kIsSanitizeThread, and SKIP_IF.

1398  {
1399  // This test fails in TSAN because some mutexes are lock_shared() in one
1400  // thread and unlock_shared() in a different thread.
1401  SKIP_IF(folly::kIsSanitizeThread);
1402  for (int pass = 0; pass < 1; ++pass) {
1403  runPingPong<SharedMutexWritePriority, atomic>(50000, 0);
1404  }
1405 }
TEST ( SharedMutex  ,
ping_pong_read_prio   
)

Definition at line 1407 of file SharedMutexTest.cpp.

1407  {
1408  for (int pass = 0; pass < 1; ++pass) {
1409  runPingPong<SharedMutexReadPriority, atomic>(50000, 0);
1410  }
1411 }

Variable Documentation

burn0

Definition at line 1760 of file SharedMutexTest.cpp.

burn100k

Definition at line 1767 of file SharedMutexTest.cpp.

burn1M

Definition at line 1781 of file SharedMutexTest.cpp.

burn300k

Definition at line 1774 of file SharedMutexTest.cpp.

folly_rwspin

Definition at line 1697 of file SharedMutexTest.cpp.

folly_ticket

Definition at line 1516 of file SharedMutexTest.cpp.

folly_ticket_ping_pong

Definition at line 1762 of file SharedMutexTest.cpp.

folly_ticket_reads

Definition at line 1448 of file SharedMutexTest.cpp.

pthrd_rwlock

Definition at line 1518 of file SharedMutexTest.cpp.

pthrd_rwlock_ping_pong

Definition at line 1764 of file SharedMutexTest.cpp.

pthrd_rwlock_reads

Definition at line 1450 of file SharedMutexTest.cpp.

shmtx_rd_pri

Definition at line 1630 of file SharedMutexTest.cpp.

shmtx_rd_pri_reads

Definition at line 1446 of file SharedMutexTest.cpp.

shmtx_w_bare_ping_pong

Definition at line 1760 of file SharedMutexTest.cpp.

shmtx_wr_pri

Definition at line 1514 of file SharedMutexTest.cpp.

shmtx_wr_pri_reads

Definition at line 1444 of file SharedMutexTest.cpp.