#include <glog/logging.h>

// ...

// Simulated per-operation work: a plain countdown over `work` iterations.
for (int i = work; i > 0; --i) {
  // ...
}
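The countdown above sits inside a small work-simulation helper whose surrounding lines are not part of this extract. Below is a minimal, self-contained sketch of such a helper; the name doWork is assumed for illustration, and folly::doNotOptimizeAway (from folly/Benchmark.h) is what keeps the compiler from deleting the otherwise-unused loop.

#include <folly/Benchmark.h> // folly::doNotOptimizeAway

#include <cstdint>

// Sketch of a work-simulation helper (name assumed).
void doWork(int work) {
  uint64_t a = 0;
  for (int i = work; i > 0; --i) {
    a += i; // give the loop an observable result
  }
  // Prevent the optimizer from treating the loop as dead code.
  folly::doNotOptimizeAway(a);
}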
template <
    typename Example,
    typename Req,
    typename Mutex,
    template <typename> class Atom = std::atomic>
uint64_t fc_test(
    int nthreads,
    int lines,
    int numRecs,
    int work,
    int ops,
    bool combining,
    bool dedicated,
    bool tc,
    bool syncops,
    bool excl = false,
    bool allocAll = false) {
  // ...
  using Rec = typename FC::Rec;
  // ...
  std::atomic<bool> start{false};
  std::atomic<int> started{0};
  Example ex(lines, dedicated, numRecs);
  std::atomic<uint64_t> total{0};
  // ...

  // Optional pre-allocation of all combining records (allocAll).
  std::vector<Rec*> v(numRecs);
  for (int i = 0; i < numRecs; ++i) {
    // ...
  }
  for (int i = numRecs; i > 0; --i) {
    // ...
  }
  // ...

  std::vector<std::thread> threads(nthreads);
  for (int tid = 0; tid < nthreads; ++tid) {
    threads[tid] = std::thread([&, tid] {
      // ...
      Rec* myrec = (combining && tc) ? ex.allocRec() : nullptr;
      uint64_t sum = 0;
      while (!start.load()) {
        // spin until the main thread signals the start of the run
      }
      // ...
      sum += ex.fetchAddNoFC(1); // baseline path: no flat combining
      // ...
      sum += ex.fetchAdd(1, myrec); // fetch-and-add through flat combining
      // ...
      // Exclusive access through a lock holder:
      std::unique_lock<Mutex> l;
      // ...
      VLOG(2) << tid << " " << ex.getVal() << " ...........";
      std::this_thread::sleep_for(10ms);
      VLOG(2) << tid << " " << ex.getVal() << " ===========";
      // ...
      // Exclusive access through acquireExclusive()/releaseExclusive():
      ex.acquireExclusive();
      // ...
      VLOG(2) << tid << " " << ex.getVal() << " ...........";
      std::this_thread::sleep_for(10ms);
      VLOG(2) << tid << " " << ex.getVal() << " ===========";
      // ...
      ex.releaseExclusive();
      // ...
      total.fetch_add(sum);
      if (combining && tc) {
        // ...
      }
    });
  }
  // ...
  for (auto& t : threads) {
    t.join();
  }
  // ...
  CHECK_EQ(ops, ex.getVal());
  // ...
  // The fetch-and-add calls return the counter's prior values, which
  // sum to 0 + 1 + ... + (n - 1) across all threads.
  uint64_t expected = n * (n - 1) / 2;
  CHECK_EQ(expected, total);
  // ...
  return std::chrono::duration_cast<std::chrono::nanoseconds>(tend - tbegin)
      .count();
}

// ...

// run_test forwards to fc_test; `simple` selects between an Example that uses
// a plain bool request type and one that uses a custom Req type.
uint64_t run_test(
    int nthreads,
    int lines,
    int numRecs,
    int work,
    int ops,
    bool combining,
    bool simple,
    bool dedicated,
    bool tc,
    bool syncops,
    bool excl = false,
    bool allocAll = false) {
  if (simple) {
    // ...
    return fc_test<Example, bool, M>(
        nthreads, lines, numRecs, work, ops, combining, dedicated, tc, syncops,
        excl, allocAll);
  } else {
    // ...
    return fc_test<Example, Req, M>(
        nthreads, lines, numRecs, work, ops, combining, dedicated, tc, syncops,
        excl, allocAll);
  }
}
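Not part of the helpers above: a minimal sketch of how run_test might be invoked to compare the plain-mutex baseline with the flat-combining path. All parameter values are illustrative assumptions, and the calls assume the definitions above are in scope and that run_test's template arguments (not shown) can be defaulted.

#include <cstdint>
#include <iostream>

// Assumes the header defining run_test and its Example types is included.
int main() {
  const int nthreads = 8;  // concurrent threads
  const int lines = 5;     // forwarded to the Example constructor
  const int numRecs = 8;   // combining records available
  const int work = 1000;   // busy-work iterations between operations
  const int ops = 1000000; // total fetch-and-add operations

  // Baseline: combining off, every thread takes the locked fetchAddNoFC path.
  uint64_t baselineNs = run_test(
      nthreads, lines, numRecs, work, ops,
      /*combining=*/false, /*simple=*/true, /*dedicated=*/false,
      /*tc=*/false, /*syncops=*/true);

  // Flat combining with per-thread records: each thread passes its own Rec
  // (myrec) to fetchAdd.
  uint64_t combiningNs = run_test(
      nthreads, lines, numRecs, work, ops,
      /*combining=*/true, /*simple=*/true, /*dedicated=*/false,
      /*tc=*/true, /*syncops=*/true);

  std::cout << "mutex baseline: " << baselineNs / ops << " ns/op\n";
  std::cout << "flat combining: " << combiningNs / ops << " ns/op\n";
  return 0;
}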
Referenced symbols:

std::atomic<int64_t> sum(0)
uint64_t fc_test(int nthreads, int lines, int numRecs, int work, int ops, bool combining, bool dedicated, bool tc, bool syncops, bool excl = false, bool allocAll = false)
#define BENCHMARK_SUSPEND
uint64_t run_test(int nthreads, int lines, int numRecs, int work, int ops, bool combining, bool simple, bool dedicated, bool tc, bool syncops, bool excl = false, bool allocAll = false)
std::chrono::steady_clock::time_point now()
template <typename T> auto doNotOptimizeAway(const T& datum) -> typename std::enable_if<!detail::DoNotOptimizeAwayNeedsIndirect<T>::value>::type