ConcurrentHashMap-detail.h
1 /*
2  * Copyright 2017-present Facebook, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 #pragma once
17 
18 #include <folly/synchronization/Hazptr.h>
19 #include <atomic>
20 #include <mutex>
21 
22 namespace folly {
23 
24 namespace detail {
25 
26 namespace concurrenthashmap {
27 
28 // hazptr deleter that can use an allocator.
29 template <typename Allocator>
30 class HazptrDeleter {
31  public:
32  template <typename Node>
33  void operator()(Node* node) {
34  node->~Node();
35  Allocator().deallocate((uint8_t*)node, sizeof(Node));
36  }
37 };
38 
39 template <typename Allocator>
40 class HazptrBucketDeleter {
41  size_t count_;
42 
43  public:
44  HazptrBucketDeleter(size_t count) : count_(count) {}
45  HazptrBucketDeleter() = default;
46  template <typename Bucket>
47  void operator()(Bucket* bucket) {
48  bucket->destroy(count_);
49  }
50 };
51 
52 template <
53  typename KeyType,
54  typename ValueType,
55  typename Allocator,
56  typename Enabled = void>
57 class ValueHolder {
58  public:
59  typedef std::pair<const KeyType, ValueType> value_type;
60 
61  explicit ValueHolder(const ValueHolder& other) : item_(other.item_) {}
62 
63  template <typename Arg, typename... Args>
64  ValueHolder(std::piecewise_construct_t, Arg&& k, Args&&... args)
65  : item_(
66  std::piecewise_construct,
67  std::forward_as_tuple(std::forward<Arg>(k)),
68  std::forward_as_tuple(std::forward<Args>(args)...)) {}
69  value_type& getItem() {
70  return item_;
71  }
72 
73  private:
74  value_type item_;
75 };
76 
77 // If the KeyType or ValueType is not nothrow copy constructible, we can instead add
78 // an extra indirection. Adds more allocations / deallocations and
79 // pulls in an extra cacheline.
80 template <typename KeyType, typename ValueType, typename Allocator>
81 class ValueHolder<
82  KeyType,
83  ValueType,
84  Allocator,
85  std::enable_if_t<
86  !std::is_nothrow_copy_constructible<ValueType>::value ||
87  !std::is_nothrow_copy_constructible<KeyType>::value>> {
88  public:
89  typedef std::pair<const KeyType, ValueType> value_type;
90 
91  explicit ValueHolder(const ValueHolder& other) {
92  other.owned_ = false;
93  item_ = other.item_;
94  }
95 
96  template <typename Arg, typename... Args>
97  ValueHolder(std::piecewise_construct_t, Arg&& k, Args&&... args) {
98  item_ = (value_type*)Allocator().allocate(sizeof(value_type));
99  new (item_) value_type(
100  std::piecewise_construct,
101  std::forward_as_tuple(std::forward<Arg>(k)),
102  std::forward_as_tuple(std::forward<Args>(args)...));
103  }
104 
105  ~ValueHolder() {
106  if (owned_) {
107  item_->~value_type();
108  Allocator().deallocate((uint8_t*)item_, sizeof(value_type));
109  }
110  }
111 
112  value_type& getItem() {
113  return *item_;
114  }
115 
116  private:
117  value_type* item_;
118  mutable bool owned_{true};
119 };
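// Illustrative sketch (not part of the original header): the specialization
// above is selected whenever the key or value is not nothrow copy
// constructible. The aliases below are hypothetical, using the uint8_t
// allocator that is the default elsewhere in this file:
//
//   using Inline = ValueHolder<int, int, std::allocator<uint8_t>>;   // stored inline
//   using Indirect =
//       ValueHolder<int, std::unique_ptr<int>, std::allocator<uint8_t>>;  // stored via pointer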
120 
121 template <
122  typename KeyType,
123  typename ValueType,
124  typename Allocator,
125  template <typename> class Atom = std::atomic>
126 class NodeT : public hazptr_obj_base_linked<
127  NodeT<KeyType, ValueType, Allocator, Atom>,
128  Atom,
129  concurrenthashmap::HazptrDeleter<Allocator>> {
130  public:
131  typedef std::pair<const KeyType, ValueType> value_type;
132 
133  explicit NodeT(NodeT* other) : item_(other->item_) {
134  this->set_deleter( // defined in hazptr_obj
135  concurrenthashmap::HazptrDeleter<Allocator>());
136  this->acquire_link_safe(); // defined in hazptr_obj_base_linked
137  }
138 
139  template <typename Arg, typename... Args>
140  NodeT(Arg&& k, Args&&... args)
141  : item_(
142  std::piecewise_construct,
143  std::forward<Arg>(k),
144  std::forward<Args>(args)...) {
145  this->set_deleter( // defined in hazptr_obj
146  concurrenthashmap::HazptrDeleter<Allocator>());
147  this->acquire_link_safe(); // defined in hazptr_obj_base_linked
148  }
149 
150  void release() {
151  this->unlink();
152  }
153 
154  value_type& getItem() {
155  return item_.getItem();
156  }
157 
158  template <typename S>
159  void push_links(bool m, S& s) {
160  if (m) {
161  auto p = next_.load(std::memory_order_acquire);
162  if (p) {
163  s.push(p);
164  }
165  }
166  }
167 
168  Atom<NodeT*> next_{nullptr};
169 
170  private:
171  ValueHolder<KeyType, ValueType, Allocator> item_;
172 };
173 
174 } // namespace concurrenthashmap
175 
176 /* A Segment is a single shard of the ConcurrentHashMap.
177  * All writes take the lock, while readers are all wait-free.
178  * Readers always proceed in parallel with the single writer.
179  *
180  *
181  * Possible additional optimizations:
182  *
183  * * insert / erase could be lock / wait free. Would need to be
184  * careful that assign and rehash don't conflict (possibly with
185  * reader/writer lock, or microlock per node or per bucket, etc).
186  * Java 8 goes halfway, and does lock per bucket, except for the
187  * first item, which is inserted with a CAS (this is somewhat
188  * specific to Java having a lock per object).
189  *
190  * * I tried using trylock() and find() to warm the cache for insert()
191  * and erase() similar to Java 7, but didn't have much luck.
192  *
193  * * We could order elements using split ordering, for faster rehash,
194  * and no need to ever copy nodes. Note that a full split ordering
195  * including dummy nodes increases the memory usage by 2x, but we
196  * could split the difference and still require a lock to set bucket
197  * pointers.
198  */
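/* Illustrative usage sketch (not part of the original header). The segment is
 * normally driven by the enclosing ConcurrentHashMap; the hypothetical calls
 * below only show the locked writer path and the wait-free reader path
 * described above.
 *
 *   ConcurrentHashMapSegment<int, int> seg(
 *       8, 1.05f, 0); // initial_buckets, load_factor, max_size
 *   ConcurrentHashMapSegment<int, int>::Iterator it, res;
 *   seg.insert(it, 1, 100);           // writer: takes the segment mutex
 *   seg.insert_or_assign(it, 1, 101); // writer: overwrite existing value
 *   if (seg.find(res, 1)) {           // reader: wait-free, hazptr-protected
 *     // res->second == 101
 *   }
 *   seg.erase(1);
 */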
199 template <
200  typename KeyType,
201  typename ValueType,
202  uint8_t ShardBits = 0,
203  typename HashFn = std::hash<KeyType>,
204  typename KeyEqual = std::equal_to<KeyType>,
205  typename Allocator = std::allocator<uint8_t>,
206  template <typename> class Atom = std::atomic,
207  class Mutex = std::mutex>
208 class alignas(64) ConcurrentHashMapSegment {
209  enum class InsertType {
210  DOES_NOT_EXIST, // insert/emplace operations. If key exists, return false.
211  MUST_EXIST, // assign operations. If key does not exist, return false.
212  ANY, // insert_or_assign.
213  MATCH, // assign_if_equal (not in std). For concurrent maps, a
214  // way to atomically change a value if equal to some other
215  // value.
216  };
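 // Illustrative summary (not part of the original header): the public calls
 // below map onto these insert types as
 //   insert() / try_emplace() / emplace()  -> DOES_NOT_EXIST
 //   assign()                              -> MUST_EXIST
 //   insert_or_assign()                    -> ANY
 //   assign_if_equal(it, k, expected, v)   -> MATCH, succeeding only if the
 //                                            current value equals expected.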
217 
218  public:
219  typedef KeyType key_type;
220  typedef ValueType mapped_type;
221  typedef std::pair<const KeyType, ValueType> value_type;
222  typedef std::size_t size_type;
223 
224  using Node = concurrenthashmap::NodeT<KeyType, ValueType, Allocator, Atom>;
225  class Iterator;
226 
227  ConcurrentHashMapSegment(
228  size_t initial_buckets,
229  float load_factor,
230  size_t max_size)
231  : load_factor_(load_factor), max_size_(max_size) {
232  initial_buckets = folly::nextPowTwo(initial_buckets);
233  DCHECK(
234  max_size_ == 0 ||
235  (isPowTwo(max_size_) &&
236  (folly::popcount(max_size_ - 1) + ShardBits <= 32)));
237  auto buckets = Buckets::create(initial_buckets);
238  buckets_.store(buckets, std::memory_order_release);
239  load_factor_nodes_ = initial_buckets * load_factor_;
240  bucket_count_.store(initial_buckets, std::memory_order_relaxed);
241  }
242 
243  ~ConcurrentHashMapSegment() {
244  auto buckets = buckets_.load(std::memory_order_relaxed);
245  // We can delete and not retire() here, since users must have
246  // their own synchronization around destruction.
247  auto count = bucket_count_.load(std::memory_order_relaxed);
248  buckets->unlink_and_reclaim_nodes(count);
249  buckets->destroy(count);
250  }
251 
252  size_t size() {
253  return size_;
254  }
255 
256  bool empty() {
257  return size() == 0;
258  }
259 
260  bool insert(Iterator& it, std::pair<key_type, mapped_type>&& foo) {
261  return insert(it, std::move(foo.first), std::move(foo.second));
262  }
263 
264  template <typename Key, typename Value>
265  bool insert(Iterator& it, Key&& k, Value&& v) {
266  auto node = (Node*)Allocator().allocate(sizeof(Node));
267  new (node) Node(std::forward<Key>(k), std::forward<Value>(v));
268  auto res = insert_internal(
269  it,
270  node->getItem().first,
271  InsertType::DOES_NOT_EXIST,
272  [](const ValueType&) { return false; },
273  node,
274  node);
275  if (!res) {
276  node->~Node();
277  Allocator().deallocate((uint8_t*)node, sizeof(Node));
278  }
279  return res;
280  }
281 
282  template <typename Key, typename... Args>
283  bool try_emplace(Iterator& it, Key&& k, Args&&... args) {
284  // Note: the first forwarded key is only ever compared; the second is
285  // moved in to create the node, and the first is never touched again.
286  return insert_internal(
287  it,
288  std::forward<Key>(k),
289  InsertType::DOES_NOT_EXIST,
290  [](const ValueType&) { return false; },
291  nullptr,
292  std::forward<Key>(k),
293  std::forward<Args>(args)...);
294  }
295 
296  template <typename... Args>
297  bool emplace(Iterator& it, const KeyType& k, Node* node) {
298  return insert_internal(
299  it,
300  k,
301  InsertType::DOES_NOT_EXIST,
302  [](const ValueType&) { return false; },
303  node,
304  node);
305  }
306 
307  template <typename Key, typename Value>
308  bool insert_or_assign(Iterator& it, Key&& k, Value&& v) {
309  auto node = (Node*)Allocator().allocate(sizeof(Node));
310  new (node) Node(std::forward<Key>(k), std::forward<Value>(v));
311  auto res = insert_internal(
312  it,
313  node->getItem().first,
314  InsertType::ANY,
315  [](const ValueType&) { return false; },
316  node,
317  node);
318  if (!res) {
319  node->~Node();
320  Allocator().deallocate((uint8_t*)node, sizeof(Node));
321  }
322  return res;
323  }
324 
325  template <typename Key, typename Value>
326  bool assign(Iterator& it, Key&& k, Value&& v) {
327  auto node = (Node*)Allocator().allocate(sizeof(Node));
328  new (node) Node(std::forward<Key>(k), std::forward<Value>(v));
329  auto res = insert_internal(
330  it,
331  node->getItem().first,
332  InsertType::MUST_EXIST,
333  [](const ValueType&) { return false; },
334  node,
335  node);
336  if (!res) {
337  node->~Node();
338  Allocator().deallocate((uint8_t*)node, sizeof(Node));
339  }
340  return res;
341  }
342 
343  template <typename Key, typename Value>
344  bool assign_if_equal(
345  Iterator& it,
346  Key&& k,
347  const ValueType& expected,
348  Value&& desired) {
349  auto node = (Node*)Allocator().allocate(sizeof(Node));
350  new (node) Node(std::forward<Key>(k), std::forward<Value>(desired));
351  auto res = insert_internal(
352  it,
353  node->getItem().first,
354  InsertType::MATCH,
355  [&expected](const ValueType& v) { return v == expected; },
356  node,
357  node);
358  if (!res) {
359  node->~Node();
360  Allocator().deallocate((uint8_t*)node, sizeof(Node));
361  }
362  return res;
363  }
364 
365  template <typename MatchFunc, typename... Args>
366  bool insert_internal(
367  Iterator& it,
368  const KeyType& k,
369  InsertType type,
370  MatchFunc match,
371  Node* cur,
372  Args&&... args) {
373  auto h = HashFn()(k);
374  std::unique_lock<Mutex> g(m_);
375 
376  size_t bcount = bucket_count_.load(std::memory_order_relaxed);
377  auto buckets = buckets_.load(std::memory_order_relaxed);
378  // Check for rehash needed for DOES_NOT_EXIST
379  if (size_ >= load_factor_nodes_ && type == InsertType::DOES_NOT_EXIST) {
380  if (max_size_ && size_ << 1 > max_size_) {
381  // Would exceed max size.
382  throw std::bad_alloc();
383  }
384  rehash(bcount << 1);
385  buckets = buckets_.load(std::memory_order_relaxed);
386  bcount = bucket_count_.load(std::memory_order_relaxed);
387  }
388 
389  auto idx = getIdx(bcount, h);
390  auto head = &buckets->buckets_[idx]();
391  auto node = head->load(std::memory_order_relaxed);
392  auto headnode = node;
393  auto prev = head;
394  auto& hazbuckets = it.hazptrs_[0];
395  auto& haznode = it.hazptrs_[1];
396  hazbuckets.reset(buckets);
397  while (node) {
398  // Is the key found?
399  if (KeyEqual()(k, node->getItem().first)) {
400  it.setNode(node, buckets, bcount, idx);
401  haznode.reset(node);
402  if (type == InsertType::MATCH) {
403  if (!match(node->getItem().second)) {
404  return false;
405  }
406  }
407  if (type == InsertType::DOES_NOT_EXIST) {
408  return false;
409  } else {
410  if (!cur) {
411  cur = (Node*)Allocator().allocate(sizeof(Node));
412  new (cur) Node(std::forward<Args>(args)...);
413  }
414  auto next = node->next_.load(std::memory_order_relaxed);
415  cur->next_.store(next, std::memory_order_relaxed);
416  if (next) {
417  next->acquire_link(); // defined in hazptr_obj_base_linked
418  }
419  prev->store(cur, std::memory_order_release);
420  g.unlock();
421  // Release not under lock.
422  node->release();
423  return true;
424  }
425  }
426 
427  prev = &node->next_;
428  node = node->next_.load(std::memory_order_relaxed);
429  }
430  if (type != InsertType::DOES_NOT_EXIST && type != InsertType::ANY) {
431  haznode.reset();
432  hazbuckets.reset();
433  return false;
434  }
435  // Node not found, check for rehash on ANY
436  if (size_ >= load_factor_nodes_ && type == InsertType::ANY) {
437  if (max_size_ && size_ << 1 > max_size_) {
438  // Would exceed max size.
439  throw std::bad_alloc();
440  }
441  rehash(bcount << 1);
442 
443  // Reload correct bucket.
444  buckets = buckets_.load(std::memory_order_relaxed);
445  bcount <<= 1;
446  hazbuckets.reset(buckets);
447  idx = getIdx(bcount, h);
448  head = &buckets->buckets_[idx]();
449  headnode = head->load(std::memory_order_relaxed);
450  }
451 
452  // We found a slot to put the node.
453  size_++;
454  if (!cur) {
455  // InsertType::ANY
456  // OR DOES_NOT_EXIST, but only in the try_emplace case
457  DCHECK(type == InsertType::ANY || type == InsertType::DOES_NOT_EXIST);
458  cur = (Node*)Allocator().allocate(sizeof(Node));
459  new (cur) Node(std::forward<Args>(args)...);
460  }
461  cur->next_.store(headnode, std::memory_order_relaxed);
462  head->store(cur, std::memory_order_release);
463  it.setNode(cur, buckets, bcount, idx);
464  return true;
465  }
466 
467  // Must hold lock.
468  void rehash(size_t bucket_count) {
469  auto buckets = buckets_.load(std::memory_order_relaxed);
470  auto newbuckets = Buckets::create(bucket_count);
471 
472  load_factor_nodes_ = bucket_count * load_factor_;
473 
474  auto oldcount = bucket_count_.load(std::memory_order_relaxed);
475  for (size_t i = 0; i < oldcount; i++) {
476  auto bucket = &buckets->buckets_[i]();
477  auto node = bucket->load(std::memory_order_relaxed);
478  if (!node) {
479  continue;
480  }
481  auto h = HashFn()(node->getItem().first);
482  auto idx = getIdx(bucket_count, h);
483  // Reuse as long a chain as possible from the end. Since the
484  // nodes don't have previous pointers, the longest last chain
485  // will be the same for both the previous hashmap and the new one,
486  // assuming all the nodes hash to the same bucket.
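 //
 // Worked example (added for illustration): for a chain A -> B -> C -> D
 // where C and D rehash to one new bucket but B rehashes elsewhere, lastrun
 // ends at C, so the C -> D tail is linked into the new table unchanged
 // (with one extra reference taken below) and only A and B are cloned.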
487  auto lastrun = node;
488  auto lastidx = idx;
489  auto count = 0;
490  auto last = node->next_.load(std::memory_order_relaxed);
491  for (; last != nullptr;
492  last = last->next_.load(std::memory_order_relaxed)) {
493  auto k = getIdx(bucket_count, HashFn()(last->getItem().first));
494  if (k != lastidx) {
495  lastidx = k;
496  lastrun = last;
497  count = 0;
498  }
499  count++;
500  }
501  // Set longest last run in new bucket, incrementing the refcount.
502  lastrun->acquire_link(); // defined in hazptr_obj_base_linked
503  newbuckets->buckets_[lastidx]().store(lastrun, std::memory_order_relaxed);
504  // Clone remaining nodes
505  for (; node != lastrun;
506  node = node->next_.load(std::memory_order_relaxed)) {
507  auto newnode = (Node*)Allocator().allocate(sizeof(Node));
508  new (newnode) Node(node);
509  auto k = getIdx(bucket_count, HashFn()(node->getItem().first));
510  auto prevhead = &newbuckets->buckets_[k]();
511  newnode->next_.store(prevhead->load(std::memory_order_relaxed));
512  prevhead->store(newnode, std::memory_order_relaxed);
513  }
514  }
515 
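 // The two seqlock_ increments below make the sequence odd while
 // bucket_count_ and buckets_ are being swapped, so concurrent readers in
 // getBucketsAndCount() retry rather than pairing mismatched values.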
516  auto oldbuckets = buckets_.load(std::memory_order_relaxed);
517  seqlock_.fetch_add(1, std::memory_order_release);
518  bucket_count_.store(bucket_count, std::memory_order_release);
519  buckets_.store(newbuckets, std::memory_order_release);
520  seqlock_.fetch_add(1, std::memory_order_release);
521  oldbuckets->retire(
522  concurrenthashmap::HazptrBucketDeleter<Allocator>(oldcount));
523  }
524 
525  bool find(Iterator& res, const KeyType& k) {
526  auto& hazcurr = res.hazptrs_[1];
527  auto& haznext = res.hazptrs_[2];
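 // Hand-over-hand traversal: hazcurr protects the node being examined while
 // haznext acquires its successor; swapping them advances protection without
 // ever leaving the current node unprotected.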
528  auto h = HashFn()(k);
529  size_t bcount;
530  Buckets* buckets;
531  getBucketsAndCount(bcount, buckets, res.hazptrs_[0]);
532 
533  auto idx = getIdx(bcount, h);
534  auto prev = &buckets->buckets_[idx]();
535  auto node = hazcurr.get_protected(*prev);
536  while (node) {
537  if (KeyEqual()(k, node->getItem().first)) {
538  res.setNode(node, buckets, bcount, idx);
539  return true;
540  }
541  node = haznext.get_protected(node->next_);
542  hazcurr.swap(haznext);
543  }
544  return false;
545  }
546 
547  // Listed separately because we need a prev pointer.
548  size_type erase(const key_type& key) {
549  return erase_internal(key, nullptr);
550  }
551 
552  size_type erase_internal(const key_type& key, Iterator* iter) {
553  Node* node{nullptr};
554  auto h = HashFn()(key);
555  {
556  std::lock_guard<Mutex> g(m_);
557 
558  size_t bcount = bucket_count_.load(std::memory_order_relaxed);
559  auto buckets = buckets_.load(std::memory_order_relaxed);
560  auto idx = getIdx(bcount, h);
561  auto head = &buckets->buckets_[idx]();
562  node = head->load(std::memory_order_relaxed);
563  Node* prev = nullptr;
564  while (node) {
565  if (KeyEqual()(key, node->getItem().first)) {
566  auto next = node->next_.load(std::memory_order_relaxed);
567  if (next) {
568  next->acquire_link(); // defined in hazptr_obj_base_linked
569  }
570  if (prev) {
571  prev->next_.store(next, std::memory_order_release);
572  } else {
573  // Must be head of list.
574  head->store(next, std::memory_order_release);
575  }
576 
577  if (iter) {
578  iter->hazptrs_[0].reset(buckets);
579  iter->setNode(
580  node->next_.load(std::memory_order_acquire),
581  buckets,
582  bcount,
583  idx);
584  iter->next();
585  }
586  size_--;
587  break;
588  }
589  prev = node;
590  node = node->next_.load(std::memory_order_relaxed);
591  }
592  }
593  // Delete the node while not under the lock.
594  if (node) {
595  node->release();
596  return 1;
597  }
598 
599  return 0;
600  }
601 
602  // Unfortunately because we are reusing nodes on rehash, we can't
603  // have prev pointers in the bucket chain. We have to start the
604  // search from the bucket.
605  //
606  // This is a small departure from standard stl containers: erase may
607  // throw if hash or key_eq functions throw.
608  void erase(Iterator& res, Iterator& pos) {
609  erase_internal(pos->first, &res);
610  // Invalidate the iterator.
611  pos = cend();
612  }
613 
614  void clear() {
615  size_t bcount = bucket_count_.load(std::memory_order_relaxed);
616  Buckets* buckets;
617  auto newbuckets = Buckets::create(bcount);
618  {
619  std::lock_guard<Mutex> g(m_);
620  buckets = buckets_.load(std::memory_order_relaxed);
621  buckets_.store(newbuckets, std::memory_order_release);
622  size_ = 0;
623  }
624  buckets->retire(concurrenthashmap::HazptrBucketDeleter<Allocator>(bcount));
625  }
626 
627  void max_load_factor(float factor) {
628  std::lock_guard<Mutex> g(m_);
629  load_factor_ = factor;
630  load_factor_nodes_ =
631  bucket_count_.load(std::memory_order_relaxed) * load_factor_;
632  }
633 
634  Iterator cbegin() {
635  Iterator res;
636  size_t bcount;
637  Buckets* buckets;
638  getBucketsAndCount(bcount, buckets, res.hazptrs_[0]);
639  res.setNode(nullptr, buckets, bcount, 0);
640  res.next();
641  return res;
642  }
643 
644  Iterator cend() {
645  return Iterator(nullptr);
646  }
647 
648  // Could be optimized to avoid an extra pointer dereference by
649  // allocating buckets_ at the same time.
650  class Buckets : public hazptr_obj_base<
651  Buckets,
652  Atom,
653  concurrenthashmap::HazptrBucketDeleter<Allocator>> {
654  using BucketRoot = hazptr_root<Node, Atom>;
655 
656  Buckets() {}
657  ~Buckets() {}
658 
659  public:
660  static Buckets* create(size_t count) {
661  auto buf =
662  Allocator().allocate(sizeof(Buckets) + sizeof(BucketRoot) * count);
663  auto buckets = new (buf) Buckets();
664  for (size_t i = 0; i < count; i++) {
665  new (&buckets->buckets_[i]) BucketRoot;
666  }
667  return buckets;
668  }
669 
670  void destroy(size_t count) {
671  for (size_t i = 0; i < count; i++) {
672  buckets_[i].~BucketRoot();
673  }
674  this->~Buckets();
675  Allocator().deallocate(
676  (uint8_t*)this, sizeof(BucketRoot) * count + sizeof(*this));
677  }
678 
679  void unlink_and_reclaim_nodes(size_t count) {
680  for (size_t i = 0; i < count; i++) {
681  auto node = buckets_[i]().load(std::memory_order_relaxed);
682  if (node) {
683  buckets_[i]().store(nullptr, std::memory_order_relaxed);
684  while (node) {
685  auto next = node->next_.load(std::memory_order_relaxed);
686  if (next) {
687  node->next_.store(nullptr, std::memory_order_relaxed);
688  }
689  node->unlink_and_reclaim_unchecked();
690  node = next;
691  }
692  }
693  }
694  }
695 
696  BucketRoot buckets_[0];
697  };
698 
699  public:
700  class Iterator {
701  public:
702  FOLLY_ALWAYS_INLINE Iterator() {}
703  FOLLY_ALWAYS_INLINE explicit Iterator(std::nullptr_t) : hazptrs_(nullptr) {}
704  FOLLY_ALWAYS_INLINE ~Iterator() {}
705 
706  void
707  setNode(Node* node, Buckets* buckets, size_t bucket_count, uint64_t idx) {
708  node_ = node;
709  buckets_ = buckets;
710  idx_ = idx;
711  bucket_count_ = bucket_count;
712  }
713 
714  const value_type& operator*() const {
715  DCHECK(node_);
716  return node_->getItem();
717  }
718 
719  const value_type* operator->() const {
720  DCHECK(node_);
721  return &(node_->getItem());
722  }
723 
724  const Iterator& operator++() {
725  DCHECK(node_);
726  node_ = hazptrs_[2].get_protected(node_->next_);
727  hazptrs_[1].swap(hazptrs_[2]);
728  if (!node_) {
729  ++idx_;
730  next();
731  }
732  return *this;
733  }
734 
735  void next() {
736  while (!node_) {
737  if (idx_ >= bucket_count_) {
738  break;
739  }
740  DCHECK(buckets_);
741  node_ = hazptrs_[1].get_protected(buckets_->buckets_[idx_]());
742  if (node_) {
743  break;
744  }
745  ++idx_;
746  }
747  }
748 
749  bool operator==(const Iterator& o) const {
750  return node_ == o.node_;
751  }
752 
753  bool operator!=(const Iterator& o) const {
754  return !(*this == o);
755  }
756 
757  Iterator& operator=(const Iterator& o) = delete;
758 
759  Iterator& operator=(Iterator&& o) noexcept {
760  if (this != &o) {
761  hazptrs_ = std::move(o.hazptrs_);
762  node_ = std::exchange(o.node_, nullptr);
763  buckets_ = std::exchange(o.buckets_, nullptr);
764  bucket_count_ = std::exchange(o.bucket_count_, 0);
765  idx_ = std::exchange(o.idx_, 0);
766  }
767  return *this;
768  }
769 
770  Iterator(const Iterator& o) = delete;
771 
772  FOLLY_ALWAYS_INLINE Iterator(Iterator&& o) noexcept
773  : hazptrs_(std::move(o.hazptrs_)),
774  node_(std::exchange(o.node_, nullptr)),
775  buckets_(std::exchange(o.buckets_, nullptr)),
776  bucket_count_(std::exchange(o.bucket_count_, 0)),
777  idx_(std::exchange(o.idx_, 0)) {}
778 
779  // These are accessed directly from the functions above
780  hazptr_array<3, Atom> hazptrs_;
781 
782  private:
783  Node* node_{nullptr};
784  Buckets* buckets_{nullptr};
785  size_t bucket_count_{0};
786  uint64_t idx_{0};
787  };
788 
789  private:
790  // Shards have already used low ShardBits of the hash.
791  // Shift it over to use fresh bits.
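 // Worked example (added for illustration): with ShardBits = 8,
 // bucket_count = 16 and hash = 0xABCD12, this returns
 // (0xABCD12 >> 8) & 0xF = 0xABCD & 0xF = 0xD.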
792  uint64_t getIdx(size_t bucket_count, size_t hash) {
793  return (hash >> ShardBits) & (bucket_count - 1);
794  }
795  void getBucketsAndCount(
796  size_t& bcount,
797  Buckets*& buckets,
798  hazptr_holder<Atom>& hazptr) {
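 // Retry until an even, unchanged seqlock_ brackets both loads, so that
 // bcount and buckets form a consistent snapshot even if rehash() runs
 // concurrently; the hazptr keeps the returned bucket array alive.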
799  while (true) {
800  auto seqlock = seqlock_.load(std::memory_order_acquire);
801  bcount = bucket_count_.load(std::memory_order_acquire);
802  buckets = hazptr.get_protected(buckets_);
803  auto seqlock2 = seqlock_.load(std::memory_order_acquire);
804  if (!(seqlock & 1) && (seqlock == seqlock2)) {
805  break;
806  }
807  }
808  DCHECK(buckets);
809  }
810 
811  Mutex m_;
812  float load_factor_;
813  size_t load_factor_nodes_;
814  size_t size_{0};
815  size_t const max_size_;
816 
817  // Fields needed for read-only access, on separate cacheline.
818  alignas(64) Atom<Buckets*> buckets_{nullptr};
819  std::atomic<uint64_t> seqlock_{0};
820  Atom<size_t> bucket_count_;
821 };
822 } // namespace detail
823 } // namespace folly