/*
 * Copyright 2017-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <folly/Optional.h>
#include <folly/concurrency/detail/ConcurrentHashMap-detail.h>
#include <folly/experimental/hazptr/hazptr.h>
#include <atomic>
#include <mutex>

namespace folly {

template <
    typename KeyType,
    typename ValueType,
    typename HashFn = std::hash<KeyType>,
    typename KeyEqual = std::equal_to<KeyType>,
    typename Allocator = std::allocator<uint8_t>,
    uint8_t ShardBits = 8,
    template <typename> class Atom = std::atomic,
    class Mutex = std::mutex>
class ConcurrentHashMap {
  using SegmentT = detail::ConcurrentHashMapSegment<
      KeyType,
      ValueType,
      ShardBits,
      HashFn,
      KeyEqual,
      Allocator,
      Atom,
      Mutex>;
  static constexpr uint64_t NumShards = (1 << ShardBits);
  // Slightly higher than 1.0, so that even if hashing to shards isn't
  // perfectly balanced, reserve(size) will still work without rehashing.
  float load_factor_ = 1.05;

 public:
  class ConstIterator;

  typedef KeyType key_type;
  typedef ValueType mapped_type;
  typedef std::pair<const KeyType, ValueType> value_type;
  typedef std::size_t size_type;
  typedef HashFn hasher;
  typedef KeyEqual key_equal;
  typedef ConstIterator const_iterator;

  /*
   * Construct a ConcurrentHashMap with 1 << ShardBits shards and the given
   * size and max_size. Both size and max_size are rounded up to the next
   * power of two if they are not already powers of two, so that we can
   * index into the shards efficiently.
   *
   * Insertion functions will throw bad_alloc if max_size is exceeded.
   */
  explicit ConcurrentHashMap(size_t size = 8, size_t max_size = 0) {
    size_ = folly::nextPowTwo(size);
    if (max_size != 0) {
      max_size_ = folly::nextPowTwo(max_size);
    }
    CHECK(max_size_ == 0 || max_size_ >= size_);
    for (uint64_t i = 0; i < NumShards; i++) {
      segments_[i].store(nullptr, std::memory_order_relaxed);
    }
  }
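
  // A minimal usage sketch: with the default ShardBits = 8 the map has 256
  // shards, and the requested size is divided among them (size >> ShardBits
  // per shard) when a shard is first touched.
  //
  //   ConcurrentHashMap<std::string, int> m(/* size = */ 1024);
  //   m.insert("apple", 1);
  //   auto it = m.find("apple");  // it != m.cend() && it->second == 1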

  ConcurrentHashMap(ConcurrentHashMap&& o) noexcept
      : size_(o.size_), max_size_(o.max_size_) {
    for (uint64_t i = 0; i < NumShards; i++) {
      segments_[i].store(
          o.segments_[i].load(std::memory_order_relaxed),
          std::memory_order_relaxed);
      o.segments_[i].store(nullptr, std::memory_order_relaxed);
    }
  }

  ConcurrentHashMap& operator=(ConcurrentHashMap&& o) {
    for (uint64_t i = 0; i < NumShards; i++) {
      auto seg = segments_[i].load(std::memory_order_relaxed);
      if (seg) {
        seg->~SegmentT();
        Allocator().deallocate((uint8_t*)seg, sizeof(SegmentT));
      }
      segments_[i].store(
          o.segments_[i].load(std::memory_order_relaxed),
          std::memory_order_relaxed);
      o.segments_[i].store(nullptr, std::memory_order_relaxed);
    }
    size_ = o.size_;
    max_size_ = o.max_size_;
    return *this;
  }

  /* Note that some objects stored in ConcurrentHashMap may outlive the
   * ConcurrentHashMap's destructor. If you need immediate cleanup, call
   * hazptr_cleanup(), which guarantees that all objects are destructed
   * after it completes.
   */
  ~ConcurrentHashMap() {
    for (uint64_t i = 0; i < NumShards; i++) {
      auto seg = segments_[i].load(std::memory_order_relaxed);
      if (seg) {
        seg->~SegmentT();
        Allocator().deallocate((uint8_t*)seg, sizeof(SegmentT));
      }
    }
  }
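
  // Sketch of the cleanup note above (hazptr_cleanup()'s namespace
  // qualification varies across folly versions):
  //
  //   {
  //     ConcurrentHashMap<int, int> m;
  //     m.insert(1, 1);
  //   }                  // destructor has run; nodes may still be retired
  //   hazptr_cleanup();  // blocks until all retired nodes are destructed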

  bool empty() const noexcept {
    for (uint64_t i = 0; i < NumShards; i++) {
      auto seg = segments_[i].load(std::memory_order_acquire);
      if (seg) {
        if (!seg->empty()) {
          return false;
        }
      }
    }
    return true;
  }

  ConstIterator find(const KeyType& k) const {
    auto segment = pickSegment(k);
    ConstIterator res(this, segment);
    auto seg = segments_[segment].load(std::memory_order_acquire);
    if (!seg || !seg->find(res.it_, k)) {
      res.segment_ = NumShards;
    }
    return res;
  }

  ConstIterator cend() const noexcept {
    return ConstIterator(NumShards);
  }

  ConstIterator cbegin() const noexcept {
    return ConstIterator(this);
  }

  ConstIterator end() const noexcept {
    return cend();
  }

  ConstIterator begin() const noexcept {
    return cbegin();
  }

  std::pair<ConstIterator, bool> insert(
      std::pair<key_type, mapped_type>&& foo) {
    auto segment = pickSegment(foo.first);
    std::pair<ConstIterator, bool> res(
        std::piecewise_construct,
        std::forward_as_tuple(this, segment),
        std::forward_as_tuple(false));
    res.second = ensureSegment(segment)->insert(res.first.it_, std::move(foo));
    return res;
  }

  template <typename Key, typename Value>
  std::pair<ConstIterator, bool> insert(Key&& k, Value&& v) {
    auto segment = pickSegment(k);
    std::pair<ConstIterator, bool> res(
        std::piecewise_construct,
        std::forward_as_tuple(this, segment),
        std::forward_as_tuple(false));
    res.second = ensureSegment(segment)->insert(
        res.first.it_, std::forward<Key>(k), std::forward<Value>(v));
    return res;
  }
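
  // As with std::unordered_map::insert, the returned bool reports whether
  // the key was newly inserted; on failure the iterator refers to the
  // already-present entry. For example:
  //
  //   auto res = m.insert(k, v);
  //   if (!res.second) { /* key existed; res.first->second is unchanged */ }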

  template <typename Key, typename... Args>
  std::pair<ConstIterator, bool> try_emplace(Key&& k, Args&&... args) {
    auto segment = pickSegment(k);
    std::pair<ConstIterator, bool> res(
        std::piecewise_construct,
        std::forward_as_tuple(this, segment),
        std::forward_as_tuple(false));
    res.second = ensureSegment(segment)->try_emplace(
        res.first.it_, std::forward<Key>(k), std::forward<Args>(args)...);
    return res;
  }

  template <typename... Args>
  std::pair<ConstIterator, bool> emplace(Args&&... args) {
    using Node = typename SegmentT::Node;
    auto node = (Node*)Allocator().allocate(sizeof(Node));
    new (node) Node(std::forward<Args>(args)...);
    auto segment = pickSegment(node->getItem().first);
    std::pair<ConstIterator, bool> res(
        std::piecewise_construct,
        std::forward_as_tuple(this, segment),
        std::forward_as_tuple(false));
    res.second = ensureSegment(segment)->emplace(
        res.first.it_, node->getItem().first, node);
    if (!res.second) {
      node->~Node();
      Allocator().deallocate((uint8_t*)node, sizeof(Node));
    }
    return res;
  }
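
  // Note: unlike try_emplace, emplace above constructs the node (key and
  // value included) before checking whether the key already exists, and
  // destroys it again if the insertion fails.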

  template <typename Key, typename Value>
  std::pair<ConstIterator, bool> insert_or_assign(Key&& k, Value&& v) {
    auto segment = pickSegment(k);
    std::pair<ConstIterator, bool> res(
        std::piecewise_construct,
        std::forward_as_tuple(this, segment),
        std::forward_as_tuple(false));
    res.second = ensureSegment(segment)->insert_or_assign(
        res.first.it_, std::forward<Key>(k), std::forward<Value>(v));
    return res;
  }

  template <typename Key, typename Value>
  folly::Optional<ConstIterator> assign(Key&& k, Value&& v) {
    auto segment = pickSegment(k);
    ConstIterator res(this, segment);
    auto seg = segments_[segment].load(std::memory_order_acquire);
    if (!seg) {
      return none;
    } else {
      auto r =
          seg->assign(res.it_, std::forward<Key>(k), std::forward<Value>(v));
      if (!r) {
        return none;
      }
    }
    return std::move(res);
  }

  // Assign to desired if and only if the current value for key k equals
  // expected.
  template <typename Key, typename Value>
  folly::Optional<ConstIterator>
  assign_if_equal(Key&& k, const ValueType& expected, Value&& desired) {
    auto segment = pickSegment(k);
    ConstIterator res(this, segment);
    auto seg = segments_[segment].load(std::memory_order_acquire);
    if (!seg) {
      return none;
    } else {
      auto r = seg->assign_if_equal(
          res.it_,
          std::forward<Key>(k),
          expected,
          std::forward<Value>(desired));
      if (!r) {
        return none;
      }
    }
    return std::move(res);
  }
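
  // assign_if_equal provides compare-and-swap semantics on the value, so an
  // optimistic read-modify-write loop can be sketched as follows (update()
  // is a hypothetical pure function):
  //
  //   auto it = m.find(k);
  //   while (it != m.cend()) {
  //     ValueType desired = update(it->second);
  //     if (m.assign_if_equal(k, it->second, desired)) {
  //       break;              // our update won
  //     }
  //     it = m.find(k);       // someone else changed it; reload and retry
  //   }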

  // Copying wrappers around insert and find.
  // Only available for copyable types.
  const ValueType operator[](const KeyType& key) {
    auto item = insert(key, ValueType());
    return item.first->second;
  }

  const ValueType at(const KeyType& key) const {
    auto item = find(key);
    if (item == cend()) {
      throw std::out_of_range("at(): value out of range");
    }
    return item->second;
  }
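
  // Unlike std::unordered_map, both wrappers above return the value by copy
  // rather than by reference, since a concurrent erase could otherwise leave
  // the reference dangling. For example:
  //
  //   ValueType v = m[k];   // default-constructs and inserts k if absent
  //   v = m.at(k);          // throws std::out_of_range if k is absent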

  // TODO update assign interface, operator[], at

  size_type erase(const key_type& k) {
    auto segment = pickSegment(k);
    auto seg = segments_[segment].load(std::memory_order_acquire);
    if (!seg) {
      return 0;
    } else {
      return seg->erase(k);
    }
  }

  // Calls the hash function, and therefore may throw.
  ConstIterator erase(ConstIterator& pos) {
    auto segment = pickSegment(pos->first);
    ConstIterator res(this, segment);
    ensureSegment(segment)->erase(res.it_, pos.it_);
    res.next(); // May point to segment end, and need to advance.
    return res;
  }

  // NOT noexcept: clearing a segment may allocate new buckets.
  void clear() {
    for (uint64_t i = 0; i < NumShards; i++) {
      auto seg = segments_[i].load(std::memory_order_acquire);
      if (seg) {
        seg->clear();
      }
    }
  }

  void reserve(size_t count) {
    count = count >> ShardBits;
    for (uint64_t i = 0; i < NumShards; i++) {
      auto seg = segments_[i].load(std::memory_order_acquire);
      if (seg) {
        seg->rehash(count);
      }
    }
  }
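
  // Example: reserve(1024) with the default ShardBits = 8 asks each
  // already-created shard to rehash to 1024 >> 8 = 4 buckets; shards that
  // do not exist yet are unaffected and are sized from the constructor's
  // size when first created.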

  // This is a rolling size, and is not exact at any moment in time.
  size_t size() const noexcept {
    size_t res = 0;
    for (uint64_t i = 0; i < NumShards; i++) {
      auto seg = segments_[i].load(std::memory_order_acquire);
      if (seg) {
        res += seg->size();
      }
    }
    return res;
  }

  float max_load_factor() const {
    return load_factor_;
  }

  void max_load_factor(float factor) {
    for (uint64_t i = 0; i < NumShards; i++) {
      auto seg = segments_[i].load(std::memory_order_acquire);
      if (seg) {
        seg->max_load_factor(factor);
      }
    }
  }

  class ConstIterator {
   public:
    friend class ConcurrentHashMap;

    const value_type& operator*() const {
      return *it_;
    }

    const value_type* operator->() const {
      return &*it_;
    }

    ConstIterator& operator++() {
      ++it_;
      next();
      return *this;
    }

    bool operator==(const ConstIterator& o) const {
      return it_ == o.it_ && segment_ == o.segment_;
    }

    bool operator!=(const ConstIterator& o) const {
      return !(*this == o);
    }

    ConstIterator& operator=(const ConstIterator& o) = delete;

    ConstIterator& operator=(ConstIterator&& o) noexcept {
      if (this != &o) {
        it_ = std::move(o.it_);
        segment_ = std::exchange(o.segment_, uint64_t(NumShards));
        parent_ = std::exchange(o.parent_, nullptr);
      }
      return *this;
    }

    ConstIterator(const ConstIterator& o) = delete;

    ConstIterator(ConstIterator&& o) noexcept
        : it_(std::move(o.it_)),
          segment_(std::exchange(o.segment_, uint64_t(NumShards))),
          parent_(std::exchange(o.parent_, nullptr)) {}

    ConstIterator(const ConcurrentHashMap* parent, uint64_t segment)
        : segment_(segment), parent_(parent) {}

   private:
    // cbegin iterator
    explicit ConstIterator(const ConcurrentHashMap* parent)
        : it_(parent->ensureSegment(0)->cbegin()),
          segment_(0),
          parent_(parent) {
      // Always advance to the first element; it could be in any shard.
      next();
    }

    // cend iterator
    explicit ConstIterator(uint64_t shards) : it_(nullptr), segment_(shards) {}

    // Advance to the next element, crossing into the next nonempty shard
    // when the current shard is exhausted.
    void next() {
      while (segment_ < parent_->NumShards &&
             it_ == parent_->ensureSegment(segment_)->cend()) {
        SegmentT* seg{nullptr};
        while (!seg) {
          segment_++;
          // Bounds-check before loading: segment_ may now equal NumShards.
          if (segment_ < parent_->NumShards) {
            seg = parent_->segments_[segment_].load(std::memory_order_acquire);
            if (!seg) {
              continue;
            }
            it_ = seg->cbegin();
          }
          break;
        }
      }
    }

    typename SegmentT::Iterator it_;
    uint64_t segment_;
    const ConcurrentHashMap* parent_;
  };

 private:
  uint64_t pickSegment(const KeyType& k) const {
    auto h = HashFn()(k);
    // Use the lowest bits for our shard bits.
    //
    // This works well even if the hash function is biased towards the
    // low bits: the sharding will happen in the segments_ instead of
    // in the segment buckets, so we'll still get write sharding as
    // well.
    //
    // Low-bit bias happens often for std::hash of small numbers, since
    // the integer hash function is the identity function.
    return h & (NumShards - 1);
  }
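
  // For example, with the default ShardBits = 8, NumShards is 256; since
  // std::hash of a small integer is typically the identity function, keys
  // 0..255 map to shards 0..255 and key 256 wraps back to shard 0.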

  SegmentT* ensureSegment(uint64_t i) const {
    SegmentT* seg = segments_[i].load(std::memory_order_acquire);
    if (!seg) {
      SegmentT* newseg = (SegmentT*)Allocator().allocate(sizeof(SegmentT));
      newseg = new (newseg)
          SegmentT(size_ >> ShardBits, load_factor_, max_size_ >> ShardBits);
      if (!segments_[i].compare_exchange_strong(seg, newseg)) {
        // Lost the race: seg now holds the segment another thread
        // installed; delete ours.
        newseg->~SegmentT();
        Allocator().deallocate((uint8_t*)newseg, sizeof(SegmentT));
      } else {
        seg = newseg;
      }
    }
    return seg;
  }

  mutable Atom<SegmentT*> segments_[NumShards];
  size_t size_{0};
  size_t max_size_{0};
};

} // namespace folly