AtomicHashMap-inl.h
/*
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_ATOMICHASHMAP_H_
#error "This should only be included by AtomicHashMap.h"
#endif

#include <folly/detail/AtomicHashUtils.h>

namespace folly {

// AtomicHashMap constructor -- Atomic wrapper that allows growth
// This class has a lot of overhead (184 Bytes) so only use for big maps
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::AtomicHashMap(size_t finalSizeEst, const Config& config)
    : kGrowthFrac_(
          config.growthFactor < 0 ? 1.0f - config.maxLoadFactor
                                  : config.growthFactor) {
  CHECK(config.maxLoadFactor > 0.0f && config.maxLoadFactor < 1.0f);
  subMaps_[0].store(
      SubMap::create(finalSizeEst, config).release(),
      std::memory_order_relaxed);
  auto subMapCount = kNumSubMaps_;
  FOR_EACH_RANGE (i, 1, subMapCount) {
    subMaps_[i].store(nullptr, std::memory_order_relaxed);
  }
  numMapsAllocated_.store(1, std::memory_order_relaxed);
}
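
// A minimal construction sketch (illustrative only; assumes the common
// AtomicHashMap<int64_t, int64_t> instantiation):
//
//   folly::AtomicHashMap<int64_t, int64_t>::Config config;
//   config.maxLoadFactor = 0.75; // must be in (0.0, 1.0), per the CHECK above
//   config.growthFactor = -1;    // < 0: secondary maps sized via
//                                // kGrowthFrac_ = 1.0 - maxLoadFactor
//   folly::AtomicHashMap<int64_t, int64_t> ahm(100000, config);
//
// finalSizeEst sizes only the primary submap; insertions that overflow it
// spill into the lazily allocated secondary maps handled below.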

// emplace --
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
template <
    typename LookupKeyT,
    typename LookupHashFcn,
    typename LookupEqualFcn,
    typename LookupKeyToKeyFcn,
    typename... ArgTs>
std::pair<
    typename AtomicHashMap<
        KeyT,
        ValueT,
        HashFcn,
        EqualFcn,
        Allocator,
        ProbeFcn,
        KeyConvertFcn>::iterator,
    bool>
AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::emplace(LookupKeyT k, ArgTs&&... vCtorArgs) {
  SimpleRetT ret = insertInternal<
      LookupKeyT,
      LookupHashFcn,
      LookupEqualFcn,
      LookupKeyToKeyFcn>(k, std::forward<ArgTs>(vCtorArgs)...);
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return std::make_pair(
      iterator(this, ret.i, subMap->makeIter(ret.j)), ret.success);
}
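
// Usage sketch for emplace (illustrative): the trailing arguments construct
// the value in place only when the key is absent.
//
//   auto ret = ahm.emplace(42, 100);
//   if (!ret.second) {
//     // Key 42 was already present; ret.first points at the existing
//     // entry and the stored value is left untouched.
//   }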

// insertInternal -- Allocates new sub maps as existing ones fill up.
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
template <
    typename LookupKeyT,
    typename LookupHashFcn,
    typename LookupEqualFcn,
    typename LookupKeyToKeyFcn,
    typename... ArgTs>
typename AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::SimpleRetT
AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
beginInsertInternal:
  auto nextMapIdx = // this maintains our state
      numMapsAllocated_.load(std::memory_order_acquire);
  typename SubMap::SimpleRetT ret;
  FOR_EACH_RANGE (i, 0, nextMapIdx) {
    // insert in each map successively. If one succeeds, we're done!
    SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
    ret = subMap->template insertInternal<
        LookupKeyT,
        LookupHashFcn,
        LookupEqualFcn,
        LookupKeyToKeyFcn>(key, std::forward<ArgTs>(vCtorArgs)...);
    if (ret.idx == subMap->capacity_) {
      continue; // map is full, so try the next one
    }
    // Either collision or success - insert in either case
    return SimpleRetT(i, ret.idx, ret.success);
  }

  // If we made it this far, all maps are full and we need to try to allocate
  // the next one.

  SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
  if (nextMapIdx >= kNumSubMaps_ ||
      primarySubMap->capacity_ * kGrowthFrac_ < 1.0) {
    // Can't allocate any more sub maps.
    throw AtomicHashMapFullError();
  }

  if (tryLockMap(nextMapIdx)) {
    // Alloc a new map and shove it in. We can change whatever
    // we want because other threads are waiting on us...
    size_t numCellsAllocated = (size_t)(
        primarySubMap->capacity_ *
        std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
    size_t newSize = size_t(numCellsAllocated * kGrowthFrac_);
    DCHECK(
        subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
        (SubMap*)kLockedPtr_);
    // create a new map using the settings stored in the first map

    Config config;
    config.emptyKey = primarySubMap->kEmptyKey_;
    config.lockedKey = primarySubMap->kLockedKey_;
    config.erasedKey = primarySubMap->kErasedKey_;
    config.maxLoadFactor = primarySubMap->maxLoadFactor();
    config.entryCountThreadCacheSize =
        primarySubMap->getEntryCountThreadCacheSize();
    subMaps_[nextMapIdx].store(
        SubMap::create(newSize, config).release(), std::memory_order_relaxed);

    // Publish the map to other threads.
    numMapsAllocated_.fetch_add(1, std::memory_order_release);
    DCHECK_EQ(
        nextMapIdx + 1, numMapsAllocated_.load(std::memory_order_relaxed));
  } else {
    // If we lost the race, we'll have to wait for the next map to get
    // allocated before doing any insertion here.
    detail::atomic_hash_spin_wait([&] {
      return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
    });
  }

  // Relaxed is ok here because either we just created this map, or we
  // just did a spin wait with an acquire load on numMapsAllocated_.
  SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
  DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
  ret = loadedMap->insertInternal(key, std::forward<ArgTs>(vCtorArgs)...);
  if (ret.idx != loadedMap->capacity_) {
    return SimpleRetT(nextMapIdx, ret.idx, ret.success);
  }
  // We took way too long and the new map is already full...try again from
  // the top (this should pretty much never happen).
  goto beginInsertInternal;
}
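
// Worked example of the sizing math above (illustrative numbers): with a
// primary capacity_ of 1024 and kGrowthFrac_ = 0.2, allocating submap 1
// (nextMapIdx == 1) computes
//   numCellsAllocated = 1024 * pow(1.2, 0) = 1024
//   newSize           = 1024 * 0.2         = 204 cells
// and each later submap grows by another factor of (1 + kGrowthFrac_), so
// total capacity is bounded by a geometric series across kNumSubMaps_ maps.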

// find --
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
typename AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::iterator
AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::find(LookupKeyT k) {
  SimpleRetT ret = findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
  if (!ret.success) {
    return end();
  }
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return iterator(this, ret.i, subMap->makeIter(ret.j));
}
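
// Lookup sketch (illustrative):
//
//   auto it = ahm.find(42);
//   if (it != ahm.end()) {
//     int64_t v = it->second; // iterators expose value_type, like std::map
//   }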

template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
typename AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::const_iterator
AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::find(LookupKeyT k) const {
  return const_cast<AtomicHashMap*>(this)
      ->find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
}

// findInternal --
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
typename AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::SimpleRetT
AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::findInternal(const LookupKeyT k) const {
  SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
  typename SubMap::SimpleRetT ret =
      primaryMap
          ->template findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(
              k);
  if (LIKELY(ret.idx != primaryMap->capacity_)) {
    return SimpleRetT(0, ret.idx, ret.success);
  }
  const unsigned int numMaps =
      numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE (i, 1, numMaps) {
    // Check each map successively. If one succeeds, we're done!
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    ret =
        thisMap
            ->template findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(
                k);
    if (LIKELY(ret.idx != thisMap->capacity_)) {
      return SimpleRetT(i, ret.idx, ret.success);
    }
  }
  // Didn't find our key...
  return SimpleRetT(numMaps, 0, false);
}

// findAtInternal -- see encodeIndex() for details.
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
typename AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::SimpleRetT
AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::findAtInternal(uint32_t idx) const {
  uint32_t subMapIdx, subMapOffset;
  if (idx & kSecondaryMapBit_) {
    // idx falls in a secondary map
    idx &= ~kSecondaryMapBit_; // unset secondary bit
    subMapIdx = idx >> kSubMapIndexShift_;
    DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
    subMapOffset = idx & kSubMapIndexMask_;
  } else {
    // idx falls in primary map
    subMapIdx = 0;
    subMapOffset = idx;
  }
  return SimpleRetT(subMapIdx, subMapOffset, true);
}

// erase --
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
typename AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::size_type
AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::erase(const KeyT k) {
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE (i, 0, numMaps) {
    // Check each map successively. If one succeeds, we're done!
    if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
      return 1;
    }
  }
  // Didn't find our key...
  return 0;
}

// capacity -- summation of capacities of all submaps
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
size_t AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::capacity() const {
  size_t totalCap(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE (i, 0, numMaps) {
    totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
  }
  return totalCap;
}

// spaceRemaining --
// number of new insertions until current submaps are all at max load
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
size_t AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::spaceRemaining() const {
  size_t spaceRem(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE (i, 0, numMaps) {
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    // Clamp at zero in case a submap is already past its max load.
    spaceRem += std::max<int64_t>(
        0,
        int64_t(thisMap->maxEntries_) -
            int64_t(thisMap->numEntries_.readFull()));
  }
  return spaceRem;
}

// clear -- Wipes all keys and values from primary map and destroys
// all secondary maps. Not thread safe.
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
void AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::clear() {
  subMaps_[0].load(std::memory_order_relaxed)->clear();
  int const numMaps = numMapsAllocated_.load(std::memory_order_relaxed);
  FOR_EACH_RANGE (i, 1, numMaps) {
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    DCHECK(thisMap);
    SubMap::destroy(thisMap);
    subMaps_[i].store(nullptr, std::memory_order_relaxed);
  }
  numMapsAllocated_.store(1, std::memory_order_relaxed);
}

// size --
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
size_t AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::size() const {
  size_t totalSize(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE (i, 0, numMaps) {
    totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
  }
  return totalSize;
}

// encodeIndex -- Encode the submap index and offset into return.
// index_ret must be pre-populated with the submap offset.
//
// We leave index_ret untouched when referring to the primary map
// so it can be as large as possible (31 data bits). Max size of
// secondary maps is limited by what can fit in the low 27 bits.
//
// Returns the following bit-encoded data in index_ret:
//   if subMap == 0 (primary map) =>
//       bit(s)  value
//       31      0
//       0-30    submap offset (index_ret input)
//
//   if subMap > 0 (secondary maps) =>
//       bit(s)  value
//       31      1
//       27-30   which subMap
//       0-26    subMap offset (index_ret input)
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
inline uint32_t AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::encodeIndex(uint32_t subMap, uint32_t offset) {
  DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
  if (subMap == 0) {
    return offset;
  }
  // Make sure subMap isn't too big
  DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
  // Make sure subMap bits of offset are clear
  DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);

  // Set high-order bits to encode which submap this index belongs to
  return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
}
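
// Worked example (assuming the constants implied by the comment above:
// kSecondaryMapBit_ = 1u << 31, kSubMapIndexShift_ = 27): for subMap = 2 and
// offset = 5,
//   encodeIndex(2, 5) = 5 | (2 << 27) | (1u << 31) = 0x90000005
// findAtInternal() reverses this: it clears bit 31, reads the submap index
// from bits 27-30, and masks the low 27 bits to recover the offset.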

// Iterator implementation

template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
template <class ContT, class IterVal, class SubIt>
struct AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::ahm_iterator
    : boost::iterator_facade<
          ahm_iterator<ContT, IterVal, SubIt>,
          IterVal,
          boost::forward_traversal_tag> {
  explicit ahm_iterator() : ahm_(nullptr) {}

  // Conversion ctor for interoperability between const_iterator and
  // iterator. The enable_if<> magic keeps us well-behaved for
  // is_convertible<> (v. the iterator_facade documentation).
  template <class OtherContT, class OtherVal, class OtherSubIt>
  ahm_iterator(
      const ahm_iterator<OtherContT, OtherVal, OtherSubIt>& o,
      typename std::enable_if<
          std::is_convertible<OtherSubIt, SubIt>::value>::type* = nullptr)
      : ahm_(o.ahm_), subMap_(o.subMap_), subIt_(o.subIt_) {}

  /*
   * Returns the unique index that can be used for access directly
   * into the data storage.
   */
  uint32_t getIndex() const {
    CHECK(!isEnd());
    return ahm_->encodeIndex(subMap_, subIt_.getIndex());
  }

 private:
  friend class AtomicHashMap;
  explicit ahm_iterator(ContT* ahm, uint32_t subMap, const SubIt& subIt)
      : ahm_(ahm), subMap_(subMap), subIt_(subIt) {}

  friend class boost::iterator_core_access;

  void increment() {
    CHECK(!isEnd());
    ++subIt_;
    checkAdvanceToNextSubmap();
  }

  bool equal(const ahm_iterator& other) const {
    if (ahm_ != other.ahm_) {
      return false;
    }

    if (isEnd() || other.isEnd()) {
      return isEnd() == other.isEnd();
    }

    return subMap_ == other.subMap_ && subIt_ == other.subIt_;
  }

  IterVal& dereference() const {
    return *subIt_;
  }

  bool isEnd() const {
    return ahm_ == nullptr;
  }

  void checkAdvanceToNextSubmap() {
    if (isEnd()) {
      return;
    }

    SubMap* thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
    while (subIt_ == thisMap->end()) {
      // This sub iterator is done, advance to next one
      if (subMap_ + 1 <
          ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
        ++subMap_;
        thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
        subIt_ = thisMap->begin();
      } else {
        ahm_ = nullptr;
        return;
      }
    }
  }

 private:
  ContT* ahm_;
  uint32_t subMap_;
  SubIt subIt_;
}; // ahm_iterator
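
// Iteration sketch (illustrative): the forward iterator walks each submap in
// allocation order and becomes end() once the last submap is exhausted.
//
//   for (auto it = ahm.begin(); it != ahm.end(); ++it) {
//     process(it->first, it->second); // process() is hypothetical
//   }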

} // namespace folly