HazptrDomain.h
/*
 * Copyright 2018-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/synchronization/HazptrObj.h>
#include <folly/synchronization/HazptrRec.h>
#include <folly/synchronization/HazptrThrLocal.h>

#include <folly/Portability.h>
#include <folly/Singleton.h>
#include <folly/synchronization/AsymmetricMemoryBarrier.h>

#include <atomic>
#include <unordered_set> // for hash set in bulk_reclaim

namespace folly {

namespace detail {

constexpr int hazptr_domain_rcount_threshold() {
  return 1000;
}

} // namespace detail

/**
 *  hazptr_domain
 *
 *  A domain manages a set of hazard pointers and a set of retired objects.
 *
 *  Most user code need not specify any domains.
 */
template <template <typename> class Atom>
class hazptr_domain {
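  /* State: hazptrs_ is the global list of hazard pointer records, retired_
     is the stack of retired objects awaiting reclamation, hcount_ and
     rcount_ track their respective counts, and sync_time_ holds the
     deadline for the next time-based cleanup. */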
  static constexpr int kThreshold = detail::hazptr_domain_rcount_threshold();
  static constexpr int kMultiplier = 2;
  static constexpr uint64_t kSyncTimePeriod{2000000000}; // nanoseconds

  Atom<hazptr_rec<Atom>*> hazptrs_{nullptr};
  Atom<hazptr_obj<Atom>*> retired_{nullptr};
  Atom<uint64_t> sync_time_{0};
  /* Using signed int for rcount_ because it may transiently be negative.
     Using signed int for all integer variables that may be involved in
     calculations related to the value of rcount_. */
  Atom<int> hcount_{0};
  Atom<int> rcount_{0};
  Atom<uint16_t> num_bulk_reclaims_{0};
  bool shutdown_{false};

 public:
  hazptr_domain() = default;

  ~hazptr_domain() {
    shutdown_ = true;
    reclaim_all_objects();
    free_hazptr_recs();
  }

  hazptr_domain(const hazptr_domain&) = delete;
  hazptr_domain(hazptr_domain&&) = delete;
  hazptr_domain& operator=(const hazptr_domain&) = delete;
  hazptr_domain& operator=(hazptr_domain&&) = delete;

 public:
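  /* retire: wraps obj in a heap-allocated node that owns it through a
     unique_ptr<T, D>, then pushes the node onto this domain's retired
     list; the object is deleted later, once no hazard pointer protects it. */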
  template <typename T, typename D = std::default_delete<T>>
  void retire(T* obj, D reclaim = {}) {
    struct hazptr_retire_node : hazptr_obj<Atom> {
      std::unique_ptr<T, D> obj_;
      hazptr_retire_node(T* retireObj, D toReclaim)
          : obj_{retireObj, std::move(toReclaim)} {}
    };

    auto node = new hazptr_retire_node(obj, std::move(reclaim));
    node->reclaim_ = [](hazptr_obj<Atom>* p, hazptr_obj_list<Atom>&) {
      delete static_cast<hazptr_retire_node*>(p);
    };
    hazptr_obj_list<Atom> l(node);
    push_retired(l);
  }

  void cleanup() noexcept {
    relaxed_cleanup();
    wait_for_zero_bulk_reclaims(); // wait for concurrent bulk_reclaim-s
  }

 private:
  friend void hazptr_domain_push_retired<Atom>(
      hazptr_obj_list<Atom>&,
      bool check,
      hazptr_domain<Atom>&) noexcept;
  friend class hazptr_holder<Atom>;
  friend class hazptr_obj<Atom>;
#if FOLLY_HAZPTR_THR_LOCAL
  friend class hazptr_tc<Atom>;
#endif

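  /* Hazard pointer record management: hprec_acquire reuses an existing
     inactive record if one can be acquired, otherwise it allocates a new
     record and links it onto the global list. */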
  hazptr_rec<Atom>* hprec_acquire() {
    auto rec = try_acquire_existing_hprec();
    return rec != nullptr ? rec : acquire_new_hprec();
  }

  void hprec_release(hazptr_rec<Atom>* hprec) noexcept {
    hprec->release();
  }

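  /* push_retired: prepend the list l onto the domain's retired_ stack
     using a lock-free CAS loop. The light asymmetric barrier here is the
     counterpart of the heavy barrier issued in bulk_reclaim. */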
  void push_retired(hazptr_obj_list<Atom>& l, bool check = true) {
    /*** Full fence ***/ asymmetricLightBarrier();
    while (true) {
      auto r = retired();
      l.tail()->set_next(r);
      if (retired_.compare_exchange_weak(
              r,
              l.head(),
              std::memory_order_release,
              std::memory_order_acquire)) {
        break;
      }
    }
    rcount_.fetch_add(l.count(), std::memory_order_release);
    if (check) {
      check_cleanup_and_reclaim();
    }
  }

  hazptr_rec<Atom>* head() const noexcept {
    return hazptrs_.load(std::memory_order_acquire);
  }

  hazptr_obj<Atom>* retired() const noexcept {
    return retired_.load(std::memory_order_acquire);
  }

  int hcount() const noexcept {
    return hcount_.load(std::memory_order_acquire);
  }

  int rcount() const noexcept {
    return rcount_.load(std::memory_order_acquire);
  }

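  /* Heuristic gate for bulk reclamation: the retired count must exceed
     both the fixed threshold and a multiple of the hazard pointer count. */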
  bool reached_threshold(int rc, int hc) const noexcept {
    return rc >= kThreshold && rc >= kMultiplier * hc;
  }

  void reclaim_all_objects() {
    auto retired = retired_.exchange(nullptr);
    while (retired) {
      auto obj = retired;
      hazptr_obj_list<Atom> l;
      while (obj) {
        auto next = obj->next();
        DCHECK(obj != next);
        (*(obj->reclaim()))(obj, l);
        obj = next;
      }
      if (l.count()) {
        push_retired(l);
      }
      retired = retired_.exchange(nullptr);
    }
  }

  void free_hazptr_recs() {
    /* Leak the hazard pointers for the default domain to avoid
       destruction order issues with thread caches. */
    if (this == &default_hazptr_domain<Atom>()) {
      return;
    }
    auto rec = head();
    while (rec) {
      auto next = rec->next();
      DCHECK(!rec->active());
      delete rec;
      rec = next;
    }
  }

  void check_cleanup_and_reclaim() {
    if (try_timed_cleanup()) {
      return;
    }
    if (reached_threshold(rcount(), hcount())) {
      try_bulk_reclaim();
    }
  }

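  /* relaxed_cleanup: collects objects retired to thread-local lists (when
     FOLLY_HAZPTR_THR_LOCAL is enabled), pushes them to the domain, and
     then runs a transitive bulk reclamation pass. */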
  void relaxed_cleanup() noexcept {
#if FOLLY_HAZPTR_THR_LOCAL
    hazptr_obj<Atom>* h = nullptr;
    hazptr_obj<Atom>* t = nullptr;
    for (hazptr_priv<Atom>& priv :
         hazptr_priv_singleton<Atom>::accessAllThreads()) {
      priv.collect(h, t);
    }
    if (h) {
      DCHECK(t);
      hazptr_obj_list<Atom> l(h, t, 0);
      push_retired(l);
    }
#endif
    rcount_.store(0, std::memory_order_release);
    bulk_reclaim(true);
  }

  void wait_for_zero_bulk_reclaims() {
    while (num_bulk_reclaims_.load(std::memory_order_acquire) > 0) {
      std::this_thread::yield();
    }
  }

  void try_bulk_reclaim() {
    auto hc = hcount();
    auto rc = rcount();
    if (!reached_threshold(rc, hc)) {
      return;
    }
    rc = rcount_.exchange(0, std::memory_order_release);
    if (!reached_threshold(rc, hc)) {
      /* No need to add rc back to rcount_. At least one concurrent
         try_bulk_reclaim will proceed to bulk_reclaim. */
      return;
    }
    bulk_reclaim();
  }

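  /* bulk_reclaim: snapshot the retired list, scan all hazard pointer
     records into a hash set, and reclaim every retired object whose
     address is not currently protected. If transitive is true, repeat
     until no objects (including newly retired children) remain. */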
  void bulk_reclaim(bool transitive = false) {
    num_bulk_reclaims_.fetch_add(1, std::memory_order_acquire);
    while (true) {
      auto obj = retired_.exchange(nullptr, std::memory_order_acquire);
      /*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
      auto rec = hazptrs_.load(std::memory_order_acquire);
      /* Part 1 - read hazard pointer values into private search structure */
      std::unordered_set<const void*> hashset; // TODO: lock-free fixed hash set
      for (; rec; rec = rec->next()) {
        hashset.insert(rec->hazptr());
      }
      /* Part 2 - for each retired object, reclaim if no match */
      if (bulk_lookup_and_reclaim(obj, hashset) || !transitive) {
        break;
      }
    }
    num_bulk_reclaims_.fetch_sub(1, std::memory_order_release);
  }
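  /* bulk_lookup_and_reclaim: reclaim each retired object not found in the
     hazard pointer hash set; objects still protected are pushed back onto
     the retired list, together with any children retired during
     reclamation. Returns true when nothing remains to be processed. */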

  bool bulk_lookup_and_reclaim(
      hazptr_obj<Atom>* obj,
      const std::unordered_set<const void*>& hashset) {
    hazptr_obj_list<Atom> children;
    hazptr_obj_list<Atom> matched;
    while (obj) {
      auto next = obj->next();
      DCHECK_NE(obj, next);
      if (hashset.count(obj->raw_ptr()) == 0) {
        (*(obj->reclaim()))(obj, children);
      } else {
        matched.push(obj);
      }
      obj = next;
    }
#if FOLLY_HAZPTR_THR_LOCAL
    if (!shutdown_) {
      hazptr_priv_tls<Atom>().push_all_to_domain(false);
    }
#endif
    bool done = ((children.count() == 0) && (retired() == nullptr));
    matched.splice(children);
    if (matched.count() > 0) {
      push_retired(matched, false /* don't call bulk_reclaim recursively */);
    }
    return done;
  }

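  /* try_timed_cleanup: rate-limits cleanup to at most once per
     kSyncTimePeriod by advancing sync_time_ with a CAS; only the thread
     that wins the CAS performs the (relaxed) cleanup. */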
  bool try_timed_cleanup() {
    uint64_t time = std::chrono::duration_cast<std::chrono::nanoseconds>(
                        std::chrono::steady_clock::now().time_since_epoch())
                        .count();
    auto prevtime = sync_time_.load(std::memory_order_relaxed);
    if (time < prevtime ||
        !sync_time_.compare_exchange_strong(
            prevtime, time + kSyncTimePeriod, std::memory_order_relaxed)) {
      return false;
    }
    relaxed_cleanup(); // calling regular cleanup may self deadlock
    return true;
  }

  hazptr_rec<Atom>* try_acquire_existing_hprec() {
    auto rec = head();
    while (rec) {
      auto next = rec->next();
      if (rec->try_acquire()) {
        return rec;
      }
      rec = next;
    }
    return nullptr;
  }

  hazptr_rec<Atom>* acquire_new_hprec() {
    auto rec = new hazptr_rec<Atom>;
    rec->set_active();
    rec->set_domain(this);
    while (true) {
      auto h = head();
      rec->set_next(h);
      if (hazptrs_.compare_exchange_weak(
              h, rec, std::memory_order_release, std::memory_order_acquire)) {
        break;
      }
    }
    hcount_.fetch_add(1);
    return rec;
  }
}; // hazptr_domain

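/* Default domain: for std::atomic the global default_domain object is
   used; for any other Atom template a function-local static domain is
   created on first use. */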
template <template <typename> class Atom>
struct hazptr_default_domain_helper {
  static FOLLY_ALWAYS_INLINE hazptr_domain<Atom>& get() {
    static hazptr_domain<Atom> domain;
    return domain;
  }
};

template <>
struct hazptr_default_domain_helper<std::atomic> {
  static FOLLY_ALWAYS_INLINE hazptr_domain<std::atomic>& get() {
    return default_domain;
  }
};

template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE hazptr_domain<Atom>& default_hazptr_domain() {
  return hazptr_default_domain_helper<Atom>::get();
}

/** hazptr_domain_push_retired: push a list of retired objects into a domain */
template <template <typename> class Atom>
void hazptr_domain_push_retired(
    hazptr_obj_list<Atom>& l,
    bool check = true,
    hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept {
  domain.push_retired(l, check);
}

template <template <typename> class Atom, typename T, typename D>
FOLLY_ALWAYS_INLINE void hazptr_retire(T* obj, D reclaim) {
  default_hazptr_domain<Atom>().retire(obj, std::move(reclaim));
}

/** hazptr_cleanup: reclaims all reclaimable objects retired to the domain */
template <template <typename> class Atom>
void hazptr_cleanup(
    hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept {
  domain.cleanup();
}

} // namespace folly
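
Usage sketch (not part of this header): the snippet below shows how a domain is typically exercised through the folly hazptr API declared in folly/synchronization/Hazptr.h. The Widget type, the variable names, and the reliance on hazptr_obj_base, hazptr_holder, and hazptr_cleanup reflect one reading of that API and are illustrative assumptions, not code from this file.

// Illustrative sketch only; assumes the folly hazptr API from
// <folly/synchronization/Hazptr.h>. "Widget" is a hypothetical user type.
#include <atomic>
#include <folly/synchronization/Hazptr.h>

struct Widget : folly::hazptr_obj_base<Widget> {
  int value{0};
};

std::atomic<Widget*> shared{new Widget()};

void reader() {
  folly::hazptr_holder<std::atomic> h;  // acquires a hazard pointer record
  Widget* w = h.get_protected(shared);  // protected load of the shared pointer
  if (w) {
    int v = w->value;                   // safe to read while protected
    (void)v;
  }
}                                       // hazard pointer released when h is destroyed

void writer() {
  Widget* fresh = new Widget();
  Widget* old = shared.exchange(fresh);
  if (old) {
    old->retire();                      // pushed onto the default domain's retired list;
  }                                     // deleted once no hazard pointer points to it
}

void flush() {
  folly::hazptr_cleanup();              // reclaim everything no longer protected
}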