#include <atomic>
#include <chrono>
#include <memory>
#include <thread>
#include <unordered_set>

/** hazptr_domain: manages a set of hazard pointer records and a list of
    retired objects awaiting reclamation. */
template <template <typename> class Atom>
class hazptr_domain {
  static constexpr int kThreshold = detail::hazptr_domain_rcount_threshold();
  static constexpr int kMultiplier = 2;
  static constexpr uint64_t kSyncTimePeriod{2000000000}; // 2 seconds in ns

  Atom<hazptr_rec<Atom>*> hazptrs_{nullptr}; // lock-free list of hazptr records
  Atom<hazptr_obj<Atom>*> retired_{nullptr}; // lock-free list of retired objects
  Atom<uint64_t> sync_time_{0};              // earliest time for next timed cleanup
  Atom<int> hcount_{0};                      // number of hazptr records
  Atom<int> rcount_{0};                      // number of retired objects
  Atom<uint16_t> num_bulk_reclaims_{0};      // bulk reclamations in progress
  bool shutdown_{false};
 public:
  hazptr_domain() = default;
  hazptr_domain(const hazptr_domain&) = delete;

  ~hazptr_domain() {
    shutdown_ = true;
    reclaim_all_objects();
    free_hazptr_recs();
  }
  /** retire - with custom deleter D, defaulting to std::default_delete<T> */
  template <typename T, typename D = std::default_delete<T>>
  void retire(T* obj, D reclaim = {}) {
    struct hazptr_retire_node : hazptr_obj<Atom> {
      std::unique_ptr<T, D> obj_;
      hazptr_retire_node(T* retireObj, D toReclaim)
          : obj_{retireObj, std::move(toReclaim)} {}
    };

    auto node = new hazptr_retire_node(obj, std::move(reclaim));
    node->reclaim_ = [](hazptr_obj<Atom>* p, hazptr_obj_list<Atom>&) {
      delete static_cast<hazptr_retire_node*>(p);
    };
    hazptr_obj_list<Atom> l(node);
    push_retired(l);
  }
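  // Usage sketch (illustrative, not part of this file): retiring objects
  // through a domain; Widget is a hypothetical type. With the default
  // deleter, retire(w) reclaims with plain delete.
  //
  //   struct Widget { int x; };
  //   auto& d = default_hazptr_domain<std::atomic>();
  //   auto* w = new Widget{42};
  //   d.retire(w);                              // reclaimed with delete w
  //   auto* v = new Widget{7};
  //   d.retire(v, [](Widget* p) { delete p; }); // custom deleter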
  /** cleanup - reclaim eagerly, then wait for in-flight bulk reclamations */
  void cleanup() noexcept {
    relaxed_cleanup();
    wait_for_zero_bulk_reclaims();
  }
 private:
  friend void hazptr_domain_push_retired<Atom>(
      hazptr_obj_list<Atom>&, bool, hazptr_domain<Atom>&) noexcept;
#if FOLLY_HAZPTR_THR_LOCAL
  // ... (thread-local cache friends elided)
#endif

  /** hprec_acquire - reuse an inactive record or allocate a new one */
  hazptr_rec<Atom>* hprec_acquire() {
    auto rec = try_acquire_existing_hprec();
    return rec != nullptr ? rec : acquire_new_hprec();
  }
  /** push_retired - splice list l onto the domain's retired list */
  void push_retired(hazptr_obj_list<Atom>& l, bool check = true) {
    /*** Full fence ***/ asymmetricLightBarrier();
    while (true) {
      auto r = retired();
      l.tail()->set_next(r);
      if (retired_.compare_exchange_weak(
              r,
              l.head(),
              std::memory_order_release,
              std::memory_order_acquire)) {
        break;
      }
    }
    rcount_.fetch_add(l.count(), std::memory_order_release);
    if (check) {
      check_cleanup_and_reclaim();
    }
  }
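  // The loop above is a Treiber-style push of a whole sublist: point the
  // incoming list's tail at the current head, then CAS the head from r to
  // l.head(). A minimal standalone analogue of the idiom (assumed Node type,
  // sketch only):
  //
  //   std::atomic<Node*> top{nullptr};
  //   void push_all(Node* first, Node* last) {
  //     Node* r = top.load(std::memory_order_acquire);
  //     do {
  //       last->next = r; // re-linked on every CAS failure
  //     } while (!top.compare_exchange_weak(
  //         r, first, std::memory_order_release, std::memory_order_acquire));
  //   }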
  hazptr_rec<Atom>* head() const noexcept {
    return hazptrs_.load(std::memory_order_acquire);
  }

  hazptr_obj<Atom>* retired() const noexcept {
    return retired_.load(std::memory_order_acquire);
  }

  int hcount() const noexcept {
    return hcount_.load(std::memory_order_acquire);
  }

  int rcount() const noexcept {
    return rcount_.load(std::memory_order_acquire);
  }

  bool reached_threshold(int rc, int hc) const noexcept {
    return rc >= kThreshold && rc >= kMultiplier * hc;
  }
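  // Worked example (values assumed; hazptr_domain_rcount_threshold() supplies
  // kThreshold): with kThreshold = 1000 and kMultiplier = 2, a domain holding
  // hc = 100 hazard pointers starts bulk reclamation once rc >= 1000; with
  // hc = 800 it waits until rc >= 1600. The kMultiplier term amortizes the
  // O(hc) scan in bulk_reclaim() against the number of objects that can be
  // reclaimed per pass.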
  void reclaim_all_objects() {
    auto retired = retired_.exchange(nullptr);
    while (retired) {
      auto obj = retired;
      hazptr_obj_list<Atom> l;
      while (obj) {
        auto next = obj->next();
        (*(obj->reclaim()))(obj, l); // may push children onto l
        obj = next;
      }
      if (l.count()) {
        push_retired(l);
      }
      retired = retired_.exchange(nullptr);
    }
  }
  void free_hazptr_recs() {
    if (this == &default_hazptr_domain<Atom>()) {
      return; // records of the default domain are never freed
    }
    auto rec = head();
    while (rec) {
      auto next = rec->next();
      DCHECK(!rec->active());
      delete rec;
      rec = next;
    }
  }
  void check_cleanup_and_reclaim() {
    if (try_timed_cleanup()) {
      return;
    }
    if (reached_threshold(rcount(), hcount())) {
      try_bulk_reclaim();
    }
  }
  void relaxed_cleanup() noexcept {
#if FOLLY_HAZPTR_THR_LOCAL
    /* Gather retired objects from all thread-local lists into one list
       [h, t] and push it to the domain. */
    hazptr_obj<Atom>* h = nullptr;
    hazptr_obj<Atom>* t = nullptr;
    for (hazptr_priv<Atom>& priv :
         hazptr_priv_singleton<Atom>::accessAllThreads()) {
      priv.collect(h, t);
    }
    if (h) {
      hazptr_obj_list<Atom> l(h, t, 0);
      push_retired(l);
    }
#endif
    rcount_.store(0, std::memory_order_release);
    bulk_reclaim(true);
  }
  void wait_for_zero_bulk_reclaims() {
    while (num_bulk_reclaims_.load(std::memory_order_acquire) > 0) {
      std::this_thread::yield();
    }
  }
  void try_bulk_reclaim() {
    auto hc = hcount();
    auto rc = rcount();
    if (!reached_threshold(rc, hc)) {
      return;
    }
    rc = rcount_.exchange(0, std::memory_order_release);
    if (!reached_threshold(rc, hc)) {
      return; // lost the race; another thread will reclaim
    }
    bulk_reclaim();
  }
  void bulk_reclaim(bool transitive = false) {
    num_bulk_reclaims_.fetch_add(1, std::memory_order_acquire);
    while (true) {
      auto obj = retired_.exchange(nullptr, std::memory_order_acquire);
      /*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
      auto rec = hazptrs_.load(std::memory_order_acquire);
      /* Part 1 - read hazard pointer values into a hash set */
      std::unordered_set<const void*> hashset;
      for (; rec; rec = rec->next()) {
        hashset.insert(rec->hazptr());
      }
      /* Part 2 - reclaim retired objects not matching any hazard pointer */
      if (bulk_lookup_and_reclaim(obj, hashset) || !transitive) {
        break;
      }
    }
    num_bulk_reclaims_.fetch_sub(1, std::memory_order_release);
  }
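  // Reader-side sketch (assumed API from the same library, simplified): a
  // thread publishes the pointer it is about to dereference in a hazptr
  // record via hazptr_holder; the scan above then finds that address in
  // hashset, and bulk_lookup_and_reclaim() below keeps the object on the
  // retired list instead of reclaiming it.
  //
  //   hazptr_holder<std::atomic> h;
  //   Node* p = h.get_protected(head_); // head_: std::atomic<Node*>
  //   if (p) {
  //     use(*p); // safe: p cannot be reclaimed while protected
  //   }
  //   h.reset(); // end protection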
  bool bulk_lookup_and_reclaim(
      hazptr_obj<Atom>* obj,
      const std::unordered_set<const void*>& hashset) {
    hazptr_obj_list<Atom> children;
    hazptr_obj_list<Atom> matched;
    while (obj) {
      auto next = obj->next();
      DCHECK_NE(obj, next);
      if (hashset.count(obj->raw_ptr()) == 0) {
        (*(obj->reclaim()))(obj, children); // may retire children of obj
      } else {
        matched.push(obj); // still protected; keep for a later pass
      }
      obj = next;
    }
#if FOLLY_HAZPTR_THR_LOCAL
    if (!shutdown_) {
      hazptr_priv_tls<Atom>().push_all_to_domain(false);
    }
#endif
    bool done = ((children.count() == 0) && (retired() == nullptr));
    matched.splice(children);
    if (matched.count() > 0) {
      push_retired(matched, false /* no recursive call to bulk_reclaim */);
    }
    return done;
  }
  bool try_timed_cleanup() {
    uint64_t time = std::chrono::duration_cast<std::chrono::nanoseconds>(
                        std::chrono::steady_clock::now().time_since_epoch())
                        .count();
    auto prevtime = sync_time_.load(std::memory_order_relaxed);
    if (time < prevtime ||
        !sync_time_.compare_exchange_strong(
            prevtime, time + kSyncTimePeriod, std::memory_order_relaxed)) {
      return false;
    }
    relaxed_cleanup(); // calling cleanup() here could self-deadlock
    return true;
  }
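  // Rate limiting (sketch of the arithmetic): kSyncTimePeriod is
  // 2,000,000,000 ns = 2 s, so at most one timed cleanup runs per domain per
  // two-second window; the compare_exchange_strong on sync_time_ lets exactly
  // one of any racing threads claim the current window.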
  hazptr_rec<Atom>* try_acquire_existing_hprec() {
    auto rec = head();
    while (rec) {
      auto next = rec->next();
      if (rec->try_acquire()) {
        return rec;
      }
      rec = next;
    }
    return nullptr;
  }
  hazptr_rec<Atom>* acquire_new_hprec() {
    auto rec = new hazptr_rec<Atom>;
    rec->set_active();
    rec->set_domain(this);
    while (true) {
      auto h = head();
      rec->set_next(h);
      if (hazptrs_.compare_exchange_weak(
              h, rec, std::memory_order_release, std::memory_order_acquire)) {
        break;
      }
    }
    hcount_.fetch_add(1);
    return rec;
  }
}; // class hazptr_domain
/** Free functions */

template <template <typename> class Atom>
struct hazptr_default_domain_helper {
  static FOLLY_ALWAYS_INLINE hazptr_domain<Atom>& get() {
    static hazptr_domain<Atom> domain;
    return domain;
  }
};

template <>
struct hazptr_default_domain_helper<std::atomic> {
  static FOLLY_ALWAYS_INLINE hazptr_domain<std::atomic>& get() {
    return default_domain; // the process-wide default domain
  }
};
template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE hazptr_domain<Atom>& default_hazptr_domain() {
  return hazptr_default_domain_helper<Atom>::get();
}
template <template <typename> class Atom>
void hazptr_domain_push_retired(
    hazptr_obj_list<Atom>& l,
    bool check = true,
    hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept {
  domain.push_retired(l, check);
}
template <template <typename> class Atom, typename T, typename D>
void hazptr_retire(T* obj, D reclaim = {}) {
  default_hazptr_domain<Atom>().retire(obj, std::move(reclaim));
}
template <template <typename> class Atom>
void hazptr_cleanup(
    hazptr_domain<Atom>& domain = default_hazptr_domain<Atom>()) noexcept {
  domain.cleanup();
}
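/* End-to-end usage sketch (assumed API, simplified): one thread reads through
   a hazard pointer while another unlinks and retires a node. hazptr_obj_base
   and hazptr_holder are assumed from the same library; only the free
   functions above are defined in this file.

     struct Node : hazptr_obj_base<Node> {
       int val{0};
     };
     std::atomic<Node*> head{new Node};

     // Reader thread:
     hazptr_holder<std::atomic> h;
     if (Node* p = h.get_protected(head)) {
       int v = p->val; // safe while protected
       (void)v;
     }

     // Writer thread:
     Node* old = head.exchange(new Node);
     old->retire();                   // goes to the default domain
     hazptr_cleanup<std::atomic>();   // optionally force reclamation
*/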