template <class T, size_t kNumSlots = 64>
class CoreCachedSharedPtr {
 public:
  explicit CoreCachedSharedPtr(const std::shared_ptr<T>& p = nullptr) {
    reset(p);
  }

  void reset(const std::shared_ptr<T>& p = nullptr) {
    // Allocate each Holder in a different CoreAllocator stripe to prevent
    // false sharing; allocate_shared() keeps each control block adjacent
    // to its slot.
    for (auto slot : folly::enumerate(slots_)) {
      auto alloc = getCoreAllocator<Holder, kNumSlots>(slot.index);
      auto holder = std::allocate_shared<Holder>(alloc, p);
      *slot = std::shared_ptr<T>(holder, p.get());
    }
  }

  std::shared_ptr<T> get() const {
    return slots_[AccessSpreader<>::current(kNumSlots)];
  }

 private:
  using Holder = std::shared_ptr<T>;

  template <class, size_t>
  friend class CoreCachedWeakPtr;

  std::array<std::shared_ptr<T>, kNumSlots> slots_;
};
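A minimal usage sketch, not part of the header: the Config type and the two function names are hypothetical, but the constructor, get(), and reset() calls match the class above. Note that reset() rebuilds every slot, so it must be externally synchronized with readers; the atomic variant further below removes that restriction.

#include <folly/concurrency/CoreCachedSharedPtr.h>

struct Config { int version = 0; }; // hypothetical payload type

folly::CoreCachedSharedPtr<Config> cachedConfig{std::make_shared<Config>()};

std::shared_ptr<Config> readConfig() {
  // Each core copies from its own slot, so hot readers do not all bump
  // the same control block's reference count.
  return cachedConfig.get();
}

void updateConfig(std::shared_ptr<Config> next) {
  // Rebuilds all slots; must not race with concurrent get() calls.
  cachedConfig.reset(next);
}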
template <class T, size_t kNumSlots = 64>
class CoreCachedWeakPtr {
 public:
  explicit CoreCachedWeakPtr(const CoreCachedSharedPtr<T, kNumSlots>& p) {
    for (auto slot : folly::enumerate(slots_)) {
      *slot = p.slots_[slot.index];
    }
  }

  std::weak_ptr<T> get() const {
    return slots_[AccessSpreader<>::current(kNumSlots)];
  }

 private:
  std::array<std::weak_ptr<T>, kNumSlots> slots_;
};
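A usage sketch for the weak variant, with a hypothetical Widget type: a CoreCachedWeakPtr is built from an existing CoreCachedSharedPtr and hands out per-core std::weak_ptr copies that can be promoted with lock().

struct Widget { int id = 0; }; // hypothetical payload type

folly::CoreCachedSharedPtr<Widget> strongCache{std::make_shared<Widget>()};
folly::CoreCachedWeakPtr<Widget> weakCache{strongCache};

void useIfAlive() {
  // get() copies this core's weak_ptr; lock() promotes it to a shared_ptr
  // only if the object is still alive.
  if (auto sp = weakCache.get().lock()) {
    // ... use *sp ...
  }
}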
template <class T, size_t kNumSlots = 64>
class AtomicCoreCachedSharedPtr {
 public:
  explicit AtomicCoreCachedSharedPtr(const std::shared_ptr<T>& p = nullptr) {
    reset(p);
  }
  ~AtomicCoreCachedSharedPtr() {
    // Destruction is synchronized externally, so delete directly, no retire().
    auto slots = slots_.load(std::memory_order_acquire);
    if (slots) {
      delete slots;
    }
  }
  void reset(const std::shared_ptr<T>& p = nullptr) {
    auto newslots = folly::make_unique<Slots>();
    // Allocate each Holder in a different CoreAllocator stripe to prevent
    // false sharing; allocate_shared() keeps each control block adjacent.
    for (auto slot : folly::enumerate(newslots->slots_)) {
      auto alloc = getCoreAllocator<Holder, kNumSlots>(slot.index);
      auto holder = std::allocate_shared<Holder>(alloc, p);
      *slot = std::shared_ptr<T>(holder, p.get());
    }
    auto oldslots = slots_.exchange(newslots.release());
    if (oldslots) {
      oldslots->retire();
    }
  }
  std::shared_ptr<T> get() const {
    folly::hazptr_local<1> hazptr; // local hazard pointer protecting the read
    auto slots = hazptr[0].get_protected(slots_);
    if (!slots) {
      return nullptr;
    }
    return slots->slots_[AccessSpreader<>::current(kNumSlots)];
  }
 private:
  using Holder = std::shared_ptr<T>;
  struct Slots : folly::hazptr_obj_base<Slots> {
    std::array<std::shared_ptr<T>, kNumSlots> slots_;
  };
  std::atomic<Slots*> slots_{nullptr};
};
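A usage sketch for the atomic variant, with a hypothetical Snapshot type and function names: unlike CoreCachedSharedPtr, reset() here may run concurrently with get(), because readers protect the slot array with a hazard pointer and the old array is retired rather than deleted in place.

struct Snapshot { int epoch = 0; }; // hypothetical payload type

folly::AtomicCoreCachedSharedPtr<Snapshot> current{std::make_shared<Snapshot>()};

std::shared_ptr<Snapshot> readSnapshot() {
  // Safe to call from many threads concurrently with publishSnapshot().
  return current.get();
}

void publishSnapshot(std::shared_ptr<Snapshot> next) {
  // Swaps in a new slot array; the old one is reclaimed once no hazard
  // pointer still protects it.
  current.reset(next);
}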