namespace threadlocal_detail {
StaticMetaBase::StaticMetaBase(ThreadEntry* (*threadEntry)(), bool strict)
    : nextId_(1), threadEntry_(threadEntry), strict_(strict) {
  // Register the pthread key whose destructor tears down this thread's
  // ThreadLocal elements at thread exit.
  int ret = pthread_key_create(&pthreadKey_, &onThreadExit);
  checkPosixError(ret, "pthread_key_create failed");
  PthreadKeyUnregister::registerKey(pthreadKey_);
}
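// ---------------------------------------------------------------------------
// Illustrative sketch (not folly code): the constructor above relies on the
// pthread key destructor mechanism -- the function passed as the second
// argument to pthread_key_create() runs at thread exit, once per thread, with
// that thread's last non-null value for the key. Names in key_dtor_example
// are hypothetical; <pthread.h> and <string> are assumed.
namespace key_dtor_example {

inline void onExit(void* p) {
  delete static_cast<std::string*>(p); // runs automatically at thread exit
}

inline pthread_key_t makeKey() {
  pthread_key_t k;
  pthread_key_create(&k, &onExit);
  return k;
}

} // namespace key_dtor_example
// ---------------------------------------------------------------------------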
// Returns the ThreadEntryList for the current thread, creating it on first
// use.
ThreadEntryList* StaticMetaBase::getThreadEntryList() {
#ifdef FOLLY_TLD_USE_FOLLY_TLS
  static FOLLY_TLS ThreadEntryList threadEntryListSingleton;
  return &threadEntryListSingleton;
#else
  class PthreadKey {
   public:
    PthreadKey() {
      int ret = pthread_key_create(&pthreadKey_, nullptr);
      checkPosixError(ret, "pthread_key_create failed");
      PthreadKeyUnregister::registerKey(pthreadKey_);
    }

    FOLLY_ALWAYS_INLINE pthread_key_t get() const {
      return pthreadKey_;
    }

   private:
    pthread_key_t pthreadKey_;
  };

  static auto instance = detail::createGlobal<PthreadKey, void>();

  ThreadEntryList* threadEntryList =
      static_cast<ThreadEntryList*>(pthread_getspecific(instance->get()));
  if (UNLIKELY(!threadEntryList)) {
    threadEntryList = new ThreadEntryList();
    int ret = pthread_setspecific(instance->get(), threadEntryList);
    checkPosixError(ret, "pthread_setspecific failed");
  }

  return threadEntryList;
#endif
}
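// ---------------------------------------------------------------------------
// Illustrative sketch (not folly code): the #else branch above is the classic
// pthread-specific fallback for platforms without compiler TLS -- create one
// process-wide key, then lazily attach a heap value per thread. Everything in
// example_detail is hypothetical; <pthread.h> is assumed.
namespace example_detail {

struct ExampleList {
  int count = 0;
};

inline pthread_key_t exampleKey() {
  static pthread_key_t key = [] {
    pthread_key_t k;
    pthread_key_create(&k, nullptr); // no destructor, matching the code above
    return k;
  }();
  return key;
}

inline ExampleList* getExampleList() {
  auto* list = static_cast<ExampleList*>(pthread_getspecific(exampleKey()));
  if (!list) {
    list = new ExampleList();
    pthread_setspecific(exampleKey(), list);
  }
  return list;
}

} // namespace example_detail
// ---------------------------------------------------------------------------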
void StaticMetaBase::onThreadExit(void* ptr) {
  auto threadEntry = static_cast<ThreadEntry*>(ptr);

  {
    auto& meta = *threadEntry->meta;

    // Keep this ThreadEntry reachable while destructors run: ThreadLocal A
    // may be accessed from ThreadLocal B's destructor.
    pthread_setspecific(meta.pthreadKey_, threadEntry);

    {
      std::lock_guard<std::mutex> g(meta.lock_);
      // Mark the entry as removed and unlink it from meta; after this the
      // ThreadEntry is private to this thread.
      threadEntry->removed_ = true;
      meta.erase(&(*threadEntry));
      auto elementsCapacity = threadEntry->getElementsCapacity();
      for (size_t i = 0u; i < elementsCapacity; ++i) {
        threadEntry->elements[i].node.eraseZero();
      }
    }

    // A user deleter may itself use a ThreadLocal with the same tag and
    // (re)create elements, so run cleanup rounds until a full pass disposes
    // nothing.
    for (bool shouldRun = true; shouldRun;) {
      shouldRun = false;
      auto elementsCapacity = threadEntry->getElementsCapacity();
      FOR_EACH_RANGE (i, 0, elementsCapacity) {
        if (threadEntry->elements[i].dispose(TLPDestructionMode::THIS_THREAD)) {
          threadEntry->elements[i].cleanup();
          shouldRun = true;
        }
      }
    }
    pthread_setspecific(meta.pthreadKey_, nullptr);
  }

  auto threadEntryList = threadEntry->list;
  DCHECK_GT(threadEntryList->count, 0u);

  --threadEntryList->count;

  if (threadEntryList->count) {
    return;
  }
  // This was the last thread using the list: dispose everything still linked
  // in it, again looping to a fixpoint because cleanup can create elements.
  for (bool shouldRunOuter = true; shouldRunOuter;) {
    shouldRunOuter = false;
    auto tmp = threadEntryList->head;
    while (tmp) {
      auto& meta = *tmp->meta;
      pthread_setspecific(meta.pthreadKey_, tmp);

      for (bool shouldRunInner = true; shouldRunInner;) {
        shouldRunInner = false;
        auto elementsCapacity = tmp->getElementsCapacity();
        FOR_EACH_RANGE (i, 0, elementsCapacity) {
          if (tmp->elements[i].dispose(TLPDestructionMode::THIS_THREAD)) {
            tmp->elements[i].cleanup();
            shouldRunInner = true;
            shouldRunOuter = true;
          }
        }
      }

      pthread_setspecific(meta.pthreadKey_, nullptr);
      tmp = tmp->listNext;
    }
  }
  // Free the ThreadEntry list itself.
  auto head = threadEntryList->head;
  threadEntryList->head = nullptr;
  while (head) {
    auto tmp = head;
    head = head->listNext;
    if (tmp->elements) {
      free(tmp->elements);
      tmp->elements = nullptr;
      tmp->setElementsCapacity(0);
    }

#ifndef FOLLY_TLD_USE_FOLLY_TLS
    delete tmp;
#endif
  }

#ifndef FOLLY_TLD_USE_FOLLY_TLS
  delete threadEntryList;
#endif
}
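// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical types; assumes folly/ThreadLocal.h and
// <cstdio>): why the dispose loops above must run to a fixpoint. A destructor
// that touches another ThreadLocal can re-materialize an element after it was
// cleaned up, so a single pass could leak it.
namespace exit_order_example {

struct Logger {
  void log(const char* msg) { std::fprintf(stderr, "%s\n", msg); }
};

// Assumed to be defined elsewhere in this sketch.
extern folly::ThreadLocal<Logger> logger;

struct Connection {
  ~Connection() {
    // Running at thread exit, this may re-create logger's element after the
    // cleanup loop already disposed it -- hence the shouldRun* flags above.
    logger->log("closing connection");
  }
};

} // namespace exit_order_example
// ---------------------------------------------------------------------------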
uint32_t StaticMetaBase::allocate(EntryID* ent) {
  uint32_t id;
  auto& meta = *this;
  std::lock_guard<std::mutex> g(meta.lock_);

  id = ent->value.load();
  if (id != kEntryIDInvalid) {
    return id;
  }

  // Prefer recycling an id released by a destroyed ThreadLocal over minting
  // a new one, so per-thread element arrays stay dense.
  if (!meta.freeIds_.empty()) {
    id = meta.freeIds_.back();
    meta.freeIds_.pop_back();
  } else {
    id = meta.nextId_++;
  }

  uint32_t old_id = ent->value.exchange(id);
  DCHECK_EQ(old_id, kEntryIDInvalid);

  reserveHeadUnlocked(id); // we already hold meta.lock_

  return id;
}
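// ---------------------------------------------------------------------------
// Illustrative sketch (not folly code): allocate() above is a plain free-list
// id allocator. Ids released by destroy() are reused LIFO before nextId_ is
// bumped. Assumes <cstdint> and <vector>.
namespace id_example {

struct IdAllocator {
  uint32_t nextId = 1;
  std::vector<uint32_t> freeIds;

  uint32_t allocate() {
    if (!freeIds.empty()) {
      uint32_t id = freeIds.back(); // reuse the most recently freed id
      freeIds.pop_back();
      return id;
    }
    return nextId++; // otherwise mint a fresh one
  }

  void release(uint32_t id) { freeIds.push_back(id); }
};

} // namespace id_example
// ---------------------------------------------------------------------------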
void StaticMetaBase::destroy(EntryID* ent) {
  try {
    auto& meta = *this;

    // Elements in other threads that use this id.
    std::vector<ElementWrapper> elements;

    {
      std::lock_guard<std::mutex> g(meta.lock_);
      uint32_t id = ent->value.exchange(kEntryIDInvalid);
      if (id == kEntryIDInvalid) {
        return;
      }

      // Walk every ThreadEntry that has an element for this id, steal the
      // element, and clear the slot.
      auto& node = meta.head_.elements[id].node;
      while (!node.empty()) {
        auto* next = node.getNext();
        next->eraseZero();

        ThreadEntry* e = next->parent;
        auto elementsCapacity = e->getElementsCapacity();
        if (id < elementsCapacity && e->elements[id].ptr) {
          elements.push_back(e->elements[id]);
          e->elements[id].ptr = nullptr;
          e->elements[id].deleter1 = nullptr;
          e->elements[id].ownsDeleter = false;
        }
      }
      meta.freeIds_.push_back(id);
    }

    // Delete the collected elements outside the lock: user deleters may
    // re-enter ThreadLocal.
    for (ElementWrapper& elem : elements) {
      if (elem.dispose(TLPDestructionMode::ALL_THREADS)) {
        elem.cleanup();
      }
    }
  } catch (...) { // Just in case we get a lock error or something.
    LOG(WARNING) << "Destructor discarding an exception that was thrown.";
  }
}
/**
 * Grow threadEntry->elements so that index idval fits, returning either a
 * freshly allocated buffer or nullptr if the allocation grew in place.
 */
ElementWrapper* StaticMetaBase::reallocate(
    ThreadEntry* threadEntry,
    uint32_t idval,
    size_t& newCapacity) {
  size_t prevCapacity = threadEntry->getElementsCapacity();

  // Growth factor < 2; the +5 avoids a very slow start for small ids.
  auto smallCapacity = static_cast<size_t>((idval + 5) * kSmallGrowthFactor);
  auto bigCapacity = static_cast<size_t>((idval + 5) * kBigGrowthFactor);

  newCapacity =
      (threadEntry->meta &&
       (bigCapacity <= threadEntry->meta->head_.getElementsCapacity()))
      ? bigCapacity
      : smallCapacity;
  assert(newCapacity > prevCapacity);

  ElementWrapper* reallocated = nullptr;

  // We can't call realloc() here: `elements` is still linked in meta, so a
  // moving realloc could leave another thread reading freed memory. Allocate
  // a fresh buffer instead; the caller copies and swaps under the lock.
  if (usingJEMalloc()) {
    bool success = false;
    size_t newByteSize = nallocx(newCapacity * sizeof(ElementWrapper), 0);

    // Try to grow the existing allocation in place.
    if (prevCapacity * sizeof(ElementWrapper) >=
        jemallocMinInPlaceExpandable) {
      success =
          (xallocx(threadEntry->elements, newByteSize, 0, 0) == newByteSize);
    }

    // In-place growth failed; grab a new buffer of the rounded-up size.
    if (!success) {
      reallocated = static_cast<ElementWrapper*>(mallocx(newByteSize, 0));
      success = (reallocated != nullptr);
    }

    if (success) {
      // Expand to the real usable size of the allocation.
      newCapacity = newByteSize / sizeof(ElementWrapper);
    } else {
      throw std::bad_alloc();
    }
  } else { // no jemalloc
    reallocated = static_cast<ElementWrapper*>(
        malloc(sizeof(ElementWrapper) * newCapacity));
    if (!reallocated) {
      throw std::bad_alloc();
    }
  }

  return reallocated;
}
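// ---------------------------------------------------------------------------
// Illustrative sketch (assumes jemalloc's non-standard API from
// <jemalloc/jemalloc.h>): nallocx() reports the real size class a request
// would get, and xallocx() tries to resize an existing allocation without
// moving it, returning the new usable size. tryGrowInPlace is a hypothetical
// helper, not a folly function.
namespace jemalloc_example {

inline bool tryGrowInPlace(void* ptr, size_t wantBytes) {
  size_t target = nallocx(wantBytes, 0); // rounded-up size class
  return xallocx(ptr, target, 0, 0) == target; // true iff grown in place
}

} // namespace jemalloc_example
// ---------------------------------------------------------------------------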
/**
 * Reserve enough space in threadEntry->elements for the slot `id` to fit.
 */
void StaticMetaBase::reserve(EntryID* id) {
  auto& meta = *this;
  ThreadEntry* threadEntry = (*threadEntry_)();
  size_t prevCapacity = threadEntry->getElementsCapacity();

  uint32_t idval = id->getOrAllocate(meta);
  if (prevCapacity > idval) {
    return;
  }

  size_t newCapacity;
  ElementWrapper* reallocated = reallocate(threadEntry, idval, newCapacity);

  // Success; update the entry under the lock.
  {
    std::lock_guard<std::mutex> g(meta.lock_);

    if (prevCapacity == 0) {
      meta.push_back(threadEntry);
    }

    if (reallocated) {
      // Copy out of the old buffer while holding the lock: a concurrent
      // destroy() may be writing to this thread's elements vector.
      if (prevCapacity != 0) {
        memcpy(
            reallocated,
            threadEntry->elements,
            sizeof(*reallocated) * prevCapacity);
      }
      std::swap(reallocated, threadEntry->elements);
    }

    for (size_t i = prevCapacity; i < newCapacity; i++) {
      threadEntry->elements[i].node.initZero(threadEntry, i);
    }

    threadEntry->setElementsCapacity(newCapacity);
  }

  free(reallocated); // the old buffer (or nullptr), freed after unlocking
}
void StaticMetaBase::reserveHeadUnlocked(uint32_t id) {
  if (head_.getElementsCapacity() <= id) {
    size_t prevCapacity = head_.getElementsCapacity();
    size_t newCapacity;
    ElementWrapper* reallocated = reallocate(&head_, id, newCapacity);

    if (reallocated) {
      if (prevCapacity != 0) {
        memcpy(
            reallocated, head_.elements, sizeof(*reallocated) * prevCapacity);
      }
      std::swap(reallocated, head_.elements);
    }

    for (size_t i = prevCapacity; i < newCapacity; i++) {
      head_.elements[i].node.initZero(&head_, i);
    }

    head_.setElementsCapacity(newCapacity);
    free(reallocated);
  }
}
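// ---------------------------------------------------------------------------
// Illustrative sketch (not folly code): reserve() and reserveHeadUnlocked()
// both grow with "malloc new, memcpy, swap, free old" rather than realloc(),
// since a moving realloc would invalidate the buffer while it is still
// published to other threads. Assumes <cstdlib>, <cstring>, <mutex>, <new>.
namespace grow_example {

struct Table {
  std::mutex lock;
  int* data = nullptr;
  size_t capacity = 0;

  void grow(size_t newCapacity) {
    // Allocate outside the lock; allocation is slow and needs no protection.
    int* fresh = static_cast<int*>(malloc(sizeof(int) * newCapacity));
    if (!fresh) {
      throw std::bad_alloc();
    }
    {
      std::lock_guard<std::mutex> g(lock);
      if (capacity != 0) {
        memcpy(fresh, data, sizeof(int) * capacity); // copy under the lock
      }
      std::swap(fresh, data); // publish the new buffer
      capacity = newCapacity;
    }
    free(fresh); // `fresh` now holds the old buffer
  }
};

} // namespace grow_example
// ---------------------------------------------------------------------------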
void StaticMetaBase::pushBackLocked(ThreadEntry* t, uint32_t id) {
  if (LIKELY(!t->removed_)) {
    std::lock_guard<std::mutex> g(lock_);
    auto* node = &t->elements[id].node;
    node->initIfZero(true /*locked*/);
  }
}

} // namespace threadlocal_detail