AtomicStruct<std::chrono::steady_clock::duration>
    MemoryIdler::defaultIdleTimeout(std::chrono::seconds(5));

void MemoryIdler::flushLocalMallocCaches() {
  if (!usingJEMalloc()) {
    return;
  }
  try {
    // Flush the calling thread's jemalloc tcache back to its arena.
    mallctl("thread.tcache.flush", nullptr, nullptr, nullptr, 0);

    // Purge the dirty pages of the arena assigned to this thread.
    unsigned arenaForCurrent;
    size_t mib[3];
    size_t miblen = 3;
    mallctlRead("thread.arena", &arenaForCurrent);
    if (mallctlnametomib("arena.0.purge", mib, &miblen) == 0) {
      mib[1] = static_cast<size_t>(arenaForCurrent);
      mallctlbymib(mib, miblen, nullptr, nullptr, nullptr, 0);
    }
  } catch (const std::runtime_error& ex) {
    FB_LOG_EVERY_MS(WARNING, 10000) << ex.what();
  }
}

#if (FOLLY_X64 || FOLLY_PPC64) && defined(_GNU_SOURCE) && \
    defined(__linux__) && !FOLLY_MOBILE && !FOLLY_SANITIZE_ADDRESS

static FOLLY_TLS uintptr_t tls_stackLimit;
static FOLLY_TLS size_t tls_stackSize;

static size_t pageSize() {
  static const size_t s_pageSize = sysconf(_SC_PAGESIZE);
  return s_pageSize;
}

static void fetchStackLimits() {
  int err;
  pthread_attr_t attr;
  if ((err = pthread_getattr_np(pthread_self(), &attr))) {
    // Some restricted environments can't access /proc; warn only once,
    // then record sentinel values so unmapUnusedStack() becomes a no-op.
    static folly::once_flag flag;
    folly::call_once(flag, [err]() {
      LOG(WARNING) << "pthread_getattr_np failed errno=" << err;
    });
    tls_stackLimit = 1;
    tls_stackSize = 1;
    return;
  }
  SCOPE_EXIT { pthread_attr_destroy(&attr); };
  void* addr;
  size_t rawSize;
  if ((err = pthread_attr_getstack(&attr, &addr, &rawSize))) {
    // Unexpected, but better to carry on in production than to do nothing.
    FB_LOG_EVERY_MS(ERROR, 10000) << "pthread_attr_getstack error " << err;
    tls_stackLimit = 1;
    tls_stackSize = 1;
    return;
  }
  if (rawSize >= (1ULL << 32)) {
    // Refuse to unmap a huge range; a stack this large is a bug.
    FB_LOG_EVERY_MS(ERROR, 10000)
        << "pthread_attr_getstack returned insane stack size " << rawSize;
    tls_stackLimit = 1;
    tls_stackSize = 1;
    return;
  }
  assert(addr != nullptr);
  assert(rawSize >= PTHREAD_STACK_MIN);

  // glibc subtracts the guard page from the reported stack size; the stack
  // grows down, so the guard page raises the usable base address.
  size_t guardSize;
  if (pthread_attr_getguardsize(&attr, &guardSize) != 0) {
    guardSize = 0;
  }
  assert(rawSize > guardSize);
  tls_stackLimit = reinterpret_cast<uintptr_t>(addr) + guardSize;
  tls_stackSize = rawSize - guardSize;
  assert((tls_stackLimit & (pageSize() - 1)) == 0);
}

FOLLY_NOINLINE static uintptr_t getStackPtr() {
  char marker;
  auto rv = reinterpret_cast<uintptr_t>(&marker);
  return rv;
}

void MemoryIdler::unmapUnusedStack(size_t retain) {
  if (tls_stackSize == 0) {
    fetchStackLimits();
  }
  if (tls_stackSize <= std::max(static_cast<size_t>(1), retain)) {
    // Covers both missing stack info and an impossibly large retain.
    return;
  }

  auto sp = getStackPtr();
  assert(sp >= tls_stackLimit);
  assert(sp - tls_stackLimit < tls_stackSize);

  // Round down to a page boundary so the madvise range is aligned.
  auto end = (sp - retain) & ~(pageSize() - 1);
  if (end <= tls_stackLimit) {
    // No pages are eligible for unmapping.
    return;
  }

  size_t len = end - tls_stackLimit;
  assert((len & (pageSize() - 1)) == 0);
  if (madvise((void*)tls_stackLimit, len, MADV_DONTNEED) != 0) {
    // ENOMEM: the stack vma hasn't been fully grown yet; EINVAL: the pages
    // are locked or huge; EAGAIN: a transient kernel condition.
    assert(errno == EAGAIN || errno == ENOMEM || errno == EINVAL);
  }
}

#endif // guarded Linux stack-madvise implementation

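These entry points are meant to be called from a thread that has decided it is idle, for example a pool worker that has gone without work for roughly defaultIdleTimeout. The sketch below is illustrative only: the TaskQueue type, the idleAwareWorker function, and the surrounding loop are assumptions and not part of folly; only the MemoryIdler calls come from the API shown above.

#include <folly/detail/MemoryIdler.h>

#include <chrono>
#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>

using folly::detail::MemoryIdler;

// Hypothetical task source; everything except the MemoryIdler calls is
// illustrative scaffolding.
struct TaskQueue {
  std::mutex m;
  std::condition_variable cv;
  std::deque<std::function<void()>> work;
};

void idleAwareWorker(TaskQueue& q) {
  for (;;) {
    std::unique_lock<std::mutex> lk(q.m);
    // Busy path: wait up to the configured idle timeout for new work.
    bool gotWork = q.cv.wait_for(
        lk, MemoryIdler::defaultIdleTimeout.load(), [&] { return !q.work.empty(); });
    if (!gotWork) {
      // Idle path: hand cached allocator memory and cold stack pages back
      // to the OS before waiting again.
      lk.unlock();
      MemoryIdler::flushLocalMallocCaches();
      MemoryIdler::unmapUnusedStack();  // keeps kDefaultStackToRetain bytes
      continue;
    }
    auto task = std::move(q.work.front());
    q.work.pop_front();
    lk.unlock();
    task();
  }
}
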
References:
  static const CacheLocality& system()
  static void unmapUnusedStack(size_t retain = kDefaultStackToRetain)
  bool usingJEMalloc() noexcept
  void mallctlRead(const char* cmd, T* out)
  int (*mallctlbymib)(const size_t*, size_t, void*, size_t*, void*, size_t)
  FOLLY_ALWAYS_INLINE void call_once(basic_once_flag<Mutex, Atom>& flag, F&& f, Args&&... args)
  #define FB_LOG_EVERY_MS(severity, milli_interval)
  int (*mallctl)(const char*, void*, size_t*, void*, size_t)
  static AtomicStruct<std::chrono::steady_clock::duration> defaultIdleTimeout
  int (*mallctlnametomib)(const char*, size_t*, size_t*)
  static void flushLocalMallocCaches()
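Several of the referenced symbols (usingJEMalloc, mallctl, mallctlRead, mallctlnametomib, mallctlbymib) are folly's weak-linked jemalloc hooks, usable only when the process is actually running on jemalloc. The snippet below is a minimal sketch of querying jemalloc through them, assuming the current folly include layout (folly/memory/Malloc.h and folly/memory/MallctlHelper.h); the statistic queried is just an example.

#include <folly/memory/Malloc.h>
#include <folly/memory/MallctlHelper.h>

#include <iostream>

int main() {
  if (!folly::usingJEMalloc()) {
    std::cout << "not linked against jemalloc, nothing to query\n";
    return 0;
  }
  // mallctlRead throws std::runtime_error if the underlying mallctl call
  // fails, which is what the try/catch in flushLocalMallocCaches() handles.
  unsigned narenas = 0;
  folly::mallctlRead("opt.narenas", &narenas);
  std::cout << "jemalloc arenas: " << narenas << "\n";
  return 0;
}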