28 #include <type_traits> 51 #if FOLLY_ASAN_ENABLED && defined(FOLLY_TLS) 52 #define FOLLY_F14_TLS_IF_ASAN FOLLY_TLS 54 #define FOLLY_F14_TLS_IF_ASAN 57 #if FOLLY_F14_VECTOR_INTRINSICS_AVAILABLE 59 #if FOLLY_F14_CRC_INTRINSIC_AVAILABLE 63 #include <nmmintrin.h> 74 #include <immintrin.h> 75 #include <xmmintrin.h> 99 return m->computeStats();
// ---------------------------------------------------------------------------
// NOTE(review): this snippet is a lossy extraction -- original source line
// numbers are fused into the text and many intervening lines are elided.
// The fragments below are pieces of several template declarations.
// ---------------------------------------------------------------------------
107 template <
typename T>
// Fragment: a template parameterized on an F14IntrinsicsMode enumerator
// (body elided).
116 template <F14IntrinsicsMode>
// libc++-specific region: fragments of traits taking a key type, mapped
// type, and hasher.
134 #if defined(_LIBCPP_VERSION) 136 template <
typename K,
typename V,
typename H>
145 template <
typename H>
149 template <
typename...
Args>
// Primary template with a defaulted SFINAE hook (Enable = void), followed
// by the start of a partial specialization of the same trait.
156 template <
typename K,
typename V,
typename H,
typename Enable =
void>
161 template <
typename K,
typename V,
typename H>
// Visible condition of the specialization: it applies when the hasher H is
// NOT a known-fast std hash, or when invoking H on K may throw.
// Presumably this selects a policy that caches/stores hash values --
// TODO(review): confirm against the unabridged source.
167 !StdIsFastHash<H>::value || !is_nothrow_invocable<H, K>::value>> {
178 #if FOLLY_F14_VECTOR_INTRINSICS_AVAILABLE 181 template <
typename Policy>
// Opaque token produced by F14Table::prehash() and consumed by the
// token-taking find() overload: it carries the already-split hash so a
// subsequent lookup need not recompute it.  (Extraction note: the hp_
// member declaration and some bodies are elided from this snippet.)
186 class F14HashToken final {
188 F14HashToken() =
default;
// HashPair = (full hash used for probing, tag byte stored in chunks);
// see F14Table::splitHash.
191 using HashPair = std::pair<std::size_t, std::size_t>;
193 explicit F14HashToken(HashPair hp) : hp_(hp) {}
// Conversion back to the raw pair; used by F14Table::find(token, key).
194 explicit operator HashPair()
const {
// Only the table implementation may peek inside the token.
200 template <
typename Policy>
201 friend class f14::detail::F14Table;
// Heterogeneous-lookup trait machinery (fragments; many lines elided by
// extraction).
208 template <
typename Arg,
typename Default>
212 template <
typename Arg,
typename Default>
// void_t-based detection idiom: the defaulted Void parameter lets the
// specialization below match only when the named member types exist.
221 typename Void =
void>
// EligibleForHeterogeneousFind is satisfied only when BOTH the hasher and
// the key-equal functor advertise transparency via an is_transparent
// member type (the C++14 std transparent-comparator protocol).
229 struct EligibleForHeterogeneousFind<
234 void_t<typename Hasher::is_transparent, typename KeyEqual::is_transparent>>
// Heterogeneous insert additionally requires that the table key be
// constructible from the argument key.
242 using EligibleForHeterogeneousInsert =
Conjunction<
243 EligibleForHeterogeneousFind<TableKey, Hasher, KeyEqual, ArgKey>,
244 std::is_constructible<TableKey, ArgKey>>;
// KeyTypeForEmplace: for a single-argument emplace whose argument type is
// eligible for heterogeneous find, use the argument type directly
// (avoiding a TableKey temporary); otherwise fall back to TableKey.
// The tuple<KeyArgs..., bool> below safely yields "first argument type or
// bool" even when the pack is empty.
250 typename KeyArg0OrBool,
252 using KeyTypeForEmplaceHelper = std::conditional_t<
253 sizeof...(KeyArgs) == 1 &&
255 EligibleForHeterogeneousFind<
259 KeyArg0OrBool>::
value),
268 using KeyTypeForEmplace = KeyTypeForEmplaceHelper<
272 std::tuple_element_t<0, std::tuple<KeyArgs...,
bool>>,
// prefetchAddr(ptr): best-effort hint to pull *ptr toward the cache before
// it is needed.  Three platform variants are visible: GCC/Clang
// __builtin_prefetch, the __prefetch intrinsic, and x86 _mm_prefetch with
// the _MM_HINT_T0 (all cache levels) locality hint.  (The signature line
// and the #if selectors are elided by extraction.)
277 template <
typename T>
280 __builtin_prefetch(static_cast<void const*>(ptr));
282 __prefetch(static_cast<void const*>(ptr));
285 static_cast<char const*>(static_cast<void const*>(ptr)), _MM_HINT_T0);
// findFirstSetNonZero(mask): index of the lowest set bit.  Precondition is
// a nonzero mask (ctz of zero is undefined for these builtins).  Chooses
// the 32-bit or 64-bit count-trailing-zeros builtin by operand width; the
// width test is a compile-time constant, so the dead branch folds away.
289 template <
typename T>
292 if (
sizeof(mask) ==
sizeof(
unsigned)) {
293 return __builtin_ctz(static_cast<unsigned>(mask));
295 return __builtin_ctzll(mask);
// Per-platform tag-vector configuration (fragments; the preprocessor
// branches are elided by extraction).  NEON: 16 uint8 lanes, where the
// narrowing-shift movemask emulation yields 4 mask bits per byte lane
// (kMaskSpacing = 4).  SSE2: __m128i, where _mm_movemask_epi8 gives 1 bit
// per lane (kMaskSpacing = 1).
300 using TagVector = uint8x16_t;
304 constexpr
unsigned kMaskSpacing = 4;
306 using TagVector = __m128i;
310 constexpr
unsigned kMaskSpacing = 1;
// Alignment required so the tag array can be read with aligned vector
// loads.
316 constexpr std::size_t kRequiredVectorAlignment =
// Backing storage for the shared all-empty chunk; oversized so a correctly
// aligned TagVector fits at some offset regardless of the storage's own
// alignment (see F14Chunk::emptyInstance).
319 using EmptyTagVectorType = std::aligned_storage_t<
320 sizeof(TagVector) + kRequiredVectorAlignment,
323 extern EmptyTagVectorType kEmptyTagVector;
// FullMask<N>::value: a mask with one set bit per lane for N lanes, the
// bits kMaskSpacing apart -- built recursively, with the single-lane case
// as the base specialization.
328 template <
unsigned BitCount>
330 static constexpr MaskType
value =
331 (FullMask<BitCount - 1>
::value << kMaskSpacing) + 1;
335 struct FullMask<1> : std::integral_constant<MaskType, 1> {};
// SparseMaskIter (kMaskSpacing == 4 variant): visits the set lanes of a
// NEON-style mask in which each lane contributes 4 mask bits.  Intended
// for masks with few set lanes (tag-match iteration).
342 class SparseMaskIter {
343 static_assert(kMaskSpacing == 4,
"");
// Compress the 64-bit spaced mask into 32 bits by interleaving the high
// and low halves (high half shifted left by 2 before OR-ing in).
348 explicit SparseMaskIter(MaskType mask)
349 : interleavedMask_{
static_cast<uint32_t>(((mask >> 32) << 2) | mask)} {}
352 return interleavedMask_ != 0;
// next(): take the lowest set bit, clear it (x &= x - 1), then undo the
// interleaving -- swapping the two 2-bit halves of the bit index recovers
// the original lane number.
357 unsigned i = findFirstSetNonZero(interleavedMask_);
358 interleavedMask_ &= (interleavedMask_ - 1);
359 return ((i >> 2) | (i << 2)) & 0xf;
// DenseMaskIter (kMaskSpacing == 4 variant): visits all occupied lanes;
// tuned for masks with many set lanes (full-chunk iteration).  Several
// lines are elided by extraction.
364 class DenseMaskIter {
365 static_assert(kMaskSpacing == 4,
"");
372 explicit DenseMaskIter(
uint8_t const* tags, MaskType mask) {
// Total number of occupied lanes, via popcount of the same interleaved
// compression used by SparseMaskIter above.
376 count_ =
popcount(static_cast<uint32_t>(((mask >> 32) << 2) | mask));
// Fast path: lane 0 occupied; otherwise locate the first occupied lane.
377 if (
LIKELY((mask & 1) != 0)) {
380 index_ = findFirstSetNonZero(mask) / kMaskSpacing;
// Elided advance loop: scans the raw tag bytes until one with the high
// (0x80 = occupied) bit set is found.
396 }
while ((tags_[index_] & 0x80) == 0);
// SparseMaskIter (1 bit per lane variant): classic lowest-set-bit
// iteration -- ctz to find the lane, x &= x - 1 to clear it.
405 class SparseMaskIter {
409 explicit SparseMaskIter(MaskType mask) : mask_{mask} {}
417 unsigned i = findFirstSetNonZero(mask_);
418 mask_ &= (mask_ - 1);
419 return i / kMaskSpacing;
// DenseMaskIter (1 bit per lane variant): shifts the mask down as it goes
// so index_ can track the current lane; the tags pointer is ignored here
// (present only to match the other variant's constructor shape).
424 class DenseMaskIter {
429 explicit DenseMaskIter(
uint8_t const*, MaskType mask) : mask_{mask} {}
// Fast path: current lane occupied -- consume one lane's worth of bits.
437 if (
LIKELY((mask_ & 1) != 0)) {
438 mask_ >>= kMaskSpacing;
// Slow path: skip ahead to the next set lane, then consume it too.
441 unsigned s = findFirstSetNonZero(mask_);
442 unsigned rv = index_ + (s / kMaskSpacing);
443 mask_ >>= (s + kMaskSpacing);
// MaskRangeIter: yields [begin, end) lane ranges of contiguous occupied
// runs instead of individual lanes.
453 class MaskRangeIter {
458 explicit MaskRangeIter(MaskType mask) {
// Smear each lane's bit across its whole kMaskSpacing-wide field so runs
// of occupied lanes become runs of contiguous set bits.
461 mask_ = mask * ((1 << kMaskSpacing) - 1);
468 std::pair<unsigned, unsigned>
next() {
// b = first set bit (run start).  e = first clear bit after the run:
// mask_ | (mask_ - 1) fills all bits below the run's end, so the lowest
// set bit of its complement is one past the run.
471 unsigned b = findFirstSetNonZero(mask_);
472 unsigned e = findFirstSetNonZero(~(mask_ | (mask_ - 1)));
// s is presumably a running bit offset already consumed -- its
// maintenance is elided from this extraction; TODO(review) confirm.
475 return std::make_pair((
s + b) / kMaskSpacing, (
s + e) / kMaskSpacing);
// LastOccupiedInMask: reports the highest occupied lane, if any.  Both
// bodies are elided here; hasIndex() presumably tests mask_ != 0 and
// index() presumably uses the highest set bit -- TODO(review) confirm.
482 class LastOccupiedInMask {
486 explicit LastOccupiedInMask(MaskType mask) : mask_{mask} {}
488 bool hasIndex()
const {
492 unsigned index()
const {
// FirstEmptyInMask: reports the lowest set lane of a mask that the caller
// has already inverted to mark empty slots (see F14Chunk::firstEmpty,
// which passes occupiedMask() ^ kFullMask).
501 class FirstEmptyInMask {
505 explicit FirstEmptyInMask(MaskType mask) : mask_{mask} {}
507 bool hasIndex()
const {
// Lowest set bit, converted from bit position to lane index.
511 unsigned index()
const {
513 return findFirstSetNonZero(mask_) / kMaskSpacing;
// ---------------------------------------------------------------------------
// F14Chunk: one bucket group of the F14 table -- 16 tag bytes (high bit set
// means the slot is occupied), overflow bookkeeping, then raw item storage.
// Vector-aligned so the tag array can be read with a single aligned SIMD
// load.  NOTE(review): this snippet is a lossy extraction; original line
// numbers are fused into the text and many lines (including some member
// declarations) are elided.
// ---------------------------------------------------------------------------
517 template <
typename ItemType>
518 struct alignas(kRequiredVectorAlignment) F14Chunk {
519 using Item = ItemType;
// 12 usable slots when items are 4 bytes, otherwise 14.
529 static constexpr
unsigned kCapacity =
sizeof(Item) == 4 ? 12 : 14;
// Target occupancy before the table prefers to grow: capacity - 2.
531 static constexpr
unsigned kDesiredCapacity = kCapacity - 2;
// 16-byte items get one extra allocated slot.
533 static constexpr
unsigned kAllocatedCapacity =
534 kCapacity + (
sizeof(Item) == 16 ? 1 : 0);
// One tag byte per slot; 0 = empty, 0x80..0xff = stored hash tag.
540 std::array<uint8_t, 14> tags_;
// Saturating count of probe sequences that overflowed OUT of this chunk.
552 uint8_t outboundOverflowCount_;
// Raw, suitably aligned item storage (array declaration partially elided).
555 std::aligned_storage_t<sizeof(Item), alignof(Item)>,
// Shared all-empty chunk used by empty tables: carve a correctly aligned
// F14Chunk out of the global kEmptyTagVector storage.
559 static F14Chunk* emptyInstance() {
560 auto raw =
reinterpret_cast<char*
>(&kEmptyTagVector);
561 if (kRequiredVectorAlignment >
alignof(
max_align_t)) {
562 auto delta = kRequiredVectorAlignment -
563 (
reinterpret_cast<uintptr_t
>(raw) % kRequiredVectorAlignment);
566 auto rv =
reinterpret_cast<F14Chunk*
>(raw);
568 (reinterpret_cast<uintptr_t>(rv) % kRequiredVectorAlignment) == 0,
"");
// clear(): zero the 16 tag bytes.  Old GCC (<= 5) gets an explicit SSE
// store; everyone else uses memset.
578 #if FOLLY_SSE >= 2 && __GNUC__ <= 5 && !__clang__ 581 auto*
v =
static_cast<__m128i*
>(
static_cast<void*
>(&tags_[0]));
582 _mm_store_si128(
v, _mm_setzero_si128());
584 std::memset(&tags_[0],
'\0', 16);
// Copy the hosted-overflow nibble (high 4 bits of control_) and the
// outbound overflow counter from rhs; used when copying a table.
588 void copyOverflowInfoFrom(F14Chunk
const&
rhs) {
590 control_ +=
static_cast<uint8_t>(rhs.control_ & 0xf0);
591 outboundOverflowCount_ = rhs.outboundOverflowCount_;
// control_ packs two nibbles: high = count of keys hosted here that
// overflowed from their home chunk; low = chunk-0 capacity marker.
594 unsigned hostedOverflowCount()
const {
595 return control_ >> 4;
598 static constexpr
uint8_t kIncrHostedOverflowCount = 0x10;
599 static constexpr
uint8_t kDecrHostedOverflowCount =
602 void adjustHostedOverflowCount(
uint8_t op) {
// eof()/chunk0Capacity(): the low nibble of control_ is nonzero only in
// chunk 0, where it records the single-chunk capacity (see markEof).
607 return (control_ & 0xf) != 0;
610 std::size_t chunk0Capacity()
const {
611 return control_ & 0xf;
// markEof: stamp chunk 0's capacity.  Asserts this is a freshly cleared,
// non-shared chunk and that c0c fits in the low nibble.
614 void markEof(std::size_t c0c) {
616 this != emptyInstance() && control_ == 0 && c0c > 0 && c0c <= 0xf &&
619 control_ =
static_cast<uint8_t>(c0c);
622 unsigned outboundOverflowCount()
const {
623 return outboundOverflowCount_;
// Saturating increment/decrement: once the counter pegs at 255 it is
// never changed again (the chunk permanently reports possible overflow).
626 void incrOutboundOverflowCount() {
627 if (outboundOverflowCount_ != 255) {
628 ++outboundOverflowCount_;
632 void decrOutboundOverflowCount() {
633 if (outboundOverflowCount_ != 255) {
634 --outboundOverflowCount_;
638 std::size_t
tag(std::size_t index)
const {
// Tags are always in [0x80, 0xff]: the high bit doubles as the occupied
// flag that the movemask-based iterators test.
642 void setTag(std::size_t index, std::size_t
tag) {
644 this != emptyInstance() && tag >= 0x80 && tag <= 0xff,
"");
645 tags_[index] =
static_cast<uint8_t>(tag);
648 void clearTag(std::size_t index) {
// --- NEON variant --------------------------------------------------------
// tagMatchIter: compare all 16 tag bytes against the needle at once, then
// narrow the 128-bit compare result to a 64-bit mask (4 bits per lane)
// with a vshrn narrowing shift.
656 SparseMaskIter tagMatchIter(std::size_t
needle)
const {
658 uint8x16_t tagV = vld1q_u8(&tags_[0]);
659 auto needleV = vdupq_n_u8(static_cast<uint8_t>(needle));
660 auto eqV = vceqq_u8(tagV, needleV);
663 uint8x8_t maskV = vshrn_n_u16(vreinterpretq_u16_u8(eqV), 4);
664 uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(maskV), 0) & kFullMask;
665 return SparseMaskIter(mask);
// occupiedMask: arithmetic shift right by 7 broadcasts each tag's high
// (occupied) bit across its byte, then narrow as above.
668 MaskType occupiedMask()
const {
669 uint8x16_t tagV = vld1q_u8(&tags_[0]);
672 vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_u8(tagV), 7));
673 uint8x8_t maskV = vshrn_n_u16(vreinterpretq_u16_u8(occupiedV), 4);
674 return vget_lane_u64(vreinterpret_u64_u8(maskV), 0) & kFullMask;
// --- SSE variant ---------------------------------------------------------
// Aligned view of the tag bytes as one 128-bit vector (alignment is
// guaranteed by the struct-level alignas above).
680 TagVector
const* tagVector()
const {
681 return static_cast<TagVector
const*
>(
static_cast<void const*
>(&tags_[0]));
// tagMatchIter: byte-wise compare then movemask -- one bit per slot.
684 SparseMaskIter tagMatchIter(std::size_t needle)
const {
686 auto tagV = _mm_load_si128(tagVector());
704 auto needleV = _mm_set1_epi8(static_cast<uint8_t>(needle));
705 auto eqV = _mm_cmpeq_epi8(tagV, needleV);
706 auto mask = _mm_movemask_epi8(eqV) & kFullMask;
707 return SparseMaskIter{mask};
// movemask collects each byte's sign bit -- exactly the occupied flags.
710 MaskType occupiedMask()
const {
711 auto tagV = _mm_load_si128(tagVector());
712 return _mm_movemask_epi8(tagV) & kFullMask;
// Convenience iterators, all built on occupiedMask().
716 DenseMaskIter occupiedIter()
const {
717 return DenseMaskIter{&tags_[0], occupiedMask()};
720 MaskRangeIter occupiedRangeIter()
const {
721 return MaskRangeIter{occupiedMask()};
724 LastOccupiedInMask lastOccupied()
const {
725 return LastOccupiedInMask{occupiedMask()};
// firstEmpty: XOR with the all-lanes mask inverts occupancy.
728 FirstEmptyInMask firstEmpty()
const {
729 return FirstEmptyInMask{occupiedMask() ^ kFullMask};
// Scalar occupancy test: any nonzero tag means occupied.
732 bool occupied(std::size_t index)
const {
734 return tags_[index] != 0;
// itemAddr: address of slot i's raw storage as an Item*.  The const_cast
// is confined here so iterators can hand out mutable items.
737 Item* itemAddr(std::size_t
i)
const {
738 return static_cast<Item*
>(
739 const_cast<void*
>(
static_cast<void const*
>(&rawItems_[
i])));
742 Item& item(std::size_t i) {
747 Item
const& citem(std::size_t i)
const {
// owner: recover the enclosing chunk from an item reference by stepping
// back `index` items plus the offset of the rawItems_ field.
752 static F14Chunk& owner(Item& item, std::size_t index) {
754 static_cast<uint8_t*
>(
static_cast<void*
>(std::addressof(item))) -
755 offsetof(F14Chunk, rawItems_) - index *
sizeof(Item);
756 auto chunkAddr =
static_cast<F14Chunk*
>(
static_cast<void*
>(rawAddr));
// PackedChunkItemPtr (generic fancy-pointer version): stores the item
// pointer and slot index as two separate fields.  Used to encode an
// iterator position; ordering and equality compare the pointer only.
// (Several member declarations are elided by extraction.)
769 template <
typename Ptr>
770 class PackedChunkItemPtr {
772 PackedChunkItemPtr(Ptr p, std::size_t
i)
noexcept :
ptr_{p}, index_{i} {
780 std::size_t index()
const {
786 return ptr_ < rhs.ptr_;
789 bool operator==(PackedChunkItemPtr
const& rhs)
const {
791 return ptr_ == rhs.ptr_;
// operator!= is defined in terms of operator== above.
794 bool operator!=(PackedChunkItemPtr
const& rhs)
const {
795 return !(*
this ==
rhs);
// PackedChunkItemPtr<T*> (raw-pointer specialization): packs the slot
// index into the low alignment bits of the item pointer.  Only part of
// the index fits there; the remainder is recovered arithmetically from
// the pointer's offset within its 16-byte-aligned chunk by multiplying
// with a modular inverse of sizeof(T).  (Extraction note: kAlignBits'
// definition, ptr()'s signature, and some accessors are elided.)
805 template <
typename T>
806 class PackedChunkItemPtr<T*> {
807 static_assert((
alignof(F14Chunk<T>) % 16) == 0,
"");
// powerMod: x^y mod m by constexpr recursion; used below to compute the
// multiplicative inverse of (sizeof(T) >> kAlignBits) modulo the
// power-of-two kModulus.
854 static constexpr uintptr_t powerMod(uintptr_t
x, uintptr_t
y, uintptr_t
m) {
855 return y == 0 ? 1 : (x * powerMod(x, y - 1, m)) % m;
// 4 index bits cover kCapacity <= 14 slots.
858 static constexpr uintptr_t kIndexBits = 4;
859 static constexpr uintptr_t kIndexMask = (uintptr_t{1} << kIndexBits) - 1;
865 static constexpr uintptr_t kAlignMask = (uintptr_t{1} << kAlignBits) - 1;
867 static constexpr uintptr_t kModulus = uintptr_t{1}
868 << (kIndexBits - kAlignBits);
869 static constexpr uintptr_t kSizeInverse =
870 powerMod(
sizeof(T) >> kAlignBits, kModulus - 1, kModulus);
// Store: the index's high bits ride in the pointer's free low bits; the
// assume() tells the optimizer they fit.
873 PackedChunkItemPtr(T* p, std::size_t
i)
noexcept {
874 uintptr_t encoded = i >> (kIndexBits - kAlignBits);
875 assume((encoded & ~kAlignMask) == 0);
876 raw_ =
reinterpret_cast<uintptr_t
>(p) | encoded;
// ptr(): mask off the stolen low bits.
882 return reinterpret_cast<T*
>(raw_ & ~kAlignMask);
// index(): recombine the explicitly stored high bits with the low bits
// deduced from the pointer offset via the modular inverse.
885 std::size_t index()
const {
886 auto encoded = (raw_ & kAlignMask) << (kIndexBits - kAlignBits);
888 ((raw_ >> kAlignBits) * kSizeInverse) & (kIndexMask >> kAlignBits);
889 return encoded | deduced;
// Comparisons operate on the packed word (pointer and index together).
893 return raw_ < rhs.raw_;
895 bool operator==(PackedChunkItemPtr
const& rhs)
const {
896 return raw_ == rhs.raw_;
898 bool operator!=(PackedChunkItemPtr
const& rhs)
const {
899 return !(*
this ==
rhs);
// F14ItemIter: item iterator over chunks.  Holds a pointer to the current
// item plus its slot index; the all-null default state serves as end().
// Based on the `&*c - 1` prefetch and the lastOccupied() use below,
// advancing appears to scan toward lower chunk addresses.  NOTE(review):
// lossy extraction -- several declarations and statements are elided.
906 template <
typename ChunkPtr>
909 using Chunk =
typename std::pointer_traits<ChunkPtr>::element_type;
912 using Item =
typename Chunk::Item;
913 using ItemPtr =
typename std::pointer_traits<ChunkPtr>::template rebind<Item>;
915 typename std::pointer_traits<ChunkPtr>::template rebind<Item const>;
// Packed: compact encoding of (item pointer, index) used to persist the
// table's begin() position (see SizeAndPackedBegin).
917 using Packed = PackedChunkItemPtr<ItemPtr>;
// Default constructor is the end() sentinel.
921 F14ItemIter()
noexcept : itemPtr_{
nullptr}, index_{0} {}
925 explicit F14ItemIter(Packed
const& packed)
926 : itemPtr_{packed.ptr()}, index_{packed.index()} {}
928 F14ItemIter(ChunkPtr chunk, std::size_t index)
929 : itemPtr_{std::pointer_traits<ItemPtr>::pointer_to(chunk->item(index))},
933 std::pointer_traits<ItemPtr>::pointer_to(chunk->item(index)) !=
935 assume(itemPtr_ !=
nullptr);
// advanceImpl(checkEof, likelyDead) fragments: fast path when the next
// slot in the current chunk is occupied...
945 if (
LIKELY(
c->occupied(index_))) {
// ...otherwise walk chunks; with likelyDead the loop condition changes
// (the !likelyDead || i != 0 bound is visible here).
971 for (std::size_t
i = 1; !likelyDead ||
i != 0; ++
i) {
983 auto last = c->lastOccupied();
// Prefetch the adjacent chunk while deciding, unless likely dead.
984 if (checkEof && !likelyDead) {
985 prefetchAddr(&*c - 1);
987 if (
LIKELY(last.hasIndex())) {
988 index_ = last.index();
989 itemPtr_ = std::pointer_traits<ItemPtr>::pointer_to(c->item(index_));
// precheckedAdvance: caller guarantees there is a next item, so no
// end-of-table check is performed.
995 void precheckedAdvance() {
996 advanceImpl(
false,
false);
// Checked advance variants (surrounding signatures elided): (true,false)
// and (true,true) flag combinations of advanceImpl.
1000 advanceImpl(
true,
false);
1004 advanceImpl(
true,
true);
// chunk(): recover the owning chunk from the item address and index.
1007 ChunkPtr chunk()
const {
1008 return std::pointer_traits<ChunkPtr>::pointer_to(
1009 Chunk::owner(*itemPtr_, index_));
1012 std::size_t index()
const {
1016 Item* itemAddr()
const {
1017 return std::addressof(*itemPtr_);
1019 Item& item()
const {
1022 Item
const& citem()
const {
1026 bool atEnd()
const {
1027 return itemPtr_ ==
nullptr;
// pack(): index fits in 8 bits (chunk capacity <= 14).
1030 Packed pack()
const {
1031 return Packed{itemPtr_,
static_cast<uint8_t>(index_)};
// Equality compares only the item pointer; the index is derivable.
1037 return itemPtr_ == rhs.itemPtr_;
1040 bool operator!=(F14ItemIter
const& rhs)
const {
1041 return !(*
this ==
rhs);
// SizeAndPackedBegin<..., true>: bundles the table's size with a packed
// copy of begin(), maintained so begin() is O(1) when item iteration is
// enabled.
1051 template <
typename SizeType,
typename ItemIter,
bool EnablePackedItemIter>
1052 struct SizeAndPackedBegin {
// Initialized to the packed default (end/empty) iterator.
1056 typename ItemIter::Packed packedBegin_{ItemIter{}.pack()};
1059 typename ItemIter::Packed& packedBegin() {
1060 return packedBegin_;
1063 typename ItemIter::Packed
const& packedBegin()
const {
1064 return packedBegin_;
// Specialization for tables without item iteration: no packedBegin_ field
// is stored, and the accessors are [[noreturn]] -- calling them is a bug
// (bodies elided here; presumably they abort -- TODO(review) confirm).
1068 template <
typename SizeType,
typename ItemIter>
1069 struct SizeAndPackedBegin<SizeType, ItemIter, false> {
1072 [[noreturn]]
typename ItemIter::Packed& packedBegin() {
1076 [[noreturn]]
typename ItemIter::Packed
const& packedBegin()
const {
1081 template <
typename Policy>
1082 class F14Table :
public Policy {
1084 using Item =
typename Policy::Item;
1087 using allocator_type =
typename Policy::Alloc;
1090 using Alloc =
typename Policy::Alloc;
1091 using AllocTraits =
typename Policy::AllocTraits;
1092 using Hasher =
typename Policy::Hasher;
1093 using InternalSizeType =
typename Policy::InternalSizeType;
1094 using KeyEqual =
typename Policy::KeyEqual;
1096 using Policy::kAllocIsAlwaysEqual;
1097 using Policy::kDefaultConstructIsNoexcept;
1098 using Policy::kEnableItemIteration;
1099 using Policy::kSwapIsNoexcept;
1101 using Policy::destroyItemOnClear;
1102 using Policy::isAvalanchingHasher;
1103 using Policy::prefetchBeforeCopy;
1104 using Policy::prefetchBeforeDestroy;
1105 using Policy::prefetchBeforeRehash;
1107 using ByteAlloc =
typename AllocTraits::template rebind_alloc<uint8_t>;
1108 using BytePtr =
typename std::allocator_traits<ByteAlloc>::pointer;
1110 using Chunk = F14Chunk<Item>;
1112 typename std::pointer_traits<BytePtr>::template rebind<Chunk>;
1114 using HashPair =
typename F14HashToken::HashPair;
1117 using ItemIter = F14ItemIter<ChunkPtr>;
1122 ChunkPtr chunks_{Chunk::emptyInstance()};
1123 InternalSizeType chunkMask_{0};
1124 SizeAndPackedBegin<InternalSizeType, ItemIter, kEnableItemIteration>
1125 sizeAndPackedBegin_;
1131 swap(chunks_, rhs.chunks_);
1132 swap(chunkMask_, rhs.chunkMask_);
1133 swap(sizeAndPackedBegin_.size_, rhs.sizeAndPackedBegin_.size_);
1134 if (kEnableItemIteration) {
1136 sizeAndPackedBegin_.packedBegin(),
1137 rhs.sizeAndPackedBegin_.packedBegin());
1143 std::size_t initialCapacity,
1145 KeyEqual
const& keyEqual,
1147 : Policy{hasher, keyEqual, alloc} {
1148 if (initialCapacity > 0) {
1153 F14Table(F14Table
const& rhs) : Policy{rhs} {
1154 buildFromF14Table(rhs);
1157 F14Table(F14Table
const& rhs,
Alloc const& alloc) : Policy{
rhs, alloc} {
1158 buildFromF14Table(rhs);
1169 F14Table(F14Table&& rhs,
Alloc const& alloc)
noexcept(kAllocIsAlwaysEqual)
1171 if (kAllocIsAlwaysEqual || this->alloc() == rhs.alloc()) {
1180 F14Table& operator=(F14Table
const& rhs) {
1183 static_cast<Policy&
>(*this) =
rhs;
1184 buildFromF14Table(rhs);
1189 F14Table& operator=(F14Table&& rhs)
noexcept(
1192 (kAllocIsAlwaysEqual ||
1197 static_cast<Policy&
>(*this) =
std::move(rhs);
1199 kAllocIsAlwaysEqual || this->alloc() == rhs.alloc()) {
1222 kAllocIsAlwaysEqual || this->alloc() == rhs.alloc(),
1223 "swap is undefined for unequal non-propagating allocators");
1224 this->swapPolicy(rhs);
1263 #if FOLLY_X64 || FOLLY_AARCH64 1265 static HashPair splitHash(std::size_t hash) {
1266 static_assert(
sizeof(std::size_t) ==
sizeof(
uint64_t),
"");
1268 if (!isAvalanchingHasher()) {
1269 #if FOLLY_F14_CRC_INTRINSIC_AVAILABLE 1272 std::size_t
c = _mm_crc32_u64(0, hash);
1273 tag = (c >> 24) | 0x80;
1277 std::size_t c = __crc32cd(0, hash);
1278 tag = (c >> 24) | 0x80;
1291 auto const kMul = 0xc4ceb9fe1a85ec53ULL;
1294 __int64 signedLo = _mul128(
1295 static_cast<__int64>(hash), static_cast<__int64>(kMul), &signedHi);
1296 auto hi =
static_cast<uint64_t>(signedHi);
1297 auto lo =
static_cast<uint64_t>(signedLo);
1300 (
static_cast<unsigned __int128
>(hash) * kMul) >> 64);
1301 auto lo = hash * kMul;
1305 tag = ((hash >> 15) & 0x7f) | 0x80;
1310 tag = (hash >> 56) | 0x80;
1312 return std::make_pair(hash, tag);
1316 static HashPair splitHash(std::size_t hash) {
1317 static_assert(
sizeof(std::size_t) ==
sizeof(
uint32_t),
"");
1319 if (!isAvalanchingHasher()) {
1320 #if FOLLY_F14_CRC_INTRINSIC_AVAILABLE 1323 auto c = _mm_crc32_u32(0, hash);
1324 tag =
static_cast<uint8_t>(~(
c >> 25));
1327 auto c = __crc32cw(0, hash);
1328 tag =
static_cast<uint8_t>(~(
c >> 25));
1336 tag =
static_cast<uint8_t>(~(hash >> 25));
1340 tag = (hash >> 24) | 0x80;
1342 return std::make_pair(hash, tag);
1348 static std::size_t chunkAllocSize(
1350 std::size_t maxSizeWithoutRehash) {
1351 if (chunkCount == 1) {
1353 static_assert(offsetof(Chunk, rawItems_) == 16,
"");
1354 return 16 +
sizeof(Item) * maxSizeWithoutRehash;
1356 return sizeof(Chunk) * chunkCount;
1360 ChunkPtr initializeChunks(
1362 std::size_t chunkCount,
1363 std::size_t maxSizeWithoutRehash) {
1365 auto chunks =
static_cast<Chunk*
>(
static_cast<void*
>(&*raw));
1369 chunks[0].markEof(chunkCount == 1 ? maxSizeWithoutRehash : 1);
1370 return std::pointer_traits<ChunkPtr>::pointer_to(*
chunks);
1376 return ItemIter{sizeAndPackedBegin_.packedBegin()};
1388 return sizeAndPackedBegin_.size_;
1391 std::size_t max_size()
const noexcept {
1392 auto&
a = this->alloc();
1393 return std::min<std::size_t>(
1395 AllocTraits::max_size(
a));
1398 std::size_t bucket_count()
const noexcept {
1402 if (chunkMask_ != 0) {
1403 return (chunkMask_ + 1) * Chunk::kDesiredCapacity;
1405 return chunks_->chunk0Capacity();
1409 std::size_t max_bucket_count()
const noexcept {
1413 float load_factor()
const noexcept {
1416 :
static_cast<float>(
size()) / static_cast<float>(bucket_count());
1419 float max_load_factor()
const noexcept {
1423 void max_load_factor(
float)
noexcept {
1474 std::size_t probeDelta(HashPair hp)
const {
1475 return 2 * hp.second + 1;
1478 template <
typename K>
1480 std::size_t index = hp.first;
1481 std::size_t step = probeDelta(hp);
1482 for (std::size_t tries = 0; tries <= chunkMask_; ++tries) {
1483 ChunkPtr chunk = chunks_ + (index & chunkMask_);
1484 if (
sizeof(Chunk) > 64) {
1485 prefetchAddr(chunk->itemAddr(8));
1487 auto hits = chunk->tagMatchIter(hp.second);
1488 while (hits.hasNext()) {
1489 auto i = hits.next();
1490 if (
LIKELY(this->keyMatchesItem(key, chunk->item(
i)))) {
1494 return ItemIter{chunk,
i};
1497 if (
LIKELY(chunk->outboundOverflowCount() == 0)) {
1516 template <
typename K>
1517 F14HashToken prehash(K
const& key)
const {
1519 auto hp = splitHash(this->computeKeyHash(key));
1520 ChunkPtr firstChunk = chunks_ + (hp.first & chunkMask_);
1521 prefetchAddr(firstChunk);
1525 template <
typename K>
1527 auto hp = splitHash(this->computeKeyHash(key));
1528 return findImpl(hp, key);
1531 template <
typename K>
1533 find(F14HashToken
const& token, K
const& key)
const {
1535 splitHash(this->computeKeyHash(key)) == static_cast<HashPair>(token),
1537 return findImpl(static_cast<HashPair>(token), key);
1541 void adjustSizeAndBeginAfterInsert(ItemIter iter) {
1542 if (kEnableItemIteration) {
1544 auto packed = iter.pack();
1545 if (sizeAndPackedBegin_.packedBegin() < packed) {
1546 sizeAndPackedBegin_.packedBegin() = packed;
1550 ++sizeAndPackedBegin_.size_;
1554 void eraseBlank(ItemIter iter, HashPair hp) {
1555 iter.chunk()->clearTag(iter.index());
1557 if (iter.chunk()->hostedOverflowCount() != 0) {
1559 std::size_t index = hp.first;
1560 std::size_t delta = probeDelta(hp);
1563 ChunkPtr chunk = chunks_ + (index & chunkMask_);
1564 if (chunk == iter.chunk()) {
1565 chunk->adjustHostedOverflowCount(hostedOp);
1568 chunk->decrOutboundOverflowCount();
1569 hostedOp = Chunk::kDecrHostedOverflowCount;
1575 void adjustSizeAndBeginBeforeErase(ItemIter iter) {
1576 --sizeAndPackedBegin_.size_;
1577 if (kEnableItemIteration) {
1578 if (iter.pack() == sizeAndPackedBegin_.packedBegin()) {
1582 iter.precheckedAdvance();
1584 sizeAndPackedBegin_.packedBegin() = iter.pack();
1589 template <
typename...
Args>
1590 void insertAtBlank(ItemIter pos, HashPair hp,
Args&&... args) {
1592 auto dst = pos.itemAddr();
1593 this->constructValueAtItem(
size(), dst, std::forward<Args>(args)...);
1595 eraseBlank(pos, hp);
1598 adjustSizeAndBeginAfterInsert(pos);
1601 ItemIter allocateTag(
uint8_t* fullness, HashPair hp) {
1603 std::size_t index = hp.first;
1604 std::size_t delta = probeDelta(hp);
1607 index &= chunkMask_;
1608 chunk = chunks_ + index;
1609 if (
LIKELY(fullness[index] < Chunk::kCapacity)) {
1612 chunk->incrOutboundOverflowCount();
1613 hostedOp = Chunk::kIncrHostedOverflowCount;
1616 unsigned itemIndex = fullness[index]++;
1618 chunk->setTag(itemIndex, hp.second);
1619 chunk->adjustHostedOverflowCount(hostedOp);
1620 return ItemIter{chunk, itemIndex};
1623 ChunkPtr lastOccupiedChunk()
const {
1625 if (kEnableItemIteration) {
1626 return begin().chunk();
1628 return chunks_ + chunkMask_;
1632 template <
typename T>
1633 void directBuildFrom(T&& src) {
1642 this->beforeBuild(src.size(), bucket_count(), std::forward<T>(src));
1646 undoState, success, src.size(), bucket_count(), std::forward<T>(src));
1660 bucket_count() == src.bucket_count()) {
1662 auto n = chunkAllocSize(chunkMask_ + 1, bucket_count());
1663 std::memcpy(&chunks_[0], &src.chunks_[0], n);
1664 sizeAndPackedBegin_.size_ = src.size();
1665 if (kEnableItemIteration) {
1666 auto srcBegin = src.begin();
1667 sizeAndPackedBegin_.packedBegin() =
1668 ItemIter{chunks_ + (srcBegin.chunk() - src.chunks_),
1673 std::size_t maxChunkIndex = src.lastOccupiedChunk() - src.chunks_;
1677 auto srcChunk = &src.chunks_[maxChunkIndex];
1678 Chunk* dstChunk = &chunks_[maxChunkIndex];
1680 dstChunk->copyOverflowInfoFrom(*srcChunk);
1682 auto iter = srcChunk->occupiedIter();
1683 if (prefetchBeforeCopy()) {
1684 for (
auto piter = iter; piter.hasNext();) {
1685 this->prefetchValue(srcChunk->citem(piter.next()));
1689 std::size_t dstI = 0;
1690 for (; iter.hasNext(); ++dstI) {
1691 auto srcI = iter.next();
1693 std::forward<T>(src).buildArgForItem(srcChunk->item(srcI));
1694 auto dst = dstChunk->itemAddr(dstI);
1695 this->constructValueAtItem(
1696 0, dst, std::forward<decltype(srcArg)>(srcArg));
1697 dstChunk->setTag(dstI, srcChunk->tag(srcI));
1698 ++sizeAndPackedBegin_.size_;
1703 }
while (
size() != src.size());
1706 if (kEnableItemIteration) {
1707 sizeAndPackedBegin_.packedBegin() =
1708 ItemIter{chunks_ + maxChunkIndex,
1709 chunks_[maxChunkIndex].lastOccupied().index()}
1717 template <
typename T>
1718 void rehashBuildFrom(T&& src) {
1722 std::array<uint8_t, 256> stackBuf;
1724 auto cc = chunkMask_ + 1;
1725 if (
cc <= stackBuf.size()) {
1726 fullness = stackBuf.data();
1728 ByteAlloc
a{this->alloc()};
1729 fullness = &*std::allocator_traits<ByteAlloc>::allocate(
a,
cc);
1732 if (
cc > stackBuf.size()) {
1733 ByteAlloc
a{this->alloc()};
1734 std::allocator_traits<ByteAlloc>::deallocate(
1736 std::pointer_traits<
typename std::allocator_traits<
1737 ByteAlloc>::pointer>::pointer_to(*fullness),
1741 std::memset(fullness,
'\0',
cc);
1751 this->beforeBuild(src.size(), bucket_count(), std::forward<T>(src));
1755 undoState, success, src.size(), bucket_count(), std::forward<T>(src));
1765 std::size_t srcChunkIndex = src.lastOccupiedChunk() - src.chunks_;
1767 auto srcChunk = &src.chunks_[srcChunkIndex];
1768 auto iter = srcChunk->occupiedIter();
1769 if (prefetchBeforeRehash()) {
1770 for (
auto piter = iter; piter.hasNext();) {
1771 this->prefetchValue(srcChunk->item(piter.next()));
1774 if (srcChunk->hostedOverflowCount() == 0) {
1777 while (iter.hasNext()) {
1778 auto i = iter.next();
1779 auto& srcItem = srcChunk->item(
i);
1780 auto&& srcArg = std::forward<T>(src).buildArgForItem(srcItem);
1781 HashPair hp{srcChunkIndex, srcChunk->tag(
i)};
1783 allocateTag(fullness, hp),
1785 std::forward<decltype(srcArg)>(srcArg));
1789 while (iter.hasNext()) {
1790 auto i = iter.next();
1791 auto& srcItem = srcChunk->item(
i);
1792 auto&& srcArg = std::forward<T>(src).buildArgForItem(srcItem);
1793 auto const& srcKey = src.keyForValue(srcArg);
1794 auto hp = splitHash(this->computeKeyHash(srcKey));
1797 allocateTag(fullness, hp),
1799 std::forward<decltype(srcArg)>(srcArg));
1802 if (srcChunkIndex == 0) {
1811 template <
typename T>
1814 if (src.size() == 0) {
1818 reserveForInsert(src.size());
1820 if (chunkMask_ == src.chunkMask_) {
1821 directBuildFrom(std::forward<T>(src));
1823 rehashBuildFrom(std::forward<T>(src));
1833 std::size_t capacity,
1834 std::size_t origChunkCount,
1835 std::size_t origMaxSizeWithoutRehash) {
1839 std::size_t
const kInitialCapacity = 2;
1840 std::size_t
const kHalfChunkCapacity =
1841 (Chunk::kDesiredCapacity / 2) & ~std::size_t{1};
1842 std::size_t newMaxSizeWithoutRehash;
1843 std::size_t newChunkCount;
1844 if (capacity <= kHalfChunkCapacity) {
1846 newMaxSizeWithoutRehash =
1847 (capacity < kInitialCapacity) ? kInitialCapacity : kHalfChunkCapacity;
1849 newChunkCount =
nextPowTwo((capacity - 1) / Chunk::kDesiredCapacity + 1);
1850 newMaxSizeWithoutRehash = newChunkCount * Chunk::kDesiredCapacity;
1852 constexpr std::size_t kMaxChunksWithoutCapacityOverflow =
1855 if (newChunkCount > kMaxChunksWithoutCapacityOverflow ||
1856 newMaxSizeWithoutRehash > max_size()) {
1857 throw_exception<std::bad_alloc>();
1861 if (origMaxSizeWithoutRehash != newMaxSizeWithoutRehash) {
1864 origMaxSizeWithoutRehash,
1866 newMaxSizeWithoutRehash);
1871 std::size_t origChunkCount,
1872 std::size_t origMaxSizeWithoutRehash,
1873 std::size_t newChunkCount,
1874 std::size_t newMaxSizeWithoutRehash) {
1875 auto origChunks = chunks_;
1877 BytePtr rawAllocation;
1878 auto undoState = this->beforeRehash(
1880 origMaxSizeWithoutRehash,
1881 newMaxSizeWithoutRehash,
1882 chunkAllocSize(newChunkCount, newMaxSizeWithoutRehash),
1885 initializeChunks(rawAllocation, newChunkCount, newMaxSizeWithoutRehash);
1889 chunkMask_ =
static_cast<InternalSizeType
>(newChunkCount - 1);
1894 BytePtr finishedRawAllocation =
nullptr;
1895 std::size_t finishedAllocSize = 0;
1897 if (origMaxSizeWithoutRehash > 0) {
1898 finishedRawAllocation = std::pointer_traits<BytePtr>::pointer_to(
1899 *static_cast<uint8_t*>(static_cast<void*>(&*origChunks)));
1901 chunkAllocSize(origChunkCount, origMaxSizeWithoutRehash);
1904 finishedRawAllocation = rawAllocation;
1906 chunkAllocSize(newChunkCount, newMaxSizeWithoutRehash);
1907 chunks_ = origChunks;
1910 chunkMask_ =
static_cast<InternalSizeType
>(origChunkCount - 1);
1918 origMaxSizeWithoutRehash,
1919 newMaxSizeWithoutRehash,
1920 finishedRawAllocation,
1926 }
else if (origChunkCount == 1 && newChunkCount == 1) {
1928 auto srcChunk = origChunks;
1929 auto dstChunk = chunks_;
1930 std::size_t srcI = 0;
1931 std::size_t dstI = 0;
1932 while (dstI <
size()) {
1933 if (
LIKELY(srcChunk->occupied(srcI))) {
1934 dstChunk->setTag(dstI, srcChunk->tag(srcI));
1935 this->moveItemDuringRehash(
1936 dstChunk->itemAddr(dstI), srcChunk->item(srcI));
1941 if (kEnableItemIteration) {
1942 sizeAndPackedBegin_.packedBegin() = ItemIter{dstChunk, dstI - 1}.pack();
1946 std::array<uint8_t, 256> stackBuf;
1948 if (newChunkCount <= stackBuf.size()) {
1949 fullness = stackBuf.data();
1951 ByteAlloc
a{this->alloc()};
1954 &*std::allocator_traits<ByteAlloc>::allocate(
a, newChunkCount);
1956 std::memset(fullness,
'\0', newChunkCount);
1958 if (newChunkCount > stackBuf.size()) {
1959 ByteAlloc
a{this->alloc()};
1960 std::allocator_traits<ByteAlloc>::deallocate(
1962 std::pointer_traits<
typename std::allocator_traits<
1963 ByteAlloc>::pointer>::pointer_to(*fullness),
1968 auto srcChunk = origChunks + origChunkCount - 1;
1969 std::size_t remaining =
size();
1970 while (remaining > 0) {
1971 auto iter = srcChunk->occupiedIter();
1972 if (prefetchBeforeRehash()) {
1973 for (
auto piter = iter; piter.hasNext();) {
1974 this->prefetchValue(srcChunk->item(piter.next()));
1977 while (iter.hasNext()) {
1979 auto srcI = iter.next();
1980 Item& srcItem = srcChunk->item(srcI);
1981 auto hp = splitHash(
1982 this->computeItemHash(const_cast<Item const&>(srcItem)));
1985 auto dstIter = allocateTag(fullness, hp);
1986 this->moveItemDuringRehash(dstIter.itemAddr(), srcItem);
1991 if (kEnableItemIteration) {
1993 std::size_t
i = chunkMask_;
1994 while (fullness[i] == 0) {
1997 sizeAndPackedBegin_.packedBegin() =
1998 ItemIter{chunks_ +
i, std::size_t{fullness[
i]} - 1}.pack();
2005 void asanOnReserve(std::size_t capacity) {
2007 asanPendingSafeInserts += capacity -
size();
2011 bool asanShouldAddExtraRehash() {
2014 }
else if (asanPendingSafeInserts > 0) {
2017 }
else if (
size() <= 1) {
2020 constexpr std::size_t kBigPrime = 4294967291U;
2021 auto s = (asanRehashState += kBigPrime);
2022 return (
s %
size()) == 0;
2026 void asanExtraRehash() {
2027 auto cc = chunkMask_ + 1;
2028 auto bc = bucket_count();
2029 rehashImpl(
cc, bc,
cc, bc);
2032 void asanOnInsert() {
2043 if (asanShouldAddExtraRehash()) {
2051 void rehash(std::size_t capacity) {
2055 void reserve(std::size_t capacity) {
2058 asanOnReserve(capacity);
2060 std::max<std::size_t>(capacity,
size()),
2066 void reserveForInsert(
size_t incoming = 1) {
2067 auto capacity =
size() + incoming;
2068 auto bc = bucket_count();
2069 if (capacity - 1 >= bc) {
2070 reserveImpl(capacity, chunkMask_ + 1, bc);
2077 template <
typename K,
typename...
Args>
2078 std::pair<ItemIter, bool> tryEmplaceValue(K
const& key,
Args&&... args) {
2079 const auto hp = splitHash(this->computeKeyHash(key));
2082 auto existing = findImpl(hp, key);
2083 if (!existing.atEnd()) {
2084 return std::make_pair(existing,
false);
2092 std::size_t index = hp.first;
2093 ChunkPtr chunk = chunks_ + (index & chunkMask_);
2094 auto firstEmpty = chunk->firstEmpty();
2096 if (!firstEmpty.hasIndex()) {
2097 std::size_t delta = probeDelta(hp);
2099 chunk->incrOutboundOverflowCount();
2101 chunk = chunks_ + (index & chunkMask_);
2102 firstEmpty = chunk->firstEmpty();
2103 }
while (!firstEmpty.hasIndex());
2104 chunk->adjustHostedOverflowCount(Chunk::kIncrHostedOverflowCount);
2106 std::size_t itemIndex = firstEmpty.index();
2109 chunk->setTag(itemIndex, hp.second);
2110 ItemIter iter{chunk, itemIndex};
2113 insertAtBlank(iter, hp, std::forward<Args>(args)...);
2114 return std::make_pair(iter,
true);
2118 template <
bool Reset>
2120 if (chunks_ == Chunk::emptyInstance()) {
2127 bool willReset = Reset || chunkMask_ + 1 >= 16;
2129 auto origSize =
size();
2130 auto origCapacity = bucket_count();
2132 this->beforeReset(origSize, origCapacity);
2134 this->beforeClear(origSize, origCapacity);
2138 if (destroyItemOnClear()) {
2139 for (std::size_t ci = 0; ci <= chunkMask_; ++ci) {
2140 ChunkPtr chunk = chunks_ + ci;
2141 auto iter = chunk->occupiedIter();
2142 if (prefetchBeforeDestroy()) {
2143 for (
auto piter = iter; piter.hasNext();) {
2144 this->prefetchValue(chunk->item(piter.next()));
2147 while (iter.hasNext()) {
2148 this->destroyItem(chunk->item(iter.next()));
2156 auto c0c = chunks_[0].chunk0Capacity();
2157 for (std::size_t ci = 0; ci <= chunkMask_; ++ci) {
2158 chunks_[ci].clear();
2160 chunks_[0].markEof(c0c);
2162 if (kEnableItemIteration) {
2163 sizeAndPackedBegin_.packedBegin() = ItemIter{}.pack();
2165 sizeAndPackedBegin_.size_ = 0;
2169 BytePtr rawAllocation = std::pointer_traits<BytePtr>::pointer_to(
2170 *static_cast<uint8_t*>(static_cast<void*>(&*chunks_)));
2171 std::size_t rawSize = chunkAllocSize(chunkMask_ + 1, bucket_count());
2173 chunks_ = Chunk::emptyInstance();
2176 this->afterReset(origSize, origCapacity, rawAllocation, rawSize);
2178 this->afterClear(origSize, origCapacity);
2182 void eraseImpl(ItemIter pos, HashPair hp) {
2183 this->destroyItem(pos.item());
2184 adjustSizeAndBeginBeforeErase(pos);
2185 eraseBlank(pos, hp);
2192 void eraseIter(ItemIter pos) {
2193 eraseIterInto(pos, [](value_type&&) {});
2199 template <
typename BeforeDestroy>
2200 void eraseIterInto(ItemIter pos, BeforeDestroy&& beforeDestroy) {
2202 if (pos.chunk()->hostedOverflowCount() != 0) {
2203 hp = splitHash(this->computeItemHash(pos.citem()));
2205 beforeDestroy(this->valueAtItemForExtract(pos.item()));
2209 template <
typename K>
2210 std::size_t eraseKey(K
const& key) {
2211 return eraseKeyInto(key, [](value_type&&) {});
2214 template <
typename K,
typename BeforeDestroy>
2215 std::size_t eraseKeyInto(K
const& key, BeforeDestroy&& beforeDestroy) {
2219 auto hp = splitHash(this->computeKeyHash(key));
2220 auto iter = findImpl(hp, key);
2221 if (!iter.atEnd()) {
2222 beforeDestroy(this->valueAtItemForExtract(iter.item()));
2223 eraseImpl(iter, hp);
2233 auto bc = bucket_count();
2236 reserveImpl(bc, 0, 0);
2237 }
catch (std::bad_alloc
const&) {
2252 std::size_t getAllocatedMemorySize()
const {
2253 std::size_t
sum = 0;
2254 visitAllocationClasses(
2255 [&sum](std::size_t bytes, std::size_t n) { sum += bytes * n; });
2268 template <
typename V>
2269 void visitAllocationClasses(V&& visitor)
const {
2270 auto bc = bucket_count();
2271 this->visitPolicyAllocationClasses(
2272 (bc == 0 ? 0 : chunkAllocSize(chunkMask_ + 1, bc)),
2279 template <
typename V>
2280 void visitItems(V&& visitor)
const {
2284 std::size_t maxChunkIndex = lastOccupiedChunk() - chunks_;
2285 auto chunk = &chunks_[0];
2286 for (std::size_t
i = 0;
i <= maxChunkIndex; ++
i, ++chunk) {
2287 auto iter = chunk->occupiedIter();
2288 if (prefetchBeforeCopy()) {
2289 for (
auto piter = iter; piter.hasNext();) {
2290 this->prefetchValue(chunk->citem(piter.next()));
2293 while (iter.hasNext()) {
2294 visitor(chunk->citem(iter.next()));
2300 template <
typename V>
2301 void visitContiguousItemRanges(V&& visitor)
const {
2305 std::size_t maxChunkIndex = lastOccupiedChunk() - chunks_;
2306 auto chunk = &chunks_[0];
2307 for (std::size_t
i = 0;
i <= maxChunkIndex; ++
i, ++chunk) {
2308 for (
auto iter = chunk->occupiedRangeIter(); iter.hasNext();) {
2309 auto be = iter.next();
2311 chunk->occupied(be.first) && chunk->occupied(be.second - 1),
"");
2312 Item
const*
b = chunk->itemAddr(be.first);
2313 visitor(b, b + (be.second - be.first));
2319 static std::size_t& histoAt(
2320 std::vector<std::size_t>& histo,
2321 std::size_t index) {
2322 if (histo.size() <= index) {
2323 histo.resize(index + 1);
2325 return histo.at(index);
2333 if (
kIsDebug && kEnableItemIteration) {
2337 for (
auto iter =
begin(); iter !=
end(); iter.advance()) {
2346 (chunks_ == Chunk::emptyInstance()) == (bucket_count() == 0),
"");
2350 auto cc = bucket_count() == 0 ? 0 : chunkMask_ + 1;
2351 for (std::size_t ci = 0; ci <
cc; ++ci) {
2352 ChunkPtr chunk = chunks_ + ci;
2355 auto iter = chunk->occupiedIter();
2357 std::size_t chunkOccupied = 0;
2358 for (
auto piter = iter; piter.hasNext(); piter.next()) {
2361 n1 += chunkOccupied;
2368 while (iter.hasNext()) {
2369 auto ii = iter.next();
2373 auto& item = chunk->citem(ii);
2374 auto hp = splitHash(this->computeItemHash(item));
2377 std::size_t
dist = 1;
2378 std::size_t index = hp.first;
2379 std::size_t delta = probeDelta(hp);
2380 while ((index & chunkMask_) != ci) {
2390 for (std::size_t ti = 0; ti < 256; ++ti) {
2392 HashPair hp{ci, tag};
2394 std::size_t
dist = 1;
2395 std::size_t index = hp.first;
2396 std::size_t delta = probeDelta(hp);
2397 for (std::size_t tries = 0; tries <= chunkMask_ &&
2398 chunks_[index & chunkMask_].outboundOverflowCount() != 0;
2420 stats.
totalBytes =
sizeof(*this) + getAllocatedMemorySize();
2429 #endif // FOLLY_F14_VECTOR_INTRINSICS_AVAILABLE
constexpr unsigned int popcount(T const v)
constexpr std::size_t constexpr_find_first_set(T t)
std::vector< std::size_t > keyProbeLengthHisto
typename remove_cvref< T >::type remove_cvref_t
std::atomic< int64_t > sum(0)
#define FOLLY_F14_TLS_IF_ASAN
#define FOLLY_ALWAYS_INLINE
constexpr T nextPowTwo(T const v)
constexpr T constexpr_min(T a)
constexpr detail::Map< Move > move
auto begin(TestAdlIterable &instance)
constexpr bool kIsSanitizeAddress
internal::ArgsMatcher< InnerMatcher > Args(const InnerMatcher &matcher)
The non test part of the code is expected to have failures gtest_output_test_ cc
—— Concurrent Priority Queue Implementation ——
requires E e noexcept(noexcept(s.error(std::move(e))))
static auto computeHelper(T const *m) -> decltype(m->computeStats())
bool_constant< true > true_type
FOLLY_PUSH_WARNING RHS rhs
constexpr T constexpr_max(T a)
static F14TableStats compute(T const &m)
FOLLY_F14_TLS_IF_ASAN std::size_t asanPendingSafeInserts
bool operator!=(const Unexpected< Error > &lhs, const Unexpected< Error > &rhs)
std::vector< std::size_t > missProbeLengthHisto
FOLLY_ALWAYS_INLINE void assume_unreachable()
constexpr auto empty(C const &c) -> decltype(c.empty())
std::vector< std::size_t > chunkOccupancyHisto
bool Value(const T &value, M matcher)
static constexpr F14IntrinsicsMode getF14IntrinsicsMode()
auto end(TestAdlIterable &instance)
static map< string, int > m
constexpr unsigned int findLastSet(T const v)
std::uniform_int_distribution< milliseconds::rep > dist
std::enable_if< std::is_integral< Src >::value &&IsSomeString< Tgt >::value &&sizeof(Src)< 4 >::typetoAppend(Src value, Tgt *result){typedef typename std::conditional< std::is_signed< Src >::value, int64_t, uint64_t >::type Intermediate;toAppend< Tgt >static_cast< Intermediate >value), result);}template< class Src >typename std::enable_if< std::is_integral< Src >::value &&sizeof(Src)< 4 &&!std::is_same< Src, char >::value, size_t >::typeestimateSpaceNeeded(Src value){typedef typename std::conditional< std::is_signed< Src >::value, int64_t, uint64_t >::type Intermediate;return estimateSpaceNeeded(static_cast< Intermediate >value));}template< class Tgt, class Src >typename std::enable_if< std::is_enum< Src >::value &&IsSomeString< Tgt >::value >::typetoAppend(Src value, Tgt *result){toAppend(static_cast< typename std::underlying_type< Src >::type >value), result);}template< class Src >typename std::enable_if< std::is_enum< Src >::value, size_t >::typeestimateSpaceNeeded(Src value){return estimateSpaceNeeded(static_cast< typename std::underlying_type< Src >::type >value));}namespace detail{constexpr int kConvMaxDecimalInShortestLow=-6;constexpr int kConvMaxDecimalInShortestHigh=21;}template< class Tgt, class Src >typename std::enable_if< std::is_floating_point< Src >::value &&IsSomeString< Tgt >::value >::typetoAppend(Src value, Tgt *result, double_conversion::DoubleToStringConverter::DtoaMode mode, unsigned int numDigits){using namespace double_conversion;DoubleToStringConverter conv(DoubleToStringConverter::NO_FLAGS,"Infinity","NaN", 'E', detail::kConvMaxDecimalInShortestLow, detail::kConvMaxDecimalInShortestHigh, 6, 1);char buffer[256];StringBuilder builder(buffer, sizeof(buffer));switch(mode){case DoubleToStringConverter::SHORTEST:conv.ToShortest(value,&builder);break;case DoubleToStringConverter::SHORTEST_SINGLE:conv.ToShortestSingle(static_cast< float >value),&builder);break;case DoubleToStringConverter::FIXED:conv.ToFixed(value, 
int(numDigits),&builder);break;default:CHECK(mode==DoubleToStringConverter::PRECISION);conv.ToPrecision(value, int(numDigits),&builder);break;}const size_t length=size_t(builder.position());builder.Finalize();result->append(buffer, length);}template< class Tgt, class Src >typename std::enable_if< std::is_floating_point< Src >::value &&IsSomeString< Tgt >::value >::typetoAppend(Src value, Tgt *result){toAppend(value, result, double_conversion::DoubleToStringConverter::SHORTEST, 0);}template< class Src >typename std::enable_if< std::is_floating_point< Src >::value, size_t >::typeestimateSpaceNeeded(Src value){constexpr int kMaxMantissaSpace=double_conversion::DoubleToStringConverter::kBase10MaximalLength+1;constexpr int kMaxExponentSpace=2+3;static const int kMaxPositiveSpace=std::max({kMaxMantissaSpace+kMaxExponentSpace, kMaxMantissaSpace-detail::kConvMaxDecimalInShortestLow, detail::kConvMaxDecimalInShortestHigh,});return size_t(kMaxPositiveSpace+(value< 0?1:0));}template< class Src >struct HasLengthEstimator:std::false_type{};template< class Src >constexpr typename std::enable_if< !std::is_fundamental< Src >::value &&!IsSomeString< Src >::value &&!std::is_convertible< Src, const char * >::value &&!std::is_convertible< Src, StringPiece >::value &&!std::is_enum< Src >::value &&!HasLengthEstimator< Src >::value, size_t >::typeestimateSpaceNeeded(const Src &){return sizeof(Src)+1;}namespace detail{template< class Tgt >typename std::enable_if< IsSomeString< Tgt >::value, size_t >::typeestimateSpaceToReserve(size_t sofar, Tgt *){return sofar;}template< class T, class...Ts >size_t estimateSpaceToReserve(size_t sofar, const T &v, const Ts &...vs){return estimateSpaceToReserve(sofar+estimateSpaceNeeded(v), vs...);}template< class...Ts >void reserveInTarget(const Ts &...vs){getLastElement(vs...) -> reserve(estimateSpaceToReserve(0, vs...))
std::is_trivially_copyable< T > is_trivially_copyable
static const char *const value
std::vector< std::size_t > chunkHostedOverflowHisto
std::size_t overheadBytes
#define FOLLY_SAFE_CHECK(expr, msg)
FOLLY_F14_TLS_IF_ASAN std::size_t asanRehashState
void swap(exception_wrapper &a, exception_wrapper &b) noexcept
bool operator==(const Unexpected< Error > &lhs, const Unexpected< Error > &rhs)
std::vector< std::size_t > chunkOutboundOverflowHisto
static F14TableStats computeHelper(...)
uint64_t value(const typename LockFreeRingBuffer< T, Atom >::Cursor &rbcursor)
bool_constant< false > false_type
FOLLY_NODISCARD T * launder(T *in) noexcept
void swap(SwapTrackingAlloc< T > &, SwapTrackingAlloc< T > &)
FOLLY_ALWAYS_INLINE void assume(bool cond)
#define FOLLY_SAFE_DCHECK(expr, msg)
bool check(const dynamic &schema, const dynamic &value, bool check=true)
std::enable_if< IsLessThanComparable< Value >::value, bool >::type operator<(const Expected< Value, Error > &lhs, const Expected< Value, Error > &rhs)