proxygen
EliasFanoCoding.h
Go to the documentation of this file.
1 /*
2  * Copyright 2013-present Facebook, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
24 #pragma once
25 
26 #include <algorithm>
27 #include <cstdlib>
28 #include <limits>
29 #include <type_traits>
30 
31 #include <folly/Likely.h>
32 #include <folly/Portability.h>
33 #include <folly/Range.h>
37 #include <folly/lang/Assume.h>
38 #include <folly/lang/Bits.h>
39 #include <glog/logging.h>
40 
41 #if !FOLLY_X64
42 #error EliasFanoCoding.h requires x86_64
43 #endif
44 
45 namespace folly {
46 namespace compression {
47 
48 static_assert(kIsLittleEndian, "EliasFanoCoding.h requires little endianness");
49 
50 constexpr size_t kCacheLineSize = 64;
51 
52 template <class Pointer>
54  EliasFanoCompressedListBase() = default;
55 
56  template <class OtherPointer>
59  : size(other.size),
61  data(other.data),
62  skipPointers(reinterpret_cast<Pointer>(other.skipPointers)),
63  forwardPointers(reinterpret_cast<Pointer>(other.forwardPointers)),
64  lower(reinterpret_cast<Pointer>(other.lower)),
65  upper(reinterpret_cast<Pointer>(other.upper)) {}
66 
67  template <class T = Pointer>
68  auto free() -> decltype(::free(T(nullptr))) {
70  }
71 
72  size_t upperSize() const {
73  return size_t(data.end() - upper);
74  }
75 
76  size_t size = 0;
78 
79  // WARNING: EliasFanoCompressedList has no ownership of data. The 7
80  // bytes following the last byte should be readable.
82 
83  Pointer skipPointers = nullptr;
84  Pointer forwardPointers = nullptr;
85  Pointer lower = nullptr;
86  Pointer upper = nullptr;
87 };
88 
91 
92 template <
93  class Value,
94  class SkipValue = size_t,
95  size_t kSkipQuantum = 0, // 0 = disabled
96  size_t kForwardQuantum = 0> // 0 = disabled
98  static_assert(
100  "Value should be unsigned integral");
101 
102  typedef EliasFanoCompressedList CompressedList;
103  typedef MutableEliasFanoCompressedList MutableCompressedList;
104 
105  typedef Value ValueType;
106  typedef SkipValue SkipValueType;
107  struct Layout;
108 
109  static constexpr size_t skipQuantum = kSkipQuantum;
110  static constexpr size_t forwardQuantum = kForwardQuantum;
111 
  // Returns the optimal number of low-order bits to store explicitly
  // per element, i.e. floor(log2(upperBound / size)), for a list of
  // `size` values bounded by `upperBound`. Returns 0 for an empty list
  // or when upperBound < size.
  static uint8_t defaultNumLowerBits(size_t upperBound, size_t size) {
    if (UNLIKELY(size == 0 || upperBound < size)) {
      return 0;
    }
    // Result that should be returned is "floor(log(upperBound / size))".
    // In order to avoid expensive division, we rely on
    // "floor(a) - floor(b) - 1 <= floor(a - b) <= floor(a) - floor(b)".
    // Assuming "candidate = floor(log(upperBound)) - floor(log(size))",
    // then result is either "candidate - 1" or "candidate".
    auto candidate = folly::findLastSet(upperBound) - folly::findLastSet(size);
    // NOTE: As size != 0, "candidate" is always < 64.
    return (size > (upperBound >> candidate)) ? candidate - 1 : candidate;
  }
125 
126  // Requires: input range (begin, end) is sorted (encoding
127  // crashes if it's not).
128  // WARNING: encode() mallocates EliasFanoCompressedList::data. As
129  // EliasFanoCompressedList has no ownership of it, you need to call
130  // free() explicitly.
131  template <class RandomAccessIterator>
132  static MutableCompressedList encode(
133  RandomAccessIterator begin,
134  RandomAccessIterator end) {
135  if (begin == end) {
136  return MutableCompressedList();
137  }
138  EliasFanoEncoderV2 encoder(size_t(end - begin), *(end - 1));
139  for (; begin != end; ++begin) {
140  encoder.add(*begin);
141  }
142  return encoder.finish();
143  }
144 
  // Wraps a pre-allocated list: section pointers are taken from
  // `result`, and the whole backing buffer is zeroed so that add()
  // can OR bits in without a read-modify-clear step.
  explicit EliasFanoEncoderV2(const MutableCompressedList& result)
      : lower_(result.lower),
        upper_(result.upper),
        skipPointers_(reinterpret_cast<SkipValueType*>(result.skipPointers)),
        forwardPointers_(
            reinterpret_cast<SkipValueType*>(result.forwardPointers)),
        result_(result) {
    std::fill(result.data.begin(), result.data.end(), '\0');
  }
154 
155  EliasFanoEncoderV2(size_t size, ValueType upperBound)
157  Layout::fromUpperBoundAndSize(upperBound, size).allocList()) {}
158 
  // Appends `value` to the list. Values must be added in non-decreasing
  // order; the maximal ValueType value is rejected (reserved as a
  // reader-side sentinel).
  void add(ValueType value) {
    CHECK_LT(value, std::numeric_limits<ValueType>::max());
    CHECK_GE(value, lastValue_);

    const auto numLowerBits = result_.numLowerBits;
    const ValueType upperBits = value >> numLowerBits;

    // Upper sequence consists of upperBits 0-bits and (size_ + 1) 1-bits.
    const size_t pos = upperBits + size_;
    upper_[pos / 8] |= 1U << (pos % 8);
    // Append numLowerBits bits to lower sequence.
    if (numLowerBits != 0) {
      const ValueType lowerBits = value & ((ValueType(1) << numLowerBits) - 1);
      writeBits56(lower_, size_ * numLowerBits, numLowerBits, lowerBits);
    }

    // Skip pointers: for each skipQuantum-th 0-bit of the upper
    // sequence, record how many 1-bits (elements) precede it.
    if /* constexpr */ (skipQuantum != 0) {
      while ((skipPointersSize_ + 1) * skipQuantum <= upperBits) {
        // Store the number of preceding 1-bits.
        skipPointers_[skipPointersSize_++] = SkipValue(size_);
      }
    }

    // Forward pointers: for each forwardQuantum-th 1-bit of the upper
    // sequence, record how many 0-bits precede it.
    if /* constexpr */ (forwardQuantum != 0) {
      if ((size_ + 1) % forwardQuantum == 0) {
        const auto k = size_ / forwardQuantum;
        // Store the number of preceding 0-bits.
        forwardPointers_[k] = upperBits;
      }
    }

    lastValue_ = value;
    ++size_;
  }
193 
  // Checks that exactly the promised number of elements was added and
  // returns the resulting (caller-owned) compressed list.
  const MutableCompressedList& finish() const {
    CHECK_EQ(size_, result_.size);
    return result_;
  }
198 
199  private:
  // Writes value (with len up to 56 bits) to data starting at pos-th bit.
  // Performs an unaligned 64-bit read-modify-write, so the 7 bytes past
  // the target byte must be readable/writable, and the destination bits
  // must already be zero (guaranteed by the zero-filled buffer).
  static void
  writeBits56(unsigned char* data, size_t pos, uint8_t len, uint64_t value) {
    DCHECK_LE(uint32_t(len), 56);
    DCHECK_EQ(0, value & ~((uint64_t(1) << len) - 1));
    unsigned char* const ptr = data + (pos / 8);
    uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
    ptrv |= value << (pos % 8);
    folly::storeUnaligned<uint64_t>(ptr, ptrv);
  }
210 
211  unsigned char* lower_ = nullptr;
212  unsigned char* upper_ = nullptr;
213  SkipValueType* skipPointers_ = nullptr;
214  SkipValueType* forwardPointers_ = nullptr;
215 
216  ValueType lastValue_ = 0;
217  size_t size_ = 0;
218  size_t skipPointersSize_ = 0;
219 
220  MutableCompressedList result_;
221 };
222 
223 template <
224  class Value,
225  class SkipValue,
226  size_t kSkipQuantum,
227  size_t kForwardQuantum>
228 struct EliasFanoEncoderV2<Value, SkipValue, kSkipQuantum, kForwardQuantum>::
229  Layout {
  // Computes all section sizes (in bytes) for a list of `size` elements
  // whose largest element is `upperBound`.
  static Layout fromUpperBoundAndSize(size_t upperBound, size_t size) {
    // numLowerBits can be at most 56 because of detail::writeBits56.
    const uint8_t numLowerBits =
        std::min(defaultNumLowerBits(upperBound, size), uint8_t(56));
    // *** Upper bits.
    // Upper bits are stored using unary delta encoding.
    // For example, (3 5 5 9) will be encoded as 1000011001000_2.
    const size_t upperSizeBits =
        (upperBound >> numLowerBits) + // Number of 0-bits to be stored.
        size; // 1-bits.
    const size_t upper = (upperSizeBits + 7) / 8;

    // *** Validity checks.
    // Shift by numLowerBits must be valid.
    CHECK_LT(numLowerBits, 8 * sizeof(Value));
    CHECK_LT(size, std::numeric_limits<SkipValueType>::max());
    CHECK_LT(
        upperBound >> numLowerBits, std::numeric_limits<SkipValueType>::max());

    return fromInternalSizes(numLowerBits, upper, size);
  }
251 
252  static Layout
254  Layout layout;
255  layout.size = size;
256  layout.numLowerBits = numLowerBits;
257 
258  layout.lower = (numLowerBits * size + 7) / 8;
259  layout.upper = upper;
260 
261  // *** Skip pointers.
262  // Store (1-indexed) position of every skipQuantum-th
263  // 0-bit in upper bits sequence.
264  if /* constexpr */ (skipQuantum != 0) {
265  // 8 * upper is used here instead of upperSizeBits, as that is
266  // more serialization-friendly way (upperSizeBits doesn't need
267  // to be known by this function, unlike upper).
268 
269  size_t numSkipPointers = (8 * upper - size) / skipQuantum;
270  layout.skipPointers = numSkipPointers * sizeof(SkipValueType);
271  }
272 
273  // *** Forward pointers.
274  // Store (1-indexed) position of every forwardQuantum-th
275  // 1-bit in upper bits sequence.
276  if /* constexpr */ (forwardQuantum != 0) {
277  size_t numForwardPointers = size / forwardQuantum;
278  layout.forwardPointers = numForwardPointers * sizeof(SkipValueType);
279  }
280 
281  return layout;
282  }
283 
284  size_t bytes() const {
286  }
287 
288  template <class Range>
290  Range& buf) const {
292  result.size = size;
293  result.numLowerBits = numLowerBits;
294  result.data = buf.subpiece(0, bytes());
295 
296  auto advance = [&](size_t n) {
297  auto begin = buf.data();
298  buf.advance(n);
299  return begin;
300  };
301 
304  result.lower = advance(lower);
305  result.upper = advance(upper);
306 
307  return result;
308  }
309 
311  uint8_t* buf = nullptr;
312  // WARNING: Current read/write logic assumes that the 7 bytes
313  // following the last byte of lower and upper sequences are
314  // readable (stored value doesn't matter and won't be changed), so
315  // we allocate additional 7 bytes, but do not include them in size
316  // of returned value.
317  if (size > 0) {
318  buf = static_cast<uint8_t*>(malloc(bytes() + 7));
319  }
320  folly::MutableByteRange bufRange(buf, bytes());
321  return openList(bufRange);
322  }
323 
324  size_t size = 0;
326 
327  // Sizes in bytes.
328  size_t lower = 0;
329  size_t upper = 0;
330  size_t skipPointers = 0;
331  size_t forwardPointers = 0;
332 };
333 
334 namespace detail {
335 
336 template <class Encoder, class Instructions, class SizeType>
337 class UpperBitsReader : ForwardPointers<Encoder::forwardQuantum>,
338  SkipPointers<Encoder::skipQuantum> {
339  typedef typename Encoder::SkipValueType SkipValueType;
340 
341  public:
342  typedef typename Encoder::ValueType ValueType;
343 
  // Non-owning view over the upper-bits sequence and its skip/forward
  // pointer sections; starts in the before-first state.
  explicit UpperBitsReader(const typename Encoder::CompressedList& list)
      : ForwardPointers<Encoder::forwardQuantum>(list.forwardPointers),
        SkipPointers<Encoder::skipQuantum>(list.skipPointers),
        start_(list.upper) {
    reset();
  }
350 
351  void reset() {
352  block_ = start_ != nullptr ? folly::loadUnaligned<block_t>(start_) : 0;
354  outer_ = 0;
355  value_ = 0;
356  }
357 
  // Index of the current element (= number of 1-bits consumed - 1).
  SizeType position() const {
    return position_;
  }
  // Upper-bits value of the current element (= number of 0-bits before
  // its 1-bit in the upper sequence).
  ValueType value() const {
    return value_;
  }
364 
  // Steps one element back and returns its upper-bits value.
  // Precondition: position() > 0.
  ValueType previous() {
    size_t inner;
    block_t block;
    getPreviousInfo(block, inner, outer_);
    // Reload the block at the (possibly moved-back) outer offset and
    // XOR away the bits getPreviousInfo masked in, so the consumed
    // low bits are cleared for subsequent next() calls.
    block_ = folly::loadUnaligned<block_t>(start_ + outer_);
    block_ ^= block;
    --position_;
    return setValue(inner);
  }
374 
  // Advances to the next 1-bit (next element) and returns its value.
  ValueType next() {
    // Skip to the first non-zero block.
    while (block_ == 0) {
      outer_ += sizeof(block_t);
      block_ = folly::loadUnaligned<block_t>(start_ + outer_);
    }

    ++position_;
    size_t inner = Instructions::ctz(block_); // Offset of the lowest 1-bit.
    block_ = Instructions::blsr(block_); // Clear that lowest 1-bit.

    return setValue(inner);
  }
388 
  // Advances by n elements (n > 0) and returns the new current value.
  // Uses forward pointers to jump over whole quanta when n is large.
  ValueType skip(SizeType n) {
    DCHECK_GT(n, 0);

    position_ += n; // n 1-bits will be read.

    // Use forward pointer.
    if (Encoder::forwardQuantum > 0 && n > Encoder::forwardQuantum) {
      // dest = number of 0-bits preceding the (steps*quantum)-th 1-bit.
      const size_t steps = position_ / Encoder::forwardQuantum;
      const size_t dest = folly::loadUnaligned<SkipValueType>(
          this->forwardPointers_ + (steps - 1) * sizeof(SkipValueType));

      reposition(dest + steps * Encoder::forwardQuantum);
      n = position_ + 1 - steps * Encoder::forwardQuantum; // n is > 0.
    }

    size_t cnt;
    // Find necessary block.
    while ((cnt = Instructions::popcount(block_)) < n) {
      n -= cnt;
      outer_ += sizeof(block_t);
      block_ = folly::loadUnaligned<block_t>(start_ + outer_);
    }

    // Skip to the n-th one in the block.
    DCHECK_GT(n, 0);
    size_t inner = select64<Instructions>(block_, n - 1);
    block_ &= (block_t(-1) << inner) << 1; // Clear bits up to and incl. inner.

    return setValue(inner);
  }
419 
420  // Skip to the first element that is >= v and located *after* the current
421  // one (so even if current value equals v, position will be increased by 1).
  // Skip to the first element that is >= v and located *after* the current
  // one (so even if current value equals v, position will be increased by 1).
  // Uses skip pointers to jump over whole quanta of 0-bits when v is far.
  ValueType skipToNext(ValueType v) {
    DCHECK_GE(v, value_);

    // Use skip pointer.
    if (Encoder::skipQuantum > 0 && v >= value_ + Encoder::skipQuantum) {
      // dest = number of 1-bits preceding the (steps*quantum)-th 0-bit.
      const size_t steps = v / Encoder::skipQuantum;
      const size_t dest = folly::loadUnaligned<SkipValueType>(
          this->skipPointers_ + (steps - 1) * sizeof(SkipValueType));

      reposition(dest + Encoder::skipQuantum * steps);
      position_ = dest - 1;

      // Correct value_ will be set during the next() call at the end.

      // NOTE: Corresponding block of lower bits sequence may be
      // prefetched here (via __builtin_prefetch), but experiments
      // didn't show any significant improvements.
    }

    // Skip by blocks.
    size_t cnt;
    // Number of 0-bits still to be passed to reach value v.
    size_t skip = v - (8 * outer_ - position_ - 1);

    constexpr size_t kBitsPerBlock = 8 * sizeof(block_t);
    while ((cnt = Instructions::popcount(~block_)) < skip) {
      skip -= cnt;
      position_ += kBitsPerBlock - cnt; // 1-bits passed in this block.
      outer_ += sizeof(block_t);
      block_ = folly::loadUnaligned<block_t>(start_ + outer_);
    }

    if (LIKELY(skip)) {
      // Position on the skip-th 0-bit of the block; drop bits before it.
      auto inner = select64<Instructions>(~block_, skip - 1);
      position_ += inner - skip + 1;
      block_ &= block_t(-1) << inner;
    }

    next();
    return value_;
  }
462 
  // Const prefetch-only companion of skipToNext(v): prefetches the
  // upper-bits cache lines that a subsequent skipToNext(v) would touch,
  // and returns the estimated destination position (so the caller can
  // prefetch the matching lower-bits bytes). Mutates no state.
  SizeType prepareSkipTo(ValueType v) const {
    auto position = position_;

    if (Encoder::skipQuantum > 0 && v >= value_ + Encoder::skipQuantum) {
      auto outer = outer_;
      const size_t steps = v / Encoder::skipQuantum;
      const size_t dest = folly::loadUnaligned<SkipValueType>(
          this->skipPointers_ + (steps - 1) * sizeof(SkipValueType));

      position = dest - 1;
      outer = (dest + Encoder::skipQuantum * steps) / 8;

      // Prefetch up to the beginning of where we linear search. After that,
      // hardware prefetching will outperform our own. In addition, this
      // simplifies calculating what to prefetch as we don't have to calculate
      // the entire destination address. Two cache lines are prefetched because
      // this results in fewer cycles used (based on practical results) than
      // one. However, three cache lines does not have any additional effect.
      const auto addr = start_ + outer;
      __builtin_prefetch(addr);
      __builtin_prefetch(addr + kCacheLineSize);
    }

    return position;
  }
494 
495  ValueType jump(size_t n) {
496  if (Encoder::forwardQuantum == 0 || n <= Encoder::forwardQuantum) {
497  reset();
498  } else {
499  // Avoid reading the head, skip() will reposition.
501  }
502  return skip(n);
503  }
504 
  // Absolute variant of skipToNext(): finds the first element >= v
  // searching from the beginning of the list.
  ValueType jumpToNext(ValueType v) {
    if (Encoder::skipQuantum == 0 || v < Encoder::skipQuantum) {
      reset();
    } else {
      value_ = 0; // Avoid reading the head, skipToNext() will reposition.
    }
    return skipToNext(v);
  }
513 
  // Returns the previous element's value without mutating the reader.
  ValueType previousValue() const {
    block_t block;
    size_t inner;
    OuterType outer;
    getPreviousInfo(block, inner, outer);
    // value = absolute bit index of the previous 1-bit minus the number
    // of 1-bits before it (= position_ - 1).
    return static_cast<ValueType>(8 * outer + inner - (position_ - 1));
  }
521 
  // Parks the reader at endPos (past-the-end position).
  void setDone(SizeType endPos) {
    position_ = endPos;
  }
525 
526  private:
  // Recomputes value_ from the current bit position: number of 0-bits
  // before the current 1-bit = absolute bit index - 1-bits read.
  ValueType setValue(size_t inner) {
    value_ = static_cast<ValueType>(8 * outer_ + inner - position_);
    return value_;
  }
531 
  // Jumps to absolute bit offset `dest` of the upper sequence, masking
  // off the already-consumed bits of the containing block.
  void reposition(SizeType dest) {
    outer_ = dest / 8;
    block_ = folly::loadUnaligned<block_t>(start_ + outer_);
    block_ &= ~((block_t(1) << (dest % 8)) - 1);
  }
537 
538  using block_t = uint64_t;
539  // The size in bytes of the upper bits is limited by n + universe / 8,
540  // so a type that can hold either sizes or values is sufficient.
542 
  // Locates the previous element's 1-bit: on return `outer` is the byte
  // offset of the block containing it, `block` holds that block's bits
  // up to and including it, and `inner` is its bit index in the block.
  void getPreviousInfo(block_t& block, size_t& inner, OuterType& outer) const {
    DCHECK_NE(position(), std::numeric_limits<SizeType>::max());
    DCHECK_GT(position(), 0);

    outer = outer_;
    block = folly::loadUnaligned<block_t>(start_ + outer);
    // Bit index of the current element's 1-bit within this block.
    inner = size_t(value_) - 8 * outer_ + position_;
    block &= (block_t(1) << inner) - 1; // Keep only earlier bits.
    while (UNLIKELY(block == 0)) {
      // The previous 1-bit lives in an earlier block.
      DCHECK_GT(outer, 0);
      outer -= std::min<OuterType>(sizeof(block_t), outer);
      block = folly::loadUnaligned<block_t>(start_ + outer);
    }
    inner = 8 * sizeof(block_t) - 1 - Instructions::clz(block);
  }
558 
559  const unsigned char* const start_;
561  SizeType position_; // Index of current value (= #reads - 1).
562  OuterType outer_; // Outer offset: number of consumed bytes in upper.
563  ValueType value_;
564 };
565 
566 } // namespace detail
567 
568 // If kUnchecked = true the caller must guarantee that all the
569 // operations return valid elements, i.e., they would never return
570 // false if checked.
571 template <
572  class Encoder,
573  class Instructions = instructions::Default,
574  bool kUnchecked = false,
575  class SizeType = size_t>
577  public:
579  typedef typename Encoder::ValueType ValueType;
580 
  // Wraps a compressed list (non-owning) and precomputes lastValue_ so
  // skipTo()/jumpTo() can bound-check cheaply; skipped for unchecked
  // readers and empty lists.
  explicit EliasFanoReader(const typename Encoder::CompressedList& list)
      : upper_(list),
        lower_(list.lower),
        size_(list.size),
        numLowerBits_(list.numLowerBits) {
    DCHECK(Instructions::supported());
    // To avoid extra branching during skipTo() while reading
    // upper sequence we need to know the last element.
    // If kUnchecked == true, we do not check that skipTo() is called
    // within the bounds, so we can avoid initializing lastValue_.
    if (kUnchecked || UNLIKELY(list.size == 0)) {
      lastValue_ = 0;
      return;
    }
    // Upper part of the last value: total 0-bits in the upper sequence,
    // minus the unused zero bits after the last 1-bit of the final byte.
    ValueType lastUpperValue = ValueType(8 * list.upperSize() - size_);
    auto it = list.upper + list.upperSize() - 1;
    DCHECK_NE(*it, 0);
    lastUpperValue -= 8 - folly::findLastSet(*it);
    lastValue_ = readLowerPart(size_ - 1) | (lastUpperValue << numLowerBits_);
  }
601 
  // Rewinds the reader to the before-first state.
  void reset() {
    upper_.reset();
    value_ = kInvalidValue;
  }
606 
  // Moves one element back; returns false (and resets the reader) when
  // already at the first element (checked readers only).
  bool previous() {
    if (!kUnchecked && UNLIKELY(position() == 0)) {
      reset();
      return false;
    }
    upper_.previous();
    value_ =
        readLowerPart(upper_.position()) | (upper_.value() << numLowerBits_);
    return true;
  }
617 
  // Advances to the next element; returns false once the list is
  // exhausted (checked readers only).
  bool next() {
    if (!kUnchecked && UNLIKELY(position() + 1 >= size_)) {
      return setDone();
    }
    upper_.next();
    value_ =
        readLowerPart(upper_.position()) | (upper_.value() << numLowerBits_);
    return true;
  }
627 
  // Advances by n elements (n > 0): linear scan for small n, otherwise
  // the upper reader's pointer-assisted skip. Returns false when the
  // skip would run past the end (checked readers only).
  bool skip(SizeType n) {
    CHECK_GT(n, 0);

    if (kUnchecked || LIKELY(position() + n < size_)) {
      if (LIKELY(n < kLinearScanThreshold)) {
        for (SizeType i = 0; i < n; ++i) {
          upper_.next();
        }
      } else {
        upper_.skip(n);
      }
      value_ =
          readLowerPart(upper_.position()) | (upper_.value() << numLowerBits_);
      return true;
    }

    return setDone();
  }
646 
  // Skips forward to the first element >= value. Requires value >=
  // value_ (also valid when value_ == kInvalidValue). Returns false and
  // marks the reader done when no such element exists (checked only).
  bool skipTo(ValueType value) {
    // Also works when value_ == kInvalidValue.
    if (value != kInvalidValue) {
      DCHECK_GE(value + 1, value_ + 1);
    }

    if (!kUnchecked && value > lastValue_) {
      return setDone();
    } else if (value == value_) {
      return true;
    }

    ValueType upperValue = (value >> numLowerBits_);
    ValueType upperSkip = upperValue - upper_.value();
    // The average density of ones in upper bits is 1/2.
    // LIKELY here seems to make things worse, even for small skips.
    if (upperSkip < 2 * kLinearScanThreshold) {
      do {
        upper_.next();
      } while (UNLIKELY(upper_.value() < upperValue));
    } else {
      upper_.skipToNext(upperValue);
    }

    // Fine-grained walk until value_ >= value.
    iterateTo(value);
    return true;
  }
674 
  // Const prefetch-only companion of skipTo(value): prefetches the
  // cache lines a subsequent skipTo(value) would touch. No state change.
  void prepareSkipTo(ValueType value) const {
    // Also works when value_ == kInvalidValue.
    if (value != kInvalidValue) {
      DCHECK_GE(value + 1, value_ + 1);
    }

    if ((!kUnchecked && value > lastValue_) || (value == value_)) {
      return;
    }

    // Do minimal computation required to prefetch address used in
    // `readLowerPart()`.
    ValueType upperValue = (value >> numLowerBits_);
    const auto upperPosition = upper_.prepareSkipTo(upperValue);
    const auto addr = lower_ + (upperPosition * numLowerBits_ / 8);
    __builtin_prefetch(addr);
    __builtin_prefetch(addr + kCacheLineSize);
  }
697 
  // Random access: positions the reader on the n-th (0-based) element.
  bool jump(SizeType n) {
    if (LIKELY(n < size_)) { // Also checks that n != -1.
      value_ = readLowerPart(n) | (upper_.jump(n + 1) << numLowerBits_);
      return true;
    }
    return setDone();
  }
705 
  // Random access by value: positions the reader on the first element
  // >= value, searching from the beginning of the list.
  bool jumpTo(ValueType value) {
    if (!kUnchecked && value > lastValue_) {
      return setDone();
    }

    upper_.jumpToNext(value >> numLowerBits_);
    iterateTo(value);
    return true;
  }
715 
  // Largest element of the list (only available in checked readers,
  // since unchecked readers skip computing it).
  ValueType lastValue() const {
    CHECK(!kUnchecked);
    return lastValue_;
  }
720 
  // Returns the element before the current one without moving the reader.
  ValueType previousValue() const {
    DCHECK_GT(position(), 0);
    DCHECK_LT(position(), size());
    return readLowerPart(upper_.position() - 1) |
        (upper_.previousValue() << numLowerBits_);
  }
727 
  // Number of elements in the list.
  SizeType size() const {
    return size_;
  }

  // True while the reader is positioned on an element.
  bool valid() const {
    return position() < size(); // Also checks that position() != -1.
  }

  // Index of the current element.
  SizeType position() const {
    return upper_.position();
  }
  // Current element; only meaningful while valid().
  ValueType value() const {
    DCHECK(valid());
    return value_;
  }
743 
744  private:
745  // Must hold kInvalidValue + 1 == 0.
746  constexpr static ValueType kInvalidValue =
748 
  // Marks the reader exhausted (value_ = sentinel, position past the
  // end). Always returns false for convenient tail-calling.
  bool setDone() {
    value_ = kInvalidValue;
    upper_.setDone(size_);
    return false;
  }
754 
  // Extracts the numLowerBits_-wide lower part of the i-th element via
  // an unaligned 64-bit load (relies on 7 readable bytes past the
  // lower sequence, guaranteed by the encoder's allocation).
  ValueType readLowerPart(SizeType i) const {
    DCHECK_LT(i, size_);
    const size_t pos = i * numLowerBits_;
    const unsigned char* ptr = lower_ + (pos / 8);
    const uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
    // This removes the branch in the fallback implementation of
    // bzhi. The condition is verified at encoding time.
    assume(numLowerBits_ < sizeof(ValueType) * 8);
    return Instructions::bzhi(ptrv >> (pos % 8), numLowerBits_);
  }
765 
766  void iterateTo(ValueType value) {
767  while (true) {
768  value_ =
769  readLowerPart(upper_.position()) | (upper_.value() << numLowerBits_);
770  if (LIKELY(value_ >= value)) {
771  break;
772  }
773  upper_.next();
774  }
775  }
776 
777  constexpr static size_t kLinearScanThreshold = 8;
778 
780  const uint8_t* lower_;
781  SizeType size_;
782  ValueType value_ = kInvalidValue;
783  ValueType lastValue_;
785 };
786 
787 } // namespace compression
788 } // namespace folly
constexpr unsigned int popcount(T const v)
Definition: Bits.h:130
void * ptr
EliasFanoReader(const typename Encoder::CompressedList &list)
void prepareSkipTo(ValueType value) const
BitVectorEncoder< uint32_t, uint32_t, 128, 128 > Encoder
static Layout fromUpperBoundAndSize(size_t upperBound, size_t size)
LogLevel max
Definition: LogLevel.cpp:31
PskType type
static Layout fromInternalSizes(uint8_t numLowerBits, size_t upper, size_t size)
#define LIKELY(x)
Definition: Likely.h:47
void advance(size_type n)
Definition: Range.h:672
dest
Definition: upload.py:394
auto begin(TestAdlIterable &instance)
Definition: ForeachTest.cpp:56
folly::std T
constexpr auto kIsLittleEndian
Definition: Portability.h:278
EliasFanoCompressedListBase< const uint8_t * > EliasFanoCompressedList
—— Concurrent Priority Queue Implementation ——
Definition: AtomicBitSet.h:29
detail::Skip skip(size_t count)
Definition: Base-inl.h:2598
EliasFanoCompressedListBase< typename Range::iterator > openList(Range &buf) const
ValueType readLowerPart(SizeType i) const
MutableCompressedList allocList() const
MutableEliasFanoCompressedList MutableCompressedList
EliasFanoEncoderV2(const MutableCompressedList &result)
constexpr size_t kCacheLineSize
EliasFanoCompressedListBase(const EliasFanoCompressedListBase< OtherPointer > &other)
static void writeBits56(unsigned char *data, size_t pos, uint8_t len, uint64_t value)
LogLevel min
Definition: LogLevel.cpp:30
bool Value(const T &value, M matcher)
void getPreviousInfo(block_t &block, size_t &inner, OuterType &outer) const
auto end(TestAdlIterable &instance)
Definition: ForeachTest.cpp:62
constexpr Iter data() const
Definition: Range.h:446
Range subpiece(size_type first, size_type length=npos) const
Definition: Range.h:686
Encoder::MutableCompressedList list
constexpr unsigned int findLastSet(T const v)
Definition: Bits.h:105
SizeType prepareSkipTo(ValueType v) const
static const char *const value
Definition: Conv.cpp:50
EliasFanoCompressedList CompressedList
void free()
typename std::common_type< ValueType, SizeType >::type OuterType
UpperBitsReader(const typename Encoder::CompressedList &list)
constexpr Iter end() const
Definition: Range.h:455
constexpr Iter begin() const
Definition: Range.h:452
uintptr_t start_
detail::UpperBitsReader< Encoder, Instructions, SizeType > upper_
static MutableCompressedList encode(RandomAccessIterator begin, RandomAccessIterator end)
static uint8_t defaultNumLowerBits(size_t upperBound, size_t size)
uint64_t value(const typename LockFreeRingBuffer< T, Atom >::Cursor &rbcursor)
#define UNLIKELY(x)
Definition: Likely.h:48
FOLLY_ALWAYS_INLINE void assume(bool cond)
Definition: Assume.h:41
KeyT k
auto free() -> decltype(::free(T(nullptr)))
EliasFanoCompressedListBase< uint8_t * > MutableEliasFanoCompressedList
ThreadPoolListHook * addr
EliasFanoEncoderV2(size_t size, ValueType upperBound)
const MutableCompressedList & finish() const
def next(obj)
Definition: ast.py:58