// Excerpts from folly/io/IOBufQueue.h: WritableRangeCacheData and WritableRangeCache.
WritableRangeCacheData(WritableRangeCacheData&& other)
    : cachedRange(other.cachedRange), attached(other.attached) {
  other.cachedRange = {};
  other.attached = false;
}
WritableRangeCacheData& operator=(WritableRangeCacheData&& other) {
  cachedRange = other.cachedRange;
  attached = other.attached;
  other.cachedRange = {};
  other.attached = false;
  return *this;
}
// WritableRangeCache move operations transfer the cached range, but must
// update the back-reference held by the IOBufQueue.
WritableRangeCache(WritableRangeCache&& other)
    : data_(std::move(other.data_)), queue_(other.queue_) {
  if (data_.attached) {
    queue_->updateCacheRef(data_);
  }
}
WritableRangeCache& operator=(WritableRangeCache&& other) {
  if (data_.attached) {
    queue_->clearWritableRangeCache();
  }
  data_ = std::move(other.data_);
  queue_ = other.queue_;
  if (data_.attached) {
    queue_->updateCacheRef(data_);
  }
  return *this;
}
// Copy operations cannot carry the cached range over; only the queue
// pointer is copied.
WritableRangeCache(const WritableRangeCache& other) : queue_(other.queue_) {}
WritableRangeCache& operator=(const WritableRangeCache& other) {
  if (data_.attached) {
    queue_->clearWritableRangeCache();
  }
  queue_ = other.queue_;
  return *this;
}
~WritableRangeCache() {
  if (data_.attached) {
    queue_->clearWritableRangeCache();
  }
}
// Reset the underlying IOBufQueue, flushing the cache if present.
void reset(IOBufQueue* q) {
  if (data_.attached) {
    queue_->clearWritableRangeCache();
  }
  queue_ = q;
}
// Return the start of the cached writable tail (does not populate the cache).
uint8_t* writableData() { return data_.cachedRange.first; }
// Return the length of the cached writable tail.
size_t length() { return data_.cachedRange.second - data_.cachedRange.first; }
// Mark n bytes as occupied (commit); fall back to the slow path when the
// cache is empty.
void append(size_t n) {
  if (LIKELY(data_.cachedRange.first != nullptr)) {
    DCHECK_LE(n, length());
    data_.cachedRange.first += n;
  } else {
    appendSlow(n);
  }
}
// Same as append(n), assuming the caller knows the cache is filled.
void appendUnsafe(size_t n) { data_.cachedRange.first += n; }
// Fill the writable-tail cache from the underlying IOBufQueue.
void fillCache() { queue_->fillWritableRangeCache(data_); }
void dcheckIntegrity() {
  // We cannot be in the attached state if queue_ is not set.
  DCHECK(queue_ != nullptr || !data_.attached);
  // If attached and non-empty, the cached range must coincide with the
  // queue's tail buffer.
  DCHECK(
      !data_.attached || data_.cachedRange.first == nullptr ||
      (queue_->head_ != nullptr &&
       data_.cachedRange.first >= queue_->head_->prev()->writableTail() &&
       data_.cachedRange.second ==
           queue_->head_->prev()->writableTail() +
               queue_->head_->prev()->tailroom()));
}
// Return a pointer into the queue's headroom and the number of bytes
// available there for prepending.
std::pair<void*, std::size_t> headroom();
// Copy n bytes from buf to the front of the queue; the required headroom
// must already exist.
void prepend(const void* buf, std::size_t n);
// Add a buffer or buffer chain to the end of this queue; the queue takes
// ownership. If pack is true, data from the head of the appended chain may
// be copied into the existing tail to reduce wastage.
void append(std::unique_ptr<folly::IOBuf>&& buf, bool pack = false);
// Copy len bytes from buf to the end of the queue, allocating more buffer
// space if necessary.
void append(const void* buf, size_t len);
// Wrap an existing buffer as IOBufs and append them to the queue without
// copying, splitting it into blocks of at most blockSize bytes.
void wrapBuffer(const void* buf, size_t len, std::size_t blockSize = (1U << 31));
// Obtain a writable block of at least min contiguous bytes at the end of
// the queue, allocating newAllocationSize fresh bytes if none is available;
// returns the writable tail pointer and its usable length, capped at max.
std::pair<void*, std::size_t> preallocate(
    std::size_t min,
    std::size_t newAllocationSize,
    std::size_t max = std::numeric_limits<std::size_t>::max());
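preallocate() pairs with postallocate() (listed in the member index below)
in a reserve/commit pattern. A minimal sketch, assuming the signature
above; appendLine is a hypothetical helper:

#include <folly/io/IOBufQueue.h>
#include <cstring>

void appendLine(folly::IOBufQueue& queue, const char* text, size_t len) {
  auto space = queue.preallocate(len + 1, 4096);  // {tail pointer, usable bytes}
  std::memcpy(space.first, text, len);
  static_cast<char*>(space.first)[len] = '\n';
  queue.postallocate(len + 1);                    // commit only what was written
}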
// Split off the first n bytes of the queue into a separate IOBuf chain;
// throws if the queue holds fewer than n bytes.
std::unique_ptr<folly::IOBuf> split(size_t n) { return split(n, true); }
// Like split(n), but returns whatever is available (at most n bytes)
// instead of throwing on underflow.
std::unique_ptr<folly::IOBuf> splitAtMost(size_t n) { return split(n, false); }
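A short sketch of the difference between the two, assuming the semantics
described in the comments above:

folly::IOBufQueue queue;
queue.append("0123456789", 10);
auto head = queue.split(4);          // takes "0123"; split(20) would throw
auto rest = queue.splitAtMost(100);  // takes the remaining 6 bytes, no throw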
// Transfer ownership of the queue's entire IOBuf chain to the caller,
// leaving the queue empty.
std::unique_ptr<folly::IOBuf> move();
// Remove and return the IOBuf at the head of the chain.
std::unique_ptr<folly::IOBuf> pop_front();
// Total number of bytes in the queue; only available when chain-length
// caching was enabled via the constructor Options.
size_t chainLength() const {
  if (UNLIKELY(!options_.cacheChainLength)) {
    throw std::invalid_argument("IOBufQueue: chain length not cached");
  }
  return chainLength_;
}
// Ensure the first maxLength bytes of the queue are stored contiguously
// in the head buffer, coalescing buffers if needed.
void gather(std::size_t maxLength);
std::unique_ptr<folly::IOBuf> split(size_t n, bool throwOnUnderflow);
std::unique_ptr<folly::IOBuf> head_;
// In dcheckCacheIntegrity(): a non-empty local cache must end exactly at
// the tail buffer's capacity.
auto buf = head_->prev();
DCHECK_EQ(
    (void*)(buf->writableTail() + buf->tailroom()),
    (void*)localCache_.cachedRange.second);
// updateWritableTailCache() refreshes the cached writable range from the
// tail buffer whenever the queue is non-empty:
if (LIKELY(head_ != nullptr)) {
  // ... recompute localCache_.cachedRange from head_->prev() ...
}
// Out-of-line slow path for preallocate().
std::pair<void*, std::size_t> preallocateSlow(
    std::size_t min, std::size_t newAllocationSize, std::size_t max);
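Taken together, the append() overloads and move() support a build-then-hand-off
pattern. A minimal sketch; buildResponse and the message bytes are illustrative:

#include <folly/io/IOBuf.h>
#include <folly/io/IOBufQueue.h>

std::unique_ptr<folly::IOBuf> buildResponse() {
  folly::IOBufQueue out(folly::IOBufQueue::cacheChainLength());
  out.append("HTTP/1.1 200 OK\r\n", 17);
  out.append(folly::IOBuf::copyBuffer("Content-Length: 0\r\n\r\n"));
  return out.move();  // transfers the whole chain; the queue is now empty
}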
void append(IOBufQueue &&other, bool pack=false)
WritableRangeCacheData * cachePtr_
std::unique_ptr< folly::IOBuf > split(size_t n)
void appendUnsafe(size_t n)
const folly::IOBuf * front() const
void append(std::unique_ptr< folly::IOBuf > &&buf, bool pack=false)
size_t chainLength() const
std::pair< void *, std::size_t > preallocateSlow(std::size_t min, std::size_t newAllocationSize, std::size_t max)
void prepend(const void *buf, std::size_t n)
std::unique_ptr< folly::IOBuf > splitAtMost(size_t n)
WritableRangeCache & operator=(const WritableRangeCache &other)
WritableRangeCache(WritableRangeCache &&other)
void dcheckCacheIntegrity() const
WritableRangeCacheData & operator=(WritableRangeCacheData &&other)
WritableRangeCacheData data_
void clearWritableRangeCache()
void markPrepended(std::size_t n)
std::unique_ptr< folly::IOBuf > move()
void reset(IOBufQueue *q)
void append(StringPiece sp)
void appendToString(std::string &out) const
void * writableTail() const
const Options & options() const
std::size_t tailroom() const
std::pair< void *, std::size_t > preallocate(std::size_t min, std::size_t newAllocationSize, std::size_t max=std::numeric_limits< std::size_t >::max())
size_t trimStartAtMost(size_t amount)
WritableRangeCache & operator=(WritableRangeCache &&other)
void fillWritableRangeCache(WritableRangeCacheData &dest)
WritableRangeCacheData()=default
IOBufQueue(const Options &options=Options())
WritableRangeCacheData localCache_
static Options cacheChainLength()
static const size_t kChainLengthNotCached
std::pair< void *, std::size_t > headroom()
void wrapBuffer(const void *buf, size_t len, std::size_t blockSize=(1U<< 31))
std::unique_ptr< folly::IOBuf > head_
void * allocate(std::size_t n)
size_t trimEndAtMost(size_t amount)
FOLLY_NOINLINE void appendSlow(size_t n)
void updateWritableTailCache()
WritableRangeCache(folly::IOBufQueue *q=nullptr)
WritableRangeCache(const WritableRangeCache &other)
void trimStart(size_t amount)
WritableRangeCacheData(WritableRangeCacheData &&other)
std::pair< uint8_t *, uint8_t * > cachedRange
void trimEnd(size_t amount)
void postallocate(std::size_t n)
void gather(std::size_t maxLength)
std::unique_ptr< folly::IOBuf > pop_front()
void updateCacheRef(WritableRangeCacheData &newRef)
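The headroom-related members above (headroom, markPrepended, prepend) let a
caller write a payload first and prepend a header afterwards. A minimal
sketch, assuming IOBuf::advance() is used to reserve headroom; makeFramed
and the sizes are hypothetical:

#include <folly/io/IOBuf.h>
#include <folly/io/IOBufQueue.h>
#include <cstdint>

folly::IOBufQueue makeFramed() {
  folly::IOBufQueue q;
  auto buf = folly::IOBuf::create(64);
  buf->advance(16);               // reserve 16 bytes of headroom up front
  q.append(std::move(buf));
  q.append("payload", 7);
  uint32_t len = 7;
  q.prepend(&len, sizeof(len));   // length prefix lands in the headroom
  return q;
}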