IOBuf.cpp
/*
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif

#include <folly/io/IOBuf.h>

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <stdexcept>

#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Memory.h>
#include <folly/ScopeGuard.h>
#include <folly/hash/SpookyHashV2.h>
#include <folly/io/Cursor.h>
#include <folly/lang/Align.h>
#include <folly/memory/Malloc.h>

using std::unique_ptr;

namespace {

enum : uint16_t {
  kHeapMagic = 0xa5a5,
  // This memory segment contains an IOBuf that is still in use
  kIOBufInUse = 0x01,
  // This memory segment contains buffer data that is still in use
  kDataInUse = 0x02,
};

enum : std::size_t {
  // When create() is called for buffers less than kDefaultCombinedBufSize,
  // we allocate a single combined memory segment for the IOBuf and the data
  // together. See the comments for createCombined()/createSeparate() for more
  // details.
  //
  // (The size of 1k is largely just a guess here. We could probably do
  // benchmarks of real applications to see if adjusting this number makes a
  // difference. Callers that know their exact use case can also explicitly
  // call createCombined() or createSeparate().)
  kDefaultCombinedBufSize = 1024
};
61 
62 // Helper function for IOBuf::takeOwnership()
63 void takeOwnershipError(
64  bool freeOnError,
65  void* buf,
67  void* userData) {
68  if (!freeOnError) {
69  return;
70  }
71  if (!freeFn) {
72  free(buf);
73  return;
74  }
75  try {
76  freeFn(buf, userData);
77  } catch (...) {
78  // The user's free function is not allowed to throw.
79  // (We are already in the middle of throwing an exception, so
80  // we cannot let this exception go unhandled.)
81  abort();
82  }
83 }
84 
85 } // namespace
86 
namespace folly {

struct IOBuf::HeapPrefix {
  explicit HeapPrefix(uint16_t flg) : magic(kHeapMagic), flags(flg) {}
  ~HeapPrefix() {
    // Reset magic to 0 on destruction. This is solely for debugging purposes
    // to help catch bugs where someone tries to use HeapStorage after it has
    // been deleted.
    magic = 0;
  }

  uint16_t magic;
  std::atomic<uint16_t> flags;
};

struct IOBuf::HeapStorage {
  HeapPrefix prefix;
  // The IOBuf is last in the HeapStorage object.
  // This way operator new will work even if allocating a subclass of IOBuf
  // that requires more space.
  folly::IOBuf buf;
};

struct IOBuf::HeapFullStorage {
  // Make sure jemalloc allocates from the 64-byte class. Putting this here
  // because HeapStorage is private so it can't be at namespace level.
  static_assert(sizeof(HeapStorage) <= 64, "IOBuf may not grow over 56 bytes!");

  HeapStorage hs;
  SharedInfo shared;
  folly::max_align_t align;
};

IOBuf::SharedInfo::SharedInfo() : freeFn(nullptr), userData(nullptr) {
  // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg)
    : freeFn(fn), userData(arg) {
  // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

void* IOBuf::operator new(size_t size) {
  size_t fullSize = offsetof(HeapStorage, buf) + size;
  auto* storage = static_cast<HeapStorage*>(checkedMalloc(fullSize));

  new (&storage->prefix) HeapPrefix(kIOBufInUse);
  return &(storage->buf);
}

void* IOBuf::operator new(size_t /* size */, void* ptr) {
  return ptr;
}

void IOBuf::operator delete(void* ptr) {
  auto* storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
  auto* storage = reinterpret_cast<HeapStorage*>(storageAddr);
  releaseStorage(storage, kIOBufInUse);
}

void IOBuf::operator delete(void* /* ptr */, void* /* placement */) {
  // Provide matching operator for `IOBuf::new` to avoid MSVC compilation
  // warning (C4291) about memory leak when exception is thrown in the
  // constructor.
}

void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) {
  CHECK_EQ(storage->prefix.magic, static_cast<uint16_t>(kHeapMagic));

  // Use acquire memory order here. If we are unlucky and happen to get
  // out-of-date data, the compare_exchange_weak() call below will catch
  // it and load new data with memory_order_acq_rel.
  auto flags = storage->prefix.flags.load(std::memory_order_acquire);
  DCHECK_EQ((flags & freeFlags), freeFlags);

  while (true) {
    uint16_t newFlags = uint16_t(flags & ~freeFlags);
    if (newFlags == 0) {
      // The storage space is now unused. Free it.
      storage->prefix.HeapPrefix::~HeapPrefix();
      free(storage);
      return;
    }

    // This storage segment still contains portions that are in use.
    // Just clear the flags specified in freeFlags for now.
    auto ret = storage->prefix.flags.compare_exchange_weak(
        flags, newFlags, std::memory_order_acq_rel);
    if (ret) {
      // We successfully updated the flags.
      return;
    }

    // We failed to update the flags. Some other thread probably updated them
    // and cleared some of the other bits. Continue around the loop to see if
    // we are the last user now, or if we need to try updating the flags again.
  }
}

void IOBuf::freeInternalBuf(void* /* buf */, void* userData) {
  auto* storage = static_cast<HeapStorage*>(userData);
  releaseStorage(storage, kDataInUse);
}

IOBuf::IOBuf(CreateOp, std::size_t capacity)
    : next_(this),
      prev_(this),
      data_(nullptr),
      length_(0),
      flagsAndSharedInfo_(0) {
  SharedInfo* info;
  allocExtBuffer(capacity, &buf_, &info, &capacity_);
  setSharedInfo(info);
  data_ = buf_;
}

IOBuf::IOBuf(
    CopyBufferOp /* op */,
    const void* buf,
    std::size_t size,
    std::size_t headroom,
    std::size_t minTailroom)
    : IOBuf(CREATE, headroom + size + minTailroom) {
  advance(headroom);
  if (size > 0) {
    assert(buf != nullptr);
    memcpy(writableData(), buf, size);
    append(size);
  }
}

IOBuf::IOBuf(
    CopyBufferOp op,
    ByteRange br,
    std::size_t headroom,
    std::size_t minTailroom)
    : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {}

unique_ptr<IOBuf> IOBuf::create(std::size_t capacity) {
  // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
  // all with a single allocation.
  //
  // We don't do this for larger buffers since it can be wasteful if the user
  // needs to reallocate the buffer but keeps using the same IOBuf object.
  // In this case we can't free the data space until the IOBuf is also
  // destroyed. Callers can explicitly call createCombined() or
  // createSeparate() if they know their use case better, and know if they are
  // likely to reallocate the buffer later.
  if (capacity <= kDefaultCombinedBufSize) {
    return createCombined(capacity);
  }
  return createSeparate(capacity);
}
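
// Usage sketch (illustrative, not from the original file): create() picks
// the allocation strategy by size; both calls below go through it.
//
//   auto small = folly::IOBuf::create(128);  // combined allocation path
//   auto large = folly::IOBuf::create(4096); // separate allocation path
//   memcpy(small->writableTail(), "hi", 2);
//   small->append(2); // move the tail pointer past the bytes just written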

unique_ptr<IOBuf> IOBuf::createCombined(std::size_t capacity) {
  // To save a memory allocation, allocate space for the IOBuf object, the
  // SharedInfo struct, and the data itself all with a single call to malloc().
  size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
  size_t mallocSize = goodMallocSize(requiredStorage);
  auto* storage = static_cast<HeapFullStorage*>(checkedMalloc(mallocSize));

  new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse);
  new (&storage->shared) SharedInfo(freeInternalBuf, storage);

  uint8_t* bufAddr = reinterpret_cast<uint8_t*>(&storage->align);
  uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
  size_t actualCapacity = size_t(storageEnd - bufAddr);
  unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
      InternalConstructor(),
      packFlagsAndSharedInfo(0, &storage->shared),
      bufAddr,
      actualCapacity,
      bufAddr,
      0));
  return ret;
}

unique_ptr<IOBuf> IOBuf::createSeparate(std::size_t capacity) {
  return std::make_unique<IOBuf>(CREATE, capacity);
}

unique_ptr<IOBuf> IOBuf::createChain(
    size_t totalCapacity,
    std::size_t maxBufCapacity) {
  unique_ptr<IOBuf> out =
      create(std::min(totalCapacity, size_t(maxBufCapacity)));
  size_t allocatedCapacity = out->capacity();

  while (allocatedCapacity < totalCapacity) {
    unique_ptr<IOBuf> newBuf = create(
        std::min(totalCapacity - allocatedCapacity, size_t(maxBufCapacity)));
    allocatedCapacity += newBuf->capacity();
    out->prependChain(std::move(newBuf));
  }

  return out;
}
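
// Usage sketch (illustrative, not from the original file): createChain()
// returns the head of a circular chain whose capacities sum to at least
// totalCapacity, with no element larger than maxBufCapacity.
//
//   auto chain = folly::IOBuf::createChain(64 * 1024, 4096);
//   assert(chain->isChained());
//   assert(chain->computeChainDataLength() == 0); // capacity, not data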

IOBuf::IOBuf(
    TakeOwnershipOp,
    void* buf,
    std::size_t capacity,
    std::size_t length,
    FreeFunction freeFn,
    void* userData,
    bool freeOnError)
    : next_(this),
      prev_(this),
      data_(static_cast<uint8_t*>(buf)),
      buf_(static_cast<uint8_t*>(buf)),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(
          packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
  try {
    setSharedInfo(new SharedInfo(freeFn, userData));
  } catch (...) {
    takeOwnershipError(freeOnError, buf, freeFn, userData);
    throw;
  }
}

unique_ptr<IOBuf> IOBuf::takeOwnership(
    void* buf,
    std::size_t capacity,
    std::size_t length,
    FreeFunction freeFn,
    void* userData,
    bool freeOnError) {
  try {
    // TODO: We could allocate the IOBuf object and SharedInfo all in a single
    // memory allocation. We could use the existing HeapStorage class, and
    // define a new kSharedInfoInUse flag. We could change our code to call
    // releaseStorage(kFlagFreeSharedInfo) when this flag is set, rather than
    // directly calling delete.
    //
    // Note that we always pass freeOnError as false to the constructor.
    // If the constructor throws we'll handle it below. (We have to handle
    // allocation failures from std::make_unique too.)
    return std::make_unique<IOBuf>(
        TAKE_OWNERSHIP, buf, capacity, length, freeFn, userData, false);
  } catch (...) {
    takeOwnershipError(freeOnError, buf, freeFn, userData);
    throw;
  }
}
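
// Usage sketch (illustrative, not from the original file): handing a
// malloc()-ed buffer to an IOBuf. With the default freeFn of nullptr the
// buffer is released with free() when the last reference goes away.
//
//   void* raw = malloc(1024);
//   auto owned = folly::IOBuf::takeOwnership(raw, 1024, /* length = */ 0);
//   // `owned` now owns `raw`; do not call free(raw) yourself.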

IOBuf::IOBuf(WrapBufferOp, const void* buf, std::size_t capacity) noexcept
    : IOBuf(
          InternalConstructor(),
          0,
          // We cast away the const-ness of the buffer here.
          // This is okay since IOBuf users must use unshare() to create a copy
          // of this buffer before writing to the buffer.
          static_cast<uint8_t*>(const_cast<void*>(buf)),
          capacity,
          static_cast<uint8_t*>(const_cast<void*>(buf)),
          capacity) {}

IOBuf::IOBuf(WrapBufferOp op, ByteRange br) noexcept
    : IOBuf(op, br.data(), br.size()) {}

unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, std::size_t capacity) {
  return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
}

IOBuf IOBuf::wrapBufferAsValue(const void* buf, std::size_t capacity) noexcept {
  return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity);
}
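
// Usage sketch (illustrative, not from the original file): wrapping memory
// the caller continues to own. The IOBuf neither copies nor frees the bytes,
// so the wrapped storage must outlive the IOBuf and all of its clones.
//
//   static const char kGreeting[] = "hello";
//   auto wrapped = folly::IOBuf::wrapBuffer(kGreeting, sizeof(kGreeting) - 1);
//   assert(wrapped->length() == 5);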

IOBuf::IOBuf() noexcept {}

IOBuf::IOBuf(IOBuf&& other) noexcept
    : data_(other.data_),
      buf_(other.buf_),
      length_(other.length_),
      capacity_(other.capacity_),
      flagsAndSharedInfo_(other.flagsAndSharedInfo_) {
  // Reset other so it is a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of the chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);
}

IOBuf::IOBuf(const IOBuf& other) {
  *this = other.cloneAsValue();
}

IOBuf::IOBuf(
    InternalConstructor,
    uintptr_t flagsAndSharedInfo,
    uint8_t* buf,
    std::size_t capacity,
    uint8_t* data,
    std::size_t length) noexcept
    : next_(this),
      prev_(this),
      data_(data),
      buf_(buf),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(flagsAndSharedInfo) {
  assert(data >= buf);
  assert(data + length <= buf + capacity);
}

IOBuf::~IOBuf() {
  // Destroying an IOBuf destroys the entire chain.
  // Users of IOBuf should only explicitly delete the head of any chain.
  // The other elements in the chain will be automatically destroyed.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  decrementRefcount();
}

IOBuf& IOBuf::operator=(IOBuf&& other) noexcept {
  if (this == &other) {
    return *this;
  }

  // If we are part of a chain, delete the rest of the chain.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  // Decrement our refcount on the current buffer
  decrementRefcount();

  // Take ownership of the other buffer's data
  data_ = other.data_;
  buf_ = other.buf_;
  length_ = other.length_;
  capacity_ = other.capacity_;
  flagsAndSharedInfo_ = other.flagsAndSharedInfo_;
  // Reset other so it is a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of the chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);

  return *this;
}

IOBuf& IOBuf::operator=(const IOBuf& other) {
  if (this != &other) {
    *this = IOBuf(other);
  }
  return *this;
}

bool IOBuf::empty() const {
  const IOBuf* current = this;
  do {
    if (current->length() != 0) {
      return false;
    }
    current = current->next_;
  } while (current != this);
  return true;
}

size_t IOBuf::countChainElements() const {
  size_t numElements = 1;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    ++numElements;
  }
  return numElements;
}

std::size_t IOBuf::computeChainDataLength() const {
  std::size_t fullLength = length_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullLength += current->length_;
  }
  return fullLength;
}
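
// Usage sketch (illustrative, not from the original file): both accessors
// above walk the whole circular chain, so they are O(chain length).
//
//   auto head = folly::IOBuf::createChain(8 * 1024, 4096);
//   size_t elements = head->countChainElements();  // at least 2 here
//   size_t bytes = head->computeChainDataLength(); // 0 until data appended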

void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) {
  // Take ownership of the specified IOBuf
  IOBuf* other = iobuf.release();

  // Remember the pointer to the tail of the other chain
  IOBuf* otherTail = other->prev_;

  // Hook up prev_->next_ to point at the start of the other chain,
  // and other->prev_ to point at prev_
  prev_->next_ = other;
  other->prev_ = prev_;

  // Hook up otherTail->next_ to point at us,
  // and prev_ to point back at otherTail,
  otherTail->next_ = this;
  prev_ = otherTail;
}
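
// Usage sketch (illustrative, not from the original file): prependChain()
// splices the argument in immediately before this element; called on the
// head, that places the new buffers at the tail of the chain.
//
//   auto head = folly::IOBuf::create(16);
//   head->prependChain(folly::IOBuf::create(16));
//   assert(head->countChainElements() == 2);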

unique_ptr<IOBuf> IOBuf::clone() const {
  return std::make_unique<IOBuf>(cloneAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneOne() const {
  return std::make_unique<IOBuf>(cloneOneAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneCoalesced() const {
  return std::make_unique<IOBuf>(cloneCoalescedAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneCoalescedWithHeadroomTailroom(
    std::size_t newHeadroom,
    std::size_t newTailroom) const {
  return std::make_unique<IOBuf>(
      cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom));
}

IOBuf IOBuf::cloneAsValue() const {
  auto tmp = cloneOneAsValue();

  for (IOBuf* current = next_; current != this; current = current->next_) {
    tmp.prependChain(current->cloneOne());
  }

  return tmp;
}

IOBuf IOBuf::cloneOneAsValue() const {
  if (SharedInfo* info = sharedInfo()) {
    setFlags(kFlagMaybeShared);
    info->refcount.fetch_add(1, std::memory_order_acq_rel);
  }
  return IOBuf(
      InternalConstructor(),
      flagsAndSharedInfo_,
      buf_,
      capacity_,
      data_,
      length_);
}

IOBuf IOBuf::cloneCoalescedAsValue() const {
  const std::size_t newHeadroom = headroom();
  const std::size_t newTailroom = prev()->tailroom();
  return cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom);
}

IOBuf IOBuf::cloneCoalescedAsValueWithHeadroomTailroom(
    std::size_t newHeadroom,
    std::size_t newTailroom) const {
  if (!isChained()) {
    return cloneOneAsValue();
  }
  // Coalesce into newBuf
  const std::size_t newLength = computeChainDataLength();
  const std::size_t newCapacity = newLength + newHeadroom + newTailroom;
  IOBuf newBuf{CREATE, newCapacity};
  newBuf.advance(newHeadroom);

  auto current = this;
  do {
    if (current->length() > 0) {
      DCHECK_NOTNULL(current->data());
      DCHECK_LE(current->length(), newBuf.tailroom());
      memcpy(newBuf.writableTail(), current->data(), current->length());
      newBuf.append(current->length());
    }
    current = current->next();
  } while (current != this);

  DCHECK_EQ(newLength, newBuf.length());
  DCHECK_EQ(newHeadroom, newBuf.headroom());
  DCHECK_LE(newTailroom, newBuf.tailroom());

  return newBuf;
}
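
// Usage sketch (illustrative, not from the original file): clone() shares
// the underlying buffers and bumps refcounts (cheap, marks them
// maybe-shared), while cloneCoalesced() copies the chain into one buffer.
//
//   auto shared = head->clone();        // no data copy
//   auto flat = head->cloneCoalesced(); // single contiguous copy
//   assert(!flat->isChained());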

void IOBuf::unshareOneSlow() {
  // Allocate a new buffer for the data
  uint8_t* buf;
  SharedInfo* sharedInfo;
  std::size_t actualCapacity;
  allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);

  // Copy the data
  // Maintain the same amount of headroom. Since we maintained the same
  // minimum capacity we also maintain at least the same amount of tailroom.
  std::size_t headlen = headroom();
  if (length_ > 0) {
    assert(data_ != nullptr);
    memcpy(buf + headlen, data_, length_);
  }

  // Release our reference on the old buffer
  decrementRefcount();
  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, sharedInfo);

  // Update the buffer pointers to point to the new buffer
  data_ = buf + headlen;
  buf_ = buf;
}

void IOBuf::unshareChained() {
  // unshareChained() should only be called if we are part of a chain of
  // multiple IOBufs. The caller should have already verified this.
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    if (current->isSharedOne()) {
      // we have to unshare
      break;
    }

    current = current->next_;
    if (current == this) {
      // None of the IOBufs in the chain are shared,
      // so return without doing anything
      return;
    }
  }

  // We have to unshare. Let coalesceSlow() do the work.
  coalesceSlow();
}

void IOBuf::markExternallyShared() {
  IOBuf* current = this;
  do {
    current->markExternallySharedOne();
    current = current->next_;
  } while (current != this);
}

void IOBuf::makeManagedChained() {
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    current->makeManagedOne();
    current = current->next_;
    if (current == this) {
      break;
    }
  }
}

void IOBuf::coalesceSlow() {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs. The caller should have already verified this.
  DCHECK(isChained());

  // Compute the length of the entire chain
  std::size_t newLength = 0;
  IOBuf* end = this;
  do {
    newLength += end->length_;
    end = end->next_;
  } while (end != this);

  coalesceAndReallocate(newLength, end);
  // We should be the only element left in the chain now
  DCHECK(!isChained());
}

void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs. The caller should have already verified this.
  DCHECK(isChained());
  DCHECK_LT(length_, maxLength);

  // Compute the length of the entire chain
  std::size_t newLength = 0;
  IOBuf* end = this;
  while (true) {
    newLength += end->length_;
    end = end->next_;
    if (newLength >= maxLength) {
      break;
    }
    if (end == this) {
      throw std::overflow_error(
          "attempted to coalesce more data than available");
    }
  }

  coalesceAndReallocate(newLength, end);
  // We should have the requested length now
  DCHECK_GE(length_, maxLength);
}

void IOBuf::coalesceAndReallocate(
    size_t newHeadroom,
    size_t newLength,
    IOBuf* end,
    size_t newTailroom) {
  std::size_t newCapacity = newLength + newHeadroom + newTailroom;

  // Allocate space for the coalesced buffer.
  // We always convert to an external buffer, even if we happened to be an
  // internal buffer before.
  uint8_t* newBuf;
  SharedInfo* newInfo;
  std::size_t actualCapacity;
  allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);

  // Copy the data into the new buffer
  uint8_t* newData = newBuf + newHeadroom;
  uint8_t* p = newData;
  IOBuf* current = this;
  size_t remaining = newLength;
  do {
    if (current->length_ > 0) {
      assert(current->length_ <= remaining);
      assert(current->data_ != nullptr);
      remaining -= current->length_;
      memcpy(p, current->data_, current->length_);
      p += current->length_;
    }
    current = current->next_;
  } while (current != end);
  assert(remaining == 0);

  // Point at the new buffer
  decrementRefcount();

  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, newInfo);

  capacity_ = actualCapacity;
  buf_ = newBuf;
  data_ = newData;
  length_ = newLength;

  // Separate from the rest of our chain.
  // Since we don't store the unique_ptr returned by separateChain(),
  // this will immediately delete the returned subchain.
  if (isChained()) {
    (void)separateChain(next_, current->prev_);
  }
}

void IOBuf::decrementRefcount() {
  // Externally owned buffers don't have a SharedInfo object and aren't managed
  // by the reference count
  SharedInfo* info = sharedInfo();
  if (!info) {
    return;
  }

  // Decrement the refcount
  uint32_t newcnt = info->refcount.fetch_sub(1, std::memory_order_acq_rel);
  // Note that fetch_sub() returns the value before we decremented.
  // If it is 1, we were the only remaining user; if it is greater there are
  // still other users.
  if (newcnt > 1) {
    return;
  }

  // We were the last user. Free the buffer
  freeExtBuffer();

  // Free the SharedInfo if it was allocated separately.
  //
  // This is only used by takeOwnership().
  //
  // To avoid this special case handling in decrementRefcount(), we could have
  // takeOwnership() set a custom freeFn() that calls the user's free function
  // then frees the SharedInfo object. (This would require that
  // takeOwnership() store the user's free function with its allocated
  // SharedInfo object.) However, handling this specially with a flag seems
  // like it shouldn't be problematic.
  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }
}

void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) {
  size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
  DCHECK_LT(newCapacity, UINT32_MAX);

  // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may
  // reallocate and free the original buffer. It should only ever be called if
  // we are the only user of the buffer.
  DCHECK(!isSharedOne());

  // We'll need to reallocate the buffer.
  // There are a few options.
  // - If we have enough total room, move the data around in the buffer
  //   and adjust the data_ pointer.
  // - If we're using an internal buffer, we'll switch to an external
  //   buffer with enough headroom and tailroom.
  // - If we have enough headroom (headroom() >= minHeadroom) but not too much
  //   (so we don't waste memory), we can try one of two things, depending on
  //   whether we use jemalloc or not:
  //   - If using jemalloc, we can try to expand in place, avoiding a memcpy()
  //   - If not using jemalloc and we don't have too much to copy,
  //     we'll use realloc() (note that realloc might have to copy
  //     headroom + data + tailroom, see smartRealloc in folly/memory/Malloc.h)
  // - Otherwise, bite the bullet and reallocate.
  if (headroom() + tailroom() >= minHeadroom + minTailroom) {
    uint8_t* newData = writableBuffer() + minHeadroom;
    memmove(newData, data_, length_);
    data_ = newData;
    return;
  }

  size_t newAllocatedCapacity = 0;
  uint8_t* newBuffer = nullptr;
  std::size_t newHeadroom = 0;
  std::size_t oldHeadroom = headroom();

  // If we have a buffer allocated with malloc and we just need more tailroom,
  // try to use realloc()/xallocx() to grow the buffer in place.
  SharedInfo* info = sharedInfo();
  if (info && (info->freeFn == nullptr) && length_ != 0 &&
      oldHeadroom >= minHeadroom) {
    size_t headSlack = oldHeadroom - minHeadroom;
    newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack);
    if (usingJEMalloc()) {
      // We assume that tailroom is more useful and more important than
      // headroom (not least because realloc / xallocx allow us to grow the
      // buffer at the tail, but not at the head). So, if we have more headroom
      // than we need, we consider that "wasted". We arbitrarily define "too
      // much" headroom to be 25% of the capacity.
      if (headSlack * 4 <= newCapacity) {
        size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
        void* p = buf_;
        if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
          if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) {
            newBuffer = static_cast<uint8_t*>(p);
            newHeadroom = oldHeadroom;
          }
          // if xallocx failed, do nothing, fall back to malloc/memcpy/free
        }
      }
    } else { // Not using jemalloc
      size_t copySlack = capacity() - length_;
      if (copySlack * 2 <= length_) {
        void* p = realloc(buf_, newAllocatedCapacity);
        if (UNLIKELY(p == nullptr)) {
          throw std::bad_alloc();
        }
        newBuffer = static_cast<uint8_t*>(p);
        newHeadroom = oldHeadroom;
      }
    }
  }

  // None of the previous reallocation strategies worked (or we're using
  // an internal buffer). malloc/copy/free.
  if (newBuffer == nullptr) {
    newAllocatedCapacity = goodExtBufferSize(newCapacity);
    newBuffer = static_cast<uint8_t*>(checkedMalloc(newAllocatedCapacity));
    if (length_ > 0) {
      assert(data_ != nullptr);
      memcpy(newBuffer + minHeadroom, data_, length_);
    }
    if (sharedInfo()) {
      freeExtBuffer();
    }
    newHeadroom = minHeadroom;
  }

  std::size_t cap;
  initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }

  setFlagsAndSharedInfo(0, info);
  capacity_ = cap;
  buf_ = newBuffer;
  data_ = newBuffer + newHeadroom;
  // length_ is unchanged
}
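
// Usage sketch (illustrative, not from the original file): callers normally
// reach this path through the inline reserve(), which only falls back to
// reserveSlow() when the current headroom/tailroom is insufficient.
//
//   auto buf = folly::IOBuf::create(64);
//   buf->reserve(16, 1024); // at least 16 bytes headroom, 1024 tailroom
//   assert(buf->headroom() >= 16 && buf->tailroom() >= 1024);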

void IOBuf::freeExtBuffer() {
  SharedInfo* info = sharedInfo();
  DCHECK(info);

  if (info->freeFn) {
    try {
      info->freeFn(buf_, info->userData);
    } catch (...) {
      // The user's free function should never throw. Otherwise we might
      // throw from the IOBuf destructor. Other code paths like coalesce()
      // also assume that decrementRefcount() cannot throw.
      abort();
    }
  } else {
    free(buf_);
  }
}

void IOBuf::allocExtBuffer(
    std::size_t minCapacity,
    uint8_t** bufReturn,
    SharedInfo** infoReturn,
    std::size_t* capacityReturn) {
  size_t mallocSize = goodExtBufferSize(minCapacity);
  uint8_t* buf = static_cast<uint8_t*>(checkedMalloc(mallocSize));
  initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);
  *bufReturn = buf;
}

size_t IOBuf::goodExtBufferSize(std::size_t minCapacity) {
  // Determine how much space we should allocate. We'll store the SharedInfo
  // for the external buffer just after the buffer itself. (We store it just
  // after the buffer rather than just before so that the code can still just
  // use free(buf_) to free the buffer.)
  size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo);
  // Add room for padding so that the SharedInfo will be aligned on an 8-byte
  // boundary.
  minSize = (minSize + 7) & ~7;

  // Use goodMallocSize() to bump up the capacity to a decent size to request
  // from malloc, so we can use all of the space that malloc will probably give
  // us anyway.
  return goodMallocSize(minSize);
}

void IOBuf::initExtBuffer(
    uint8_t* buf,
    size_t mallocSize,
    SharedInfo** infoReturn,
    std::size_t* capacityReturn) {
  // Find the SharedInfo storage at the end of the buffer
  // and construct the SharedInfo.
  uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
  SharedInfo* sharedInfo = new (infoStart) SharedInfo;

  *capacityReturn = std::size_t(infoStart - buf);
  *infoReturn = sharedInfo;
}

fbstring IOBuf::moveToFbString() {
  // malloc-allocated buffers are just fine, everything else needs
  // to be turned into one.
  if (!sharedInfo() || // user owned, not ours to give up
      sharedInfo()->freeFn || // not malloc()-ed
      headroom() != 0 || // malloc()-ed block doesn't start at beginning
      tailroom() == 0 || // no room for NUL terminator
      isShared() || // shared
      isChained()) { // chained
    // We might as well get rid of all head and tailroom if we're going
    // to reallocate; we need 1 byte for NUL terminator.
    coalesceAndReallocate(0, computeChainDataLength(), this, 1);
  }

  // Ensure NUL terminated
  *writableTail() = 0;
  fbstring str(
      reinterpret_cast<char*>(writableData()),
      length(),
      capacity(),
      AcquireMallocatedString());

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  }

  // Reset to a state where we can be deleted cleanly
  flagsAndSharedInfo_ = 0;
  buf_ = nullptr;
  clear();
  return str;
}
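
// Usage sketch (illustrative, not from the original file): moveToFbString()
// destructively converts the buffer to a string, coalescing and
// reallocating first when the storage cannot be handed off as-is.
//
//   auto buf = folly::IOBuf::create(32);
//   memcpy(buf->writableTail(), "abc", 3);
//   buf->append(3);
//   folly::fbstring s = buf->moveToFbString(); // buf is left empty
//   assert(s == "abc");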

IOBuf::Iterator IOBuf::cbegin() const {
  return Iterator(this, this);
}

IOBuf::Iterator IOBuf::cend() const {
  return Iterator(nullptr, nullptr);
}

folly::fbvector<struct iovec> IOBuf::getIov() const {
  folly::fbvector<struct iovec> iov;
  iov.reserve(countChainElements());
  appendToIov(&iov);
  return iov;
}

void IOBuf::appendToIov(folly::fbvector<struct iovec>* iov) const {
  IOBuf const* p = this;
  do {
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov->push_back({(void*)p->data(), folly::to<size_t>(p->length())});
    }
    p = p->next();
  } while (p != this);
}

unique_ptr<IOBuf> IOBuf::wrapIov(const iovec* vec, size_t count) {
  unique_ptr<IOBuf> result = nullptr;
  for (size_t i = 0; i < count; ++i) {
    size_t len = vec[i].iov_len;
    void* data = vec[i].iov_base;
    if (len > 0) {
      auto buf = wrapBuffer(data, len);
      if (!result) {
        result = std::move(buf);
      } else {
        result->prependChain(std::move(buf));
      }
    }
  }
  if (UNLIKELY(result == nullptr)) {
    return create(0);
  }
  return result;
}

std::unique_ptr<IOBuf> IOBuf::takeOwnershipIov(
    const iovec* vec,
    size_t count,
    FreeFunction freeFn,
    void* userData,
    bool freeOnError) {
  unique_ptr<IOBuf> result = nullptr;
  for (size_t i = 0; i < count; ++i) {
    size_t len = vec[i].iov_len;
    void* data = vec[i].iov_base;
    if (len > 0) {
      auto buf = takeOwnership(data, len, freeFn, userData, freeOnError);
      if (!result) {
        result = std::move(buf);
      } else {
        result->prependChain(std::move(buf));
      }
    }
  }
  if (UNLIKELY(result == nullptr)) {
    return create(0);
  }
  return result;
}

size_t IOBuf::fillIov(struct iovec* iov, size_t len) const {
  IOBuf const* p = this;
  size_t i = 0;
  while (i < len) {
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov[i].iov_base = const_cast<uint8_t*>(p->data());
      iov[i].iov_len = p->length();
      i++;
    }
    p = p->next();
    if (p == this) {
      return i;
    }
  }
  // We ran out of iovec entries before reaching the end of the chain.
  return 0;
}
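
// Usage sketch (illustrative, not from the original file): exporting a chain
// for scatter/gather I/O. Note that fillIov() returns 0, not a partial
// count, when the chain has more non-empty elements than `len` allows.
//
//   auto iov = chain->getIov(); // one iovec per non-empty element
//   ssize_t n = ::writev(fd, iov.data(), iov.size()); // fd: your socket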

size_t IOBufHash::operator()(const IOBuf& buf) const noexcept {
  folly::hash::SpookyHashV2 hasher;
  hasher.Init(0, 0);
  io::Cursor cursor(&buf);
  for (;;) {
    auto b = cursor.peekBytes();
    if (b.empty()) {
      break;
    }
    hasher.Update(b.data(), b.size());
    cursor.skip(b.size());
  }
  uint64_t h1;
  uint64_t h2;
  hasher.Final(&h1, &h2);
  return static_cast<std::size_t>(h1);
}

ordering IOBufCompare::impl(IOBuf const& a, IOBuf const& b) const noexcept {
  io::Cursor ca(&a);
  io::Cursor cb(&b);
  for (;;) {
    auto ba = ca.peekBytes();
    auto bb = cb.peekBytes();
    if (ba.empty() || bb.empty()) {
      return to_ordering(int(bb.empty()) - int(ba.empty()));
    }
    const size_t n = std::min(ba.size(), bb.size());
    DCHECK_GT(n, 0u);
    const ordering r = to_ordering(std::memcmp(ba.data(), bb.data(), n));
    if (r != ordering::eq) {
      return r;
    }
    // Cursor::skip() may throw if n is too large, but n is not too large here
    ca.skip(n);
    cb.skip(n);
  }
}

} // namespace folly