/*
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif

#include <folly/io/IOBuf.h>

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <stdexcept>

#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Memory.h>
#include <folly/ScopeGuard.h>
#include <folly/hash/SpookyHashV2.h>
#include <folly/io/Cursor.h>
#include <folly/lang/Align.h>
#include <folly/memory/Malloc.h>

using std::unique_ptr;

namespace {

enum : uint16_t {
  kHeapMagic = 0xa5a5,
  // This memory segment contains an IOBuf that is still in use
  kIOBufInUse = 0x01,
  // This memory segment contains buffer data that is still in use
  kDataInUse = 0x02,
  // This memory segment contains a SharedInfo that is still in use
  kSharedInfoInUse = 0x04,
};

enum : std::size_t {
  // When create() is called for buffers less than kDefaultCombinedBufSize,
  // we allocate a single combined memory segment for the IOBuf and the data
  // together. See the comments for createCombined()/createSeparate() for more
  // details.
  //
  // (The size of 1k is largely just a guess here. We could probably do
  // benchmarks of real applications to see if adjusting this number makes a
  // difference. Callers that know their exact use case can also explicitly
  // call createCombined() or createSeparate().)
  kDefaultCombinedBufSize = 1024
};

// Helper function for IOBuf::takeOwnership()
void takeOwnershipError(
    bool freeOnError,
    void* buf,
    folly::IOBuf::FreeFunction freeFn,
    void* userData) {
  if (!freeOnError) {
    return;
  }
  if (!freeFn) {
    free(buf);
    return;
  }
  try {
    freeFn(buf, userData);
  } catch (...) {
    // The user's free function is not allowed to throw.
    // (We are already in the middle of throwing an exception, so
    // we cannot let this exception go unhandled.)
    abort();
  }
}

} // namespace

namespace folly {

struct IOBuf::HeapPrefix {
  explicit HeapPrefix(uint16_t flg) : magic(kHeapMagic), flags(flg) {}
  ~HeapPrefix() {
    // Reset magic to 0 on destruction. This is solely for debugging purposes
    // to help catch bugs where someone tries to use HeapStorage after it has
    // been deleted.
    magic = 0;
  }

  uint16_t magic;
  std::atomic<uint16_t> flags;
};

struct IOBuf::HeapStorage {
  HeapPrefix prefix;
  // The IOBuf is last in the HeapStorage object.
  // This way operator new will work even if allocating a subclass of IOBuf
  // that requires more space.
  folly::IOBuf buf;
};

struct IOBuf::HeapFullStorage {
  // Make sure jemalloc allocates from the 64-byte class. Putting this here
  // because HeapStorage is private so it can't be at namespace level.
  static_assert(sizeof(HeapStorage) <= 64, "IOBuf may not grow over 56 bytes!");

  HeapStorage hs;
  SharedInfo shared;
  folly::max_align_t align;
};
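
// For orientation, the combined allocation built from a HeapFullStorage looks
// roughly like this (a sketch, not normative; exact offsets depend on struct
// padding and goodMallocSize() rounding):
//
//   +------------+-------+------------+------------------------------+
//   | HeapPrefix | IOBuf | SharedInfo | buffer data (from `align` on) |
//   +------------+-------+------------+------------------------------+
//   '------- hs --------'
//
// createCombined() below hands out the region starting at &storage->align as
// the buffer, so a single malloc() backs the IOBuf object, its SharedInfo,
// and the data it points at.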

IOBuf::SharedInfo::SharedInfo()
    : freeFn(nullptr), userData(nullptr), useHeapFullStorage(false) {
  // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg, bool hfs)
    : freeFn(fn), userData(arg), useHeapFullStorage(hfs) {
  // Use relaxed memory ordering here. Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

void IOBuf::SharedInfo::releaseStorage(SharedInfo* info) {
  if (info->useHeapFullStorage) {
    auto* storageAddr =
        reinterpret_cast<uint8_t*>(info) - offsetof(HeapFullStorage, shared);
    auto* storage = reinterpret_cast<HeapFullStorage*>(storageAddr);
    info->~SharedInfo();
    IOBuf::releaseStorage(&storage->hs, kSharedInfoInUse);
  }
}

void* IOBuf::operator new(size_t size) {
  size_t fullSize = offsetof(HeapStorage, buf) + size;
  auto* storage = static_cast<HeapStorage*>(checkedMalloc(fullSize));

  new (&storage->prefix) HeapPrefix(kIOBufInUse);
  return &(storage->buf);
}

void* IOBuf::operator new(size_t /* size */, void* ptr) {
  return ptr;
}

void IOBuf::operator delete(void* ptr) {
  auto* storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
  auto* storage = reinterpret_cast<HeapStorage*>(storageAddr);
  releaseStorage(storage, kIOBufInUse);
}

void IOBuf::operator delete(void* /* ptr */, void* /* placement */) {
  // Provide a matching operator delete for the placement `IOBuf::operator
  // new` above, to avoid the MSVC compilation warning (C4291) about a memory
  // leak when an exception is thrown in the constructor.
}

void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) {
  CHECK_EQ(storage->prefix.magic, static_cast<uint16_t>(kHeapMagic));

  // Load with memory_order_acquire so that we see the most recent flags.
  // If we are unlucky and the value is already stale, the
  // compare_exchange_weak() call below will catch it and reload with
  // memory_order_acq_rel.
  auto flags = storage->prefix.flags.load(std::memory_order_acquire);
  DCHECK_EQ((flags & freeFlags), freeFlags);

  while (true) {
    uint16_t newFlags = uint16_t(flags & ~freeFlags);
    if (newFlags == 0) {
      // The storage space is now unused. Free it.
      storage->prefix.HeapPrefix::~HeapPrefix();
      free(storage);
      return;
    }

    // This storage segment still contains portions that are in use.
    // Just clear the flags specified in freeFlags for now.
    auto ret = storage->prefix.flags.compare_exchange_weak(
        flags, newFlags, std::memory_order_acq_rel);
    if (ret) {
      // We successfully updated the flags.
      return;
    }

    // We failed to update the flags. Some other thread probably updated them
    // and cleared some of the other bits. Continue around the loop to see if
    // we are the last user now, or if we need to try updating the flags again.
  }
}

void IOBuf::freeInternalBuf(void* /* buf */, void* userData) {
  auto* storage = static_cast<HeapStorage*>(userData);
  releaseStorage(storage, kDataInUse);
}
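
// To make the flag handshake concrete: a combined buffer starts out with
// kIOBufInUse | kDataInUse set. Destroying the IOBuf first clears only
// kIOBufInUse and leaves the segment alive, since the data may still be
// referenced through a clone; when the last data reference goes away,
// freeInternalBuf() clears kDataInUse, the flags reach zero, and
// releaseStorage() frees the segment. The same sequence works in either
// order. (This is an illustrative walkthrough of the code above, not an
// additional invariant.)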

IOBuf::IOBuf(CreateOp, std::size_t capacity)
    : next_(this),
      prev_(this),
      data_(nullptr),
      length_(0),
      flagsAndSharedInfo_(0) {
  SharedInfo* info;
  allocExtBuffer(capacity, &buf_, &info, &capacity_);
  setSharedInfo(info);
  data_ = buf_;
}

IOBuf::IOBuf(
    CopyBufferOp /* op */,
    const void* buf,
    std::size_t size,
    std::size_t headroom,
    std::size_t minTailroom)
    : IOBuf(CREATE, headroom + size + minTailroom) {
  advance(headroom);
  if (size > 0) {
    assert(buf != nullptr);
    memcpy(writableData(), buf, size);
    append(size);
  }
}

IOBuf::IOBuf(
    CopyBufferOp op,
    ByteRange br,
    std::size_t headroom,
    std::size_t minTailroom)
    : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {}

unique_ptr<IOBuf> IOBuf::create(std::size_t capacity) {
  // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
  // all with a single allocation.
  //
  // We don't do this for larger buffers since it can be wasteful if the user
  // needs to reallocate the buffer but keeps using the same IOBuf object.
  // In this case we can't free the data space until the IOBuf is also
  // destroyed. Callers can explicitly call createCombined() or
  // createSeparate() if they know their use case better, and know if they are
  // likely to reallocate the buffer later.
  if (capacity <= kDefaultCombinedBufSize) {
    return createCombined(capacity);
  }
  return createSeparate(capacity);
}
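
// Illustrative usage (a sketch, not part of this file): a freshly created
// IOBuf has length() == 0 and capacity() at least the requested size; data is
// written into the tail and then claimed with append().
//
//   auto buf = folly::IOBuf::create(64); // <= 1KiB, so a combined allocation
//   memcpy(buf->writableTail(), "hello", 5);
//   buf->append(5); // buf->length() is now 5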

unique_ptr<IOBuf> IOBuf::createCombined(std::size_t capacity) {
  // To save a memory allocation, allocate space for the IOBuf object, the
  // SharedInfo struct, and the data itself all with a single call to malloc().
  size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
  size_t mallocSize = goodMallocSize(requiredStorage);
  auto* storage = static_cast<HeapFullStorage*>(checkedMalloc(mallocSize));

  new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse);
  new (&storage->shared) SharedInfo(freeInternalBuf, storage);

  uint8_t* bufAddr = reinterpret_cast<uint8_t*>(&storage->align);
  uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
  size_t actualCapacity = size_t(storageEnd - bufAddr);
  unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
      InternalConstructor(),
      packFlagsAndSharedInfo(0, &storage->shared),
      bufAddr,
      actualCapacity,
      bufAddr,
      0));
  return ret;
}

unique_ptr<IOBuf> IOBuf::createSeparate(std::size_t capacity) {
  return std::make_unique<IOBuf>(CREATE, capacity);
}

unique_ptr<IOBuf> IOBuf::createChain(
    size_t totalCapacity,
    std::size_t maxBufCapacity) {
  unique_ptr<IOBuf> out =
      create(std::min(totalCapacity, size_t(maxBufCapacity)));
  size_t allocatedCapacity = out->capacity();

  while (allocatedCapacity < totalCapacity) {
    unique_ptr<IOBuf> newBuf = create(
        std::min(totalCapacity - allocatedCapacity, size_t(maxBufCapacity)));
    allocatedCapacity += newBuf->capacity();
    out->prependChain(std::move(newBuf));
  }

  return out;
}
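
// Illustrative usage (a sketch): createChain() splits a large capacity across
// several chained buffers, each no larger than maxBufCapacity.
//
//   // Roughly 1MiB of total capacity in about four chained 256KiB buffers:
//   auto chain = folly::IOBuf::createChain(1 << 20, 256 * 1024);
//   // The exact element count and sizes depend on goodMallocSize() rounding.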

IOBuf::IOBuf(
    TakeOwnershipOp,
    void* buf,
    std::size_t capacity,
    std::size_t length,
    FreeFunction freeFn,
    void* userData,
    bool freeOnError)
    : next_(this),
      prev_(this),
      data_(static_cast<uint8_t*>(buf)),
      buf_(static_cast<uint8_t*>(buf)),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(
          packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
  try {
    setSharedInfo(new SharedInfo(freeFn, userData));
  } catch (...) {
    takeOwnershipError(freeOnError, buf, freeFn, userData);
    throw;
  }
}

unique_ptr<IOBuf> IOBuf::takeOwnership(
    void* buf,
    std::size_t capacity,
    std::size_t length,
    FreeFunction freeFn,
    void* userData,
    bool freeOnError) {
  HeapFullStorage* storage = nullptr;
  try {
    size_t requiredStorage = sizeof(HeapFullStorage);
    size_t mallocSize = goodMallocSize(requiredStorage);
    storage = static_cast<HeapFullStorage*>(checkedMalloc(mallocSize));

    new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kSharedInfoInUse);
    new (&storage->shared)
        SharedInfo(freeFn, userData, true /*useHeapFullStorage*/);

    return unique_ptr<IOBuf>(new (&storage->hs.buf) IOBuf(
        InternalConstructor(),
        packFlagsAndSharedInfo(0, &storage->shared),
        static_cast<uint8_t*>(buf),
        capacity,
        static_cast<uint8_t*>(buf),
        length));
  } catch (...) {
    if (storage) {
      free(storage);
    }
    takeOwnershipError(freeOnError, buf, freeFn, userData);
    throw;
  }
}
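
// Illustrative usage (a sketch; the buffer and sizes are made up):
// takeOwnership() wraps caller-allocated memory and invokes the supplied
// free function once the last reference goes away. A captureless lambda
// converts to the FreeFunction pointer type.
//
//   void* raw = malloc(4096);
//   auto buf = folly::IOBuf::takeOwnership(
//       raw,
//       4096, // capacity
//       0, // length: no valid data yet
//       [](void* p, void* /* userData */) { free(p); });
//   // `raw` is now owned by the IOBuf; the lambda runs on last release.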

IOBuf::IOBuf(WrapBufferOp, const void* buf, std::size_t capacity) noexcept
    : IOBuf(
          InternalConstructor(),
          0,
          // We cast away the const-ness of the buffer here.
          // This is okay since IOBuf users must use unshare() to create a copy
          // of this buffer before writing to the buffer.
          static_cast<uint8_t*>(const_cast<void*>(buf)),
          capacity,
          static_cast<uint8_t*>(const_cast<void*>(buf)),
          capacity) {}

IOBuf::IOBuf(WrapBufferOp op, ByteRange br) noexcept
    : IOBuf(op, br.data(), br.size()) {}

unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, std::size_t capacity) {
  return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
}

IOBuf IOBuf::wrapBufferAsValue(const void* buf, std::size_t capacity) noexcept {
  return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity);
}
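
// Illustrative usage (a sketch): wrapBuffer() neither copies nor takes
// ownership, so the wrapped memory must outlive the IOBuf and every clone of
// it.
//
//   static const char kGreeting[] = "hello";
//   auto buf = folly::IOBuf::wrapBuffer(kGreeting, sizeof(kGreeting) - 1);
//   // Externally owned buffers are always treated as shared; writing
//   // requires unshare() first, which copies the data.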

IOBuf::IOBuf() noexcept {}

IOBuf::IOBuf(IOBuf&& other) noexcept
    : data_(other.data_),
      buf_(other.buf_),
      length_(other.length_),
      capacity_(other.capacity_),
      flagsAndSharedInfo_(other.flagsAndSharedInfo_) {
  // Reset other so it is in a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of a chain, assume ownership of the rest of its chain.
  // (It's only valid to move from the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);
}

IOBuf::IOBuf(const IOBuf& other) {
  *this = other.cloneAsValue();
}

IOBuf::IOBuf(
    InternalConstructor,
    uintptr_t flagsAndSharedInfo,
    uint8_t* buf,
    std::size_t capacity,
    uint8_t* data,
    std::size_t length) noexcept
    : next_(this),
      prev_(this),
      data_(data),
      buf_(buf),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(flagsAndSharedInfo) {
  assert(data >= buf);
  assert(data + length <= buf + capacity);
}

IOBuf::~IOBuf() {
  // Destroying an IOBuf destroys the entire chain.
  // Users of IOBuf should only explicitly delete the head of any chain.
  // The other elements in the chain will be automatically destroyed.
  while (next_ != this) {
    // Since unlink() returns a unique_ptr and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  decrementRefcount();
}

IOBuf& IOBuf::operator=(IOBuf&& other) noexcept {
  if (this == &other) {
    return *this;
  }

  // If we are part of a chain, delete the rest of the chain.
  while (next_ != this) {
    // Since unlink() returns a unique_ptr and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  // Decrement our refcount on the current buffer
  decrementRefcount();

  // Take ownership of the other buffer's data
  data_ = other.data_;
  buf_ = other.buf_;
  length_ = other.length_;
  capacity_ = other.capacity_;
  flagsAndSharedInfo_ = other.flagsAndSharedInfo_;
  // Reset other so it is in a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of a chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);

  return *this;
}

IOBuf& IOBuf::operator=(const IOBuf& other) {
  if (this != &other) {
    *this = IOBuf(other);
  }
  return *this;
}

bool IOBuf::empty() const {
  const IOBuf* current = this;
  do {
    if (current->length() != 0) {
      return false;
    }
    current = current->next_;
  } while (current != this);
  return true;
}

size_t IOBuf::countChainElements() const {
  size_t numElements = 1;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    ++numElements;
  }
  return numElements;
}

std::size_t IOBuf::computeChainDataLength() const {
  std::size_t fullLength = length_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullLength += current->length_;
  }
  return fullLength;
}

void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) {
  // Take ownership of the specified IOBuf
  IOBuf* other = iobuf.release();

  // Remember the pointer to the tail of the other chain
  IOBuf* otherTail = other->prev_;

  // Hook up prev_->next_ to point at the start of the other chain,
  // and other->prev_ to point at prev_
  prev_->next_ = other;
  other->prev_ = prev_;

  // Hook up otherTail->next_ to point at us,
  // and prev_ to point back at otherTail,
  otherTail->next_ = this;
  prev_ = otherTail;
}
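
// Despite the name, prependChain() splices the argument's chain in directly
// before `this` in the circular list, i.e. at the logical end of the chain
// headed by `this`. A quick sketch:
//
//   auto a = folly::IOBuf::create(16); // chain head
//   auto b = folly::IOBuf::create(16);
//   auto c = folly::IOBuf::create(16);
//   a->prependChain(std::move(b));
//   a->prependChain(std::move(c));
//   // Iteration order from the head is now: a -> b -> c -> (back to a).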

unique_ptr<IOBuf> IOBuf::clone() const {
  return std::make_unique<IOBuf>(cloneAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneOne() const {
  return std::make_unique<IOBuf>(cloneOneAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneCoalesced() const {
  return std::make_unique<IOBuf>(cloneCoalescedAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneCoalescedWithHeadroomTailroom(
    std::size_t newHeadroom,
    std::size_t newTailroom) const {
  return std::make_unique<IOBuf>(
      cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom));
}

IOBuf IOBuf::cloneAsValue() const {
  auto tmp = cloneOneAsValue();

  for (IOBuf* current = next_; current != this; current = current->next_) {
    tmp.prependChain(current->cloneOne());
  }

  return tmp;
}

IOBuf IOBuf::cloneOneAsValue() const {
  if (SharedInfo* info = sharedInfo()) {
    setFlags(kFlagMaybeShared);
    info->refcount.fetch_add(1, std::memory_order_acq_rel);
  }
  return IOBuf(
      InternalConstructor(),
      flagsAndSharedInfo_,
      buf_,
      capacity_,
      data_,
      length_);
}

IOBuf IOBuf::cloneCoalescedAsValue() const {
  const std::size_t newHeadroom = headroom();
  const std::size_t newTailroom = prev()->tailroom();
  return cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom);
}

IOBuf IOBuf::cloneCoalescedAsValueWithHeadroomTailroom(
    std::size_t newHeadroom,
    std::size_t newTailroom) const {
  if (!isChained()) {
    return cloneOneAsValue();
  }
  // Coalesce into newBuf
  const std::size_t newLength = computeChainDataLength();
  const std::size_t newCapacity = newLength + newHeadroom + newTailroom;
  IOBuf newBuf{CREATE, newCapacity};
  newBuf.advance(newHeadroom);

  auto current = this;
  do {
    if (current->length() > 0) {
      DCHECK_NOTNULL(current->data());
      DCHECK_LE(current->length(), newBuf.tailroom());
      memcpy(newBuf.writableTail(), current->data(), current->length());
      newBuf.append(current->length());
    }
    current = current->next();
  } while (current != this);

  DCHECK_EQ(newLength, newBuf.length());
  DCHECK_EQ(newHeadroom, newBuf.headroom());
  DCHECK_LE(newTailroom, newBuf.tailroom());

  return newBuf;
}
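
// Illustrative contrast (a sketch): cloneCoalesced() copies a chained source
// into a single fresh buffer and leaves the original chain untouched, while
// coalesce() rewrites the chain in place.
//
//   auto chain = folly::IOBuf::createChain(4096, 1024);
//   auto flat = chain->cloneCoalesced();
//   // flat->isChained() == false; chain is unchanged.
//   chain->coalesce();
//   // chain->isChained() == false as well, but its old buffers are gone.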

void IOBuf::unshareOneSlow() {
  // Allocate a new buffer for the data
  uint8_t* buf;
  SharedInfo* sharedInfo;
  std::size_t actualCapacity;
  allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);

  // Copy the data
  // Maintain the same amount of headroom. Since we maintained the same
  // minimum capacity we also maintain at least the same amount of tailroom.
  std::size_t headlen = headroom();
  if (length_ > 0) {
    assert(data_ != nullptr);
    memcpy(buf + headlen, data_, length_);
  }

  // Release our reference on the old buffer
  decrementRefcount();
  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, sharedInfo);

  // Update the buffer pointers to point to the new buffer
  data_ = buf + headlen;
  buf_ = buf;
}

void IOBuf::unshareChained() {
  // unshareChained() should only be called if we are part of a chain of
  // multiple IOBufs. The caller should have already verified this.
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    if (current->isSharedOne()) {
      // we have to unshare
      break;
    }

    current = current->next_;
    if (current == this) {
      // None of the IOBufs in the chain are shared,
      // so return without doing anything
      return;
    }
  }

  // We have to unshare. Let coalesceSlow() do the work.
  coalesceSlow();
}

void IOBuf::markExternallyShared() {
  IOBuf* current = this;
  do {
    current->markExternallySharedOne();
    current = current->next_;
  } while (current != this);
}

void IOBuf::makeManagedChained() {
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    current->makeManagedOne();
    current = current->next_;
    if (current == this) {
      break;
    }
  }
}

void IOBuf::coalesceSlow() {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs. The caller should have already verified this.
  DCHECK(isChained());

  // Compute the length of the entire chain
  std::size_t newLength = 0;
  IOBuf* end = this;
  do {
    newLength += end->length_;
    end = end->next_;
  } while (end != this);

  coalesceAndReallocate(newLength, end);
  // We should be the only element left in the chain now
  DCHECK(!isChained());
}

void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs. The caller should have already verified this.
  DCHECK(isChained());
  DCHECK_LT(length_, maxLength);

  // Compute the length of the entire chain
  std::size_t newLength = 0;
  IOBuf* end = this;
  while (true) {
    newLength += end->length_;
    end = end->next_;
    if (newLength >= maxLength) {
      break;
    }
    if (end == this) {
      throw std::overflow_error(
          "attempted to coalesce more data than available");
    }
  }

  coalesceAndReallocate(newLength, end);
  // We should have at least the requested length now
  DCHECK_GE(length_, maxLength);
}
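
// Worked example (illustrative): with a chain holding segments of 100, 200,
// and 300 bytes, coalesceSlow(250) walks the chain until the running total
// (100 + 200 = 300) reaches the requested 250 bytes, then coalesces just the
// first two segments; the 300-byte segment stays a separate chain element.
// Requesting more than the 600 bytes available would throw
// std::overflow_error.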

void IOBuf::coalesceAndReallocate(
    size_t newHeadroom,
    size_t newLength,
    IOBuf* end,
    size_t newTailroom) {
  std::size_t newCapacity = newLength + newHeadroom + newTailroom;

  // Allocate space for the coalesced buffer.
  // We always convert to an external buffer, even if we happened to be an
  // internal buffer before.
  uint8_t* newBuf;
  SharedInfo* newInfo;
  std::size_t actualCapacity;
  allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);

  // Copy the data into the new buffer
  uint8_t* newData = newBuf + newHeadroom;
  uint8_t* p = newData;
  IOBuf* current = this;
  size_t remaining = newLength;
  do {
    if (current->length_ > 0) {
      assert(current->length_ <= remaining);
      assert(current->data_ != nullptr);
      remaining -= current->length_;
      memcpy(p, current->data_, current->length_);
      p += current->length_;
    }
    current = current->next_;
  } while (current != end);
  assert(remaining == 0);

  // Point at the new buffer
  decrementRefcount();

  // Make sure kFlagMaybeShared and kFlagFreeSharedInfo are all cleared.
  setFlagsAndSharedInfo(0, newInfo);

  capacity_ = actualCapacity;
  buf_ = newBuf;
  data_ = newData;
  length_ = newLength;

  // Separate from the rest of our chain.
  // Since we don't store the unique_ptr returned by separateChain(),
  // this will immediately delete the returned subchain.
  if (isChained()) {
    (void)separateChain(next_, current->prev_);
  }
}

void IOBuf::decrementRefcount() {
  // Externally owned buffers don't have a SharedInfo object and aren't managed
  // by the reference count.
  SharedInfo* info = sharedInfo();
  if (!info) {
    return;
  }

  // Decrement the refcount
  uint32_t newcnt = info->refcount.fetch_sub(1, std::memory_order_acq_rel);
  // Note that fetch_sub() returns the value before we decremented.
  // If it is 1, we were the only remaining user; if it is greater, there are
  // still other users.
  if (newcnt > 1) {
    return;
  }

  // Save the useHeapFullStorage flag here, since freeExtBuffer() can delete
  // the storage that holds the SharedInfo.
  bool useHeapFullStorage = info->useHeapFullStorage;

  // We were the last user. Free the buffer.
  freeExtBuffer();

  // Free the SharedInfo if it was allocated separately.
  //
  // This is only used by takeOwnership().
  //
  // To avoid this special case handling in decrementRefcount(), we could have
  // takeOwnership() set a custom freeFn() that calls the user's free function
  // then frees the SharedInfo object. (This would require that
  // takeOwnership() store the user's free function with its allocated
  // SharedInfo object.) However, handling this specially with a flag seems
  // like it shouldn't be problematic.
  if (flags() & kFlagFreeSharedInfo) {
    delete info;
  } else if (useHeapFullStorage) {
    SharedInfo::releaseStorage(info);
  }
}

void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) {
  size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
  DCHECK_LT(newCapacity, UINT32_MAX);

  // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may
  // reallocate and free the original buffer. It should only ever be called if
  // we are the only user of the buffer.
  DCHECK(!isSharedOne());

  // We'll need to reallocate the buffer.
  // There are a few options.
  // - If we have enough total room, move the data around in the buffer
  //   and adjust the data_ pointer.
  // - If we're using an internal buffer, we'll switch to an external
  //   buffer with enough headroom and tailroom.
  // - If we have enough headroom (headroom() >= minHeadroom) but not too much
  //   (so we don't waste memory), we can try one of two things, depending on
  //   whether we use jemalloc or not:
  //   - If using jemalloc, we can try to expand in place, avoiding a memcpy()
  //   - If not using jemalloc and we don't have too much to copy,
  //     we'll use realloc() (note that realloc might have to copy
  //     headroom + data + tailroom; see smartRealloc in folly/memory/Malloc.h)
  // - Otherwise, bite the bullet and reallocate.
  if (headroom() + tailroom() >= minHeadroom + minTailroom) {
    uint8_t* newData = writableBuffer() + minHeadroom;
    memmove(newData, data_, length_);
    data_ = newData;
    return;
  }

  size_t newAllocatedCapacity = 0;
  uint8_t* newBuffer = nullptr;
  std::size_t newHeadroom = 0;
  std::size_t oldHeadroom = headroom();

  // If we have a buffer allocated with malloc and we just need more tailroom,
  // try to use realloc()/xallocx() to grow the buffer in place.
  SharedInfo* info = sharedInfo();
  bool useHeapFullStorage = info && info->useHeapFullStorage;
  if (info && (info->freeFn == nullptr) && length_ != 0 &&
      oldHeadroom >= minHeadroom) {
    size_t headSlack = oldHeadroom - minHeadroom;
    newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack);
    if (usingJEMalloc()) {
      // We assume that tailroom is more useful and more important than
      // headroom, not least because realloc/xallocx allow us to grow the
      // buffer at the tail, but not at the head. So, if we have more headroom
      // than we need, we consider that "wasted". We arbitrarily define "too
      // much" headroom as more than 25% of the capacity.
      if (headSlack * 4 <= newCapacity) {
        size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
        void* p = buf_;
        if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
          if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) {
            newBuffer = static_cast<uint8_t*>(p);
            newHeadroom = oldHeadroom;
          }
          // If xallocx failed, do nothing; fall back to malloc/memcpy/free.
        }
      }
    } else { // Not using jemalloc
      size_t copySlack = capacity() - length_;
      if (copySlack * 2 <= length_) {
        void* p = realloc(buf_, newAllocatedCapacity);
        if (UNLIKELY(p == nullptr)) {
          throw std::bad_alloc();
        }
        newBuffer = static_cast<uint8_t*>(p);
        newHeadroom = oldHeadroom;
      }
    }
  }

  // None of the previous reallocation strategies worked (or we're using
  // an internal buffer). malloc/copy/free.
  if (newBuffer == nullptr) {
    newAllocatedCapacity = goodExtBufferSize(newCapacity);
    newBuffer = static_cast<uint8_t*>(checkedMalloc(newAllocatedCapacity));
    if (length_ > 0) {
      assert(data_ != nullptr);
      memcpy(newBuffer + minHeadroom, data_, length_);
    }
    if (sharedInfo()) {
      freeExtBuffer();
    }
    newHeadroom = minHeadroom;
  }

  std::size_t cap;
  initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  } else if (useHeapFullStorage) {
    SharedInfo::releaseStorage(sharedInfo());
  }

  setFlagsAndSharedInfo(0, info);
  capacity_ = cap;
  buf_ = newBuffer;
  data_ = newBuffer + newHeadroom;
  // length_ is unchanged
}
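
// Illustrative usage (a sketch): reserve() only falls through to
// reserveSlow() when the existing headroom/tailroom cannot satisfy the
// request.
//
//   auto buf = folly::IOBuf::create(64);
//   buf->reserve(0, 4096); // likely reallocates: not enough tailroom
//   // Afterwards buf->tailroom() >= 4096, and any existing data would have
//   // been preserved.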

void IOBuf::freeExtBuffer() {
  SharedInfo* info = sharedInfo();
  DCHECK(info);

  if (info->freeFn) {
    try {
      info->freeFn(buf_, info->userData);
    } catch (...) {
      // The user's free function should never throw. Otherwise we might
      // throw from the IOBuf destructor. Other code paths like coalesce()
      // also assume that decrementRefcount() cannot throw.
      abort();
    }
  } else {
    free(buf_);
  }
}

void IOBuf::allocExtBuffer(
    std::size_t minCapacity,
    uint8_t** bufReturn,
    SharedInfo** infoReturn,
    std::size_t* capacityReturn) {
  size_t mallocSize = goodExtBufferSize(minCapacity);
  uint8_t* buf = static_cast<uint8_t*>(checkedMalloc(mallocSize));
  initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);
  *bufReturn = buf;
}

size_t IOBuf::goodExtBufferSize(std::size_t minCapacity) {
  // Determine how much space we should allocate. We'll store the SharedInfo
  // for the external buffer just after the buffer itself. (We store it just
  // after the buffer rather than just before so that the code can still just
  // use free(buf_) to free the buffer.)
  size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo);
  // Add room for padding so that the SharedInfo will be aligned on an 8-byte
  // boundary.
  minSize = (minSize + 7) & ~7;

  // Use goodMallocSize() to bump up the capacity to a decent size to request
  // from malloc, so we can use all of the space that malloc will probably give
  // us anyway.
  return goodMallocSize(minSize);
}

void IOBuf::initExtBuffer(
    uint8_t* buf,
    size_t mallocSize,
    SharedInfo** infoReturn,
    std::size_t* capacityReturn) {
  // Find the SharedInfo storage at the end of the buffer
  // and construct the SharedInfo.
  uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
  SharedInfo* sharedInfo = new (infoStart) SharedInfo;

  *capacityReturn = std::size_t(infoStart - buf);
  *infoReturn = sharedInfo;
}
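
// The resulting layout of an external buffer allocation (a sketch; the exact
// padding depends on the 8-byte rounding above and on goodMallocSize()):
//
//   buf                             infoStart
//   |                               |
//   v                               v
//   +--------------- ... ----+-----+------------+
//   | buffer data (capacity) | pad | SharedInfo |
//   +--------------- ... ----+-----+------------+
//   '------------------ mallocSize -------------'
//
// Because the SharedInfo lives past the reported capacity, free(buf_) in
// freeExtBuffer() releases the whole region in one call.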

fbstring IOBuf::moveToFbString() {
  // We need to save useHeapFullStorage here, since sharedInfo() may no longer
  // be valid after the fbstring takes over the buffer below.
  bool useHeapFullStorage = false;
  // malloc-allocated buffers are just fine; everything else needs
  // to be turned into one.
  if (!sharedInfo() || // user owned, not ours to give up
      sharedInfo()->freeFn || // not malloc()-ed
      headroom() != 0 || // malloc()-ed block doesn't start at beginning
      tailroom() == 0 || // no room for NUL terminator
      isShared() || // shared
      isChained()) { // chained
    // We might as well get rid of all head and tailroom if we're going
    // to reallocate; we need 1 byte for the NUL terminator.
    coalesceAndReallocate(0, computeChainDataLength(), this, 1);
  } else {
    // If we do not call coalesceAndReallocate(),
    // we might need to call SharedInfo::releaseStorage().
    useHeapFullStorage = sharedInfo() && sharedInfo()->useHeapFullStorage;
  }

  // Ensure NUL terminated
  *writableTail() = 0;
  fbstring str(
      reinterpret_cast<char*>(writableData()),
      length(),
      capacity(),
      AcquireMallocatedString());

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  } else if (useHeapFullStorage) {
    SharedInfo::releaseStorage(sharedInfo());
  }

  // Reset to a state where we can be deleted cleanly
  flagsAndSharedInfo_ = 0;
  buf_ = nullptr;
  clear();
  return str;
}
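
// Illustrative usage (a sketch): moveToFbString() hands the underlying
// malloc()-ed block to an fbstring without copying when the conditions above
// hold (otherwise it coalesces first), and leaves the IOBuf empty either way.
//
//   auto buf = folly::IOBuf::create(32);
//   memcpy(buf->writableTail(), "abc", 3);
//   buf->append(3);
//   folly::fbstring s = buf->moveToFbString(); // s == "abc"
//   // buf is now empty: buf->length() == 0.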

IOBuf::Iterator IOBuf::cbegin() const {
  return Iterator(this, this);
}

IOBuf::Iterator IOBuf::cend() const {
  return Iterator(nullptr, nullptr);
}

folly::fbvector<struct iovec> IOBuf::getIov() const {
  folly::fbvector<struct iovec> iov;
  iov.reserve(countChainElements());
  appendToIov(&iov);
  return iov;
}

void IOBuf::appendToIov(folly::fbvector<struct iovec>* iov) const {
  IOBuf const* p = this;
  do {
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov->push_back({(void*)p->data(), folly::to<size_t>(p->length())});
    }
    p = p->next();
  } while (p != this);
}

unique_ptr<IOBuf> IOBuf::wrapIov(const iovec* vec, size_t count) {
  unique_ptr<IOBuf> result = nullptr;
  for (size_t i = 0; i < count; ++i) {
    size_t len = vec[i].iov_len;
    void* data = vec[i].iov_base;
    if (len > 0) {
      auto buf = wrapBuffer(data, len);
      if (!result) {
        result = std::move(buf);
      } else {
        result->prependChain(std::move(buf));
      }
    }
  }
  if (UNLIKELY(result == nullptr)) {
    return create(0);
  }
  return result;
}
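
// Illustrative usage (a sketch): wrapIov() builds a non-owning chain with one
// element per non-empty iovec; the caller keeps ownership of the memory.
//
//   char a[] = "foo";
//   char b[] = "bar";
//   iovec vec[] = {{a, 3}, {b, 3}};
//   auto chain = folly::IOBuf::wrapIov(vec, 2);
//   // chain->computeChainDataLength() == 6; a and b must outlive the chain.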

std::unique_ptr<IOBuf> IOBuf::takeOwnershipIov(
    const iovec* vec,
    size_t count,
    FreeFunction freeFn,
    void* userData,
    bool freeOnError) {
  unique_ptr<IOBuf> result = nullptr;
  for (size_t i = 0; i < count; ++i) {
    size_t len = vec[i].iov_len;
    void* data = vec[i].iov_base;
    if (len > 0) {
      auto buf = takeOwnership(data, len, freeFn, userData, freeOnError);
      if (!result) {
        result = std::move(buf);
      } else {
        result->prependChain(std::move(buf));
      }
    }
  }
  if (UNLIKELY(result == nullptr)) {
    return create(0);
  }
  return result;
}

IOBuf::FillIovResult IOBuf::fillIov(struct iovec* iov, size_t len) const {
  IOBuf const* p = this;
  size_t i = 0;
  size_t totalBytes = 0;
  while (i < len) {
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov[i].iov_base = const_cast<uint8_t*>(p->data());
      iov[i].iov_len = p->length();
      totalBytes += p->length();
      i++;
    }
    p = p->next();
    if (p == this) {
      return {i, totalBytes};
    }
  }
  // We ran out of iovec entries before covering the whole chain; signal
  // failure by reporting zero entries filled and zero bytes.
  return {0, 0};
}
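
// Illustrative usage (a sketch; field names per the accompanying header):
// fillIov() fails as a whole rather than truncating, so size the iovec array
// from countChainElements() first.
//
//   std::vector<struct iovec> iov(buf->countChainElements());
//   auto res = buf->fillIov(iov.data(), iov.size());
//   if (res.numIovecs == 0 && buf->computeChainDataLength() != 0) {
//     // Not enough iovec entries were supplied.
//   }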

size_t IOBufHash::operator()(const IOBuf& buf) const noexcept {
  folly::hash::SpookyHashV2 hasher;
  hasher.Init(0, 0);
  io::Cursor cursor(&buf);
  for (;;) {
    auto b = cursor.peekBytes();
    if (b.empty()) {
      break;
    }
    hasher.Update(b.data(), b.size());
    cursor.skip(b.size());
  }
  uint64_t h1;
  uint64_t h2;
  hasher.Final(&h1, &h2);
  return static_cast<std::size_t>(h1);
}

ordering IOBufCompare::impl(const IOBuf& a, const IOBuf& b) const noexcept {
  io::Cursor ca(&a);
  io::Cursor cb(&b);
  for (;;) {
    auto ba = ca.peekBytes();
    auto bb = cb.peekBytes();
    if (ba.empty() || bb.empty()) {
      return to_ordering(int(bb.empty()) - int(ba.empty()));
    }
    const size_t n = std::min(ba.size(), bb.size());
    DCHECK_GT(n, 0u);
    const ordering r = to_ordering(std::memcmp(ba.data(), bb.data(), n));
    if (r != ordering::eq) {
      return r;
    }
    // Cursor::skip() may throw if n is too large, but n is not too large here
    ca.skip(n);
    cb.skip(n);
  }
}

} // namespace folly