1 | /* |
2 | * Copyright 2011-present Facebook, Inc. |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | */ |
16 | |
17 | /* |
18 | * Nicholas Ormrod (njormrod) |
19 | * Andrei Alexandrescu (aalexandre) |
20 | * |
21 | * FBVector is Facebook's drop-in implementation of std::vector. It has special |
22 | * optimizations for use with relocatable types and jemalloc. |
23 | */ |
24 | |
25 | #pragma once |
26 | |
27 | //============================================================================= |
28 | // headers |
29 | |
30 | #include <algorithm> |
31 | #include <cassert> |
32 | #include <iterator> |
33 | #include <memory> |
34 | #include <stdexcept> |
35 | #include <type_traits> |
36 | #include <utility> |
37 | |
38 | #include <folly/FormatTraits.h> |
39 | #include <folly/Likely.h> |
40 | #include <folly/Traits.h> |
41 | #include <folly/lang/Exception.h> |
42 | #include <folly/memory/Malloc.h> |
43 | |
44 | //============================================================================= |
45 | // forward declaration |
46 | |
47 | namespace folly { |
48 | template <class T, class Allocator = std::allocator<T>> |
49 | class fbvector; |
50 | } // namespace folly |
51 | |
52 | //============================================================================= |
53 | // unrolling |
54 | |
// Applies OP to every pointer in [first, last): four elements per iteration
// for the bulk of the range, then a simple loop for the tail. See the
// experimental notes in S_destroy_range for why unrolling is worthwhile.
// NOTE(review): the trailing ';' after `while (0)` defeats the do/while(0)
// idiom inside an unbraced if/else, but call sites invoke the macro without
// their own semicolon, so it cannot be removed without touching them all.
#define FOLLY_FBV_UNROLL_PTR(first, last, OP) \
  do {                                        \
    for (; (last) - (first) >= 4; (first) += 4) { \
      OP(((first) + 0));                      \
      OP(((first) + 1));                      \
      OP(((first) + 2));                      \
      OP(((first) + 3));                      \
    }                                         \
    for (; (first) != (last); ++(first))      \
      OP((first));                            \
  } while (0);
66 | |
67 | //============================================================================= |
68 | /////////////////////////////////////////////////////////////////////////////// |
69 | // // |
70 | // fbvector class // |
71 | // // |
72 | /////////////////////////////////////////////////////////////////////////////// |
73 | |
74 | namespace folly { |
75 | |
76 | template <class T, class Allocator> |
77 | class fbvector { |
78 | //=========================================================================== |
79 | //--------------------------------------------------------------------------- |
80 | // implementation |
81 | private: |
82 | typedef std::allocator_traits<Allocator> A; |
83 | |
  // Owns the buffer and the allocator (via EBO through inheritance).
  struct Impl : public Allocator {
    // typedefs
    typedef typename A::pointer pointer;
    typedef typename A::size_type size_type;

    // data
    // Three-pointer representation: [b_, e_) are the live elements and
    // [b_, z_) is the allocated capacity. All three are null for an empty,
    // unallocated vector.
    pointer b_, e_, z_;

    // constructors
    Impl() : Allocator(), b_(nullptr), e_(nullptr), z_(nullptr) {}
    /* implicit */ Impl(const Allocator& alloc)
        : Allocator(alloc), b_(nullptr), e_(nullptr), z_(nullptr) {}
    /* implicit */ Impl(Allocator&& alloc)
        : Allocator(std::move(alloc)), b_(nullptr), e_(nullptr), z_(nullptr) {}

    // Allocates capacity for at least n elements (init() rounds up to a
    // jemalloc-friendly size); the vector starts with zero live elements.
    /* implicit */ Impl(size_type n, const Allocator& alloc = Allocator())
        : Allocator(alloc) {
      init(n);
    }

    // Steals other's buffer; other is left empty and unallocated.
    Impl(Impl&& other) noexcept
        : Allocator(std::move(other)),
          b_(other.b_),
          e_(other.e_),
          z_(other.z_) {
      other.b_ = other.e_ = other.z_ = nullptr;
    }

    // destructor
    ~Impl() {
      destroy();
    }

    // allocation
    // note that 'allocate' and 'deallocate' are inherited from Allocator
    // With std::allocator the allocator is bypassed and malloc is called
    // directly (enables the jemalloc fast paths); otherwise defer to
    // allocator_traits.
    T* D_allocate(size_type n) {
      if (usingStdAllocator::value) {
        return static_cast<T*>(checkedMalloc(n * sizeof(T)));
      } else {
        return std::allocator_traits<Allocator>::allocate(*this, n);
      }
    }

    void D_deallocate(T* p, size_type n) noexcept {
      if (usingStdAllocator::value) {
        free(p);
      } else {
        std::allocator_traits<Allocator>::deallocate(*this, p, n);
      }
    }

    // helpers
    // Swaps only the data pointers; allocator state is deliberately left
    // alone (the caller decides whether allocators should be swapped).
    void swapData(Impl& other) {
      std::swap(b_, other.b_);
      std::swap(e_, other.e_);
      std::swap(z_, other.z_);
    }

    // data ops
    // Destroys all live elements and releases the buffer; no-op when the
    // vector never allocated.
    inline void destroy() noexcept {
      if (b_) {
        // THIS DISPATCH CODE IS DUPLICATED IN fbvector::D_destroy_range_a.
        // It has been inlined here for speed. It calls the static fbvector
        // methods to perform the actual destruction.
        if (usingStdAllocator::value) {
          S_destroy_range(b_, e_);
        } else {
          S_destroy_range_a(*this, b_, e_);
        }

        D_deallocate(b_, size_type(z_ - b_));
      }
    }

    // Allocates a fresh buffer with room for at least n elements, leaving
    // the vector empty. Capacity is rounded up to the allocation size the
    // underlying malloc will actually hand back.
    void init(size_type n) {
      if (UNLIKELY(n == 0)) {
        b_ = e_ = z_ = nullptr;
      } else {
        size_type sz = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
        b_ = D_allocate(sz);
        e_ = b_;
        z_ = b_ + sz;
      }
    }

    // Adopts an externally prepared buffer. Does not free the old one;
    // callers are responsible for prior cleanup.
    void set(pointer newB, size_type newSize, size_type newCap) {
      z_ = newB + newCap;
      e_ = newB + newSize;
      b_ = newB;
    }

    // Destroys current contents then reallocates capacity newCap. If the
    // allocation throws, the vector is left valid and empty before the
    // exception propagates.
    void reset(size_type newCap) {
      destroy();
      try {
        init(newCap);
      } catch (...) {
        init(0);
        throw;
      }
    }
    void reset() { // same as reset(0)
      destroy();
      b_ = e_ = z_ = nullptr;
    }
  } impl_;
189 | |
190 | static void swap(Impl& a, Impl& b) { |
191 | using std::swap; |
192 | if (!usingStdAllocator::value) { |
193 | swap(static_cast<Allocator&>(a), static_cast<Allocator&>(b)); |
194 | } |
195 | a.swapData(b); |
196 | } |
197 | |
198 | //=========================================================================== |
199 | //--------------------------------------------------------------------------- |
200 | // types and constants |
201 | public: |
  typedef T value_type;
  typedef value_type& reference;
  typedef const value_type& const_reference;
  typedef T* iterator;
  typedef const T* const_iterator;
  typedef size_t size_type;
  typedef typename std::make_signed<size_type>::type difference_type;
  typedef Allocator allocator_type;
  typedef typename A::pointer pointer;
  typedef typename A::const_pointer const_pointer;
  typedef std::reverse_iterator<iterator> reverse_iterator;
  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;

 private:
  // Small trivially-copyable types are cheaper passed by value than by
  // const reference.
  typedef bool_constant<
      is_trivially_copyable<T>::value &&
      sizeof(T) <= 16 // don't force large structures to be passed by value
      >
      should_pass_by_value;
  // VT/MT: parameter types used for copy-like / move-like value arguments,
  // degraded to plain T whenever pass-by-value is profitable.
  typedef
      typename std::conditional<should_pass_by_value::value, T, const T&>::type
          VT;
  typedef
      typename std::conditional<should_pass_by_value::value, T, T&&>::type MT;

  // True iff the allocator is exactly std::allocator<T>; gates all of the
  // malloc/jemalloc fast paths in this file.
  typedef bool_constant<std::is_same<Allocator, std::allocator<T>>::value>
      usingStdAllocator;
  // Move assignment may be implemented as a cheap pointer swap when
  // allocators are interchangeable or propagate on move assignment.
  typedef bool_constant<
      usingStdAllocator::value ||
      A::propagate_on_container_move_assignment::value>
      moveIsSwap;
233 | |
234 | //=========================================================================== |
235 | //--------------------------------------------------------------------------- |
236 | // allocator helpers |
237 | private: |
238 | //--------------------------------------------------------------------------- |
239 | // allocate |
240 | |
  // Allocates raw storage for n elements via the Impl dispatch.
  T* M_allocate(size_type n) {
    return impl_.D_allocate(n);
  }

  //---------------------------------------------------------------------------
  // deallocate

  // Returns raw storage previously obtained from M_allocate.
  void M_deallocate(T* p, size_type n) noexcept {
    impl_.D_deallocate(p, n);
  }
251 | |
252 | //--------------------------------------------------------------------------- |
253 | // construct |
254 | |
255 | // GCC is very sensitive to the exact way that construct is called. For |
256 | // that reason there are several different specializations of construct. |
257 | |
  // General form: placement-new on the std::allocator fast path, otherwise
  // defer to allocator_traits so a custom allocator's construct is honored.
  template <typename U, typename... Args>
  void M_construct(U* p, Args&&... args) {
    if (usingStdAllocator::value) {
      new (p) U(std::forward<Args>(args)...);
    } else {
      std::allocator_traits<Allocator>::construct(
          impl_, p, std::forward<Args>(args)...);
    }
  }

  // Static variant: always placement-new (no allocator available).
  template <typename U, typename... Args>
  static void S_construct(U* p, Args&&... args) {
    new (p) U(std::forward<Args>(args)...);
  }

  // Static variant taking an explicit allocator.
  template <typename U, typename... Args>
  static void S_construct_a(Allocator& a, U* p, Args&&... args) {
    std::allocator_traits<Allocator>::construct(
        a, p, std::forward<Args>(args)...);
  }

  // scalar optimization
  // TODO we can expand this optimization to: default copyable and assignable
  // For scalars, plain assignment into the (raw) slot is valid and avoids
  // the placement-new ceremony.
  template <
      typename U,
      typename Enable = typename std::enable_if<std::is_scalar<U>::value>::type>
  void M_construct(U* p, U arg) {
    if (usingStdAllocator::value) {
      *p = arg;
    } else {
      std::allocator_traits<Allocator>::construct(impl_, p, arg);
    }
  }

  template <
      typename U,
      typename Enable = typename std::enable_if<std::is_scalar<U>::value>::type>
  static void S_construct(U* p, U arg) {
    *p = arg;
  }

  template <
      typename U,
      typename Enable = typename std::enable_if<std::is_scalar<U>::value>::type>
  static void S_construct_a(Allocator& a, U* p, U arg) {
    std::allocator_traits<Allocator>::construct(a, p, arg);
  }

  // const& optimization
  // Non-scalar copy construction: take the source by const reference so GCC
  // generates the intended code (see note above about construct dispatch).
  template <
      typename U,
      typename Enable =
          typename std::enable_if<!std::is_scalar<U>::value>::type>
  void M_construct(U* p, const U& value) {
    if (usingStdAllocator::value) {
      new (p) U(value);
    } else {
      std::allocator_traits<Allocator>::construct(impl_, p, value);
    }
  }

  template <
      typename U,
      typename Enable =
          typename std::enable_if<!std::is_scalar<U>::value>::type>
  static void S_construct(U* p, const U& value) {
    new (p) U(value);
  }

  template <
      typename U,
      typename Enable =
          typename std::enable_if<!std::is_scalar<U>::value>::type>
  static void S_construct_a(Allocator& a, U* p, const U& value) {
    std::allocator_traits<Allocator>::construct(a, p, value);
  }
334 | |
335 | //--------------------------------------------------------------------------- |
336 | // destroy |
337 | |
  // Destroys a single element; skips the destructor call entirely for
  // trivially destructible types on the std::allocator fast path.
  void M_destroy(T* p) noexcept {
    if (usingStdAllocator::value) {
      if (!std::is_trivially_destructible<T>::value) {
        p->~T();
      }
    } else {
      std::allocator_traits<Allocator>::destroy(impl_, p);
    }
  }
347 | |
348 | //=========================================================================== |
349 | //--------------------------------------------------------------------------- |
350 | // algorithmic helpers |
351 | private: |
352 | //--------------------------------------------------------------------------- |
353 | // destroy_range |
354 | |
355 | // wrappers |
  // Destroys [pos, e_) and truncates the vector to end at pos.
  void M_destroy_range_e(T* pos) noexcept {
    D_destroy_range_a(pos, impl_.e_);
    impl_.e_ = pos;
  }

  // dispatch
  // THIS DISPATCH CODE IS DUPLICATED IN IMPL. SEE IMPL FOR DETAILS.
  void D_destroy_range_a(T* first, T* last) noexcept {
    if (usingStdAllocator::value) {
      S_destroy_range(first, last);
    } else {
      S_destroy_range_a(impl_, first, last);
    }
  }

  // allocator
  // Destroys [first, last) through the allocator's destroy hook.
  static void S_destroy_range_a(Allocator& a, T* first, T* last) noexcept {
    for (; first != last; ++first) {
      std::allocator_traits<Allocator>::destroy(a, first);
    }
  }

  // optimized
  // Destroys [first, last) directly; trivially destructible types compile
  // to nothing, everything else gets the unrolled destructor loop below.
  static void S_destroy_range(T* first, T* last) noexcept {
    if (!std::is_trivially_destructible<T>::value) {
#define FOLLY_FBV_OP(p) (p)->~T()
      // EXPERIMENTAL DATA on fbvector<vector<int>> (where each vector<int> has
      // size 0).
      // The unrolled version seems to work faster for small to medium sized
      // fbvectors. It gets a 10% speedup on fbvectors of size 1024, 64, and
      // 16.
      // The simple loop version seems to work faster for large fbvectors. The
      // unrolled version is about 6% slower on fbvectors on size 16384.
      // The two methods seem tied for very large fbvectors. The unrolled
      // version is about 0.5% slower on size 262144.

      // for (; first != last; ++first) first->~T();
      FOLLY_FBV_UNROLL_PTR(first, last, FOLLY_FBV_OP)
#undef FOLLY_FBV_OP
    }
  }
397 | |
398 | //--------------------------------------------------------------------------- |
399 | // uninitialized_fill_n |
400 | |
401 | // wrappers |
  // Default-constructs sz new elements at the end and advances e_.
  void M_uninitialized_fill_n_e(size_type sz) {
    D_uninitialized_fill_n_a(impl_.e_, sz);
    impl_.e_ += sz;
  }

  // Copy-constructs sz copies of value at the end and advances e_.
  void M_uninitialized_fill_n_e(size_type sz, VT value) {
    D_uninitialized_fill_n_a(impl_.e_, sz, value);
    impl_.e_ += sz;
  }

  // dispatch
  void D_uninitialized_fill_n_a(T* dest, size_type sz) {
    if (usingStdAllocator::value) {
      S_uninitialized_fill_n(dest, sz);
    } else {
      S_uninitialized_fill_n_a(impl_, dest, sz);
    }
  }

  void D_uninitialized_fill_n_a(T* dest, size_type sz, VT value) {
    if (usingStdAllocator::value) {
      S_uninitialized_fill_n(dest, sz, value);
    } else {
      S_uninitialized_fill_n_a(impl_, dest, sz, value);
    }
  }
428 | |
429 | // allocator |
  // Constructs sz elements at dest through the allocator, forwarding args to
  // each construction. If any construction throws, the completed prefix is
  // destroyed before the exception is rethrown (strong guarantee).
  template <typename... Args>
  static void S_uninitialized_fill_n_a(
      Allocator& a,
      T* dest,
      size_type sz,
      Args&&... args) {
    auto b = dest;
    auto e = dest + sz;
    try {
      for (; b != e; ++b) {
        std::allocator_traits<Allocator>::construct(
            a, b, std::forward<Args>(args)...);
      }
    } catch (...) {
      S_destroy_range_a(a, dest, b);
      throw;
    }
  }
448 | |
449 | // optimized |
450 | static void S_uninitialized_fill_n(T* dest, size_type n) { |
451 | if (folly::IsZeroInitializable<T>::value) { |
452 | if (LIKELY(n != 0)) { |
453 | std::memset((void*)dest, 0, sizeof(T) * n); |
454 | } |
455 | } else { |
456 | auto b = dest; |
457 | auto e = dest + n; |
458 | try { |
459 | for (; b != e; ++b) { |
460 | S_construct(b); |
461 | } |
462 | } catch (...) { |
463 | --b; |
464 | for (; b >= dest; --b) { |
465 | b->~T(); |
466 | } |
467 | throw; |
468 | } |
469 | } |
470 | } |
471 | |
472 | static void S_uninitialized_fill_n(T* dest, size_type n, const T& value) { |
473 | auto b = dest; |
474 | auto e = dest + n; |
475 | try { |
476 | for (; b != e; ++b) { |
477 | S_construct(b, value); |
478 | } |
479 | } catch (...) { |
480 | S_destroy_range(dest, b); |
481 | throw; |
482 | } |
483 | } |
484 | |
485 | //--------------------------------------------------------------------------- |
486 | // uninitialized_copy |
487 | |
// it is possible to add an optimization for the case where
// It = move(T*) and IsRelocatable<T> and IsZeroInitializable<T>
490 | |
491 | // wrappers |
  // Copy-constructs [first, last) at the end and advances e_.
  template <typename It>
  void M_uninitialized_copy_e(It first, It last) {
    D_uninitialized_copy_a(impl_.e_, first, last);
    impl_.e_ += std::distance(first, last);
  }

  // Move-constructs [first, last) at the end and advances e_.
  template <typename It>
  void M_uninitialized_move_e(It first, It last) {
    D_uninitialized_move_a(impl_.e_, first, last);
    impl_.e_ += std::distance(first, last);
  }

  // dispatch
  // Picks memcpy for trivially copyable T under std::allocator, otherwise
  // element-wise construction (with or without the allocator hook).
  template <typename It>
  void D_uninitialized_copy_a(T* dest, It first, It last) {
    if (usingStdAllocator::value) {
      if (folly::is_trivially_copyable<T>::value) {
        S_uninitialized_copy_bits(dest, first, last);
      } else {
        S_uninitialized_copy(dest, first, last);
      }
    } else {
      S_uninitialized_copy_a(impl_, dest, first, last);
    }
  }

  // Move is implemented as a copy through move_iterators.
  template <typename It>
  void D_uninitialized_move_a(T* dest, It first, It last) {
    D_uninitialized_copy_a(
        dest, std::make_move_iterator(first), std::make_move_iterator(last));
  }
523 | |
524 | // allocator |
  // Copy-constructs [first, last) at dest through the allocator; destroys
  // the completed prefix and rethrows if a construction throws.
  template <typename It>
  static void S_uninitialized_copy_a(Allocator& a, T* dest, It first, It last) {
    auto b = dest;
    try {
      for (; first != last; ++first, ++b) {
        std::allocator_traits<Allocator>::construct(a, b, *first);
      }
    } catch (...) {
      S_destroy_range_a(a, dest, b);
      throw;
    }
  }

  // optimized
  // Same as above but via direct construction (std::allocator fast path).
  template <typename It>
  static void S_uninitialized_copy(T* dest, It first, It last) {
    auto b = dest;
    try {
      for (; first != last; ++first, ++b) {
        S_construct(b, *first);
      }
    } catch (...) {
      S_destroy_range(dest, b);
      throw;
    }
  }

  // Bitwise copy for contiguous trivially copyable sources.
  static void
  S_uninitialized_copy_bits(T* dest, const T* first, const T* last) {
    if (last != first) {
      std::memcpy((void*)dest, (void*)first, (last - first) * sizeof(T));
    }
  }

  // Move-iterators over raw pointers can also be copied bitwise.
  static void S_uninitialized_copy_bits(
      T* dest,
      std::move_iterator<T*> first,
      std::move_iterator<T*> last) {
    T* bFirst = first.base();
    T* bLast = last.base();
    if (bLast != bFirst) {
      std::memcpy((void*)dest, (void*)bFirst, (bLast - bFirst) * sizeof(T));
    }
  }

  // Generic iterators cannot be memcpy'd; fall back to element-wise copy.
  template <typename It>
  static void S_uninitialized_copy_bits(T* dest, It first, It last) {
    S_uninitialized_copy(dest, first, last);
  }
574 | |
575 | //--------------------------------------------------------------------------- |
576 | // copy_n |
577 | |
578 | // This function is "unsafe": it assumes that the iterator can be advanced at |
579 | // least n times. However, as a private function, that unsafety is managed |
580 | // wholly by fbvector itself. |
581 | |
  // Assigns n elements from first into [dest, dest + n); returns the
  // advanced source iterator. The caller guarantees both ranges are valid.
  template <typename It>
  static It S_copy_n(T* dest, It first, size_type n) {
    auto e = dest + n;
    for (; dest != e; ++dest, ++first) {
      *dest = *first;
    }
    return first;
  }

  // Raw-pointer overload: memcpy when T is trivially copyable.
  static const T* S_copy_n(T* dest, const T* first, size_type n) {
    if (is_trivially_copyable<T>::value) {
      std::memcpy((void*)dest, (void*)first, n * sizeof(T));
      return first + n;
    } else {
      return S_copy_n<const T*>(dest, first, n);
    }
  }

  // Move-iterator-over-pointer overload: memcpy is still valid because a
  // trivially copyable move is the same bytes as a copy.
  static std::move_iterator<T*>
  S_copy_n(T* dest, std::move_iterator<T*> mIt, size_type n) {
    if (is_trivially_copyable<T>::value) {
      T* first = mIt.base();
      std::memcpy((void*)dest, (void*)first, n * sizeof(T));
      return std::make_move_iterator(first + n);
    } else {
      return S_copy_n<std::move_iterator<T*>>(dest, mIt, n);
    }
  }
610 | |
611 | //=========================================================================== |
612 | //--------------------------------------------------------------------------- |
613 | // relocation helpers |
614 | private: |
615 | // Relocation is divided into three parts: |
616 | // |
617 | // 1: relocate_move |
618 | // Performs the actual movement of data from point a to point b. |
619 | // |
620 | // 2: relocate_done |
621 | // Destroys the old data. |
622 | // |
623 | // 3: relocate_undo |
// Destroys the new data and restores the old data.
625 | // |
626 | // The three steps are used because there may be an exception after part 1 |
627 | // has completed. If that is the case, then relocate_undo can nullify the |
628 | // initial move. Otherwise, relocate_done performs the last bit of tidying |
629 | // up. |
630 | // |
631 | // The relocation trio may use either memcpy, move, or copy. It is decided |
632 | // by the following case statement: |
633 | // |
634 | // IsRelocatable && usingStdAllocator -> memcpy |
635 | // has_nothrow_move && usingStdAllocator -> move |
636 | // cannot copy -> move |
637 | // default -> copy |
638 | // |
639 | // If the class is non-copyable then it must be movable. However, if the |
640 | // move constructor is not noexcept, i.e. an error could be thrown, then |
641 | // relocate_undo will be unable to restore the old data, for fear of a |
642 | // second exception being thrown. This is a known and unavoidable |
643 | // deficiency. In lieu of a strong exception guarantee, relocate_undo does |
644 | // the next best thing: it provides a weak exception guarantee by |
// destroying the new data, but leaving the old data in an indeterminate
646 | // state. Note that that indeterminate state will be valid, since the |
647 | // old data has not been destroyed; it has merely been the source of a |
648 | // move, which is required to leave the source in a valid state. |
649 | |
650 | // wrappers |
  // Relocates the whole vector into the buffer at newB: move the data, then
  // tidy up the old storage. No relocate_undo here because nothing follows
  // the move that could throw.
  void M_relocate(T* newB) {
    relocate_move(newB, impl_.b_, impl_.e_);
    relocate_done(newB, impl_.b_, impl_.e_);
  }
655 | |
  // dispatch type trait
  // memcpy is legal only for trivially relocatable types, and only when no
  // custom allocator needs to observe construction/destruction.
  typedef bool_constant<
      folly::IsRelocatable<T>::value && usingStdAllocator::value>
      relocate_use_memcpy;

  // Move when it cannot throw (or when copying is impossible anyway); see
  // the case statement in the comment block above.
  typedef bool_constant<
      (std::is_nothrow_move_constructible<T>::value &&
       usingStdAllocator::value) ||
      !std::is_copy_constructible<T>::value>
      relocate_use_move;
666 | |
  // move
  // Step 1 of relocation: transfer [first, last) to dest via memcpy, move,
  // or copy, chosen by the tag-dispatch traits above.
  void relocate_move(T* dest, T* first, T* last) {
    relocate_move_or_memcpy(dest, first, last, relocate_use_memcpy());
  }

  // memcpy branch; the null check covers relocation of an unallocated
  // (empty) vector.
  void relocate_move_or_memcpy(T* dest, T* first, T* last, std::true_type) {
    if (first != nullptr) {
      std::memcpy((void*)dest, (void*)first, (last - first) * sizeof(T));
    }
  }

  void relocate_move_or_memcpy(T* dest, T* first, T* last, std::false_type) {
    relocate_move_or_copy(dest, first, last, relocate_use_move());
  }

  void relocate_move_or_copy(T* dest, T* first, T* last, std::true_type) {
    D_uninitialized_move_a(dest, first, last);
  }

  void relocate_move_or_copy(T* dest, T* first, T* last, std::false_type) {
    D_uninitialized_copy_a(dest, first, last);
  }
689 | |
  // done
  // Step 2 of relocation: dispose of the old range. After a memcpy
  // relocation the bytes already live at dest, so destructors must NOT run.
  void relocate_done(T* /*dest*/, T* first, T* last) noexcept {
    if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
      // used memcpy; data has been relocated, do not call destructor
    } else {
      D_destroy_range_a(first, last);
    }
  }

  // undo
  // Step 3 (only on exception): roll back the move performed by
  // relocate_move. See the comment block above for the guarantee offered by
  // each branch.
  void relocate_undo(T* dest, T* first, T* last) noexcept {
    if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
      // used memcpy, old data is still valid, nothing to do
    } else if (
        std::is_nothrow_move_constructible<T>::value &&
        usingStdAllocator::value) {
      // noexcept move everything back, aka relocate_move
      relocate_move(first, dest, dest + (last - first));
    } else if (!std::is_copy_constructible<T>::value) {
      // weak guarantee
      D_destroy_range_a(dest, dest + (last - first));
    } else {
      // used copy, old data is still valid
      D_destroy_range_a(dest, dest + (last - first));
    }
  }
716 | |
717 | //=========================================================================== |
718 | //--------------------------------------------------------------------------- |
719 | // construct/copy/destroy |
720 | public: |
  fbvector() = default;

  explicit fbvector(const Allocator& a) : impl_(a) {}

  // n default-constructed elements.
  explicit fbvector(size_type n, const Allocator& a = Allocator())
      : impl_(n, a) {
    M_uninitialized_fill_n_e(n);
  }

  // n copies of value.
  fbvector(size_type n, VT value, const Allocator& a = Allocator())
      : impl_(n, a) {
    M_uninitialized_fill_n_e(n, value);
  }

  // Iterator-range constructor; tag-dispatches on the iterator category so
  // forward iterators get a single exact-size allocation.
  template <
      class It,
      class Category = typename std::iterator_traits<It>::iterator_category>
  fbvector(It first, It last, const Allocator& a = Allocator())
      : fbvector(first, last, a, Category()) {}

  fbvector(const fbvector& other)
      : impl_(
            other.size(),
            A::select_on_container_copy_construction(other.impl_)) {
    M_uninitialized_copy_e(other.begin(), other.end());
  }

  // Move construction steals other's buffer; O(1), never throws.
  fbvector(fbvector&& other) noexcept : impl_(std::move(other.impl_)) {}

  fbvector(const fbvector& other, const Allocator& a)
      : fbvector(other.begin(), other.end(), a) {}

  // Move with an explicit allocator: a cheap buffer steal only when the
  // allocators compare equal, otherwise an element-wise move (may throw).
  /* may throw */ fbvector(fbvector&& other, const Allocator& a) : impl_(a) {
    if (impl_ == other.impl_) {
      impl_.swapData(other.impl_);
    } else {
      impl_.init(other.size());
      M_uninitialized_move_e(other.begin(), other.end());
    }
  }

  fbvector(std::initializer_list<T> il, const Allocator& a = Allocator())
      : fbvector(il.begin(), il.end(), a) {}

  ~fbvector() = default; // the cleanup occurs in impl_
766 | |
  // Copy assignment. When a propagating non-std allocator differs from
  // other's, the current buffer must be released first (it cannot be freed
  // later through the incoming allocator); then the allocator is adopted and
  // the elements are copied via assign().
  fbvector& operator=(const fbvector& other) {
    if (UNLIKELY(this == &other)) {
      return *this;
    }

    if (!usingStdAllocator::value &&
        A::propagate_on_container_copy_assignment::value) {
      if (impl_ != other.impl_) {
        // can't use other's different allocator to clean up self
        impl_.reset();
      }
      (Allocator&)impl_ = (Allocator&)other.impl_;
    }

    assign(other.begin(), other.end());
    return *this;
  }
784 | |
  // Move assignment; moveIsSwap selects a pointer swap when allocators
  // permit, otherwise an element-wise move into freshly reset storage.
  fbvector& operator=(fbvector&& other) {
    if (UNLIKELY(this == &other)) {
      return *this;
    }
    moveFrom(std::move(other), moveIsSwap());
    return *this;
  }

  fbvector& operator=(std::initializer_list<T> il) {
    assign(il.begin(), il.end());
    return *this;
  }
797 | |
  // Range assign; tag-dispatches on the iterator category (see the private
  // overloads below).
  template <
      class It,
      class Category = typename std::iterator_traits<It>::iterator_category>
  void assign(It first, It last) {
    assign(first, last, Category());
  }

  // Replaces the contents with n copies of value. Guards against `value`
  // aliasing the vector's own storage before that storage is reset.
  void assign(size_type n, VT value) {
    if (n > capacity()) {
      // Not enough space. Do not reserve in place, since we will
      // discard the old values anyways.
      if (dataIsInternalAndNotVT(value)) {
        T copy(std::move(value));
        impl_.reset(n);
        M_uninitialized_fill_n_e(n, copy);
      } else {
        impl_.reset(n);
        M_uninitialized_fill_n_e(n, value);
      }
    } else if (n <= size()) {
      auto newE = impl_.b_ + n;
      std::fill(impl_.b_, newE, value);
      M_destroy_range_e(newE);
    } else {
      std::fill(impl_.b_, impl_.e_, value);
      M_uninitialized_fill_n_e(n - size(), value);
    }
  }

  void assign(std::initializer_list<T> il) {
    assign(il.begin(), il.end());
  }

  // Returns a copy of the allocator (impl_ IS-A Allocator).
  allocator_type get_allocator() const noexcept {
    return impl_;
  }
834 | |
835 | private: |
  // contract dispatch for iterator types fbvector(It first, It last)
  // Forward iterators: the range can be measured up front, so allocate once.
  template <class ForwardIterator>
  fbvector(
      ForwardIterator first,
      ForwardIterator last,
      const Allocator& a,
      std::forward_iterator_tag)
      : impl_(size_type(std::distance(first, last)), a) {
    M_uninitialized_copy_e(first, last);
  }

  // Input iterators: single pass only, so grow element by element.
  template <class InputIterator>
  fbvector(
      InputIterator first,
      InputIterator last,
      const Allocator& a,
      std::input_iterator_tag)
      : impl_(a) {
    for (; first != last; ++first) {
      emplace_back(*first);
    }
  }
858 | |
  // contract dispatch for allocator movement in operator=(fbvector&&)
  // moveIsSwap == true: a straight swap (allocator included when needed).
  void moveFrom(fbvector&& other, std::true_type) {
    swap(impl_, other.impl_);
  }
  // moveIsSwap == false: data may only be stolen when allocators compare
  // equal; otherwise every element must be moved through this allocator.
  void moveFrom(fbvector&& other, std::false_type) {
    if (impl_ == other.impl_) {
      impl_.swapData(other.impl_);
    } else {
      impl_.reset(other.size());
      M_uninitialized_move_e(other.begin(), other.end());
    }
  }
871 | |
  // contract dispatch for iterator types in assign(It first, It last)
  // Forward iterators: size is known, so either reallocate outright, shrink
  // in place, or overwrite then append.
  template <class ForwardIterator>
  void assign(
      ForwardIterator first,
      ForwardIterator last,
      std::forward_iterator_tag) {
    const auto newSize = size_type(std::distance(first, last));
    if (newSize > capacity()) {
      impl_.reset(newSize);
      M_uninitialized_copy_e(first, last);
    } else if (newSize <= size()) {
      auto newEnd = std::copy(first, last, impl_.b_);
      M_destroy_range_e(newEnd);
    } else {
      auto mid = S_copy_n(impl_.b_, first, size());
      M_uninitialized_copy_e<decltype(last)>(mid, last);
    }
  }

  // Input iterators: overwrite existing elements first, then either trim the
  // leftovers or append the remainder of the source.
  template <class InputIterator>
  void
  assign(InputIterator first, InputIterator last, std::input_iterator_tag) {
    auto p = impl_.b_;
    for (; first != last && p != impl_.e_; ++first, ++p) {
      *p = *first;
    }
    if (p != impl_.e_) {
      M_destroy_range_e(p);
    } else {
      for (; first != last; ++first) {
        emplace_back(*first);
      }
    }
  }
906 | |
907 | // contract dispatch for aliasing under VT optimization |
908 | bool dataIsInternalAndNotVT(const T& t) { |
909 | if (should_pass_by_value::value) { |
910 | return false; |
911 | } |
912 | return dataIsInternal(t); |
913 | } |
914 | bool dataIsInternal(const T& t) { |
915 | return UNLIKELY( |
916 | impl_.b_ <= std::addressof(t) && std::addressof(t) < impl_.e_); |
917 | } |
918 | |
919 | //=========================================================================== |
920 | //--------------------------------------------------------------------------- |
921 | // iterators |
922 | public: |
  // Iterators are raw pointers straight into the buffer.
  iterator begin() noexcept {
    return impl_.b_;
  }
  const_iterator begin() const noexcept {
    return impl_.b_;
  }
  iterator end() noexcept {
    return impl_.e_;
  }
  const_iterator end() const noexcept {
    return impl_.e_;
  }
  reverse_iterator rbegin() noexcept {
    return reverse_iterator(end());
  }
  const_reverse_iterator rbegin() const noexcept {
    return const_reverse_iterator(end());
  }
  reverse_iterator rend() noexcept {
    return reverse_iterator(begin());
  }
  const_reverse_iterator rend() const noexcept {
    return const_reverse_iterator(begin());
  }

  const_iterator cbegin() const noexcept {
    return impl_.b_;
  }
  const_iterator cend() const noexcept {
    return impl_.e_;
  }
  const_reverse_iterator crbegin() const noexcept {
    return const_reverse_iterator(end());
  }
  const_reverse_iterator crend() const noexcept {
    return const_reverse_iterator(begin());
  }
960 | |
961 | //=========================================================================== |
962 | //--------------------------------------------------------------------------- |
963 | // capacity |
964 | public: |
  // Number of elements currently stored.
  size_type size() const noexcept {
    return size_type(impl_.e_ - impl_.b_);
  }

  // Theoretical upper bound only; allocation will fail long before this.
  size_type max_size() const noexcept {
    // good luck gettin' there
    return ~size_type(0);
  }
973 | |
974 | void resize(size_type n) { |
975 | if (n <= size()) { |
976 | M_destroy_range_e(impl_.b_ + n); |
977 | } else { |
978 | reserve(n); |
979 | M_uninitialized_fill_n_e(n - size()); |
980 | } |
981 | } |
982 | |
  // Resize with a fill value. When t aliases our own storage (only possible
  // if T is passed by reference) and growing will reallocate, copy t first
  // so reserve() cannot invalidate it.
  void resize(size_type n, VT t) {
    if (n <= size()) {
      M_destroy_range_e(impl_.b_ + n);
    } else if (dataIsInternalAndNotVT(t) && n > capacity()) {
      T copy(t);
      reserve(n);
      M_uninitialized_fill_n_e(n - size(), copy);
    } else {
      reserve(n);
      M_uninitialized_fill_n_e(n - size(), t);
    }
  }
995 | |
  // Number of elements the current allocation can hold without growing.
  size_type capacity() const noexcept {
    return size_type(impl_.z_ - impl_.b_);
  }

  bool empty() const noexcept {
    return impl_.b_ == impl_.e_;
  }
1003 | |
  // Ensure capacity() >= n; never shrinks. If relocation throws, the old
  // buffer is left untouched.
  void reserve(size_type n) {
    if (n <= capacity()) {
      return;
    }
    // First try to grow the existing allocation in place (jemalloc path).
    if (impl_.b_ && reserve_in_place(n)) {
      return;
    }

    // Round the request up to a size the allocator would hand out anyway.
    auto newCap = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
    auto newB = M_allocate(newCap);
    try {
      M_relocate(newB);
    } catch (...) {
      M_deallocate(newB, newCap);
      throw;
    }
    if (impl_.b_) {
      M_deallocate(impl_.b_, size_type(impl_.z_ - impl_.b_));
    }
    // Order matters: e_ must be recomputed from the old b_ before b_ is
    // overwritten.
    impl_.z_ = newB + newCap;
    impl_.e_ = newB + (impl_.e_ - impl_.b_);
    impl_.b_ = newB;
  }
1027 | |
  // Best-effort capacity reduction. Any allocation failure is swallowed and
  // the vector simply keeps its current capacity (hence noexcept).
  void shrink_to_fit() noexcept {
    if (empty()) {
      // Free everything and return to the default-constructed state.
      impl_.reset();
      return;
    }

    auto const newCapacityBytes = folly::goodMallocSize(size() * sizeof(T));
    auto const newCap = newCapacityBytes / sizeof(T);
    auto const oldCap = capacity();

    if (newCap >= oldCap) {
      return;
    }

    void* p = impl_.b_;
    // xallocx() will shrink to precisely newCapacityBytes (which was generated
    // by goodMallocSize()) if it successfully shrinks in place.
    if ((usingJEMalloc() && usingStdAllocator::value) &&
        newCapacityBytes >= folly::jemallocMinInPlaceExpandable &&
        xallocx(p, newCapacityBytes, 0, 0) == newCapacityBytes) {
      impl_.z_ += newCap - oldCap;
    } else {
      // Relocate into a smaller buffer; on any failure keep the old one.
      T* newB; // intentionally uninitialized
      try {
        newB = M_allocate(newCap);
        try {
          M_relocate(newB);
        } catch (...) {
          M_deallocate(newB, newCap);
          return; // swallow the error
        }
      } catch (...) {
        return;
      }
      if (impl_.b_) {
        M_deallocate(impl_.b_, size_type(impl_.z_ - impl_.b_));
      }
      // Order matters: e_ is recomputed from the old b_ before b_ changes.
      impl_.z_ = newB + newCap;
      impl_.e_ = newB + (impl_.e_ - impl_.b_);
      impl_.b_ = newB;
    }
  }
1070 | |
1071 | private: |
1072 | bool reserve_in_place(size_type n) { |
1073 | if (!usingStdAllocator::value || !usingJEMalloc()) { |
1074 | return false; |
1075 | } |
1076 | |
1077 | // jemalloc can never grow in place blocks smaller than 4096 bytes. |
1078 | if ((impl_.z_ - impl_.b_) * sizeof(T) < |
1079 | folly::jemallocMinInPlaceExpandable) { |
1080 | return false; |
1081 | } |
1082 | |
1083 | auto const newCapacityBytes = folly::goodMallocSize(n * sizeof(T)); |
1084 | void* p = impl_.b_; |
1085 | if (xallocx(p, newCapacityBytes, 0, 0) == newCapacityBytes) { |
1086 | impl_.z_ = impl_.b_ + newCapacityBytes / sizeof(T); |
1087 | return true; |
1088 | } |
1089 | return false; |
1090 | } |
1091 | |
1092 | //=========================================================================== |
1093 | //--------------------------------------------------------------------------- |
1094 | // element access |
1095 | public: |
  reference operator[](size_type n) {
    assert(n < size()); // unchecked in release builds, like std::vector
    return impl_.b_[n];
  }
  const_reference operator[](size_type n) const {
    assert(n < size());
    return impl_.b_[n];
  }
  // Bounds-checked access; throws std::out_of_range on a bad index.
  const_reference at(size_type n) const {
    if (UNLIKELY(n >= size())) {
      throw_exception<std::out_of_range>(
          "fbvector: index is greater than size." );
    }
    return (*this)[n];
  }
  reference at(size_type n) {
    // Reuse the const overload; casting constness back off is safe because
    // *this is actually non-const here.
    auto const& cThis = *this;
    return const_cast<reference>(cThis.at(n));
  }
  reference front() {
    assert(!empty());
    return *impl_.b_;
  }
  const_reference front() const {
    assert(!empty());
    return *impl_.b_;
  }
  reference back() {
    assert(!empty());
    return impl_.e_[-1];
  }
  const_reference back() const {
    assert(!empty());
    return impl_.e_[-1];
  }
1131 | |
1132 | //=========================================================================== |
1133 | //--------------------------------------------------------------------------- |
1134 | // data access |
1135 | public: |
  // Direct access to the underlying contiguous storage. May be null when the
  // vector has never allocated (b_ starts/reset to nullptr).
  T* data() noexcept {
    return impl_.b_;
  }
  const T* data() const noexcept {
    return impl_.b_;
  }
1142 | |
1143 | //=========================================================================== |
1144 | //--------------------------------------------------------------------------- |
1145 | // modifiers (common) |
1146 | public: |
1147 | template <class... Args> |
1148 | void emplace_back(Args&&... args) { |
1149 | if (impl_.e_ != impl_.z_) { |
1150 | M_construct(impl_.e_, std::forward<Args>(args)...); |
1151 | ++impl_.e_; |
1152 | } else { |
1153 | emplace_back_aux(std::forward<Args>(args)...); |
1154 | } |
1155 | } |
1156 | |
1157 | void push_back(const T& value) { |
1158 | if (impl_.e_ != impl_.z_) { |
1159 | M_construct(impl_.e_, value); |
1160 | ++impl_.e_; |
1161 | } else { |
1162 | emplace_back_aux(value); |
1163 | } |
1164 | } |
1165 | |
1166 | void push_back(T&& value) { |
1167 | if (impl_.e_ != impl_.z_) { |
1168 | M_construct(impl_.e_, std::move(value)); |
1169 | ++impl_.e_; |
1170 | } else { |
1171 | emplace_back_aux(std::move(value)); |
1172 | } |
1173 | } |
1174 | |
1175 | void pop_back() { |
1176 | assert(!empty()); |
1177 | --impl_.e_; |
1178 | M_destroy(impl_.e_); |
1179 | } |
1180 | |
1181 | void swap(fbvector& other) noexcept { |
1182 | if (!usingStdAllocator::value && A::propagate_on_container_swap::value) { |
1183 | swap(impl_, other.impl_); |
1184 | } else { |
1185 | impl_.swapData(other.impl_); |
1186 | } |
1187 | } |
1188 | |
  // Destroy all elements; capacity is retained (no deallocation).
  void clear() noexcept {
    M_destroy_range_e(impl_.b_);
  }
1192 | |
1193 | private: |
1194 | // std::vector implements a similar function with a different growth |
1195 | // strategy: empty() ? 1 : capacity() * 2. |
1196 | // |
1197 | // fbvector grows differently on two counts: |
1198 | // |
1199 | // (1) initial size |
1200 | // Instead of growing to size 1 from empty, fbvector allocates at least |
1201 | // 64 bytes. You may still use reserve to reserve a lesser amount of |
1202 | // memory. |
1203 | // (2) 1.5x |
1204 | // For medium-sized vectors, the growth strategy is 1.5x. See the docs |
1205 | // for details. |
1206 | // This does not apply to very small or very large fbvectors. This is a |
1207 | // heuristic. |
1208 | // A nice addition to fbvector would be the capability of having a user- |
1209 | // defined growth strategy, probably as part of the allocator. |
1210 | // |
1211 | |
1212 | size_type computePushBackCapacity() const { |
1213 | if (capacity() == 0) { |
1214 | return std::max(64 / sizeof(T), size_type(1)); |
1215 | } |
1216 | if (capacity() < folly::jemallocMinInPlaceExpandable / sizeof(T)) { |
1217 | return capacity() * 2; |
1218 | } |
1219 | if (capacity() > 4096 * 32 / sizeof(T)) { |
1220 | return capacity() * 2; |
1221 | } |
1222 | return (capacity() * 3 + 1) / 2; |
1223 | } |
1224 | |
  // Out-of-line slow path of emplace_back; defined after the class body.
  template <class... Args>
  void emplace_back_aux(Args&&... args);
1227 | |
1228 | //=========================================================================== |
1229 | //--------------------------------------------------------------------------- |
1230 | // modifiers (erase) |
1231 | public: |
1232 | iterator erase(const_iterator position) { |
1233 | return erase(position, position + 1); |
1234 | } |
1235 | |
  // Erase [first, last). Relocatable types are compacted with a raw byte
  // move; other types are move-assigned down and the leftover tail destroyed.
  iterator erase(const_iterator first, const_iterator last) {
    assert(isValid(first) && isValid(last));
    assert(first <= last);
    if (first != last) {
      if (last == end()) {
        // Erasing a suffix: just destroy it.
        M_destroy_range_e((iterator)first);
      } else {
        if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
          D_destroy_range_a((iterator)first, (iterator)last);
          if (last - first >= cend() - last) {
            // Gap is at least as big as the tail: regions cannot overlap,
            // so the cheaper memcpy is legal.
            std::memcpy((void*)first, (void*)last, (cend() - last) * sizeof(T));
          } else {
            // Overlapping regions require memmove.
            std::memmove(
                (void*)first, (void*)last, (cend() - last) * sizeof(T));
          }
          impl_.e_ -= (last - first);
        } else {
          // Generic path: move the tail down, then destroy the residue.
          std::copy(
              std::make_move_iterator((iterator)last),
              std::make_move_iterator(end()),
              (iterator)first);
          auto newEnd = impl_.e_ - std::distance(first, last);
          M_destroy_range_e(newEnd);
        }
      }
    }
    return (iterator)first;
  }
1264 | |
1265 | //=========================================================================== |
1266 | //--------------------------------------------------------------------------- |
1267 | // modifiers (insert) |
1268 | private: // we have the private section first because it defines some macros |
1269 | bool isValid(const_iterator it) { |
1270 | return cbegin() <= it && it <= cend(); |
1271 | } |
1272 | |
1273 | size_type computeInsertCapacity(size_type n) { |
1274 | size_type nc = std::max(computePushBackCapacity(), size() + n); |
1275 | size_type ac = folly::goodMallocSize(nc * sizeof(T)) / sizeof(T); |
1276 | return ac; |
1277 | } |
1278 | |
1279 | //--------------------------------------------------------------------------- |
1280 | // |
1281 | // make_window takes an fbvector, and creates an uninitialized gap (a |
1282 | // window) at the given position, of the given size. The fbvector must |
1283 | // have enough capacity. |
1284 | // |
1285 | // Explanation by picture. |
1286 | // |
1287 | // 123456789______ |
1288 | // ^ |
1289 | // make_window here of size 3 |
1290 | // |
1291 | // 1234___56789___ |
1292 | // |
1293 | // If something goes wrong and the window must be destroyed, use |
1294 | // undo_window to provide a weak exception guarantee. It destroys |
1295 | // the right ledge. |
1296 | // |
1297 | // 1234___________ |
1298 | // |
1299 | //--------------------------------------------------------------------------- |
1300 | // |
1301 | // wrap_frame takes an inverse window and relocates an fbvector around it. |
1302 | // The fbvector must have at least as many elements as the left ledge. |
1303 | // |
1304 | // Explanation by picture. |
1305 | // |
1306 | // START |
1307 | // fbvector: inverse window: |
1308 | // 123456789______ _____abcde_______ |
1309 | // [idx][ n ] |
1310 | // |
1311 | // RESULT |
1312 | // _______________ 12345abcde6789___ |
1313 | // |
1314 | //--------------------------------------------------------------------------- |
1315 | // |
1316 | // insert_use_fresh_memory returns true iff the fbvector should use a fresh |
1317 | // block of memory for the insertion. If the fbvector does not have enough |
1318 | // spare capacity, then it must return true. Otherwise either true or false |
1319 | // may be returned. |
1320 | // |
1321 | //--------------------------------------------------------------------------- |
1322 | // |
1323 | // These three functions, make_window, wrap_frame, and |
1324 | // insert_use_fresh_memory, can be combined into a uniform interface. |
1325 | // Since that interface involves a lot of case-work, it is built into |
1326 | // some macros: FOLLY_FBVECTOR_INSERT_(PRE|START|TRY|END) |
1327 | // Macros are used in an attempt to let GCC perform better optimizations, |
1328 | // especially control flow optimization. |
1329 | // |
1330 | |
1331 | //--------------------------------------------------------------------------- |
1332 | // window |
1333 | |
  // Open an uninitialized gap of n elements at `position` (see the picture
  // in the comment block above). Capacity must already suffice; e_ advances
  // by n.
  void make_window(iterator position, size_type n) {
    // The result is guaranteed to be non-negative, so use an unsigned type:
    size_type tail = size_type(std::distance(position, impl_.e_));

    if (tail <= n) {
      // The whole tail lands past the gap; no element overlaps its old slot.
      relocate_move(position + n, position, impl_.e_);
      relocate_done(position + n, position, impl_.e_);
      impl_.e_ += n;
    } else {
      if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
        // Relocatable: shift the tail with one overlapping byte move.
        std::memmove((void*)(position + n), (void*)position, tail * sizeof(T));
        impl_.e_ += n;
      } else {
        // Generic path: move-construct the last n elements into fresh slots,
        // then move-assign the rest backwards into place.
        D_uninitialized_move_a(impl_.e_, impl_.e_ - n, impl_.e_);
        try {
          std::copy_backward(
              std::make_move_iterator(position),
              std::make_move_iterator(impl_.e_ - n),
              impl_.e_);
        } catch (...) {
          // Roll back the move-constructed slots and restore e_.
          D_destroy_range_a(impl_.e_ - n, impl_.e_ + n);
          impl_.e_ -= n;
          throw;
        }
        impl_.e_ += n;
        // Leave the window itself uninitialized for the caller to fill.
        D_destroy_range_a(position, position + n);
      }
    }
  }
1363 | |
  // Weak-guarantee cleanup after make_window: destroy the right ledge (the
  // elements past the window) and truncate the vector at the window start.
  void undo_window(iterator position, size_type n) noexcept {
    D_destroy_range_a(position + n, impl_.e_);
    impl_.e_ = position;
  }
1368 | |
1369 | //--------------------------------------------------------------------------- |
1370 | // frame |
1371 | |
  // Relocate this vector around an inverse window at [ledge+idx, ledge+idx+n)
  // (see the picture above): the first idx elements land before the window,
  // the remainder after it. Rolls back the first half if the second throws.
  void wrap_frame(T* ledge, size_type idx, size_type n) {
    assert(size() >= idx);
    assert(n != 0);

    relocate_move(ledge, impl_.b_, impl_.b_ + idx);
    try {
      relocate_move(ledge + idx + n, impl_.b_ + idx, impl_.e_);
    } catch (...) {
      relocate_undo(ledge, impl_.b_, impl_.b_ + idx);
      throw;
    }
    // Commit both halves only after both relocations succeeded.
    relocate_done(ledge, impl_.b_, impl_.b_ + idx);
    relocate_done(ledge + idx + n, impl_.b_ + idx, impl_.e_);
  }
1386 | |
1387 | //--------------------------------------------------------------------------- |
1388 | // use fresh? |
1389 | |
1390 | bool insert_use_fresh(bool at_end, size_type n) { |
1391 | if (at_end) { |
1392 | if (size() + n <= capacity()) { |
1393 | return false; |
1394 | } |
1395 | if (reserve_in_place(size() + n)) { |
1396 | return false; |
1397 | } |
1398 | return true; |
1399 | } |
1400 | |
1401 | if (size() + n > capacity()) { |
1402 | return true; |
1403 | } |
1404 | |
1405 | return false; |
1406 | } |
1407 | |
1408 | //--------------------------------------------------------------------------- |
1409 | // interface |
1410 | |
  // Common implementation behind every insert/emplace overload. The four
  // functors customize: aliasing detection, the self-insert fallback,
  // construction of the inserted elements, and their cleanup on failure.
  template <
      typename IsInternalFunc,
      typename InsertInternalFunc,
      typename ConstructFunc,
      typename DestroyFunc>
  iterator do_real_insert(
      const_iterator cpos,
      size_type n,
      IsInternalFunc&& isInternalFunc,
      InsertInternalFunc&& insertInternalFunc,
      ConstructFunc&& constructFunc,
      DestroyFunc&& destroyFunc) {
    if (n == 0) {
      return iterator(cpos);
    }
    bool at_end = cpos == cend();
    bool fresh = insert_use_fresh(at_end, n);
    if (!at_end) {
      if (!fresh && isInternalFunc()) {
        // check for internal data (technically not required by the standard)
        return insertInternalFunc();
      }
      assert(isValid(cpos));
    }
    T* position = const_cast<T*>(cpos);
    size_type idx = size_type(std::distance(impl_.b_, position));
    T* b;
    size_type newCap; /* intentionally uninitialized */

    if (fresh) {
      // Insert into a brand-new buffer; the old elements are wrapped around
      // the inserted range later via wrap_frame.
      newCap = computeInsertCapacity(n);
      b = M_allocate(newCap);
    } else {
      // Insert in place: open a gap (or just extend e_ when appending).
      if (!at_end) {
        make_window(position, n);
      } else {
        impl_.e_ += n;
      }
      b = impl_.b_;
    }

    T* start = b + idx;
    try {
      // construct the inserted elements
      constructFunc(start);
    } catch (...) {
      // Roll back whichever preparation step was performed above.
      if (fresh) {
        M_deallocate(b, newCap);
      } else {
        if (!at_end) {
          undo_window(position, n);
        } else {
          impl_.e_ -= n;
        }
      }
      throw;
    }

    if (fresh) {
      try {
        wrap_frame(b, idx, n);
      } catch (...) {
        // delete the inserted elements (exception has been thrown)
        destroyFunc(start);
        M_deallocate(b, newCap);
        throw;
      }
      if (impl_.b_) {
        M_deallocate(impl_.b_, capacity());
      }
      impl_.set(b, size() + n, newCap);
      return impl_.b_ + idx;
    } else {
      return position;
    }
  }
1487 | |
1488 | public: |
  // Construct a single element in place at cpos from args. No aliasing
  // fallback is provided (the isInternal functor always says false).
  template <class... Args>
  iterator emplace(const_iterator cpos, Args&&... args) {
    return do_real_insert(
        cpos,
        1,
        [&] { return false; },
        [&] { return iterator{}; },
        [&](iterator start) {
          M_construct(start, std::forward<Args>(args)...);
        },
        [&](iterator start) { M_destroy(start); });
  }
1501 | |
  // Insert a copy of value at cpos. If value aliases our own storage and
  // would be shifted, re-enter with a temporary copy instead.
  iterator insert(const_iterator cpos, const T& value) {
    return do_real_insert(
        cpos,
        1,
        [&] { return dataIsInternal(value); },
        [&] { return insert(cpos, T(value)); },
        [&](iterator start) { M_construct(start, value); },
        [&](iterator start) { M_destroy(start); });
  }
1511 | |
  // Insert by moving value into place at cpos; same aliasing fallback as
  // the copying overload, but moving out of the original.
  iterator insert(const_iterator cpos, T&& value) {
    return do_real_insert(
        cpos,
        1,
        [&] { return dataIsInternal(value); },
        [&] { return insert(cpos, T(std::move(value))); },
        [&](iterator start) { M_construct(start, std::move(value)); },
        [&](iterator start) { M_destroy(start); });
  }
1521 | |
  // Insert n copies of value at cpos. The aliasing check is skipped when VT
  // is a by-value parameter (already a copy).
  iterator insert(const_iterator cpos, size_type n, VT value) {
    return do_real_insert(
        cpos,
        n,
        [&] { return dataIsInternalAndNotVT(value); },
        [&] { return insert(cpos, n, T(value)); },
        [&](iterator start) { D_uninitialized_fill_n_a(start, n, value); },
        [&](iterator start) { D_destroy_range_a(start, start + n); });
  }
1531 | |
1532 | template < |
1533 | class It, |
1534 | class Category = typename std::iterator_traits<It>::iterator_category> |
1535 | iterator insert(const_iterator cpos, It first, It last) { |
1536 | return insert(cpos, first, last, Category()); |
1537 | } |
1538 | |
1539 | iterator insert(const_iterator cpos, std::initializer_list<T> il) { |
1540 | return insert(cpos, il.begin(), il.end()); |
1541 | } |
1542 | |
1543 | //--------------------------------------------------------------------------- |
1544 | // insert dispatch for iterator types |
1545 | private: |
  // Forward iterators: the range can be measured up front, so the whole
  // insertion goes through do_real_insert in one shot.
  template <class FIt>
  iterator
  insert(const_iterator cpos, FIt first, FIt last, std::forward_iterator_tag) {
    size_type n = size_type(std::distance(first, last));
    return do_real_insert(
        cpos,
        n,
        [&] { return false; },
        [&] { return iterator{}; },
        [&](iterator start) { D_uninitialized_copy_a(start, first, last); },
        [&](iterator start) { D_destroy_range_a(start, start + n); });
  }
1558 | |
  // Input iterators can only be traversed once, so: move the tail out into a
  // temporary vector, append the new elements, then move the tail back.
  template <class IIt>
  iterator
  insert(const_iterator cpos, IIt first, IIt last, std::input_iterator_tag) {
    T* position = const_cast<T*>(cpos);
    assert(isValid(position));
    size_type idx = std::distance(begin(), position);

    // Stash everything at and after the insertion point.
    fbvector storage(
        std::make_move_iterator(position),
        std::make_move_iterator(end()),
        A::select_on_container_copy_construction(impl_));
    M_destroy_range_e(position);
    for (; first != last; ++first) {
      emplace_back(*first);
    }
    // Reattach the stashed tail after the newly appended elements.
    insert(
        cend(),
        std::make_move_iterator(storage.begin()),
        std::make_move_iterator(storage.end()));
    return impl_.b_ + idx;
  }
1580 | |
1581 | //=========================================================================== |
1582 | //--------------------------------------------------------------------------- |
1583 | // lexicographical functions |
1584 | public: |
  // Element-wise equality; sizes must match first.
  bool operator==(const fbvector& other) const {
    return size() == other.size() && std::equal(begin(), end(), other.begin());
  }

  bool operator!=(const fbvector& other) const {
    return !(*this == other);
  }

  // Lexicographic ordering, as in std::vector; the remaining operators are
  // derived from this one.
  bool operator<(const fbvector& other) const {
    return std::lexicographical_compare(
        begin(), end(), other.begin(), other.end());
  }

  bool operator>(const fbvector& other) const {
    return other < *this;
  }

  bool operator<=(const fbvector& other) const {
    return !(*this > other);
  }

  bool operator>=(const fbvector& other) const {
    return !(*this < other);
  }
1609 | |
1610 | //=========================================================================== |
1611 | //--------------------------------------------------------------------------- |
1612 | // friends |
1613 | private: |
1614 | template <class _T, class _A> |
1615 | friend _T* relinquish(fbvector<_T, _A>&); |
1616 | |
1617 | template <class _T, class _A> |
1618 | friend void attach(fbvector<_T, _A>&, _T* data, size_t sz, size_t cap); |
1619 | |
1620 | }; // class fbvector |
1621 | |
1622 | //============================================================================= |
1623 | //----------------------------------------------------------------------------- |
1624 | // outlined functions (gcc, you finicky compiler you) |
1625 | |
1626 | template <typename T, typename Allocator> |
1627 | template <class... Args> |
1628 | void fbvector<T, Allocator>::emplace_back_aux(Args&&... args) { |
1629 | size_type byte_sz = |
1630 | folly::goodMallocSize(computePushBackCapacity() * sizeof(T)); |
1631 | if (usingStdAllocator::value && usingJEMalloc() && |
1632 | ((impl_.z_ - impl_.b_) * sizeof(T) >= |
1633 | folly::jemallocMinInPlaceExpandable)) { |
1634 | // Try to reserve in place. |
1635 | // Ask xallocx to allocate in place at least size()+1 and at most sz space. |
1636 | // xallocx will allocate as much as possible within that range, which |
1637 | // is the best possible outcome: if sz space is available, take it all, |
1638 | // otherwise take as much as possible. If nothing is available, then fail. |
1639 | // In this fashion, we never relocate if there is a possibility of |
1640 | // expanding in place, and we never reallocate by less than the desired |
1641 | // amount unless we cannot expand further. Hence we will not reallocate |
1642 | // sub-optimally twice in a row (modulo the blocking memory being freed). |
1643 | size_type lower = folly::goodMallocSize(sizeof(T) + size() * sizeof(T)); |
1644 | size_type upper = byte_sz; |
1645 | size_type = upper - lower; |
1646 | |
1647 | void* p = impl_.b_; |
1648 | size_t actual; |
1649 | |
1650 | if ((actual = xallocx(p, lower, extra, 0)) >= lower) { |
1651 | impl_.z_ = impl_.b_ + actual / sizeof(T); |
1652 | M_construct(impl_.e_, std::forward<Args>(args)...); |
1653 | ++impl_.e_; |
1654 | return; |
1655 | } |
1656 | } |
1657 | |
1658 | // Reallocation failed. Perform a manual relocation. |
1659 | size_type sz = byte_sz / sizeof(T); |
1660 | auto newB = M_allocate(sz); |
1661 | auto newE = newB + size(); |
1662 | try { |
1663 | if (folly::IsRelocatable<T>::value && usingStdAllocator::value) { |
1664 | // For linear memory access, relocate before construction. |
1665 | // By the test condition, relocate is noexcept. |
1666 | // Note that there is no cleanup to do if M_construct throws - that's |
1667 | // one of the beauties of relocation. |
1668 | // Benchmarks for this code have high variance, and seem to be close. |
1669 | relocate_move(newB, impl_.b_, impl_.e_); |
1670 | M_construct(newE, std::forward<Args>(args)...); |
1671 | ++newE; |
1672 | } else { |
1673 | M_construct(newE, std::forward<Args>(args)...); |
1674 | ++newE; |
1675 | try { |
1676 | M_relocate(newB); |
1677 | } catch (...) { |
1678 | M_destroy(newE - 1); |
1679 | throw; |
1680 | } |
1681 | } |
1682 | } catch (...) { |
1683 | M_deallocate(newB, sz); |
1684 | throw; |
1685 | } |
1686 | if (impl_.b_) { |
1687 | M_deallocate(impl_.b_, size()); |
1688 | } |
1689 | impl_.b_ = newB; |
1690 | impl_.e_ = newE; |
1691 | impl_.z_ = newB + sz; |
1692 | } |
1693 | |
1694 | //============================================================================= |
1695 | //----------------------------------------------------------------------------- |
1696 | // specialized functions |
1697 | |
// Non-member swap, found by ADL; defers to the member swap.
template <class T, class A>
void swap(fbvector<T, A>& lhs, fbvector<T, A>& rhs) noexcept {
  lhs.swap(rhs);
}
1702 | |
1703 | //============================================================================= |
1704 | //----------------------------------------------------------------------------- |
1705 | // other |
1706 | |
1707 | namespace detail { |
1708 | |
1709 | // Format support. |
// Sequence-style indexing traits for fbvector — presumably enables
// folly::format-style indexed access like std::vector (via FormatTraits.h);
// confirm against IndexableTraitsSeq.
template <class T, class A>
struct IndexableTraits<fbvector<T, A>>
    : public IndexableTraitsSeq<fbvector<T, A>> {};
1713 | |
1714 | } // namespace detail |
1715 | |
1716 | template <class T, class A> |
1717 | void compactResize(fbvector<T, A>* v, size_t sz) { |
1718 | v->resize(sz); |
1719 | v->shrink_to_fit(); |
1720 | } |
1721 | |
1722 | // DANGER |
1723 | // |
// relinquish and attach are deliberately not member functions, so that they
// are awkward to call. It is very easy to shoot yourself in the foot with
1726 | // these functions. |
1727 | // |
1728 | // If you call relinquish, then it is your responsibility to free the data |
1729 | // and the storage, both of which may have been generated in a non-standard |
1730 | // way through the fbvector's allocator. |
1731 | // |
1732 | // If you call attach, it is your responsibility to ensure that the fbvector |
1733 | // is fresh (size and capacity both zero), and that the supplied data is |
1734 | // capable of being manipulated by the allocator. |
1735 | // It is acceptable to supply a stack pointer IF: |
1736 | // (1) The vector's data does not outlive the stack pointer. This includes |
1737 | // extension of the data's life through a move operation. |
1738 | // (2) The pointer has enough capacity that the vector will never be |
1739 | // relocated. |
1740 | // (3) Insert is not called on the vector; these functions have leeway to |
1741 | // relocate the vector even if there is enough capacity. |
1742 | // (4) A stack pointer is compatible with the fbvector's allocator. |
1743 | // |
1744 | |
1745 | template <class T, class A> |
1746 | T* relinquish(fbvector<T, A>& v) { |
1747 | T* ret = v.data(); |
1748 | v.impl_.b_ = v.impl_.e_ = v.impl_.z_ = nullptr; |
1749 | return ret; |
1750 | } |
1751 | |
// Take ownership of an externally supplied buffer. The vector must be
// completely fresh (never allocated); see the DANGER comment above for the
// full preconditions.
template <class T, class A>
void attach(fbvector<T, A>& v, T* data, size_t sz, size_t cap) {
  assert(v.data() == nullptr);
  v.impl_.b_ = data;
  v.impl_.e_ = data + sz;
  v.impl_.z_ = data + cap;
}
1759 | |
1760 | } // namespace folly |
1761 | |