1 | /* |
2 | * Copyright 2011 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #ifndef SkTArray_DEFINED |
9 | #define SkTArray_DEFINED |
10 | |
11 | #include "include/core/SkMath.h" |
12 | #include "include/core/SkTypes.h" |
13 | #include "include/private/SkMalloc.h" |
14 | #include "include/private/SkSafe32.h" |
15 | #include "include/private/SkTLogic.h" |
16 | #include "include/private/SkTemplates.h" |
17 | |
18 | #include <string.h> |
19 | #include <memory> |
20 | #include <new> |
21 | #include <utility> |
22 | |
23 | /** When MEM_MOVE is true T will be bit copied when moved. |
24 | When MEM_MOVE is false, T will be copy constructed / destructed. |
    In all cases T's constructor is called when an element is added
    (default-initialized unless a value is supplied), and its destructor is
    called when the element is removed or from this object's destructor.
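
    A minimal usage sketch (Foo is a hypothetical, non-trivially-copyable type):

        SkTArray<int, true> ints;      // trivially movable; may be bit copied when growing
        ints.push_back(42);

        SkTArray<sk_sp<Foo>> refs;     // MEM_MOVE defaults to false; move/copy ctors are used
        refs.emplace_back(new Foo);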
27 | */ |
28 | template <typename T, bool MEM_MOVE = false> class SkTArray { |
29 | public: |
30 | /** |
31 | * Creates an empty array with no initial storage |
32 | */ |
33 | SkTArray() { this->init(); } |
34 | |
35 | /** |
36 | * Creates an empty array that will preallocate space for reserveCount |
37 | * elements. |
38 | */ |
39 | explicit SkTArray(int reserveCount) { this->init(0, reserveCount); } |
40 | |
41 | /** |
42 | * Copies one array to another. The new array will be heap allocated. |
43 | */ |
44 | SkTArray(const SkTArray& that) { |
45 | this->init(that.fCount); |
46 | this->copy(that.fItemArray); |
47 | } |
48 | |
49 | SkTArray(SkTArray&& that) { |
50 | // TODO: If 'that' owns its memory why don't we just steal the pointer? |
51 | this->init(that.fCount); |
52 | that.move(fItemArray); |
53 | that.fCount = 0; |
54 | } |
55 | |
56 | /** |
57 | * Creates a SkTArray by copying contents of a standard C array. The new |
58 | * array will be heap allocated. Be careful not to use this constructor |
59 | * when you really want the (void*, int) version. |
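     *
     * A minimal sketch:
     *
     *     static const int kVals[] = { 1, 2, 3 };
     *     SkTArray<int, true> vals(kVals, 3);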
60 | */ |
61 | SkTArray(const T* array, int count) { |
62 | this->init(count); |
63 | this->copy(array); |
64 | } |
65 | |
66 | SkTArray& operator=(const SkTArray& that) { |
67 | if (this == &that) { |
68 | return *this; |
69 | } |
70 | for (int i = 0; i < fCount; ++i) { |
71 | fItemArray[i].~T(); |
72 | } |
73 | fCount = 0; |
74 | this->checkRealloc(that.count()); |
75 | fCount = that.count(); |
76 | this->copy(that.fItemArray); |
77 | return *this; |
78 | } |
79 | SkTArray& operator=(SkTArray&& that) { |
80 | if (this == &that) { |
81 | return *this; |
82 | } |
83 | for (int i = 0; i < fCount; ++i) { |
84 | fItemArray[i].~T(); |
85 | } |
86 | fCount = 0; |
87 | this->checkRealloc(that.count()); |
88 | fCount = that.count(); |
89 | that.move(fItemArray); |
90 | that.fCount = 0; |
91 | return *this; |
92 | } |
93 | |
94 | ~SkTArray() { |
95 | for (int i = 0; i < fCount; ++i) { |
96 | fItemArray[i].~T(); |
97 | } |
98 | if (fOwnMemory) { |
99 | sk_free(fItemArray); |
100 | } |
101 | } |
102 | |
103 | /** |
104 | * Resets to count() == 0 and resets any reserve count. |
105 | */ |
106 | void reset() { |
107 | this->pop_back_n(fCount); |
108 | fReserved = false; |
109 | } |
110 | |
111 | /** |
     * Resets to count() == n newly default-initialized T objects and resets any reserve count.
113 | */ |
114 | void reset(int n) { |
115 | SkASSERT(n >= 0); |
116 | for (int i = 0; i < fCount; ++i) { |
117 | fItemArray[i].~T(); |
118 | } |
119 | // Set fCount to 0 before calling checkRealloc so that no elements are moved. |
120 | fCount = 0; |
121 | this->checkRealloc(n); |
122 | fCount = n; |
123 | for (int i = 0; i < fCount; ++i) { |
124 | new (fItemArray + i) T; |
125 | } |
126 | fReserved = false; |
127 | } |
128 | |
129 | /** |
130 | * Resets to a copy of a C array and resets any reserve count. |
131 | */ |
132 | void reset(const T* array, int count) { |
133 | for (int i = 0; i < fCount; ++i) { |
134 | fItemArray[i].~T(); |
135 | } |
136 | fCount = 0; |
137 | this->checkRealloc(count); |
138 | fCount = count; |
139 | this->copy(array); |
140 | fReserved = false; |
141 | } |
142 | |
143 | /** |
     * Ensures there is enough reserved space for n additional elements. This is guaranteed at
     * least until the array size grows above n and subsequently shrinks below n, any version of
     * reset() is called, or reserve() is called again.
147 | */ |
148 | void reserve(int n) { |
149 | SkASSERT(n >= 0); |
150 | if (n > 0) { |
151 | this->checkRealloc(n); |
152 | fReserved = fOwnMemory; |
153 | } else { |
154 | fReserved = false; |
155 | } |
156 | } |
157 | |
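    /**
     * Removes the element at index n without preserving the order of the remaining elements:
     * the last element is moved into the vacated slot. Not safe to call when count() == 0.
     */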
158 | void removeShuffle(int n) { |
159 | SkASSERT(n < fCount); |
160 | int newCount = fCount - 1; |
161 | fCount = newCount; |
162 | fItemArray[n].~T(); |
163 | if (n != newCount) { |
164 | this->move(n, newCount); |
165 | } |
166 | } |
167 | |
168 | /** |
169 | * Number of elements in the array. |
170 | */ |
171 | int count() const { return fCount; } |
172 | |
173 | /** |
174 | * Is the array empty. |
175 | */ |
176 | bool empty() const { return !fCount; } |
177 | |
178 | /** |
179 | * Adds 1 new default-initialized T value and returns it by reference. Note |
180 | * the reference only remains valid until the next call that adds or removes |
181 | * elements. |
182 | */ |
183 | T& push_back() { |
184 | void* newT = this->push_back_raw(1); |
185 | return *new (newT) T; |
186 | } |
187 | |
188 | /** |
189 | * Version of above that uses a copy constructor to initialize the new item |
190 | */ |
191 | T& push_back(const T& t) { |
192 | void* newT = this->push_back_raw(1); |
193 | return *new (newT) T(t); |
194 | } |
195 | |
196 | /** |
197 | * Version of above that uses a move constructor to initialize the new item |
198 | */ |
199 | T& push_back(T&& t) { |
200 | void* newT = this->push_back_raw(1); |
201 | return *new (newT) T(std::move(t)); |
202 | } |
203 | |
204 | /** |
205 | * Construct a new T at the back of this array. |
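     *
     * A minimal sketch (Widget is a hypothetical type with an (int, float) constructor):
     *
     *     SkTArray<Widget> widgets;
     *     widgets.emplace_back(3, 1.5f);   // constructs the Widget in place, forwarding the args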
206 | */ |
207 | template<class... Args> T& emplace_back(Args&&... args) { |
208 | void* newT = this->push_back_raw(1); |
209 | return *new (newT) T(std::forward<Args>(args)...); |
210 | } |
211 | |
212 | /** |
213 | * Allocates n more default-initialized T values, and returns the address of |
214 | * the start of that new range. Note: this address is only valid until the |
215 | * next API call made on the array that might add or remove elements. |
216 | */ |
217 | T* push_back_n(int n) { |
218 | SkASSERT(n >= 0); |
219 | void* newTs = this->push_back_raw(n); |
220 | for (int i = 0; i < n; ++i) { |
221 | new (static_cast<char*>(newTs) + i * sizeof(T)) T; |
222 | } |
223 | return static_cast<T*>(newTs); |
224 | } |
225 | |
226 | /** |
227 | * Version of above that uses a copy constructor to initialize all n items |
228 | * to the same T. |
229 | */ |
230 | T* push_back_n(int n, const T& t) { |
231 | SkASSERT(n >= 0); |
232 | void* newTs = this->push_back_raw(n); |
233 | for (int i = 0; i < n; ++i) { |
234 | new (static_cast<char*>(newTs) + i * sizeof(T)) T(t); |
235 | } |
236 | return static_cast<T*>(newTs); |
237 | } |
238 | |
239 | /** |
240 | * Version of above that uses a copy constructor to initialize the n items |
241 | * to separate T values. |
242 | */ |
243 | T* push_back_n(int n, const T t[]) { |
244 | SkASSERT(n >= 0); |
245 | this->checkRealloc(n); |
246 | for (int i = 0; i < n; ++i) { |
247 | new (fItemArray + fCount + i) T(t[i]); |
248 | } |
249 | fCount += n; |
250 | return fItemArray + fCount - n; |
251 | } |
252 | |
253 | /** |
254 | * Version of above that uses the move constructor to set n items. |
255 | */ |
256 | T* move_back_n(int n, T* t) { |
257 | SkASSERT(n >= 0); |
258 | this->checkRealloc(n); |
259 | for (int i = 0; i < n; ++i) { |
260 | new (fItemArray + fCount + i) T(std::move(t[i])); |
261 | } |
262 | fCount += n; |
263 | return fItemArray + fCount - n; |
264 | } |
265 | |
266 | /** |
267 | * Removes the last element. Not safe to call when count() == 0. |
268 | */ |
269 | void pop_back() { |
270 | SkASSERT(fCount > 0); |
271 | --fCount; |
272 | fItemArray[fCount].~T(); |
273 | this->checkRealloc(0); |
274 | } |
275 | |
276 | /** |
277 | * Removes the last n elements. Not safe to call when count() < n. |
278 | */ |
279 | void pop_back_n(int n) { |
280 | SkASSERT(n >= 0); |
281 | SkASSERT(fCount >= n); |
282 | fCount -= n; |
283 | for (int i = 0; i < n; ++i) { |
284 | fItemArray[fCount + i].~T(); |
285 | } |
286 | this->checkRealloc(0); |
287 | } |
288 | |
289 | /** |
290 | * Pushes or pops from the back to resize. Pushes will be default |
291 | * initialized. |
292 | */ |
293 | void resize_back(int newCount) { |
294 | SkASSERT(newCount >= 0); |
295 | |
296 | if (newCount > fCount) { |
297 | this->push_back_n(newCount - fCount); |
298 | } else if (newCount < fCount) { |
299 | this->pop_back_n(fCount - newCount); |
300 | } |
301 | } |
302 | |
303 | /** Swaps the contents of this array with that array. Does a pointer swap if possible, |
304 | otherwise copies the T values. */ |
305 | void swap(SkTArray& that) { |
306 | using std::swap; |
307 | if (this == &that) { |
308 | return; |
309 | } |
310 | if (fOwnMemory && that.fOwnMemory) { |
311 | swap(fItemArray, that.fItemArray); |
312 | swap(fCount, that.fCount); |
313 | swap(fAllocCount, that.fAllocCount); |
314 | } else { |
315 | // This could be more optimal... |
316 | SkTArray copy(std::move(that)); |
317 | that = std::move(*this); |
318 | *this = std::move(copy); |
319 | } |
320 | } |
321 | |
322 | T* begin() { |
323 | return fItemArray; |
324 | } |
325 | const T* begin() const { |
326 | return fItemArray; |
327 | } |
328 | T* end() { |
329 | return fItemArray ? fItemArray + fCount : nullptr; |
330 | } |
331 | const T* end() const { |
332 | return fItemArray ? fItemArray + fCount : nullptr; |
333 | } |
334 | T* data() { return fItemArray; } |
335 | const T* data() const { return fItemArray; } |
336 | size_t size() const { return (size_t)fCount; } |
337 | void resize(size_t count) { this->resize_back((int)count); } |
338 | |
339 | /** |
340 | * Get the i^th element. |
341 | */ |
342 | T& operator[] (int i) { |
343 | SkASSERT(i < fCount); |
344 | SkASSERT(i >= 0); |
345 | return fItemArray[i]; |
346 | } |
347 | |
348 | const T& operator[] (int i) const { |
349 | SkASSERT(i < fCount); |
350 | SkASSERT(i >= 0); |
351 | return fItemArray[i]; |
352 | } |
353 | |
354 | /** |
355 | * equivalent to operator[](0) |
356 | */ |
357 | T& front() { SkASSERT(fCount > 0); return fItemArray[0];} |
358 | |
359 | const T& front() const { SkASSERT(fCount > 0); return fItemArray[0];} |
360 | |
361 | /** |
362 | * equivalent to operator[](count() - 1) |
363 | */ |
    T& back() { SkASSERT(fCount > 0); return fItemArray[fCount - 1];}
365 | |
366 | const T& back() const { SkASSERT(fCount > 0); return fItemArray[fCount - 1];} |
367 | |
368 | /** |
369 | * equivalent to operator[](count()-1-i) |
370 | */ |
371 | T& fromBack(int i) { |
372 | SkASSERT(i >= 0); |
373 | SkASSERT(i < fCount); |
374 | return fItemArray[fCount - i - 1]; |
375 | } |
376 | |
377 | const T& fromBack(int i) const { |
378 | SkASSERT(i >= 0); |
379 | SkASSERT(i < fCount); |
380 | return fItemArray[fCount - i - 1]; |
381 | } |
382 | |
383 | bool operator==(const SkTArray<T, MEM_MOVE>& right) const { |
384 | int leftCount = this->count(); |
385 | if (leftCount != right.count()) { |
386 | return false; |
387 | } |
388 | for (int index = 0; index < leftCount; ++index) { |
389 | if (fItemArray[index] != right.fItemArray[index]) { |
390 | return false; |
391 | } |
392 | } |
393 | return true; |
394 | } |
395 | |
396 | bool operator!=(const SkTArray<T, MEM_MOVE>& right) const { |
397 | return !(*this == right); |
398 | } |
399 | |
400 | inline int allocCntForTest() const; |
401 | |
402 | protected: |
403 | /** |
404 | * Creates an empty array that will use the passed storage block until it |
405 | * is insufficiently large to hold the entire array. |
406 | */ |
407 | template <int N> |
408 | SkTArray(SkAlignedSTStorage<N,T>* storage) { |
409 | this->initWithPreallocatedStorage(0, storage->get(), N); |
410 | } |
411 | |
412 | /** |
     * Copy another array, using the preallocated storage if N >= array.count().
     * Otherwise the contents are heap allocated and the preallocated storage is
     * left unused.
416 | */ |
417 | template <int N> |
418 | SkTArray(const SkTArray& array, SkAlignedSTStorage<N,T>* storage) { |
419 | this->initWithPreallocatedStorage(array.fCount, storage->get(), N); |
420 | this->copy(array.fItemArray); |
421 | } |
422 | |
423 | /** |
     * Move another array, using the preallocated storage if N >= array.count().
     * Otherwise the contents are heap allocated and the preallocated storage is
     * left unused.
427 | */ |
428 | template <int N> |
429 | SkTArray(SkTArray&& array, SkAlignedSTStorage<N,T>* storage) { |
430 | this->initWithPreallocatedStorage(array.fCount, storage->get(), N); |
431 | array.move(fItemArray); |
432 | array.fCount = 0; |
433 | } |
434 | |
435 | /** |
     * Copy a C array, using the preallocated storage if N >= count.
     * Otherwise the contents are heap allocated and the preallocated storage is
     * left unused.
439 | */ |
440 | template <int N> |
441 | SkTArray(const T* array, int count, SkAlignedSTStorage<N,T>* storage) { |
442 | this->initWithPreallocatedStorage(count, storage->get(), N); |
443 | this->copy(array); |
444 | } |
445 | |
446 | private: |
447 | void init(int count = 0, int reserveCount = 0) { |
448 | SkASSERT(count >= 0); |
449 | SkASSERT(reserveCount >= 0); |
450 | fCount = count; |
451 | if (!count && !reserveCount) { |
452 | fAllocCount = 0; |
453 | fItemArray = nullptr; |
454 | fOwnMemory = true; |
455 | fReserved = false; |
456 | } else { |
457 | fAllocCount = std::max(count, std::max(kMinHeapAllocCount, reserveCount)); |
458 | fItemArray = (T*)sk_malloc_throw(fAllocCount, sizeof(T)); |
459 | fOwnMemory = true; |
460 | fReserved = reserveCount > 0; |
461 | } |
462 | } |
463 | |
464 | void initWithPreallocatedStorage(int count, void* preallocStorage, int preallocCount) { |
465 | SkASSERT(count >= 0); |
466 | SkASSERT(preallocCount > 0); |
467 | SkASSERT(preallocStorage); |
468 | fCount = count; |
469 | fItemArray = nullptr; |
470 | fReserved = false; |
471 | if (count > preallocCount) { |
472 | fAllocCount = std::max(count, kMinHeapAllocCount); |
473 | fItemArray = (T*)sk_malloc_throw(fAllocCount, sizeof(T)); |
474 | fOwnMemory = true; |
475 | } else { |
476 | fAllocCount = preallocCount; |
477 | fItemArray = (T*)preallocStorage; |
478 | fOwnMemory = false; |
479 | } |
480 | } |
481 | |
482 | /** In the following move and copy methods, 'dst' is assumed to be uninitialized raw storage. |
483 | * In the following move methods, 'src' is destroyed leaving behind uninitialized raw storage. |
484 | */ |
485 | void copy(const T* src) { |
        // Some types may be trivially copyable, in which case we *could* use memcpy; but
487 | // MEM_MOVE == true implies that the type is trivially movable, and not necessarily |
488 | // trivially copyable (think sk_sp<>). So short of adding another template arg, we |
489 | // must be conservative and use copy construction. |
490 | for (int i = 0; i < fCount; ++i) { |
491 | new (fItemArray + i) T(src[i]); |
492 | } |
493 | } |
494 | |
495 | template <bool E = MEM_MOVE> SK_WHEN(E, void) move(int dst, int src) { |
496 | memcpy(&fItemArray[dst], &fItemArray[src], sizeof(T)); |
497 | } |
498 | template <bool E = MEM_MOVE> SK_WHEN(E, void) move(void* dst) { |
499 | sk_careful_memcpy(dst, fItemArray, fCount * sizeof(T)); |
500 | } |
501 | |
502 | template <bool E = MEM_MOVE> SK_WHEN(!E, void) move(int dst, int src) { |
503 | new (&fItemArray[dst]) T(std::move(fItemArray[src])); |
504 | fItemArray[src].~T(); |
505 | } |
506 | template <bool E = MEM_MOVE> SK_WHEN(!E, void) move(void* dst) { |
507 | for (int i = 0; i < fCount; ++i) { |
508 | new (static_cast<char*>(dst) + sizeof(T) * i) T(std::move(fItemArray[i])); |
509 | fItemArray[i].~T(); |
510 | } |
511 | } |
512 | |
513 | static constexpr int kMinHeapAllocCount = 8; |
514 | |
515 | // Helper function that makes space for n objects, adjusts the count, but does not initialize |
516 | // the new objects. |
517 | void* push_back_raw(int n) { |
518 | this->checkRealloc(n); |
519 | void* ptr = fItemArray + fCount; |
520 | fCount += n; |
521 | return ptr; |
522 | } |
523 | |
524 | void checkRealloc(int delta) { |
525 | SkASSERT(fCount >= 0); |
526 | SkASSERT(fAllocCount >= 0); |
527 | SkASSERT(-delta <= fCount); |
528 | |
529 | // Move into 64bit math temporarily, to avoid local overflows |
530 | int64_t newCount = fCount + delta; |
531 | |
532 | // We allow fAllocCount to be in the range [newCount, 3*newCount]. We also never shrink |
533 | // when we're currently using preallocated memory, would allocate less than |
534 | // kMinHeapAllocCount, or a reserve count was specified that has yet to be exceeded. |
535 | bool mustGrow = newCount > fAllocCount; |
536 | bool shouldShrink = fAllocCount > 3 * newCount && fOwnMemory && !fReserved; |
537 | if (!mustGrow && !shouldShrink) { |
538 | return; |
539 | } |
540 | |
541 | |
542 | // Whether we're growing or shrinking, we leave at least 50% extra space for future growth. |
543 | int64_t newAllocCount = newCount + ((newCount + 1) >> 1); |
544 | // Align the new allocation count to kMinHeapAllocCount. |
        static_assert(SkIsPow2(kMinHeapAllocCount), "min alloc count not power of two.");
546 | newAllocCount = (newAllocCount + (kMinHeapAllocCount - 1)) & ~(kMinHeapAllocCount - 1); |
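        // e.g. growing to newCount == 10 gives 10 + (11 >> 1) == 15, which the masking above
        // rounds up to 16 (the next multiple of kMinHeapAllocCount == 8).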
547 | // At small sizes the old and new alloc count can both be kMinHeapAllocCount. |
548 | if (newAllocCount == fAllocCount) { |
549 | return; |
550 | } |
551 | |
552 | fAllocCount = Sk64_pin_to_s32(newAllocCount); |
553 | SkASSERT(fAllocCount >= newCount); |
554 | T* newItemArray = (T*)sk_malloc_throw(fAllocCount, sizeof(T)); |
555 | this->move(newItemArray); |
        if (fOwnMemory) {
            sk_free(fItemArray);
        }
560 | fItemArray = newItemArray; |
561 | fOwnMemory = true; |
562 | fReserved = false; |
563 | } |
564 | |
565 | T* fItemArray; |
566 | int fCount; |
567 | int fAllocCount; |
568 | bool fOwnMemory : 1; |
569 | bool fReserved : 1; |
570 | }; |
571 | |
572 | template <typename T, bool M> static inline void swap(SkTArray<T, M>& a, SkTArray<T, M>& b) { |
573 | a.swap(b); |
574 | } |
575 | |
576 | template<typename T, bool MEM_MOVE> constexpr int SkTArray<T, MEM_MOVE>::kMinHeapAllocCount; |
577 | |
578 | /** |
579 | * Subclass of SkTArray that contains a preallocated memory block for the array. |
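 *
 * A minimal usage sketch (SkRect chosen purely for illustration):
 *
 *     SkSTArray<4, SkRect> rects;                // room for 4 rects in the inline block
 *     rects.push_back(SkRect::MakeWH(10, 10));   // no heap allocation yet
 *     // A fifth push_back would spill the contents to a heap allocation.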
580 | */ |
template <int N, typename T, bool MEM_MOVE = false>
582 | class SkSTArray : public SkTArray<T, MEM_MOVE> { |
583 | private: |
584 | typedef SkTArray<T, MEM_MOVE> INHERITED; |
585 | |
586 | public: |
587 | SkSTArray() : INHERITED(&fStorage) { |
588 | } |
589 | |
590 | SkSTArray(const SkSTArray& array) |
591 | : INHERITED(array, &fStorage) { |
592 | } |
593 | |
594 | SkSTArray(SkSTArray&& array) |
595 | : INHERITED(std::move(array), &fStorage) { |
596 | } |
597 | |
598 | explicit SkSTArray(const INHERITED& array) |
599 | : INHERITED(array, &fStorage) { |
600 | } |
601 | |
602 | explicit SkSTArray(INHERITED&& array) |
603 | : INHERITED(std::move(array), &fStorage) { |
604 | } |
605 | |
606 | explicit SkSTArray(int reserveCount) |
607 | : INHERITED(reserveCount) { |
608 | } |
609 | |
610 | SkSTArray(const T* array, int count) |
611 | : INHERITED(array, count, &fStorage) { |
612 | } |
613 | |
614 | SkSTArray& operator=(const SkSTArray& array) { |
615 | INHERITED::operator=(array); |
616 | return *this; |
617 | } |
618 | |
619 | SkSTArray& operator=(SkSTArray&& array) { |
620 | INHERITED::operator=(std::move(array)); |
621 | return *this; |
622 | } |
623 | |
624 | SkSTArray& operator=(const INHERITED& array) { |
625 | INHERITED::operator=(array); |
626 | return *this; |
627 | } |
628 | |
629 | SkSTArray& operator=(INHERITED&& array) { |
630 | INHERITED::operator=(std::move(array)); |
631 | return *this; |
632 | } |
633 | |
634 | private: |
635 | SkAlignedSTStorage<N,T> fStorage; |
636 | }; |
637 | |
638 | #endif |
639 | |