// basisu_containers.h
#pragma once
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <algorithm>
#include <utility>
#include <type_traits>

#if defined(__linux__) && !defined(ANDROID)
// Only for malloc_usable_size() in basisu_containers_impl.h
#include <malloc.h>
#define HAS_MALLOC_USABLE_SIZE 1
#endif

// Set to 1 to always check vector operator[], front(), and back() even in release.
#define BASISU_VECTOR_FORCE_CHECKING 0

// If 1, the vector container will not query the CRT to get the size of resized memory blocks.
#define BASISU_VECTOR_DETERMINISTIC 1

#ifdef _MSC_VER
#define BASISU_FORCE_INLINE __forceinline
#else
#define BASISU_FORCE_INLINE inline
#endif

namespace basisu
{
    enum { cInvalidIndex = -1 };

    namespace helpers
    {
        inline bool is_power_of_2(uint32_t x) { return x && ((x & (x - 1U)) == 0U); }
        inline bool is_power_of_2(uint64_t x) { return x && ((x & (x - 1U)) == 0U); }
        template<class T> const T& minimum(const T& a, const T& b) { return (b < a) ? b : a; }
        template<class T> const T& maximum(const T& a, const T& b) { return (a < b) ? b : a; }

        inline uint32_t floor_log2i(uint32_t v)
        {
            uint32_t l = 0;
            while (v > 1U)
            {
                v >>= 1;
                l++;
            }
            return l;
        }

        inline uint32_t next_pow2(uint32_t val)
        {
            val--;
            val |= val >> 16;
            val |= val >> 8;
            val |= val >> 4;
            val |= val >> 2;
            val |= val >> 1;
            return val + 1;
        }

        inline uint64_t next_pow2(uint64_t val)
        {
            val--;
            val |= val >> 32;
            val |= val >> 16;
            val |= val >> 8;
            val |= val >> 4;
            val |= val >> 2;
            val |= val >> 1;
            return val + 1;
        }
    } // namespace helpers
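
    // Illustrative examples (not part of the library) of the helpers above:
    //   helpers::is_power_of_2(64U)  -> true
    //   helpers::floor_log2i(100U)   -> 6   (2^6 = 64 <= 100 < 128)
    //   helpers::next_pow2(100U)     -> 128 (inputs that are already powers of 2 are returned unchanged)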

    template <typename T>
    inline T* construct(T* p)
    {
        return new (static_cast<void*>(p)) T;
    }

    template <typename T, typename U>
    inline T* construct(T* p, const U& init)
    {
        return new (static_cast<void*>(p)) T(init);
    }

    template <typename T>
    inline void construct_array(T* p, size_t n)
    {
        T* q = p + n;
        for (; p != q; ++p)
            new (static_cast<void*>(p)) T;
    }

    template <typename T, typename U>
    inline void construct_array(T* p, size_t n, const U& init)
    {
        T* q = p + n;
        for (; p != q; ++p)
            new (static_cast<void*>(p)) T(init);
    }

    template <typename T>
    inline void destruct(T* p)
    {
        (void)p;
        p->~T();
    }

    template <typename T> inline void destruct_array(T* p, size_t n)
    {
        T* q = p + n;
        for (; p != q; ++p)
            p->~T();
    }
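
    // Illustrative sketch (not part of the library): these placement-new helpers let
    // callers build and tear down objects in raw storage, e.g. a malloc()'d block:
    //
    //   std::string* p = (std::string*)malloc(4 * sizeof(std::string));
    //   basisu::construct_array(p, 4, std::string("x")); // construct 4 copies in-place
    //   basisu::destruct_array(p, 4);                    // run the destructors manually
    //   free(p);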

    template<typename T> struct int_traits { enum { cMin = INT32_MIN, cMax = INT32_MAX, cSigned = true }; };

    template<> struct int_traits<int8_t> { enum { cMin = INT8_MIN, cMax = INT8_MAX, cSigned = true }; };
    template<> struct int_traits<int16_t> { enum { cMin = INT16_MIN, cMax = INT16_MAX, cSigned = true }; };
    template<> struct int_traits<int32_t> { enum { cMin = INT32_MIN, cMax = INT32_MAX, cSigned = true }; };

    template<> struct int_traits<uint8_t> { enum { cMin = 0, cMax = UINT8_MAX, cSigned = false }; };
    template<> struct int_traits<uint16_t> { enum { cMin = 0, cMax = UINT16_MAX, cSigned = false }; };
    template<> struct int_traits<uint32_t> { enum { cMin = 0, cMax = UINT32_MAX, cSigned = false }; };

    template<typename T>
    struct scalar_type
    {
        enum { cFlag = false };
        static inline void construct(T* p) { basisu::construct(p); }
        static inline void construct(T* p, const T& init) { basisu::construct(p, init); }
        static inline void construct_array(T* p, size_t n) { basisu::construct_array(p, n); }
        static inline void destruct(T* p) { basisu::destruct(p); }
        static inline void destruct_array(T* p, size_t n) { basisu::destruct_array(p, n); }
    };

    template<typename T> struct scalar_type<T*>
    {
        enum { cFlag = true };
        static inline void construct(T** p) { memset(p, 0, sizeof(T*)); }
        static inline void construct(T** p, T* init) { *p = init; }
        static inline void construct_array(T** p, size_t n) { memset(p, 0, sizeof(T*) * n); }
        static inline void destruct(T** p) { (void)p; }
        static inline void destruct_array(T** p, size_t n) { (void)p; (void)n; }
    };

#define BASISU_DEFINE_BUILT_IN_TYPE(X) \
    template<> struct scalar_type<X> { \
        enum { cFlag = true }; \
        static inline void construct(X* p) { memset(p, 0, sizeof(X)); } \
        static inline void construct(X* p, const X& init) { memcpy(p, &init, sizeof(X)); } \
        static inline void construct_array(X* p, size_t n) { memset(p, 0, sizeof(X) * n); } \
        static inline void destruct(X* p) { (void)p; } \
        static inline void destruct_array(X* p, size_t n) { (void)p; (void)n; } };

    BASISU_DEFINE_BUILT_IN_TYPE(bool)
    BASISU_DEFINE_BUILT_IN_TYPE(char)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned char)
    BASISU_DEFINE_BUILT_IN_TYPE(short)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned short)
    BASISU_DEFINE_BUILT_IN_TYPE(int)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned int)
    BASISU_DEFINE_BUILT_IN_TYPE(long)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned long)
#ifdef __GNUC__
    BASISU_DEFINE_BUILT_IN_TYPE(long long)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned long long)
#else
    BASISU_DEFINE_BUILT_IN_TYPE(__int64)
    BASISU_DEFINE_BUILT_IN_TYPE(unsigned __int64)
#endif
    BASISU_DEFINE_BUILT_IN_TYPE(float)
    BASISU_DEFINE_BUILT_IN_TYPE(double)
    BASISU_DEFINE_BUILT_IN_TYPE(long double)

#undef BASISU_DEFINE_BUILT_IN_TYPE

    template<typename T>
    struct bitwise_movable { enum { cFlag = false }; };

#define BASISU_DEFINE_BITWISE_MOVABLE(Q) template<> struct bitwise_movable<Q> { enum { cFlag = true }; };

    template<typename T>
    struct bitwise_copyable { enum { cFlag = false }; };

#define BASISU_DEFINE_BITWISE_COPYABLE(Q) template<> struct bitwise_copyable<Q> { enum { cFlag = true }; };

#define BASISU_IS_POD(T) __is_pod(T)

#define BASISU_IS_SCALAR_TYPE(T) (scalar_type<T>::cFlag)

#if defined(__GNUC__) && __GNUC__ < 5
    #define BASISU_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__)
#else
    #define BASISU_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value
#endif

// TODO: clean this up
#define BASISU_IS_BITWISE_COPYABLE(T) (BASISU_IS_SCALAR_TYPE(T) || BASISU_IS_POD(T) || BASISU_IS_TRIVIALLY_COPYABLE(T) || (bitwise_copyable<T>::cFlag))

#define BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T) (BASISU_IS_BITWISE_COPYABLE(T) || (bitwise_movable<T>::cFlag))

#define BASISU_HAS_DESTRUCTOR(T) ((!scalar_type<T>::cFlag) && (!__is_pod(T)))

    typedef char(&yes_t)[1];
    typedef char(&no_t)[2];

    template <class U> yes_t class_test(int U::*);
    template <class U> no_t class_test(...);

    template <class T> struct is_class
    {
        enum { value = (sizeof(class_test<T>(0)) == sizeof(yes_t)) };
    };

    template <typename T> struct is_pointer
    {
        enum { value = false };
    };

    template <typename T> struct is_pointer<T*>
    {
        enum { value = true };
    };

    struct empty_type { };

    BASISU_DEFINE_BITWISE_COPYABLE(empty_type);
    BASISU_DEFINE_BITWISE_MOVABLE(empty_type);

    template<typename T> struct rel_ops
    {
        friend bool operator!=(const T& x, const T& y) { return (!(x == y)); }
        friend bool operator> (const T& x, const T& y) { return (y < x); }
        friend bool operator<=(const T& x, const T& y) { return (!(y < x)); }
        friend bool operator>=(const T& x, const T& y) { return (!(x < y)); }
    };
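
    // Illustrative sketch (not part of the library): a type only needs to define
    // operator== and operator< to get the remaining comparisons via rel_ops:
    //
    //   struct my_key : basisu::rel_ops<my_key>
    //   {
    //       int v;
    //       bool operator==(const my_key& rhs) const { return v == rhs.v; }
    //       bool operator< (const my_key& rhs) const { return v < rhs.v; }
    //       // !=, >, <=, >= are now synthesized from the two operators above.
    //   };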

    struct elemental_vector
    {
        void* m_p;
        uint32_t m_size;
        uint32_t m_capacity;

        typedef void (*object_mover)(void* pDst, void* pSrc, uint32_t num);

        bool increase_capacity(uint32_t min_new_capacity, bool grow_hint, uint32_t element_size, object_mover pRelocate, bool nofail);
    };

    template<typename T>
    class vector : public rel_ops< vector<T> >
    {
    public:
        typedef T* iterator;
        typedef const T* const_iterator;
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef T* pointer;
        typedef const T* const_pointer;

        inline vector() :
            m_p(NULL),
            m_size(0),
            m_capacity(0)
        {
        }

        inline vector(uint32_t n, const T& init) :
            m_p(NULL),
            m_size(0),
            m_capacity(0)
        {
            increase_capacity(n, false);
            construct_array(m_p, n, init);
            m_size = n;
        }

        inline vector(const vector& other) :
            m_p(NULL),
            m_size(0),
            m_capacity(0)
        {
            increase_capacity(other.m_size, false);

            m_size = other.m_size;

            if (BASISU_IS_BITWISE_COPYABLE(T))
            {
                if ((m_p) && (other.m_p))
                    memcpy(m_p, other.m_p, m_size * sizeof(T));
            }
            else
            {
                T* pDst = m_p;
                const T* pSrc = other.m_p;
                for (uint32_t i = m_size; i > 0; i--)
                    construct(pDst++, *pSrc++);
            }
        }

        inline explicit vector(size_t size) :
            m_p(NULL),
            m_size(0),
            m_capacity(0)
        {
            resize(size);
        }

        inline ~vector()
        {
            if (m_p)
            {
                scalar_type<T>::destruct_array(m_p, m_size);
                free(m_p);
            }
        }

        inline vector& operator= (const vector& other)
        {
            if (this == &other)
                return *this;

            if (m_capacity >= other.m_size)
                resize(0);
            else
            {
                clear();
                increase_capacity(other.m_size, false);
            }

            if (BASISU_IS_BITWISE_COPYABLE(T))
            {
                if ((m_p) && (other.m_p))
                    memcpy(m_p, other.m_p, other.m_size * sizeof(T));
            }
            else
            {
                T* pDst = m_p;
                const T* pSrc = other.m_p;
                for (uint32_t i = other.m_size; i > 0; i--)
                    construct(pDst++, *pSrc++);
            }

            m_size = other.m_size;

            return *this;
        }

        BASISU_FORCE_INLINE const T* begin() const { return m_p; }
        BASISU_FORCE_INLINE T* begin() { return m_p; }

        BASISU_FORCE_INLINE const T* end() const { return m_p + m_size; }
        BASISU_FORCE_INLINE T* end() { return m_p + m_size; }

        BASISU_FORCE_INLINE bool empty() const { return !m_size; }
        BASISU_FORCE_INLINE uint32_t size() const { return m_size; }
        BASISU_FORCE_INLINE uint32_t size_in_bytes() const { return m_size * sizeof(T); }
        BASISU_FORCE_INLINE uint32_t capacity() const { return m_capacity; }

        // operator[] asserts on out-of-range indices in debug builds. Unless BASISU_VECTOR_FORCE_CHECKING
        // is set to 1, release builds perform no range checking on this method.
#if !BASISU_VECTOR_FORCE_CHECKING
        BASISU_FORCE_INLINE const T& operator[] (size_t i) const { assert(i < m_size); return m_p[i]; }
        BASISU_FORCE_INLINE T& operator[] (size_t i) { assert(i < m_size); return m_p[i]; }
#else
        BASISU_FORCE_INLINE const T& operator[] (size_t i) const
        {
            if (i >= m_size)
            {
                fprintf(stderr, "operator[] invalid index: %u, max entries %u, type size %u\n", (uint32_t)i, m_size, (uint32_t)sizeof(T));
                abort();
            }
            return m_p[i];
        }
        BASISU_FORCE_INLINE T& operator[] (size_t i)
        {
            if (i >= m_size)
            {
                fprintf(stderr, "operator[] invalid index: %u, max entries %u, type size %u\n", (uint32_t)i, m_size, (uint32_t)sizeof(T));
                abort();
            }
            return m_p[i];
        }
#endif

        // at() always includes range checking, even in final builds, unlike operator[].
        // The first element is returned if the index is out of range (so the vector must not be empty).
        BASISU_FORCE_INLINE const T& at(size_t i) const { assert(i < m_size); return (i >= m_size) ? m_p[0] : m_p[i]; }
        BASISU_FORCE_INLINE T& at(size_t i) { assert(i < m_size); return (i >= m_size) ? m_p[0] : m_p[i]; }

#if !BASISU_VECTOR_FORCE_CHECKING
        BASISU_FORCE_INLINE const T& front() const { assert(m_size); return m_p[0]; }
        BASISU_FORCE_INLINE T& front() { assert(m_size); return m_p[0]; }

        BASISU_FORCE_INLINE const T& back() const { assert(m_size); return m_p[m_size - 1]; }
        BASISU_FORCE_INLINE T& back() { assert(m_size); return m_p[m_size - 1]; }
#else
        BASISU_FORCE_INLINE const T& front() const
        {
            if (!m_size)
            {
                fprintf(stderr, "front: vector is empty, type size %u\n", (uint32_t)sizeof(T));
                abort();
            }
            return m_p[0];
        }
        BASISU_FORCE_INLINE T& front()
        {
            if (!m_size)
            {
                fprintf(stderr, "front: vector is empty, type size %u\n", (uint32_t)sizeof(T));
                abort();
            }
            return m_p[0];
        }

        BASISU_FORCE_INLINE const T& back() const
        {
            if (!m_size)
            {
                fprintf(stderr, "back: vector is empty, type size %u\n", (uint32_t)sizeof(T));
                abort();
            }
            return m_p[m_size - 1];
        }
        BASISU_FORCE_INLINE T& back()
        {
            if (!m_size)
            {
                fprintf(stderr, "back: vector is empty, type size %u\n", (uint32_t)sizeof(T));
                abort();
            }
            return m_p[m_size - 1];
        }
#endif

        BASISU_FORCE_INLINE const T* get_ptr() const { return m_p; }
        BASISU_FORCE_INLINE T* get_ptr() { return m_p; }

        BASISU_FORCE_INLINE const T* data() const { return m_p; }
        BASISU_FORCE_INLINE T* data() { return m_p; }

        // clear() sets the container to empty, then frees the allocated block.
        inline void clear()
        {
            if (m_p)
            {
                scalar_type<T>::destruct_array(m_p, m_size);
                free(m_p);
                m_p = NULL;
                m_size = 0;
                m_capacity = 0;
            }
        }

        inline void clear_no_destruction()
        {
            if (m_p)
            {
                free(m_p);
                m_p = NULL;
                m_size = 0;
                m_capacity = 0;
            }
        }

        inline void reserve(size_t new_capacity_size_t)
        {
            if (new_capacity_size_t > UINT32_MAX)
            {
                assert(0);
                return;
            }

            uint32_t new_capacity = (uint32_t)new_capacity_size_t;

            if (new_capacity > m_capacity)
                increase_capacity(new_capacity, false);
            else if (new_capacity < m_capacity)
            {
                // Must work around the lack of a "decrease_capacity()" method.
                // This case is rare enough in practice that it's probably not worth implementing an optimized in-place resize.
                vector tmp;
                tmp.increase_capacity(helpers::maximum(m_size, new_capacity), false);
                tmp = *this;
                swap(tmp);
            }
        }

        inline bool try_reserve(size_t new_capacity_size_t)
        {
            if (new_capacity_size_t > UINT32_MAX)
            {
                assert(0);
                return false;
            }

            uint32_t new_capacity = (uint32_t)new_capacity_size_t;

            if (new_capacity > m_capacity)
            {
                if (!increase_capacity(new_capacity, false))
                    return false;
            }
            else if (new_capacity < m_capacity)
            {
                // Must work around the lack of a "decrease_capacity()" method.
                // This case is rare enough in practice that it's probably not worth implementing an optimized in-place resize.
                vector tmp;
                tmp.increase_capacity(helpers::maximum(m_size, new_capacity), false);
                tmp = *this;
                swap(tmp);
            }

            return true;
        }

        // resize(0) sets the container to empty, but does not free the allocated block.
        inline void resize(size_t new_size_size_t, bool grow_hint = false)
        {
            if (new_size_size_t > UINT32_MAX)
            {
                assert(0);
                return;
            }

            uint32_t new_size = (uint32_t)new_size_size_t;

            if (m_size != new_size)
            {
                if (new_size < m_size)
                    scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size);
                else
                {
                    if (new_size > m_capacity)
                        increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint);

                    scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
                }

                m_size = new_size;
            }
        }

        inline bool try_resize(size_t new_size_size_t, bool grow_hint = false)
        {
            if (new_size_size_t > UINT32_MAX)
            {
                assert(0);
                return false;
            }

            uint32_t new_size = (uint32_t)new_size_size_t;

            if (m_size != new_size)
            {
                if (new_size < m_size)
                    scalar_type<T>::destruct_array(m_p + new_size, m_size - new_size);
                else
                {
                    if (new_size > m_capacity)
                    {
                        if (!increase_capacity(new_size, (new_size == (m_size + 1)) || grow_hint, true))
                            return false;
                    }

                    scalar_type<T>::construct_array(m_p + m_size, new_size - m_size);
                }

                m_size = new_size;
            }

            return true;
        }

        // If size >= capacity/2, reset() sets the container's size to 0 but doesn't free the allocated block (because the container may be similarly loaded in the future).
        // Otherwise it blows away the allocated block. See http://www.codercorner.com/blog/?p=494
        inline void reset()
        {
            if (m_size >= (m_capacity >> 1))
                resize(0);
            else
                clear();
        }
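
        // Illustrative sketch (not part of the library) of the emptying semantics above:
        //
        //   basisu::vector<int> v;
        //   v.resize(1000);  // size 1000, capacity >= 1000
        //   v.reset();       // size was >= capacity/2, so the block is kept for reuse
        //   v.resize(10);    // size 10 again, no reallocation needed
        //   v.clear();       // size 0, block freed (capacity 0)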

        inline T* enlarge(uint32_t i)
        {
            uint32_t cur_size = m_size;
            resize(cur_size + i, true);
            return get_ptr() + cur_size;
        }

        inline T* try_enlarge(uint32_t i)
        {
            uint32_t cur_size = m_size;
            if (!try_resize(cur_size + i, true))
                return NULL;
            return get_ptr() + cur_size;
        }

        BASISU_FORCE_INLINE void push_back(const T& obj)
        {
            assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));

            if (m_size >= m_capacity)
                increase_capacity(m_size + 1, true);

            scalar_type<T>::construct(m_p + m_size, obj);
            m_size++;
        }

        inline bool try_push_back(const T& obj)
        {
            assert(!m_p || (&obj < m_p) || (&obj >= (m_p + m_size)));

            if (m_size >= m_capacity)
            {
                if (!increase_capacity(m_size + 1, true, true))
                    return false;
            }

            scalar_type<T>::construct(m_p + m_size, obj);
            m_size++;

            return true;
        }

        inline void push_back_value(T obj)
        {
            if (m_size >= m_capacity)
                increase_capacity(m_size + 1, true);

            scalar_type<T>::construct(m_p + m_size, obj);
            m_size++;
        }

        inline void pop_back()
        {
            assert(m_size);

            if (m_size)
            {
                m_size--;
                scalar_type<T>::destruct(&m_p[m_size]);
            }
        }

        inline void insert(uint32_t index, const T* p, uint32_t n)
        {
            assert(index <= m_size);
            if (!n)
                return;

            const uint32_t orig_size = m_size;
            resize(m_size + n, true);

            const uint32_t num_to_move = orig_size - index;

            if (BASISU_IS_BITWISE_COPYABLE(T))
            {
                // This overwrites the destination object bits, but bitwise copyable means we don't need to worry about destruction.
                memmove(m_p + index + n, m_p + index, sizeof(T) * num_to_move);
            }
            else
            {
                const T* pSrc = m_p + orig_size - 1;
                T* pDst = const_cast<T*>(pSrc) + n;

                for (uint32_t i = 0; i < num_to_move; i++)
                {
                    assert((pDst - m_p) < (int)m_size);
                    *pDst-- = *pSrc--;
                }
            }

            T* pDst = m_p + index;

            if (BASISU_IS_BITWISE_COPYABLE(T))
            {
                // This copies in the new bits, overwriting the existing objects, which is OK for copyable types that don't need destruction.
                memcpy(pDst, p, sizeof(T) * n);
            }
            else
            {
                for (uint32_t i = 0; i < n; i++)
                {
                    assert((pDst - m_p) < (int)m_size);
                    *pDst++ = *p++;
                }
            }
        }

        inline void insert(T* p, const T& obj)
        {
            int64_t ofs = p - begin();
            if ((ofs < 0) || (ofs > UINT32_MAX))
            {
                assert(0);
                return;
            }

            insert((uint32_t)ofs, &obj, 1);
        }

        // push_front() isn't going to be very fast - it's only here for usability.
        inline void push_front(const T& obj)
        {
            insert(0, &obj, 1);
        }

        vector& append(const vector& other)
        {
            if (other.m_size)
                insert(m_size, &other[0], other.m_size);
            return *this;
        }

        vector& append(const T* p, uint32_t n)
        {
            if (n)
                insert(m_size, p, n);
            return *this;
        }

        inline void erase(uint32_t start, uint32_t n)
        {
            assert((start + n) <= m_size);
            if ((start + n) > m_size)
                return;

            if (!n)
                return;

            const uint32_t num_to_move = m_size - (start + n);

            T* pDst = m_p + start;

            const T* pSrc = m_p + start + n;

            if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T))
            {
                // This test is overly cautious.
                if ((!BASISU_IS_BITWISE_COPYABLE(T)) || (BASISU_HAS_DESTRUCTOR(T)))
                {
                    // Type has been marked explicitly as bitwise movable, which means we can move them around but they may need to be destructed.
                    // First destroy the erased objects.
                    scalar_type<T>::destruct_array(pDst, n);
                }

                // Copy "down" the objects to preserve, filling in the empty slots.
                memmove(pDst, pSrc, num_to_move * sizeof(T));
            }
            else
            {
                // Type is not bitwise copyable or movable.
                // Move them down one at a time by using the equals operator, and destroying anything that's left over at the end.
                T* pDst_end = pDst + num_to_move;
                while (pDst != pDst_end)
                    *pDst++ = *pSrc++;

                scalar_type<T>::destruct_array(pDst_end, n);
            }

            m_size -= n;
        }

        inline void erase(uint32_t index)
        {
            erase(index, 1);
        }

        inline void erase(T* p)
        {
            assert((p >= m_p) && (p < (m_p + m_size)));
            erase(static_cast<uint32_t>(p - m_p));
        }

        inline void erase(T* pFirst, T* pEnd)
        {
            assert(pFirst <= pEnd);
            assert(pFirst >= begin() && pFirst <= end());
            assert(pEnd >= begin() && pEnd <= end());

            int64_t ofs = pFirst - begin();
            if ((ofs < 0) || (ofs > UINT32_MAX))
            {
                assert(0);
                return;
            }

            int64_t n = pEnd - pFirst;
            if ((n < 0) || (n > UINT32_MAX))
            {
                assert(0);
                return;
            }

            erase((uint32_t)ofs, (uint32_t)n);
        }

        void erase_unordered(uint32_t index)
        {
            assert(index < m_size);

            if ((index + 1) < m_size)
                (*this)[index] = back();

            pop_back();
        }

        inline bool operator== (const vector& rhs) const
        {
            if (m_size != rhs.m_size)
                return false;
            else if (m_size)
            {
                if (scalar_type<T>::cFlag)
                    return memcmp(m_p, rhs.m_p, sizeof(T) * m_size) == 0;
                else
                {
                    const T* pSrc = m_p;
                    const T* pDst = rhs.m_p;
                    for (uint32_t i = m_size; i; i--)
                        if (!(*pSrc++ == *pDst++))
                            return false;
                }
            }

            return true;
        }

        inline bool operator< (const vector& rhs) const
        {
            const uint32_t min_size = helpers::minimum(m_size, rhs.m_size);

            const T* pSrc = m_p;
            const T* pSrc_end = m_p + min_size;
            const T* pDst = rhs.m_p;

            while ((pSrc < pSrc_end) && (*pSrc == *pDst))
            {
                pSrc++;
                pDst++;
            }

            if (pSrc < pSrc_end)
                return *pSrc < *pDst;

            return m_size < rhs.m_size;
        }

        inline void swap(vector& other)
        {
            std::swap(m_p, other.m_p);
            std::swap(m_size, other.m_size);
            std::swap(m_capacity, other.m_capacity);
        }

        inline void sort()
        {
            std::sort(begin(), end());
        }

        inline void unique()
        {
            if (!empty())
            {
                sort();

                resize(std::unique(begin(), end()) - begin());
            }
        }

        inline void reverse()
        {
            uint32_t j = m_size >> 1;
            for (uint32_t i = 0; i < j; i++)
                std::swap(m_p[i], m_p[m_size - 1 - i]);
        }

        inline int find(const T& key) const
        {
            const T* p = m_p;
            const T* p_end = m_p + m_size;

            uint32_t index = 0;

            while (p != p_end)
            {
                if (key == *p)
                    return index;

                p++;
                index++;
            }

            return cInvalidIndex;
        }

        inline int find_sorted(const T& key) const
        {
            if (m_size)
            {
                // Uniform binary search - Knuth Algorithm 6.2.1 U, unrolled twice.
                int i = ((m_size + 1) >> 1) - 1;
                int m = m_size;

                for (; ; )
                {
                    assert(i >= 0 && i < (int)m_size);
                    const T* pKey_i = m_p + i;
                    int cmp = key < *pKey_i;
#if defined(_DEBUG) || defined(DEBUG)
                    int cmp2 = *pKey_i < key;
                    assert((cmp != cmp2) || (key == *pKey_i));
#endif
                    if ((!cmp) && (key == *pKey_i)) return i;
                    m >>= 1;
                    if (!m) break;
                    cmp = -cmp;
                    i += (((m + 1) >> 1) ^ cmp) - cmp;
                    if (i < 0)
                        break;

                    assert(i >= 0 && i < (int)m_size);
                    pKey_i = m_p + i;
                    cmp = key < *pKey_i;
#if defined(_DEBUG) || defined(DEBUG)
                    cmp2 = *pKey_i < key;
                    assert((cmp != cmp2) || (key == *pKey_i));
#endif
                    if ((!cmp) && (key == *pKey_i)) return i;
                    m >>= 1;
                    if (!m) break;
                    cmp = -cmp;
                    i += (((m + 1) >> 1) ^ cmp) - cmp;
                    if (i < 0)
                        break;
                }
            }

            return cInvalidIndex;
        }
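
        // Illustrative sketch (not part of the library): find_sorted() requires the
        // vector to already be in ascending order (e.g. after sort()):
        //
        //   basisu::vector<int> v;
        //   v.push_back(7); v.push_back(3); v.push_back(5);
        //   v.sort();                    // contents are now 3, 5, 7
        //   int idx = v.find_sorted(5);  // returns 1
        //   idx = v.find_sorted(4);      // returns cInvalidIndex (-1)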

        template<typename Q>
        inline int find_sorted(const T& key, Q less_than) const
        {
            if (m_size)
            {
                // Uniform binary search - Knuth Algorithm 6.2.1 U, unrolled twice.
                int i = ((m_size + 1) >> 1) - 1;
                int m = m_size;

                for (; ; )
                {
                    assert(i >= 0 && i < (int)m_size);
                    const T* pKey_i = m_p + i;
                    int cmp = less_than(key, *pKey_i);
                    if ((!cmp) && (!less_than(*pKey_i, key))) return i;
                    m >>= 1;
                    if (!m) break;
                    cmp = -cmp;
                    i += (((m + 1) >> 1) ^ cmp) - cmp;
                    if (i < 0)
                        break;

                    assert(i >= 0 && i < (int)m_size);
                    pKey_i = m_p + i;
                    cmp = less_than(key, *pKey_i);
                    if ((!cmp) && (!less_than(*pKey_i, key))) return i;
                    m >>= 1;
                    if (!m) break;
                    cmp = -cmp;
                    i += (((m + 1) >> 1) ^ cmp) - cmp;
                    if (i < 0)
                        break;
                }
            }

            return cInvalidIndex;
        }

        inline uint32_t count_occurences(const T& key) const
        {
            uint32_t c = 0;

            const T* p = m_p;
            const T* p_end = m_p + m_size;

            while (p != p_end)
            {
                if (key == *p)
                    c++;

                p++;
            }

            return c;
        }

        inline void set_all(const T& o)
        {
            if ((sizeof(T) == 1) && (scalar_type<T>::cFlag))
                memset(m_p, *reinterpret_cast<const uint8_t*>(&o), m_size);
            else
            {
                T* pDst = m_p;
                T* pDst_end = pDst + m_size;
                while (pDst != pDst_end)
                    *pDst++ = o;
            }
        }

        // Caller assumes ownership of the heap block associated with the container. Container is cleared.
        inline void* assume_ownership()
        {
            T* p = m_p;
            m_p = NULL;
            m_size = 0;
            m_capacity = 0;
            return p;
        }

        // Caller is granting ownership of the indicated heap block.
        // Block must have size constructed elements, and have enough room for capacity elements.
        // The block must have been allocated using malloc().
        // Important: This method is used in Basis Universal. If you change how this container allocates memory, you'll need to change any users of this method.
        inline bool grant_ownership(T* p, uint32_t size, uint32_t capacity)
        {
            // Try to prevent the caller from obviously shooting themselves in the foot.
            if (((p + capacity) > m_p) && (p < (m_p + m_capacity)))
            {
                // Can't grant ownership of a block inside the container itself!
                assert(0);
                return false;
            }

            if (size > capacity)
            {
                assert(0);
                return false;
            }

            if (!p)
            {
                if (capacity)
                {
                    assert(0);
                    return false;
                }
            }
            else if (!capacity)
            {
                assert(0);
                return false;
            }

            clear();
            m_p = p;
            m_size = size;
            m_capacity = capacity;
            return true;
        }
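
        // Illustrative sketch (not part of the library): moving a malloc()'d block in
        // and out of a vector without copying the elements:
        //
        //   basisu::vector<uint8_t> v(256);                    // 256 zeroed bytes
        //   uint8_t* pBlock = (uint8_t*)v.assume_ownership();  // v is now empty; caller owns pBlock
        //   basisu::vector<uint8_t> w;
        //   w.grant_ownership(pBlock, 256, 256);               // w now owns pBlock and will free() it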

    private:
        T* m_p;
        uint32_t m_size;
        uint32_t m_capacity;

        template<typename Q> struct is_vector { enum { cFlag = false }; };
        template<typename Q> struct is_vector< vector<Q> > { enum { cFlag = true }; };

        static void object_mover(void* pDst_void, void* pSrc_void, uint32_t num)
        {
            T* pSrc = static_cast<T*>(pSrc_void);
            T* const pSrc_end = pSrc + num;
            T* pDst = static_cast<T*>(pDst_void);

            while (pSrc != pSrc_end)
            {
                // placement new
                new (static_cast<void*>(pDst)) T(*pSrc);
                pSrc->~T();
                ++pSrc;
                ++pDst;
            }
        }

        inline bool increase_capacity(uint32_t min_new_capacity, bool grow_hint, bool nofail = false)
        {
            return reinterpret_cast<elemental_vector*>(this)->increase_capacity(
                min_new_capacity, grow_hint, sizeof(T),
                (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(T) || (is_vector<T>::cFlag)) ? NULL : object_mover, nofail);
        }
    };

    template<typename T> struct bitwise_movable< vector<T> > { enum { cFlag = true }; };
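
    // Illustrative sketch (not part of the library): basic vector usage. Unlike
    // std::vector, sizes/capacities are 32-bit and storage comes from malloc()/free():
    //
    //   basisu::vector<uint32_t> v;
    //   v.reserve(16);
    //   for (uint32_t i = 0; i < 10; i++)
    //       v.push_back(i * i);
    //   v.erase_unordered(0);       // O(1) erase: moves back() into slot 0
    //   int where = v.find(49);     // linear search; cInvalidIndex if absent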

    // Hash map

    template <typename T>
    struct hasher
    {
        inline size_t operator() (const T& key) const { return static_cast<size_t>(key); }
    };

    template <typename T>
    struct equal_to
    {
        inline bool operator()(const T& a, const T& b) const { return a == b; }
    };
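
    // Illustrative sketch (not part of the library): the default hasher just casts the
    // key to size_t, so non-integral keys need a custom Hasher functor, e.g.:
    //
    //   struct string_hasher
    //   {
    //       size_t operator()(const std::string& s) const
    //       {
    //           size_t h = 14695981039346656037ULL; // FNV-1a offset basis (assumes 64-bit size_t)
    //           for (char c : s) { h ^= (uint8_t)c; h *= 1099511628211ULL; }
    //           return h;
    //       }
    //   };
    //   // basisu::hash_map<std::string, int, string_hasher> m;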

    // Important: The Hasher and Equals objects must be bitwise movable!
    template<typename Key, typename Value = empty_type, typename Hasher = hasher<Key>, typename Equals = equal_to<Key> >
    class hash_map
    {
    public:
        class iterator;
        class const_iterator;

    private:
        friend class iterator;
        friend class const_iterator;

        enum state
        {
            cStateInvalid = 0,
            cStateValid = 1
        };

        enum
        {
            cMinHashSize = 4U
        };

    public:
        typedef hash_map<Key, Value, Hasher, Equals> hash_map_type;
        typedef std::pair<Key, Value> value_type;
        typedef Key key_type;
        typedef Value referent_type;
        typedef Hasher hasher_type;
        typedef Equals equals_type;

        hash_map() :
            m_hash_shift(32), m_num_valid(0), m_grow_threshold(0)
        {
        }

        hash_map(const hash_map& other) :
            m_values(other.m_values),
            m_hash_shift(other.m_hash_shift),
            m_hasher(other.m_hasher),
            m_equals(other.m_equals),
            m_num_valid(other.m_num_valid),
            m_grow_threshold(other.m_grow_threshold)
        {
        }

        hash_map& operator= (const hash_map& other)
        {
            if (this == &other)
                return *this;

            clear();

            m_values = other.m_values;
            m_hash_shift = other.m_hash_shift;
            m_num_valid = other.m_num_valid;
            m_grow_threshold = other.m_grow_threshold;
            m_hasher = other.m_hasher;
            m_equals = other.m_equals;

            return *this;
        }

        inline ~hash_map()
        {
            clear();
        }

        const Equals& get_equals() const { return m_equals; }
        Equals& get_equals() { return m_equals; }

        void set_equals(const Equals& equals) { m_equals = equals; }

        const Hasher& get_hasher() const { return m_hasher; }
        Hasher& get_hasher() { return m_hasher; }

        void set_hasher(const Hasher& hasher) { m_hasher = hasher; }

        inline void clear()
        {
            if (!m_values.empty())
            {
                if (BASISU_HAS_DESTRUCTOR(Key) || BASISU_HAS_DESTRUCTOR(Value))
                {
                    node* p = &get_node(0);
                    node* p_end = p + m_values.size();

                    uint32_t num_remaining = m_num_valid;
                    while (p != p_end)
                    {
                        if (p->state)
                        {
                            destruct_value_type(p);
                            num_remaining--;
                            if (!num_remaining)
                                break;
                        }

                        p++;
                    }
                }

                m_values.clear_no_destruction();

                m_hash_shift = 32;
                m_num_valid = 0;
                m_grow_threshold = 0;
            }
        }

        inline void reset()
        {
            if (!m_num_valid)
                return;

            if (BASISU_HAS_DESTRUCTOR(Key) || BASISU_HAS_DESTRUCTOR(Value))
            {
                node* p = &get_node(0);
                node* p_end = p + m_values.size();

                uint32_t num_remaining = m_num_valid;
                while (p != p_end)
                {
                    if (p->state)
                    {
                        destruct_value_type(p);
                        p->state = cStateInvalid;

                        num_remaining--;
                        if (!num_remaining)
                            break;
                    }

                    p++;
                }
            }
            else if (sizeof(node) <= 32)
            {
                memset(&m_values[0], 0, m_values.size_in_bytes());
            }
            else
            {
                node* p = &get_node(0);
                node* p_end = p + m_values.size();

                uint32_t num_remaining = m_num_valid;
                while (p != p_end)
                {
                    if (p->state)
                    {
                        p->state = cStateInvalid;

                        num_remaining--;
                        if (!num_remaining)
                            break;
                    }

                    p++;
                }
            }

            m_num_valid = 0;
        }

        inline uint32_t size()
        {
            return m_num_valid;
        }

        inline uint32_t get_table_size()
        {
            return m_values.size();
        }

        inline bool empty()
        {
            return !m_num_valid;
        }

        inline void reserve(uint32_t new_capacity)
        {
            uint64_t new_hash_size = helpers::maximum(1U, new_capacity);

            new_hash_size = new_hash_size * 2ULL;

            if (!helpers::is_power_of_2(new_hash_size))
                new_hash_size = helpers::next_pow2(new_hash_size);

            new_hash_size = helpers::maximum<uint64_t>(cMinHashSize, new_hash_size);

            new_hash_size = helpers::minimum<uint64_t>(0x80000000UL, new_hash_size);

            if (new_hash_size > m_values.size())
                rehash((uint32_t)new_hash_size);
        }

        class iterator
        {
            friend class hash_map<Key, Value, Hasher, Equals>;
            friend class hash_map<Key, Value, Hasher, Equals>::const_iterator;

        public:
            inline iterator() : m_pTable(NULL), m_index(0) { }
            inline iterator(hash_map_type& table, uint32_t index) : m_pTable(&table), m_index(index) { }
            inline iterator(const iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }

            inline iterator& operator= (const iterator& other)
            {
                m_pTable = other.m_pTable;
                m_index = other.m_index;
                return *this;
            }

            // post-increment
            inline iterator operator++(int)
            {
                iterator result(*this);
                ++*this;
                return result;
            }

            // pre-increment
            inline iterator& operator++()
            {
                probe();
                return *this;
            }

            inline value_type& operator*() const { return *get_cur(); }
            inline value_type* operator->() const { return get_cur(); }

            inline bool operator == (const iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
            inline bool operator != (const iterator& b) const { return !(*this == b); }
            inline bool operator == (const const_iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
            inline bool operator != (const const_iterator& b) const { return !(*this == b); }

        private:
            hash_map_type* m_pTable;
            uint32_t m_index;

            inline value_type* get_cur() const
            {
                assert(m_pTable && (m_index < m_pTable->m_values.size()));
                assert(m_pTable->get_node_state(m_index) == cStateValid);

                return &m_pTable->get_node(m_index);
            }

            inline void probe()
            {
                assert(m_pTable);
                m_index = m_pTable->find_next(m_index);
            }
        };

        class const_iterator
        {
            friend class hash_map<Key, Value, Hasher, Equals>;
            friend class hash_map<Key, Value, Hasher, Equals>::iterator;

        public:
            inline const_iterator() : m_pTable(NULL), m_index(0) { }
            inline const_iterator(const hash_map_type& table, uint32_t index) : m_pTable(&table), m_index(index) { }
            inline const_iterator(const iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }
            inline const_iterator(const const_iterator& other) : m_pTable(other.m_pTable), m_index(other.m_index) { }

            inline const_iterator& operator= (const const_iterator& other)
            {
                m_pTable = other.m_pTable;
                m_index = other.m_index;
                return *this;
            }

            inline const_iterator& operator= (const iterator& other)
            {
                m_pTable = other.m_pTable;
                m_index = other.m_index;
                return *this;
            }

            // post-increment
            inline const_iterator operator++(int)
            {
                const_iterator result(*this);
                ++*this;
                return result;
            }

            // pre-increment
            inline const_iterator& operator++()
            {
                probe();
                return *this;
            }

            inline const value_type& operator*() const { return *get_cur(); }
            inline const value_type* operator->() const { return get_cur(); }

            inline bool operator == (const const_iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
            inline bool operator != (const const_iterator& b) const { return !(*this == b); }
            inline bool operator == (const iterator& b) const { return (m_pTable == b.m_pTable) && (m_index == b.m_index); }
            inline bool operator != (const iterator& b) const { return !(*this == b); }

        private:
            const hash_map_type* m_pTable;
            uint32_t m_index;

            inline const value_type* get_cur() const
            {
                assert(m_pTable && (m_index < m_pTable->m_values.size()));
                assert(m_pTable->get_node_state(m_index) == cStateValid);

                return &m_pTable->get_node(m_index);
            }

            inline void probe()
            {
                assert(m_pTable);
                m_index = m_pTable->find_next(m_index);
            }
        };

        inline const_iterator begin() const
        {
            if (!m_num_valid)
                return end();

            return const_iterator(*this, find_next(UINT32_MAX));
        }

        inline const_iterator end() const
        {
            return const_iterator(*this, m_values.size());
        }

        inline iterator begin()
        {
            if (!m_num_valid)
                return end();

            return iterator(*this, find_next(UINT32_MAX));
        }

        inline iterator end()
        {
            return iterator(*this, m_values.size());
        }

        // insert_result.first will always point to the inserted key/value (or the already existing key/value).
        // insert_result.second will be true if a new key/value was inserted, or false if the key already existed (in which case first will point to the already existing value).
        typedef std::pair<iterator, bool> insert_result;

        inline insert_result insert(const Key& k, const Value& v = Value())
        {
            insert_result result;
            if (!insert_no_grow(result, k, v))
            {
                grow();

                // This must succeed.
                if (!insert_no_grow(result, k, v))
                {
                    fprintf(stderr, "insert() failed\n");
                    abort();
                }
            }

            return result;
        }

        inline insert_result insert(const value_type& v)
        {
            return insert(v.first, v.second);
        }

        inline const_iterator find(const Key& k) const
        {
            return const_iterator(*this, find_index(k));
        }

        inline iterator find(const Key& k)
        {
            return iterator(*this, find_index(k));
        }

        inline bool erase(const Key& k)
        {
            uint32_t i = find_index(k);

            if (i >= m_values.size())
                return false;

            node* pDst = &get_node(i);
            destruct_value_type(pDst);
            pDst->state = cStateInvalid;

            m_num_valid--;

            for (; ; )
            {
                uint32_t r, j = i;

                node* pSrc = pDst;

                do
                {
                    if (!i)
                    {
                        i = m_values.size() - 1;
                        pSrc = &get_node(i);
                    }
                    else
                    {
                        i--;
                        pSrc--;
                    }

                    if (!pSrc->state)
                        return true;

                    r = hash_key(pSrc->first);

                } while ((i <= r && r < j) || (r < j && j < i) || (j < i && i <= r));

                move_node(pDst, pSrc);

                pDst = pSrc;
            }
        }

        inline void swap(hash_map_type& other)
        {
            m_values.swap(other.m_values);
            std::swap(m_hash_shift, other.m_hash_shift);
            std::swap(m_num_valid, other.m_num_valid);
            std::swap(m_grow_threshold, other.m_grow_threshold);
            std::swap(m_hasher, other.m_hasher);
            std::swap(m_equals, other.m_equals);
        }

    private:
        struct node : public value_type
        {
            uint8_t state;
        };

        static inline void construct_value_type(value_type* pDst, const Key& k, const Value& v)
        {
            if (BASISU_IS_BITWISE_COPYABLE(Key))
                memcpy(&pDst->first, &k, sizeof(Key));
            else
                scalar_type<Key>::construct(&pDst->first, k);

            if (BASISU_IS_BITWISE_COPYABLE(Value))
                memcpy(&pDst->second, &v, sizeof(Value));
            else
                scalar_type<Value>::construct(&pDst->second, v);
        }

        static inline void construct_value_type(value_type* pDst, const value_type* pSrc)
        {
            if ((BASISU_IS_BITWISE_COPYABLE(Key)) && (BASISU_IS_BITWISE_COPYABLE(Value)))
            {
                memcpy(pDst, pSrc, sizeof(value_type));
            }
            else
            {
                if (BASISU_IS_BITWISE_COPYABLE(Key))
                    memcpy(&pDst->first, &pSrc->first, sizeof(Key));
                else
                    scalar_type<Key>::construct(&pDst->first, pSrc->first);

                if (BASISU_IS_BITWISE_COPYABLE(Value))
                    memcpy(&pDst->second, &pSrc->second, sizeof(Value));
                else
                    scalar_type<Value>::construct(&pDst->second, pSrc->second);
            }
        }

        static inline void destruct_value_type(value_type* p)
        {
            scalar_type<Key>::destruct(&p->first);
            scalar_type<Value>::destruct(&p->second);
        }

        // Moves *pSrc to *pDst efficiently.
        // pDst should NOT be constructed on entry.
        static inline void move_node(node* pDst, node* pSrc, bool update_src_state = true)
        {
            assert(!pDst->state);

            if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Key) && BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Value))
            {
                memcpy(pDst, pSrc, sizeof(node));
            }
            else
            {
                if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Key))
                    memcpy(&pDst->first, &pSrc->first, sizeof(Key));
                else
                {
                    scalar_type<Key>::construct(&pDst->first, pSrc->first);
                    scalar_type<Key>::destruct(&pSrc->first);
                }

                if (BASISU_IS_BITWISE_COPYABLE_OR_MOVABLE(Value))
                    memcpy(&pDst->second, &pSrc->second, sizeof(Value));
                else
                {
                    scalar_type<Value>::construct(&pDst->second, pSrc->second);
                    scalar_type<Value>::destruct(&pSrc->second);
                }

                pDst->state = cStateValid;
            }

            if (update_src_state)
                pSrc->state = cStateInvalid;
        }

        struct raw_node
        {
            inline raw_node()
            {
                node* p = reinterpret_cast<node*>(this);
                p->state = cStateInvalid;
            }

            inline ~raw_node()
            {
                node* p = reinterpret_cast<node*>(this);
                if (p->state)
                    hash_map_type::destruct_value_type(p);
            }

            inline raw_node(const raw_node& other)
            {
                node* pDst = reinterpret_cast<node*>(this);
                const node* pSrc = reinterpret_cast<const node*>(&other);

                if (pSrc->state)
                {
                    hash_map_type::construct_value_type(pDst, pSrc);
                    pDst->state = cStateValid;
                }
                else
                    pDst->state = cStateInvalid;
            }

            inline raw_node& operator= (const raw_node& rhs)
            {
                if (this == &rhs)
                    return *this;

                node* pDst = reinterpret_cast<node*>(this);
                const node* pSrc = reinterpret_cast<const node*>(&rhs);

                if (pSrc->state)
                {
                    if (pDst->state)
                    {
                        pDst->first = pSrc->first;
                        pDst->second = pSrc->second;
                    }
                    else
                    {
                        hash_map_type::construct_value_type(pDst, pSrc);
                        pDst->state = cStateValid;
                    }
                }
                else if (pDst->state)
                {
                    hash_map_type::destruct_value_type(pDst);
                    pDst->state = cStateInvalid;
                }

                return *this;
            }

            uint8_t m_bits[sizeof(node)];
        };

        typedef basisu::vector<raw_node> node_vector;

        node_vector m_values;
        uint32_t m_hash_shift;

        Hasher m_hasher;
        Equals m_equals;

        uint32_t m_num_valid;

        uint32_t m_grow_threshold;

        inline uint32_t hash_key(const Key& k) const
        {
            assert((1U << (32U - m_hash_shift)) == m_values.size());

            uint32_t hash = static_cast<uint32_t>(m_hasher(k));

            // Fibonacci hashing
            hash = (2654435769U * hash) >> m_hash_shift;

            assert(hash < m_values.size());
            return hash;
        }

        inline const node& get_node(uint32_t index) const
        {
            return *reinterpret_cast<const node*>(&m_values[index]);
        }

        inline node& get_node(uint32_t index)
        {
            return *reinterpret_cast<node*>(&m_values[index]);
        }

        inline state get_node_state(uint32_t index) const
        {
            return static_cast<state>(get_node(index).state);
        }

        inline void set_node_state(uint32_t index, bool valid)
        {
            get_node(index).state = valid;
        }

        inline void grow()
        {
            uint64_t n = m_values.size() * 3ULL; // was * 2

            if (!helpers::is_power_of_2(n))
                n = helpers::next_pow2(n);

            if (n > 0x80000000UL)
                n = 0x80000000UL;

            rehash(helpers::maximum<uint32_t>(cMinHashSize, (uint32_t)n));
        }

        inline void rehash(uint32_t new_hash_size)
        {
            assert(new_hash_size >= m_num_valid);
            assert(helpers::is_power_of_2(new_hash_size));

            if ((new_hash_size < m_num_valid) || (new_hash_size == m_values.size()))
                return;

            hash_map new_map;
            new_map.m_values.resize(new_hash_size);
            new_map.m_hash_shift = 32U - helpers::floor_log2i(new_hash_size);
            assert(new_hash_size == (1U << (32U - new_map.m_hash_shift)));
            new_map.m_grow_threshold = UINT_MAX;

            node* pNode = reinterpret_cast<node*>(m_values.begin());
            node* pNode_end = pNode + m_values.size();

            while (pNode != pNode_end)
            {
                if (pNode->state)
                {
                    new_map.move_into(pNode);

                    if (new_map.m_num_valid == m_num_valid)
                        break;
                }

                pNode++;
            }

            new_map.m_grow_threshold = (new_hash_size + 1U) >> 1U;

            m_values.clear_no_destruction();
            m_hash_shift = 32;

            swap(new_map);
        }

        inline uint32_t find_next(uint32_t index) const
        {
            index++;

            if (index >= m_values.size())
                return index;

            const node* pNode = &get_node(index);

            for (; ; )
            {
                if (pNode->state)
                    break;

                if (++index >= m_values.size())
                    break;

                pNode++;
            }

            return index;
        }

        inline uint32_t find_index(const Key& k) const
        {
            if (m_num_valid)
            {
                uint32_t index = hash_key(k);
                const node* pNode = &get_node(index);

                if (pNode->state)
                {
                    if (m_equals(pNode->first, k))
                        return index;

                    const uint32_t orig_index = index;

                    for (; ; )
                    {
                        if (!index)
                        {
                            index = m_values.size() - 1;
                            pNode = &get_node(index);
                        }
                        else
                        {
                            index--;
                            pNode--;
                        }

                        if (index == orig_index)
                            break;

                        if (!pNode->state)
                            break;

                        if (m_equals(pNode->first, k))
                            return index;
                    }
                }
            }

            return m_values.size();
        }

        inline bool insert_no_grow(insert_result& result, const Key& k, const Value& v = Value())
        {
            if (!m_values.size())
                return false;

            uint32_t index = hash_key(k);
            node* pNode = &get_node(index);

            if (pNode->state)
            {
                if (m_equals(pNode->first, k))
                {
                    result.first = iterator(*this, index);
                    result.second = false;
                    return true;
                }

                const uint32_t orig_index = index;

                for (; ; )
                {
                    if (!index)
                    {
                        index = m_values.size() - 1;
                        pNode = &get_node(index);
                    }
                    else
                    {
                        index--;
                        pNode--;
                    }

                    if (orig_index == index)
                        return false;

                    if (!pNode->state)
                        break;

                    if (m_equals(pNode->first, k))
                    {
                        result.first = iterator(*this, index);
                        result.second = false;
                        return true;
                    }
                }
            }

            if (m_num_valid >= m_grow_threshold)
                return false;

            construct_value_type(pNode, k, v);

            pNode->state = cStateValid;

            m_num_valid++;
            assert(m_num_valid <= m_values.size());

            result.first = iterator(*this, index);
            result.second = true;

            return true;
        }

        inline void move_into(node* pNode)
        {
            uint32_t index = hash_key(pNode->first);
            node* pDst_node = &get_node(index);

            if (pDst_node->state)
            {
                const uint32_t orig_index = index;

                for (; ; )
                {
                    if (!index)
                    {
                        index = m_values.size() - 1;
                        pDst_node = &get_node(index);
                    }
                    else
                    {
                        index--;
                        pDst_node--;
                    }

                    if (index == orig_index)
                    {
                        assert(false);
                        return;
                    }

                    if (!pDst_node->state)
                        break;
                }
            }

            move_node(pDst_node, pNode, false);

            m_num_valid++;
        }
    };

    template<typename Key, typename Value, typename Hasher, typename Equals>
    struct bitwise_movable< hash_map<Key, Value, Hasher, Equals> > { enum { cFlag = true }; };
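
    // Illustrative sketch (not part of the library): basic hash_map usage. The table
    // uses linear probing with tombstone-free deletion, and insert() returns a
    // std::pair<iterator, bool> like std::unordered_map:
    //
    //   basisu::hash_map<uint32_t, uint32_t> m;
    //   m.insert(5, 500);
    //   auto res = m.insert(5, 999);   // res.second == false; key already present
    //   auto it = m.find(5);
    //   if (it != m.end())
    //       it->second = 501;          // value_type is std::pair<Key, Value>
    //   m.erase(5);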

#if BASISU_HASHMAP_TEST
    extern void hash_map_test();
#endif

} // namespace basisu

namespace std
{
    template<typename T>
    inline void swap(basisu::vector<T>& a, basisu::vector<T>& b)
    {
        a.swap(b);
    }

    template<typename Key, typename Value, typename Hasher, typename Equals>
    inline void swap(basisu::hash_map<Key, Value, Hasher, Equals>& a, basisu::hash_map<Key, Value, Hasher, Equals>& b)
    {
        a.swap(b);
    }

} // namespace std