/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_concurrent_vector_H
#define __TBB_concurrent_vector_H

#include "tbb_stddef.h"
#include "tbb_exception.h"
#include "atomic.h"
#include "cache_aligned_allocator.h"
#include "blocked_range.h"
#include "tbb_machine.h"
#include "tbb_profiling.h"
#include <new>
#include <cstring> // for memset()
#include __TBB_STD_SWAP_HEADER
#include <algorithm>
#include <iterator>

#include "internal/_allocator_traits.h"

#if _MSC_VER==1500 && !__INTEL_COMPILER
    // VS2008/VC9 seems to have an issue: <limits> pulls in math.h
    #pragma warning( push )
    #pragma warning( disable: 4985 )
#endif
#include <limits> /* std::numeric_limits */
#if _MSC_VER==1500 && !__INTEL_COMPILER
    #pragma warning( pop )
#endif

#if __TBB_INITIALIZER_LISTS_PRESENT
    #include <initializer_list>
#endif

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings in /Wp64 mode
    #pragma warning (push)
#if defined(_Wp64)
    #pragma warning (disable: 4267)
#endif
    #pragma warning (disable: 4127) //warning C4127: conditional expression is constant
#endif

namespace tbb {

template<typename T, class A = cache_aligned_allocator<T> >
class concurrent_vector;

//! @cond INTERNAL
namespace internal {

    template<typename Container, typename Value>
    class vector_iterator;

    //! Bad allocation marker
    static void *const vector_allocation_error_flag = reinterpret_cast<void*>(size_t(63));

    //! Exception helper function
    template<typename T>
    void handle_unconstructed_elements(T* array, size_t n_of_elements){
        std::memset( static_cast<void*>(array), 0, n_of_elements * sizeof( T ) );
    }

    //! Base class of concurrent vector implementation.
    /** @ingroup containers */
    class concurrent_vector_base_v3 {
    protected:

        // Basic types declarations
        typedef size_t segment_index_t;
        typedef size_t size_type;

        // Using an enumeration due to Mac linking problems with static const variables
        enum {
            // Size constants
            default_initial_segments = 1, // 2 initial items
            //! Number of slots for segment pointers inside the class
            pointers_per_short_table = 3, // to fit into 8 words of entire structure
            pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit
        };

        struct segment_not_used {};
        struct segment_allocated {};
        struct segment_allocation_failed {};

        class segment_t;
        class segment_value_t {
            void* array;
        private:
            //TODO: More elegant way to grant access to selected functions _only_?
            friend class segment_t;
            explicit segment_value_t(void* an_array):array(an_array) {}
        public:
            friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;}
            friend bool operator==(segment_value_t const& lhs, segment_allocated) { return lhs.array > internal::vector_allocation_error_flag;}
            friend bool operator==(segment_value_t const& lhs, segment_allocation_failed) { return lhs.array == internal::vector_allocation_error_flag;}
            template<typename argument_type>
            friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! (lhs == arg);}

            template<typename T>
            T* pointer() const { return static_cast<T*>(const_cast<void*>(array)); }
        };

        friend void enforce_segment_allocated(segment_value_t const& s, internal::exception_id exception = eid_bad_last_alloc){
            if(s != segment_allocated()){
                internal::throw_exception(exception);
            }
        }

        // Segment pointer.
        class segment_t {
            atomic<void*> array;
        public:
            segment_t(){ store<relaxed>(segment_not_used());}
            //Copy ctor and assignment operator are defined to ease use of STL algorithms.
            //These algorithms are usually not a synchronization point, so the semantics are
            //intentionally relaxed here.
            segment_t(segment_t const& rhs ){ array.store<relaxed>(rhs.array.load<relaxed>());}

            void swap(segment_t & rhs ){
                tbb::internal::swap<relaxed>(array, rhs.array);
            }

            segment_t& operator=(segment_t const& rhs ){
                array.store<relaxed>(rhs.array.load<relaxed>());
                return *this;
            }

            template<memory_semantics M>
            segment_value_t load() const { return segment_value_t(array.load<M>());}

            template<memory_semantics M>
            void store(segment_not_used) {
                array.store<M>(0);
            }

            template<memory_semantics M>
            void store(segment_allocation_failed) {
                __TBB_ASSERT(load<relaxed>() != segment_allocated(),"transition from \"allocated\" to \"allocation failed\" state is not logical");
                array.store<M>(internal::vector_allocation_error_flag);
            }

            template<memory_semantics M>
            void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) {
                __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(),
                        "other overloads of store should be used for marking segment as not_used or allocation_failed" );
                array.store<M>(allocated_segment_pointer);
            }

#if TBB_USE_ASSERT
            ~segment_t() {
                __TBB_ASSERT(load<relaxed>() != segment_allocated(), "should have been freed by clear" );
            }
#endif /* TBB_USE_ASSERT */
        };
        friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true);

        // Data fields

        //! allocator function pointer
        void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t);

        //! count of segments in the first block
        atomic<size_type> my_first_block;

        //! Requested size of vector
        atomic<size_type> my_early_size;

        //! Pointer to the segments table
        atomic<segment_t*> my_segment;

        //! embedded storage of segment pointers
        segment_t my_storage[pointers_per_short_table];

        // Methods

        concurrent_vector_base_v3() {
            //Here the semantics are intentionally relaxed.
            //The reason is as follows:
            //An object that is in the middle of construction (i.e. its constructor has not yet finished)
            //cannot be used concurrently until the construction is finished.
            //Thus, to signal other threads that construction is finished, some synchronization with
            //acquire-release semantics should be done by the (external) code that uses the vector.
            //So there is no need to do the synchronization inside the vector.

            my_early_size.store<relaxed>(0);
            my_first_block.store<relaxed>(0); // intentionally not default_initial_segments
            my_segment.store<relaxed>(my_storage);
        }

        __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3();

        //These helper methods use the fact that segments are allocated so
        //that every segment size is an (increasing) power of 2,
        //with one exception: segment 0 has size 2, the same as segment 1;
        //e.g. the segment with index 3 has size 2^3 = 8.
        static segment_index_t segment_index_of( size_type index ) {
            return segment_index_t( __TBB_Log2( index|1 ) );
        }

        static segment_index_t segment_base( segment_index_t k ) {
            return (segment_index_t(1)<<k & ~segment_index_t(1));
        }

        static inline segment_index_t segment_base_index_of( segment_index_t &index ) {
            segment_index_t k = segment_index_of( index );
            index -= segment_base(k);
            return k;
        }

        static size_type segment_size( segment_index_t k ) {
            return segment_index_t(1)<<k; // fake value for k==0
        }


        static bool is_first_element_in_segment(size_type element_index){
            //check if element_index is a power of 2 that is at least 2.
            //The idea is to detect if the iterator crosses a segment boundary,
            //and 2 is the minimal index for which it's true
            __TBB_ASSERT(element_index, "there should be no need to call "
                                        "is_first_element_in_segment for 0th element" );
            return is_power_of_two_at_least( element_index, 2 );
        }
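
        // Illustration (a sketch, not used by the implementation): the helpers above imply
        // the following element-to-segment mapping:
        //   elements 0..1  -> segment 0 (base 0),  elements 2..3  -> segment 1 (base 2),
        //   elements 4..7  -> segment 2 (base 4),  elements 8..15 -> segment 3 (base 8), ...
        // For example, for element 6: segment_index_of(6) == 2, segment_base(2) == 4 and
        // segment_size(2) == 4, so the element lives at offset 6 - 4 == 2 within segment 2.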

        //! An operation on an n-element array starting at begin.
        typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n );

        //! An operation on n-element destination array and n-element source array.
        typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );

        //! Internal structure for compact()
        struct internal_segments_table {
            segment_index_t first_block;
            segment_t table[pointers_per_long_table];
        };

        void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
        size_type __TBB_EXPORTED_METHOD internal_capacity() const;
        void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src );
        size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src );
        void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );
        segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy );
        void* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy );
        void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy );
        void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base_v3& src, size_type element_size,
                               internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy );
        //! Obsolete
        void __TBB_EXPORTED_METHOD internal_throw_exception(size_type) const;
        void __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3& v);

        void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
                                                    internal_array_op1 destroy, internal_array_op2 init );
        size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );

        //! Deprecated entry point for backwards compatibility to TBB 2.1.
        void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
private:
        //! Private functionality
        class helper;
        friend class helper;

        template<typename Container, typename Value>
        friend class vector_iterator;

    };

    inline void swap(concurrent_vector_base_v3::segment_t & lhs, concurrent_vector_base_v3::segment_t & rhs) __TBB_NOEXCEPT(true) {
        lhs.swap(rhs);
    }

    typedef concurrent_vector_base_v3 concurrent_vector_base;

    //! Meets requirements of a forward iterator for STL and a Value for a blocked_range.
    /** Value is either the T or const T type of the container.
        @ingroup containers */
    template<typename Container, typename Value>
    class vector_iterator
    {
        //! concurrent_vector over which we are iterating.
        Container* my_vector;

        //! Index into the vector
        size_t my_index;

        //! Caches my_vector->internal_subscript(my_index)
        /** NULL if cached value is not available */
        mutable Value* my_item;

        template<typename C, typename T>
        friend vector_iterator<C,T> operator+( ptrdiff_t offset, const vector_iterator<C,T>& v );

        template<typename C, typename T, typename U>
        friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );

        template<typename C, typename T, typename U>
        friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );

        template<typename C, typename U>
        friend class internal::vector_iterator;

#if !__TBB_TEMPLATE_FRIENDS_BROKEN
        template<typename T, class A>
        friend class tbb::concurrent_vector;
#else
public:
#endif

        vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) :
            my_vector(const_cast<Container*>(&vector)),
            my_index(index),
            my_item(static_cast<Value*>(ptr))
        {}

    public:
        //! Default constructor
        vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}

        vector_iterator( const vector_iterator<Container,typename Container::value_type>& other ) :
            my_vector(other.my_vector),
            my_index(other.my_index),
            my_item(other.my_item)
        {}

        vector_iterator operator+( ptrdiff_t offset ) const {
            return vector_iterator( *my_vector, my_index+offset );
        }
        vector_iterator &operator+=( ptrdiff_t offset ) {
            my_index+=offset;
            my_item = NULL;
            return *this;
        }
        vector_iterator operator-( ptrdiff_t offset ) const {
            return vector_iterator( *my_vector, my_index-offset );
        }
        vector_iterator &operator-=( ptrdiff_t offset ) {
            my_index-=offset;
            my_item = NULL;
            return *this;
        }
        Value& operator*() const {
            Value* item = my_item;
            if( !item ) {
                item = my_item = &my_vector->internal_subscript(my_index);
            }
            __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" );
            return *item;
        }
        Value& operator[]( ptrdiff_t k ) const {
            return my_vector->internal_subscript(my_index+k);
        }
        Value* operator->() const {return &operator*();}

        //! Pre increment
        vector_iterator& operator++() {
            size_t element_index = ++my_index;
            if( my_item ) {
                //TODO: consider using knowledge about the "first_block optimization" here as well?
                if( concurrent_vector_base::is_first_element_in_segment(element_index)) {
                    //if the iterator crosses a segment boundary, the pointer becomes invalid,
                    //as the next segment may be at a different memory location
                    my_item= NULL;
                } else {
                    ++my_item;
                }
            }
            return *this;
        }

        //! Pre decrement
        vector_iterator& operator--() {
            __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" );
            size_t element_index = my_index--;
            if( my_item ) {
                if(concurrent_vector_base::is_first_element_in_segment(element_index)) {
                    //if the iterator crosses a segment boundary, the pointer becomes invalid,
                    //as the next segment may be at a different memory location
                    my_item= NULL;
                } else {
                    --my_item;
                }
            }
            return *this;
        }

        //! Post increment
        vector_iterator operator++(int) {
            vector_iterator result = *this;
            operator++();
            return result;
        }

        //! Post decrement
        vector_iterator operator--(int) {
            vector_iterator result = *this;
            operator--();
            return result;
        }

        // STL support

        typedef ptrdiff_t difference_type;
        typedef Value value_type;
        typedef Value* pointer;
        typedef Value& reference;
        typedef std::random_access_iterator_tag iterator_category;
    };

    template<typename Container, typename T>
    vector_iterator<Container,T> operator+( ptrdiff_t offset, const vector_iterator<Container,T>& v ) {
        return vector_iterator<Container,T>( *v.my_vector, v.my_index+offset );
    }

    template<typename Container, typename T, typename U>
    bool operator==( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
        return i.my_index==j.my_index && i.my_vector == j.my_vector;
    }

    template<typename Container, typename T, typename U>
    bool operator!=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
        return !(i==j);
    }

    template<typename Container, typename T, typename U>
    bool operator<( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
        return i.my_index<j.my_index;
    }

    template<typename Container, typename T, typename U>
    bool operator>( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
        return j<i;
    }

    template<typename Container, typename T, typename U>
    bool operator>=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
        return !(i<j);
    }

    template<typename Container, typename T, typename U>
    bool operator<=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
        return !(j<i);
    }

    template<typename Container, typename T, typename U>
    ptrdiff_t operator-( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
        return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
    }

    template<typename T, class A>
    class allocator_base {
    public:
        typedef typename tbb::internal::allocator_rebind<A, T>::type allocator_type;
        allocator_type my_allocator;
        allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {}
    };

} // namespace internal
//! @endcond

//! Concurrent vector container
/** concurrent_vector is a container having the following main properties:
    - It provides random indexed access to its elements. The index of the first element is 0.
    - It ensures safe concurrent growing of its size (different threads can safely append new elements).
    - Adding new elements does not invalidate existing iterators and does not change indices of existing items.

@par Compatibility
    The class meets all Container Requirements and Reversible Container Requirements from
    C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). But it doesn't meet
    Sequence Requirements due to absence of insert() and erase() methods.

@par Exception Safety
    Methods working with memory allocation and/or construction of new elements can throw an
    exception if the allocator fails to allocate memory or the element's default constructor throws one.
    The concurrent vector's element type T must conform to the following requirements:
    - Throwing an exception is forbidden for destructor of T.
    - Default constructor of T must not throw an exception OR its non-virtual destructor must safely work when its object memory is zero-initialized.
    .
    Otherwise, the program's behavior is undefined.
@par
    If an exception happens inside a growth or assignment operation, the vector instance becomes invalid unless stated otherwise in the method documentation.
    Invalid state means:
    - There are no guarantees that all items were initialized by a constructor. The rest of the items are zero-filled, including the item where the exception happened.
    - An invalid vector instance cannot be repaired; it is unable to grow anymore.
    - Size and capacity reported by the vector are incorrect, and calculated as if the failed operation were successful.
    - An attempt to access unallocated elements using operator[] or iterators results in an access violation or segmentation fault, while using the at() method throws a C++ exception.
    .
    If a concurrent grow operation successfully completes, all the elements it has added to the vector will remain valid and accessible even if a subsequent grow operation fails.

@par Fragmentation
    Unlike an STL vector, a concurrent_vector does not move existing elements if it needs
    to allocate more memory. The container is divided into a series of contiguous arrays of
    elements. The first reservation, growth, or assignment operation determines the size of
    the first array. Using a small number of elements as the initial size incurs fragmentation that
    may increase element access time. The internal layout can be optimized by the method compact() that
    merges several smaller arrays into one contiguous array.

@par Changes since TBB 2.1
    - Fixed guarantees of concurrent_vector::size() and grow_to_at_least() methods to assure elements are allocated.
    - Methods end()/rbegin()/back() are partly thread-safe since they use size() to get the end of the vector
    - Added resize() methods (not thread-safe)
    - Added cbegin/cend/crbegin/crend methods
    - Changed return type of methods grow* and push_back to iterator

@par Changes since TBB 2.0
    - Implemented exception-safety guarantees
    - Added template argument for allocator
    - Added allocator argument in constructors
    - Faster index calculation
    - First growth call specifies a number of segments to be merged in the first allocation.
    - Fixed memory blow-up for a swarm of small-sized vector instances
    - Added grow_by(size_type n, const_reference t) growth using the copy constructor to initialize new items.
    - Added STL-like constructors.
    - Added operators ==, < and derivatives
    - Added at() method, safe to use after an exception was thrown inside the vector
    - Added get_allocator() method.
    - Added assign() methods
    - Added compact() method to defragment first segments
    - Added swap() method
    - range() defaults to grainsize = 1, supporting auto grainsize algorithms.

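@par Example
    A minimal usage sketch (an illustration only; it additionally assumes that tbb/parallel_for.h
    is included and that the compiler supports C++11 lambdas):
    \code
    tbb::concurrent_vector<int> v;
    tbb::parallel_for( 0, 1000, [&v]( int i ) {
        v.push_back( i );              // threads may append concurrently
    } );
    // afterwards v.size() == 1000; the order of the elements is unspecified
    \endcode
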
    @ingroup containers */
template<typename T, class A>
class concurrent_vector: protected internal::allocator_base<T, A>,
                         private internal::concurrent_vector_base {
private:
    template<typename I>
    class generic_range_type: public blocked_range<I> {
    public:
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef I iterator;
        typedef ptrdiff_t difference_type;
        generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
        template<typename U>
        generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
    };

    template<typename C, typename U>
    friend class internal::vector_iterator;

public:
    //------------------------------------------------------------------------
    // STL compatible types
    //------------------------------------------------------------------------
    typedef internal::concurrent_vector_base_v3::size_type size_type;
    typedef typename internal::allocator_base<T, A>::allocator_type allocator_type;

    typedef T value_type;
    typedef ptrdiff_t difference_type;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T *pointer;
    typedef const T *const_pointer;

    typedef internal::vector_iterator<concurrent_vector,T> iterator;
    typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;

#if !defined(_MSC_VER) || _CPPLIB_VER>=300
    // Assume ISO standard definition of std::reverse_iterator
    typedef std::reverse_iterator<iterator> reverse_iterator;
    typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
#else
    // Use non-standard std::reverse_iterator
    typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;
    typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;
#endif /* defined(_MSC_VER) && (_MSC_VER<1300) */

    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    typedef generic_range_type<iterator> range_type;
    typedef generic_range_type<const_iterator> const_range_type;

    //------------------------------------------------------------------------
    // STL compatible constructors & destructors
    //------------------------------------------------------------------------

    //! Construct empty vector.
    explicit concurrent_vector(const allocator_type &a = allocator_type())
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
    }

    //Constructors are not required to have synchronization
    //(for more details see comment in the concurrent_vector_base constructor).
#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Constructor from initializer_list
    concurrent_vector(std::initializer_list<T> init_list, const allocator_type &a = allocator_type())
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_assign_iterators(init_list.begin(), init_list.end());
        } __TBB_CATCH(...) {
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
            __TBB_RETHROW();
        }

    }
#endif //# __TBB_INITIALIZER_LISTS_PRESENT

    //! Copying constructor
    concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() )
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_copy(vector, sizeof(T), &copy_array);
        } __TBB_CATCH(...) {
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
            __TBB_RETHROW();
        }
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move constructor
    //TODO add __TBB_NOEXCEPT(true) and static_assert(std::has_nothrow_move_constructor<A>::value)
    concurrent_vector( concurrent_vector&& source)
        : internal::allocator_base<T, A>(std::move(source)), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        concurrent_vector_base_v3::internal_swap(source);
    }

    concurrent_vector( concurrent_vector&& source, const allocator_type& a)
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        //The C++ standard requires instances of an allocator to be compared for equality;
        //if they compare equal, memory allocated by one instance may be deallocated by the other one.
        if (a == source.my_allocator) {
            concurrent_vector_base_v3::internal_swap(source);
        } else {
            __TBB_TRY {
                internal_copy(source, sizeof(T), &move_array);
            } __TBB_CATCH(...) {
                segment_t *table = my_segment.load<relaxed>();
                internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
                __TBB_RETHROW();
            }
        }
    }

#endif

    //! Copying constructor for vector with different allocator type
    template<class M>
    concurrent_vector( const concurrent_vector<T, M>& vector, const allocator_type& a = allocator_type() )
        : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_copy(vector.internal_vector_base(), sizeof(T), &copy_array);
        } __TBB_CATCH(...) {
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }

    //! Construction with initial size specified by argument n
    explicit concurrent_vector(size_type n)
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
        } __TBB_CATCH(...) {
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }

    //! Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance
    concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type())
        : internal::allocator_base<T, A>(a)
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
        } __TBB_CATCH(...) {
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }

    //! Construction with copying iteration range and given allocator instance
    template<class I>
    concurrent_vector(I first, I last, const allocator_type &a = allocator_type())
        : internal::allocator_base<T, A>(a)
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_assign_range(first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
        } __TBB_CATCH(...) {
            segment_t *table = my_segment.load<relaxed>();
            internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
            __TBB_RETHROW();
        }
    }

    //! Assignment
    concurrent_vector& operator=( const concurrent_vector& vector ) {
        if( this != &vector )
            internal_assign(vector, sizeof(T), &destroy_array, &assign_array, &copy_array);
        return *this;
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //TODO: add __TBB_NOEXCEPT()
    //! Move assignment
    concurrent_vector& operator=( concurrent_vector&& other ) {
        __TBB_ASSERT(this != &other, "Move assignment to itself is prohibited");
        typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_move_assignment pocma_t;
        if(pocma_t::value || this->my_allocator == other.my_allocator) {
            concurrent_vector trash (std::move(*this));
            internal_swap(other);
            tbb::internal::allocator_move_assignment(this->my_allocator, other.my_allocator, pocma_t());
        } else {
            internal_assign(other, sizeof(T), &destroy_array, &move_assign_array, &move_array);
        }
        return *this;
    }
#endif
    //TODO: add a template assignment operator? (i.e. with different element type)

    //! Assignment for vector with different allocator type
    template<class M>
    concurrent_vector& operator=( const concurrent_vector<T, M>& vector ) {
        if( static_cast<void*>( this ) != static_cast<const void*>( &vector ) )
            internal_assign(vector.internal_vector_base(),
                sizeof(T), &destroy_array, &assign_array, &copy_array);
        return *this;
    }

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! Assignment for initializer_list
    concurrent_vector& operator=( std::initializer_list<T> init_list ) {
        internal_clear(&destroy_array);
        internal_assign_iterators(init_list.begin(), init_list.end());
        return *this;
    }
#endif //#if __TBB_INITIALIZER_LISTS_PRESENT

    //------------------------------------------------------------------------
    // Concurrent operations
    //------------------------------------------------------------------------
    //! Grow by "delta" elements.
    /** Returns iterator pointing to the first new element. */
    iterator grow_by( size_type delta ) {
        return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load());
    }

    //! Grow by "delta" elements using copying constructor.
    /** Returns iterator pointing to the first new element. */
    iterator grow_by( size_type delta, const_reference t ) {
        return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load());
    }

    /** Returns iterator pointing to the first new element. */
    template<typename I>
    iterator grow_by( I first, I last ) {
        typename std::iterator_traits<I>::difference_type delta = std::distance(first, last);
        __TBB_ASSERT( delta >= 0, NULL);

        return iterator(*this, delta ? internal_grow_by(delta, sizeof(T), &copy_range<I>, static_cast<const void*>(&first)) : my_early_size.load());
    }

#if __TBB_INITIALIZER_LISTS_PRESENT
    /** Returns iterator pointing to the first new element. */
    iterator grow_by( std::initializer_list<T> init_list ) {
        return grow_by( init_list.begin(), init_list.end() );
    }
#endif //#if __TBB_INITIALIZER_LISTS_PRESENT
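
    // A usage sketch for the grow_by overloads above (an illustration; "records" and compute()
    // are hypothetical names): each concurrent call atomically reserves a contiguous block of
    // indices and returns an iterator to the first of them, so the calling thread can fill its
    // own block without further synchronization, e.g.
    //     concurrent_vector<double>::iterator it = records.grow_by(10); // 10 default-constructed items
    //     for( int i = 0; i < 10; ++i )
    //         *(it + i) = compute(i);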

    //! Append minimal sequence of elements such that size()>=n.
    /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated.
        May return while other elements are being constructed by other threads.
        Returns iterator that points to beginning of appended sequence.
        If no elements were appended, returns iterator pointing to nth element. */
    iterator grow_to_at_least( size_type n ) {
        size_type m=0;
        if( n ) {
            m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
            if( m>n ) m=n;
        }
        return iterator(*this, m);
    }

    /** Analogous to grow_to_at_least( size_type n ) with exception that the new
        elements are initialized by copying of t instead of default construction. */
    iterator grow_to_at_least( size_type n, const_reference t ) {
        size_type m=0;
        if( n ) {
            m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array_by, &t);
            if( m>n ) m=n;
        }
        return iterator(*this, m);
    }

    //! Push item
    /** Returns iterator pointing to the new element. */
    iterator push_back( const_reference item )
    {
        push_back_helper prolog(*this);
        new(prolog.internal_push_back_result()) T(item);
        return prolog.return_iterator_and_dismiss();
    }

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Push item, move-aware
    /** Returns iterator pointing to the new element. */
    iterator push_back( T&& item )
    {
        push_back_helper prolog(*this);
        new(prolog.internal_push_back_result()) T(std::move(item));
        return prolog.return_iterator_and_dismiss();
    }
#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
    //! Push item, create item "in place" with provided arguments
    /** Returns iterator pointing to the new element. */
    template<typename... Args>
    iterator emplace_back( Args&&... args )
    {
        push_back_helper prolog(*this);
        new(prolog.internal_push_back_result()) T(std::forward<Args>(args)...);
        return prolog.return_iterator_and_dismiss();
    }
#endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
#endif //__TBB_CPP11_RVALUE_REF_PRESENT
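
    // A sketch of recovering the index of a freshly appended element (an illustration only):
    // the iterator returned by push_back()/emplace_back() can be turned into an index by
    // subtracting begin(), and that index stays valid because growing never moves elements.
    //     tbb::concurrent_vector<int> v;
    //     size_t idx = v.push_back(42) - v.begin();
    //     // v[idx] == 42 still holds while other threads keep appending
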
    //! Get reference to element at given index.
    /** This method is thread-safe for concurrent reads, and also while growing the vector,
        as long as the calling thread has checked that index < size(). */
    reference operator[]( size_type index ) {
        return internal_subscript(index);
    }

    //! Get const reference to element at given index.
    const_reference operator[]( size_type index ) const {
        return internal_subscript(index);
    }

    //! Get reference to element at given index. Throws exceptions on errors.
    reference at( size_type index ) {
        return internal_subscript_with_exceptions(index);
    }

    //! Get const reference to element at given index. Throws exceptions on errors.
    const_reference at( size_type index ) const {
        return internal_subscript_with_exceptions(index);
    }

    //! Get range for iterating with parallel algorithms
    range_type range( size_t grainsize = 1 ) {
        return range_type( begin(), end(), grainsize );
    }

    //! Get const range for iterating with parallel algorithms
    const_range_type range( size_t grainsize = 1 ) const {
        return const_range_type( begin(), end(), grainsize );
    }

    //------------------------------------------------------------------------
    // Capacity
    //------------------------------------------------------------------------
    //! Return size of vector. It may include elements under construction
    size_type size() const {
        size_type sz = my_early_size, cp = internal_capacity();
        return cp < sz ? cp : sz;
    }

    //! Return false if the vector is not empty or has at least some elements under construction.
    bool empty() const {return !my_early_size;}

    //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value.
    size_type capacity() const {return internal_capacity();}

    //! Allocate enough space to grow to size n without having to allocate more memory later.
    /** Like most of the methods provided for STL compatibility, this method is *not* thread safe.
        The capacity afterwards may be bigger than the requested reservation. */
    void reserve( size_type n ) {
        if( n )
            internal_reserve(n, sizeof(T), max_size());
    }
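
    // An illustration of the note above (the exact values are an assumption that depends on the
    // first block size, not a guarantee): storage grows in power-of-two segments, so a call such
    // as reserve(100) typically leaves capacity() at the next segment boundary, e.g. 128.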

    //! Resize the vector. Not thread-safe.
    void resize( size_type n ) {
        internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
    }

    //! Resize the vector, copy t for new elements. Not thread-safe.
    void resize( size_type n, const_reference t ) {
        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
    }

    //! Optimize memory usage and fragmentation.
    void shrink_to_fit();

    //! Upper bound on argument to reserve.
    size_type max_size() const {return (~size_type(0))/sizeof(T);}

    //------------------------------------------------------------------------
    // STL support
    //------------------------------------------------------------------------

    //! start iterator
    iterator begin() {return iterator(*this,0);}
    //! end iterator
    iterator end() {return iterator(*this,size());}
    //! start const iterator
    const_iterator begin() const {return const_iterator(*this,0);}
    //! end const iterator
    const_iterator end() const {return const_iterator(*this,size());}
    //! start const iterator
    const_iterator cbegin() const {return const_iterator(*this,0);}
    //! end const iterator
    const_iterator cend() const {return const_iterator(*this,size());}
    //! reverse start iterator
    reverse_iterator rbegin() {return reverse_iterator(end());}
    //! reverse end iterator
    reverse_iterator rend() {return reverse_iterator(begin());}
    //! reverse start const iterator
    const_reverse_iterator rbegin() const {return const_reverse_iterator(end());}
    //! reverse end const iterator
    const_reverse_iterator rend() const {return const_reverse_iterator(begin());}
    //! reverse start const iterator
    const_reverse_iterator crbegin() const {return const_reverse_iterator(end());}
    //! reverse end const iterator
    const_reverse_iterator crend() const {return const_reverse_iterator(begin());}
    //! the first item
    reference front() {
        __TBB_ASSERT( size()>0, NULL);
        const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
        return (segment_value.template pointer<T>())[0];
    }
    //! the first item const
    const_reference front() const {
        __TBB_ASSERT( size()>0, NULL);
        const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
        return (segment_value.template pointer<const T>())[0];
    }
    //! the last item
    reference back() {
        __TBB_ASSERT( size()>0, NULL);
        return internal_subscript( size()-1 );
    }
    //! the last item const
    const_reference back() const {
        __TBB_ASSERT( size()>0, NULL);
        return internal_subscript( size()-1 );
    }
    //! return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! assign n items by copying t item
    void assign(size_type n, const_reference t) {
        clear();
        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
    }

    //! assign range [first, last)
    template<class I>
    void assign(I first, I last) {
        clear(); internal_assign_range( first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
    }

#if __TBB_INITIALIZER_LISTS_PRESENT
    //! assigns an initializer list
    void assign(std::initializer_list<T> init_list) {
        clear(); internal_assign_iterators( init_list.begin(), init_list.end());
    }
#endif //# __TBB_INITIALIZER_LISTS_PRESENT

    //! swap two instances
    void swap(concurrent_vector &vector) {
        typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_swap pocs_t;
        if( this != &vector && (this->my_allocator == vector.my_allocator || pocs_t::value) ) {
            concurrent_vector_base_v3::internal_swap(static_cast<concurrent_vector_base_v3&>(vector));
            tbb::internal::allocator_swap(this->my_allocator, vector.my_allocator, pocs_t());
        }
    }

    //! Clear container while keeping memory allocated.
    /** To free up the memory, use in conjunction with method compact(). Not thread safe **/
    void clear() {
        internal_clear(&destroy_array);
    }

    //! Clear and destroy vector.
    ~concurrent_vector() {
        segment_t *table = my_segment.load<relaxed>();
        internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>() );
        // the base class destructor is called after this
    }

    const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; }
private:
    //! Allocate k items
    static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) {
        return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.allocate(k);
    }
    //! Free k segments from table
    void internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block);

    //! Get reference to element at given index.
    T& internal_subscript( size_type index ) const;

    //! Get reference to element at given index with errors checks
    T& internal_subscript_with_exceptions( size_type index ) const;

    //! assign n items by copying t
    void internal_assign_n(size_type n, const_pointer p) {
        internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(p), &destroy_array, p? &initialize_array_by : &initialize_array );
    }

    //! True/false function override helper
    /* Functions declarations:
     * void foo(is_integer_tag<true>*);
     * void foo(is_integer_tag<false>*);
     * Usage example:
     * foo(static_cast<is_integer_tag<std::numeric_limits<T>::is_integer>*>(0));
     */
    template<bool B> class is_integer_tag;

    //! assign integer items by copying when arguments are treated as iterators. See C++ Standard 2003 23.1.1p9
    template<class I>
    void internal_assign_range(I first, I last, is_integer_tag<true> *) {
        internal_assign_n(static_cast<size_type>(first), &static_cast<T&>(last));
    }
    //! inline proxy assign by iterators
    template<class I>
    void internal_assign_range(I first, I last, is_integer_tag<false> *) {
        internal_assign_iterators(first, last);
    }
    //! assign by iterators
    template<class I>
    void internal_assign_iterators(I first, I last);

    //these functions are marked __TBB_EXPORTED_FUNC as they are called from within the library

    //! Construct n instances of T, starting at "begin".
    static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n );

    //! Copy-construct n instances of T, starting at "begin".
    static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n );

    //! Copy-construct n instances of T by copying single element pointed to by src, starting at "dst".
    static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );

#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
    //! Either copy- or move-construct n instances of T, starting at "dst", from the corresponding elements of the src array.
    static void __TBB_EXPORTED_FUNC move_array_if_noexcept( void* dst, const void* src, size_type n );
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT

#if __TBB_CPP11_RVALUE_REF_PRESENT
    //! Move-construct n instances of T, starting at "dst", from the corresponding elements of the src array.
    static void __TBB_EXPORTED_FUNC move_array( void* dst, const void* src, size_type n );

    //! Move-assign (using operator=) n instances of T, starting at "dst", from the corresponding elements of the src array.
    static void __TBB_EXPORTED_FUNC move_assign_array( void* dst, const void* src, size_type n );
#endif
    //! Copy-construct n instances of T, starting at "dst", from the iterator range [p_type_erased_iterator, p_type_erased_iterator+n).
    template<typename Iterator>
    static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_type_erased_iterator, size_type n );

    //! Assign (using operator=) n instances of T, starting at "dst", from the corresponding elements of the src array.
    static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );

    //! Destroy n instances of T, starting at "begin".
    static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n );

    //! Exception-aware helper class for filling a segment with potentially-throwing operations of the user class
    class internal_loop_guide : internal::no_copy {
    public:
        const pointer array;
        const size_type n;
        size_type i;

        static const T* as_const_pointer(const void *ptr) { return static_cast<const T *>(ptr); }
        static T* as_pointer(const void *src) { return static_cast<T*>(const_cast<void *>(src)); }

        internal_loop_guide(size_type ntrials, void *ptr)
            : array(as_pointer(ptr)), n(ntrials), i(0) {}
        void init() { for(; i < n; ++i) new( &array[i] ) T(); }
        void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*as_const_pointer(src)); }
        void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(as_const_pointer(src)[i]); }
        void assign(const void *src) { for(; i < n; ++i) array[i] = as_const_pointer(src)[i]; }
#if __TBB_CPP11_RVALUE_REF_PRESENT
        void move_assign(const void *src) { for(; i < n; ++i) array[i] = std::move(as_pointer(src)[i]); }
        void move_construct(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move(as_pointer(src)[i]) ); }
#endif
#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
        void move_construct_if_noexcept(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move_if_noexcept(as_pointer(src)[i]) ); }
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT

        //TODO: rename to construct_range
        template<class I> void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); }
        ~internal_loop_guide() {
            if(i < n) {// if an exception was raised, fill the rest of items with zeros
                internal::handle_unconstructed_elements(array+i, n-i);
            }
        }
    };

    struct push_back_helper : internal::no_copy{
        struct element_construction_guard : internal::no_copy{
            pointer element;

            element_construction_guard(pointer an_element) : element (an_element){}
            void dismiss(){ element = NULL; }
            ~element_construction_guard(){
                if (element){
                    internal::handle_unconstructed_elements(element, 1);
                }
            }
        };

        concurrent_vector & v;
        size_type k;
        element_construction_guard g;

        push_back_helper(concurrent_vector & vector) :
            v(vector),
            g (static_cast<T*>(v.internal_push_back(sizeof(T),k)))
        {}

        pointer internal_push_back_result(){ return g.element;}
        iterator return_iterator_and_dismiss(){
            pointer ptr = g.element;
            g.dismiss();
            return iterator(v, k, ptr);
        }
    };
};

#if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
// Deduction guide for the constructor from two iterators
template<typename I,
         typename T = typename std::iterator_traits<I>::value_type,
         typename A = cache_aligned_allocator<T>
> concurrent_vector(I, I, const A& = A())
-> concurrent_vector<T, A>;

// Deduction guide for the constructor from a vector and allocator
template<typename T, typename A1, typename A2>
concurrent_vector(const concurrent_vector<T, A1> &, const A2 &)
-> concurrent_vector<T, A2>;

// Deduction guide for the constructor from an initializer_list
template<typename T, typename A = cache_aligned_allocator<T>
> concurrent_vector(std::initializer_list<T>, const A& = A())
-> concurrent_vector<T, A>;
#endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning (push)
#pragma warning (disable: 4701) // potentially uninitialized local variable "old"
#endif
template<typename T, class A>
void concurrent_vector<T, A>::shrink_to_fit() {
    internal_segments_table old;
    __TBB_TRY {
        internal_array_op2 copy_or_move_array =
#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
                &move_array_if_noexcept
#else
                &copy_array
#endif
        ;
        if( internal_compact( sizeof(T), &old, &destroy_array, copy_or_move_array ) )
            internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments
    } __TBB_CATCH(...) {
        if( old.first_block ) // free the segment allocated for compacting; only needed to support exceptions thrown by the constructor of the user type T
            internal_free_segments( old.table, 1, old.first_block );
        __TBB_RETHROW();
    }
}
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#pragma warning (pop)
#endif // warning 4701 is back

template<typename T, class A>
void concurrent_vector<T, A>::internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block) {
    // Free the arrays
    while( k > first_block ) {
        --k;
        segment_value_t segment_value = table[k].load<relaxed>();
        table[k].store<relaxed>(segment_not_used());
        if( segment_value == segment_allocated() ) // check for correct segment pointer
            this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(k) );
    }
    segment_value_t segment_value = table[0].load<relaxed>();
    if( segment_value == segment_allocated() ) {
        __TBB_ASSERT( first_block > 0, NULL );
        while(k > 0) table[--k].store<relaxed>(segment_not_used());
        this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(first_block) );
    }
}

template<typename T, class A>
T& concurrent_vector<T, A>::internal_subscript( size_type index ) const {
    //TODO: unify both versions of internal_subscript
    __TBB_ASSERT( index < my_early_size, "index out of bounds" );
    size_type j = index;
    segment_index_t k = segment_base_index_of( j );
    __TBB_ASSERT( my_segment.load<acquire>() != my_storage || k < pointers_per_short_table, "index is being allocated" );
    //no need for a load with acquire (load<acquire>) since the thread works in its own space or gets
    //the information about added elements via some form of external synchronization
    //TODO: why not make the load of my_segment relaxed as well?
    //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
    segment_value_t segment_value = my_segment[k].template load<relaxed>();
    __TBB_ASSERT( segment_value != segment_allocation_failed(), "the instance is broken by bad allocation. Use at() instead" );
    __TBB_ASSERT( segment_value != segment_not_used(), "index is being allocated" );
    return (( segment_value.pointer<T>()))[j];
}

template<typename T, class A>
T& concurrent_vector<T, A>::internal_subscript_with_exceptions( size_type index ) const {
    if( index >= my_early_size )
        internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range
    size_type j = index;
    segment_index_t k = segment_base_index_of( j );
    //TODO: refactor this condition into separate helper function, e.g. fits_into_small_table
    if( my_segment.load<acquire>() == my_storage && k >= pointers_per_short_table )
        internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error
    //no need for a load with acquire (load<acquire>) since the thread works in its own space or gets
    //the information about added elements via some form of external synchronization
    //TODO: why not make the load of my_segment relaxed as well?
    //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
    segment_value_t segment_value = my_segment[k].template load<relaxed>();
    enforce_segment_allocated(segment_value, internal::eid_index_range_error);
    return (segment_value.pointer<T>())[j];
}

template<typename T, class A> template<class I>
void concurrent_vector<T, A>::internal_assign_iterators(I first, I last) {
    __TBB_ASSERT(my_early_size == 0, NULL);
    size_type n = std::distance(first, last);
    if( !n ) return;
    internal_reserve(n, sizeof(T), max_size());
    my_early_size = n;
    segment_index_t k = 0;
    //TODO: unify segment iteration code with concurrent_base_v3::helper
    size_type sz = segment_size( my_first_block );
    while( sz < n ) {
        internal_loop_guide loop(sz, my_segment[k].template load<relaxed>().template pointer<void>());
        loop.iterate(first);
        n -= sz;
        if( !k ) k = my_first_block;
        else { ++k; sz <<= 1; }
    }
    internal_loop_guide loop(n, my_segment[k].template load<relaxed>().template pointer<void>());
    loop.iterate(first);
}

template<typename T, class A>
void concurrent_vector<T, A>::initialize_array( void* begin, const void *, size_type n ) {
    internal_loop_guide loop(n, begin); loop.init();
}

template<typename T, class A>
void concurrent_vector<T, A>::initialize_array_by( void* begin, const void *src, size_type n ) {
    internal_loop_guide loop(n, begin); loop.init(src);
}

template<typename T, class A>
void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.copy(src);
}

#if __TBB_CPP11_RVALUE_REF_PRESENT
template<typename T, class A>
void concurrent_vector<T, A>::move_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_construct(src);
}
template<typename T, class A>
void concurrent_vector<T, A>::move_assign_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_assign(src);
}
#endif

#if __TBB_MOVE_IF_NOEXCEPT_PRESENT
template<typename T, class A>
void concurrent_vector<T, A>::move_array_if_noexcept( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.move_construct_if_noexcept(src);
}
#endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT

template<typename T, class A>
template<typename I>
void concurrent_vector<T, A>::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){
    internal_loop_guide loop(n, dst);
    loop.iterate( *(static_cast<I*>(const_cast<void*>(p_type_erased_iterator))) );
}

template<typename T, class A>
void concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {
    internal_loop_guide loop(n, dst); loop.assign(src);
}

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warning
    #pragma warning (push)
    #pragma warning (disable: 4189)
#endif
template<typename T, class A>
void concurrent_vector<T, A>::destroy_array( void* begin, size_type n ) {
    T* array = static_cast<T*>(begin);
    for( size_type j=n; j>0; --j )
        array[j-1].~T(); // destructors are supposed to not throw any exceptions
}
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    #pragma warning (pop)
#endif // warning 4189 is back

// concurrent_vector's template functions
template<typename T, class A1, class A2>
inline bool operator==(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b) {
    //TODO: call size() only once per vector (in operator==)
    // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
    if(a.size() != b.size()) return false;
    typename concurrent_vector<T, A1>::const_iterator i(a.begin());
    typename concurrent_vector<T, A2>::const_iterator j(b.begin());
    for(; i != a.end(); ++i, ++j)
        if( !(*i == *j) ) return false;
    return true;
}

template<typename T, class A1, class A2>
inline bool operator!=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{    return !(a == b); }

template<typename T, class A1, class A2>
inline bool operator<(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{    return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }

template<typename T, class A1, class A2>
inline bool operator>(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{    return b < a; }

template<typename T, class A1, class A2>
inline bool operator<=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{    return !(b < a); }

template<typename T, class A1, class A2>
inline bool operator>=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
{    return !(a < b); }

template<typename T, class A>
inline void swap(concurrent_vector<T, A> &a, concurrent_vector<T, A> &b)
{    a.swap( b ); }

} // namespace tbb

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    #pragma warning (pop)
#endif // warning 4267,4127 are back

#endif /* __TBB_concurrent_vector_H */